Dataset columns (for id, min/max values; for string columns, min/max lengths):

column            type           min   max
----------------  -------------  ----  ------
id                int32          0     252k
repo              stringlengths  7     55
path              stringlengths  4     127
func_name         stringlengths  1     88
original_string   stringlengths  75    19.8k
language          stringclasses  1 value
code              stringlengths  75    19.8k
code_tokens       list
docstring         stringlengths  3     17.3k
docstring_tokens  list
sha               stringlengths  40    40
url               stringlengths  87    242
19,800
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.astype
def astype(self, dtype, copy=True):
    """
    Coerce this type to another dtype

    Parameters
    ----------
    dtype : numpy dtype or pandas type
    copy : bool, default True
        By default, astype always returns a newly allocated object.
        If copy is set to False and dtype is categorical, the original
        object is returned.

        .. versionadded:: 0.19.0

    """
    if is_categorical_dtype(dtype):
        # GH 10696/18593
        dtype = self.dtype.update_dtype(dtype)
        self = self.copy() if copy else self
        if dtype == self.dtype:
            return self
        return self._set_dtype(dtype)
    return np.array(self, dtype=dtype, copy=copy)
python
def astype(self, dtype, copy=True):
    """
    Coerce this type to another dtype

    Parameters
    ----------
    dtype : numpy dtype or pandas type
    copy : bool, default True
        By default, astype always returns a newly allocated object.
        If copy is set to False and dtype is categorical, the original
        object is returned.

        .. versionadded:: 0.19.0

    """
    if is_categorical_dtype(dtype):
        # GH 10696/18593
        dtype = self.dtype.update_dtype(dtype)
        self = self.copy() if copy else self
        if dtype == self.dtype:
            return self
        return self._set_dtype(dtype)
    return np.array(self, dtype=dtype, copy=copy)
[ "def", "astype", "(", "self", ",", "dtype", ",", "copy", "=", "True", ")", ":", "if", "is_categorical_dtype", "(", "dtype", ")", ":", "# GH 10696/18593", "dtype", "=", "self", ".", "dtype", ".", "update_dtype", "(", "dtype", ")", "self", "=", "self", ".", "copy", "(", ")", "if", "copy", "else", "self", "if", "dtype", "==", "self", ".", "dtype", ":", "return", "self", "return", "self", ".", "_set_dtype", "(", "dtype", ")", "return", "np", ".", "array", "(", "self", ",", "dtype", "=", "dtype", ",", "copy", "=", "copy", ")" ]
Coerce this type to another dtype

Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
    By default, astype always returns a newly allocated object.
    If copy is set to False and dtype is categorical, the original
    object is returned.

    .. versionadded:: 0.19.0
[ "Coerce", "this", "type", "to", "another", "dtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L463-L485
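A usage sketch of the two branches above (a NumPy target dtype versus an equal categorical dtype with copy=False), assuming a pandas build contemporary with this commit (the 0.24 era):

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'])

# Non-categorical target: falls through to np.array(self, dtype=dtype).
arr = cat.astype(object)
print(type(arr), arr)          # <class 'numpy.ndarray'> ['a' 'b' 'a']

# Equal categorical dtype with copy=False: the original object comes back.
same = cat.astype(cat.dtype, copy=False)
print(same is cat)             # True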
19,801
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._from_inferred_categories
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
                              dtype, true_values=None):
    """
    Construct a Categorical from inferred values.

    For inferred categories (`dtype` is None) the categories are sorted.
    For explicit `dtype`, the `inferred_categories` are cast to the
    appropriate type.

    Parameters
    ----------
    inferred_categories : Index
    inferred_codes : Index
    dtype : CategoricalDtype or 'category'
    true_values : list, optional
        If none are provided, the default ones are "True", "TRUE", and
        "true."

    Returns
    -------
    Categorical
    """
    from pandas import Index, to_numeric, to_datetime, to_timedelta

    cats = Index(inferred_categories)
    known_categories = (isinstance(dtype, CategoricalDtype) and
                        dtype.categories is not None)

    if known_categories:
        # Convert to a specialized type with `dtype` if specified.
        if dtype.categories.is_numeric():
            cats = to_numeric(inferred_categories, errors="coerce")
        elif is_datetime64_dtype(dtype.categories):
            cats = to_datetime(inferred_categories, errors="coerce")
        elif is_timedelta64_dtype(dtype.categories):
            cats = to_timedelta(inferred_categories, errors="coerce")
        elif dtype.categories.is_boolean():
            if true_values is None:
                true_values = ["True", "TRUE", "true"]

            cats = cats.isin(true_values)

    if known_categories:
        # Recode from observation order to dtype.categories order.
        categories = dtype.categories
        codes = _recode_for_categories(inferred_codes, cats, categories)
    elif not cats.is_monotonic_increasing:
        # Sort categories and recode for unknown categories.
        unsorted = cats.copy()
        categories = cats.sort_values()

        codes = _recode_for_categories(inferred_codes, unsorted, categories)
        dtype = CategoricalDtype(categories, ordered=False)
    else:
        dtype = CategoricalDtype(cats, ordered=False)
        codes = inferred_codes

    return cls(codes, dtype=dtype, fastpath=True)
python
def _from_inferred_categories(cls, inferred_categories, inferred_codes,
                              dtype, true_values=None):
    """
    Construct a Categorical from inferred values.

    For inferred categories (`dtype` is None) the categories are sorted.
    For explicit `dtype`, the `inferred_categories` are cast to the
    appropriate type.

    Parameters
    ----------
    inferred_categories : Index
    inferred_codes : Index
    dtype : CategoricalDtype or 'category'
    true_values : list, optional
        If none are provided, the default ones are "True", "TRUE", and
        "true."

    Returns
    -------
    Categorical
    """
    from pandas import Index, to_numeric, to_datetime, to_timedelta

    cats = Index(inferred_categories)
    known_categories = (isinstance(dtype, CategoricalDtype) and
                        dtype.categories is not None)

    if known_categories:
        # Convert to a specialized type with `dtype` if specified.
        if dtype.categories.is_numeric():
            cats = to_numeric(inferred_categories, errors="coerce")
        elif is_datetime64_dtype(dtype.categories):
            cats = to_datetime(inferred_categories, errors="coerce")
        elif is_timedelta64_dtype(dtype.categories):
            cats = to_timedelta(inferred_categories, errors="coerce")
        elif dtype.categories.is_boolean():
            if true_values is None:
                true_values = ["True", "TRUE", "true"]

            cats = cats.isin(true_values)

    if known_categories:
        # Recode from observation order to dtype.categories order.
        categories = dtype.categories
        codes = _recode_for_categories(inferred_codes, cats, categories)
    elif not cats.is_monotonic_increasing:
        # Sort categories and recode for unknown categories.
        unsorted = cats.copy()
        categories = cats.sort_values()

        codes = _recode_for_categories(inferred_codes, unsorted, categories)
        dtype = CategoricalDtype(categories, ordered=False)
    else:
        dtype = CategoricalDtype(cats, ordered=False)
        codes = inferred_codes

    return cls(codes, dtype=dtype, fastpath=True)
[ "def", "_from_inferred_categories", "(", "cls", ",", "inferred_categories", ",", "inferred_codes", ",", "dtype", ",", "true_values", "=", "None", ")", ":", "from", "pandas", "import", "Index", ",", "to_numeric", ",", "to_datetime", ",", "to_timedelta", "cats", "=", "Index", "(", "inferred_categories", ")", "known_categories", "=", "(", "isinstance", "(", "dtype", ",", "CategoricalDtype", ")", "and", "dtype", ".", "categories", "is", "not", "None", ")", "if", "known_categories", ":", "# Convert to a specialized type with `dtype` if specified.", "if", "dtype", ".", "categories", ".", "is_numeric", "(", ")", ":", "cats", "=", "to_numeric", "(", "inferred_categories", ",", "errors", "=", "\"coerce\"", ")", "elif", "is_datetime64_dtype", "(", "dtype", ".", "categories", ")", ":", "cats", "=", "to_datetime", "(", "inferred_categories", ",", "errors", "=", "\"coerce\"", ")", "elif", "is_timedelta64_dtype", "(", "dtype", ".", "categories", ")", ":", "cats", "=", "to_timedelta", "(", "inferred_categories", ",", "errors", "=", "\"coerce\"", ")", "elif", "dtype", ".", "categories", ".", "is_boolean", "(", ")", ":", "if", "true_values", "is", "None", ":", "true_values", "=", "[", "\"True\"", ",", "\"TRUE\"", ",", "\"true\"", "]", "cats", "=", "cats", ".", "isin", "(", "true_values", ")", "if", "known_categories", ":", "# Recode from observation order to dtype.categories order.", "categories", "=", "dtype", ".", "categories", "codes", "=", "_recode_for_categories", "(", "inferred_codes", ",", "cats", ",", "categories", ")", "elif", "not", "cats", ".", "is_monotonic_increasing", ":", "# Sort categories and recode for unknown categories.", "unsorted", "=", "cats", ".", "copy", "(", ")", "categories", "=", "cats", ".", "sort_values", "(", ")", "codes", "=", "_recode_for_categories", "(", "inferred_codes", ",", "unsorted", ",", "categories", ")", "dtype", "=", "CategoricalDtype", "(", "categories", ",", "ordered", "=", "False", ")", "else", ":", "dtype", "=", "CategoricalDtype", "(", "cats", ",", "ordered", "=", "False", ")", "codes", "=", "inferred_codes", "return", "cls", "(", "codes", ",", "dtype", "=", "dtype", ",", "fastpath", "=", "True", ")" ]
Construct a Categorical from inferred values.

For inferred categories (`dtype` is None) the categories are sorted.
For explicit `dtype`, the `inferred_categories` are cast to the
appropriate type.

Parameters
----------
inferred_categories : Index
inferred_codes : Index
dtype : CategoricalDtype or 'category'
true_values : list, optional
    If none are provided, the default ones are "True", "TRUE", and
    "true."

Returns
-------
Categorical
[ "Construct", "a", "Categorical", "from", "inferred", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L528-L586
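A heavily hedged sketch of calling the private helper directly; it is normally invoked by pandas' CSV parser when a column is read as 'category', and passing an ndarray (rather than an Index) for `inferred_codes` is an assumption about what this era tolerated:

import numpy as np
import pandas as pd

inferred_cats = pd.Index(['b', 'a'])    # categories in observation order
inferred_codes = np.array([0, 1, 0])    # -> 'b', 'a', 'b'

# With dtype=None the categories are sorted and the codes recoded.
cat = pd.Categorical._from_inferred_categories(inferred_cats,
                                               inferred_codes, dtype=None)
print(list(cat))         # ['b', 'a', 'b']
print(cat.categories)    # Index(['a', 'b'], dtype='object')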
19,802
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.from_codes
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
    """
    Make a Categorical type from codes and categories or dtype.

    This constructor is useful if you already have codes and
    categories/dtype and so do not need the (computationally intensive)
    factorization step, which is usually done in the constructor.

    If your data does not follow this convention, please use the normal
    constructor.

    Parameters
    ----------
    codes : array-like, integers
        An integer array, where each integer points to a category in
        categories or dtype.categories, or else is -1 for NaN.
    categories : index-like, optional
        The categories for the categorical. Items need to be unique.
        If the categories are not given here, then they must be provided
        in `dtype`.
    ordered : bool, optional
        Whether or not this categorical is treated as an ordered
        categorical. If not given here or in `dtype`, the resulting
        categorical will be unordered.
    dtype : CategoricalDtype or the string "category", optional
        If :class:`CategoricalDtype`, cannot be used together with
        `categories` or `ordered`.

        .. versionadded:: 0.24.0

           When `dtype` is provided, neither `categories` nor `ordered`
           should be provided.

    Examples
    --------
    >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
    >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
    [a, b, a, b]
    Categories (2, object): [a < b]
    """
    dtype = CategoricalDtype._from_values_or_dtype(categories=categories,
                                                   ordered=ordered,
                                                   dtype=dtype)
    if dtype.categories is None:
        msg = ("The categories must be provided in 'categories' or "
               "'dtype'. Both were None.")
        raise ValueError(msg)

    codes = np.asarray(codes)  # #21767
    if not is_integer_dtype(codes):
        msg = "codes need to be array-like integers"
        if is_float_dtype(codes):
            icodes = codes.astype('i8')
            if (icodes == codes).all():
                msg = None
                codes = icodes
                warn(("float codes will be disallowed in the future and "
                      "raise a ValueError"), FutureWarning, stacklevel=2)
        if msg:
            raise ValueError(msg)

    if len(codes) and (
            codes.max() >= len(dtype.categories) or codes.min() < -1):
        raise ValueError("codes need to be between -1 and "
                         "len(categories)-1")

    return cls(codes, dtype=dtype, fastpath=True)
python
def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
    """
    Make a Categorical type from codes and categories or dtype.

    This constructor is useful if you already have codes and
    categories/dtype and so do not need the (computationally intensive)
    factorization step, which is usually done in the constructor.

    If your data does not follow this convention, please use the normal
    constructor.

    Parameters
    ----------
    codes : array-like, integers
        An integer array, where each integer points to a category in
        categories or dtype.categories, or else is -1 for NaN.
    categories : index-like, optional
        The categories for the categorical. Items need to be unique.
        If the categories are not given here, then they must be provided
        in `dtype`.
    ordered : bool, optional
        Whether or not this categorical is treated as an ordered
        categorical. If not given here or in `dtype`, the resulting
        categorical will be unordered.
    dtype : CategoricalDtype or the string "category", optional
        If :class:`CategoricalDtype`, cannot be used together with
        `categories` or `ordered`.

        .. versionadded:: 0.24.0

           When `dtype` is provided, neither `categories` nor `ordered`
           should be provided.

    Examples
    --------
    >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
    >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
    [a, b, a, b]
    Categories (2, object): [a < b]
    """
    dtype = CategoricalDtype._from_values_or_dtype(categories=categories,
                                                   ordered=ordered,
                                                   dtype=dtype)
    if dtype.categories is None:
        msg = ("The categories must be provided in 'categories' or "
               "'dtype'. Both were None.")
        raise ValueError(msg)

    codes = np.asarray(codes)  # #21767
    if not is_integer_dtype(codes):
        msg = "codes need to be array-like integers"
        if is_float_dtype(codes):
            icodes = codes.astype('i8')
            if (icodes == codes).all():
                msg = None
                codes = icodes
                warn(("float codes will be disallowed in the future and "
                      "raise a ValueError"), FutureWarning, stacklevel=2)
        if msg:
            raise ValueError(msg)

    if len(codes) and (
            codes.max() >= len(dtype.categories) or codes.min() < -1):
        raise ValueError("codes need to be between -1 and "
                         "len(categories)-1")

    return cls(codes, dtype=dtype, fastpath=True)
[ "def", "from_codes", "(", "cls", ",", "codes", ",", "categories", "=", "None", ",", "ordered", "=", "None", ",", "dtype", "=", "None", ")", ":", "dtype", "=", "CategoricalDtype", ".", "_from_values_or_dtype", "(", "categories", "=", "categories", ",", "ordered", "=", "ordered", ",", "dtype", "=", "dtype", ")", "if", "dtype", ".", "categories", "is", "None", ":", "msg", "=", "(", "\"The categories must be provided in 'categories' or \"", "\"'dtype'. Both were None.\"", ")", "raise", "ValueError", "(", "msg", ")", "codes", "=", "np", ".", "asarray", "(", "codes", ")", "# #21767", "if", "not", "is_integer_dtype", "(", "codes", ")", ":", "msg", "=", "\"codes need to be array-like integers\"", "if", "is_float_dtype", "(", "codes", ")", ":", "icodes", "=", "codes", ".", "astype", "(", "'i8'", ")", "if", "(", "icodes", "==", "codes", ")", ".", "all", "(", ")", ":", "msg", "=", "None", "codes", "=", "icodes", "warn", "(", "(", "\"float codes will be disallowed in the future and \"", "\"raise a ValueError\"", ")", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "if", "msg", ":", "raise", "ValueError", "(", "msg", ")", "if", "len", "(", "codes", ")", "and", "(", "codes", ".", "max", "(", ")", ">=", "len", "(", "dtype", ".", "categories", ")", "or", "codes", ".", "min", "(", ")", "<", "-", "1", ")", ":", "raise", "ValueError", "(", "\"codes need to be between -1 and \"", "\"len(categories)-1\"", ")", "return", "cls", "(", "codes", ",", "dtype", "=", "dtype", ",", "fastpath", "=", "True", ")" ]
Make a Categorical type from codes and categories or dtype.

This constructor is useful if you already have codes and
categories/dtype and so do not need the (computationally intensive)
factorization step, which is usually done in the constructor.

If your data does not follow this convention, please use the normal
constructor.

Parameters
----------
codes : array-like, integers
    An integer array, where each integer points to a category in
    categories or dtype.categories, or else is -1 for NaN.
categories : index-like, optional
    The categories for the categorical. Items need to be unique.
    If the categories are not given here, then they must be provided
    in `dtype`.
ordered : bool, optional
    Whether or not this categorical is treated as an ordered
    categorical. If not given here or in `dtype`, the resulting
    categorical will be unordered.
dtype : CategoricalDtype or the string "category", optional
    If :class:`CategoricalDtype`, cannot be used together with
    `categories` or `ordered`.

    .. versionadded:: 0.24.0

       When `dtype` is provided, neither `categories` nor `ordered`
       should be provided.

Examples
--------
>>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)
>>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)
[a, b, a, b]
Categories (2, object): [a < b]
[ "Make", "a", "Categorical", "type", "from", "codes", "and", "categories", "or", "dtype", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L589-L655
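A minimal sketch of the validation described above, under the same pandas-era assumption:

import pandas as pd

# Codes index into the category list; -1 is the NaN sentinel.
cat = pd.Categorical.from_codes([0, 1, -1, 0], categories=['a', 'b'])
print(list(cat))    # ['a', 'b', nan, 'a']

# Out-of-range codes are rejected up front:
# pd.Categorical.from_codes([0, 2], categories=['a', 'b'])   # ValueError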
19,803
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._get_codes
def _get_codes(self):
    """
    Get the codes.

    Returns
    -------
    codes : integer array view
        A non writable view of the `codes` array.
    """
    v = self._codes.view()
    v.flags.writeable = False
    return v
python
def _get_codes(self):
    """
    Get the codes.

    Returns
    -------
    codes : integer array view
        A non writable view of the `codes` array.
    """
    v = self._codes.view()
    v.flags.writeable = False
    return v
[ "def", "_get_codes", "(", "self", ")", ":", "v", "=", "self", ".", "_codes", ".", "view", "(", ")", "v", ".", "flags", ".", "writeable", "=", "False", "return", "v" ]
Get the codes.

Returns
-------
codes : integer array view
    A non writable view of the `codes` array.
[ "Get", "the", "codes", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L659-L670
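The public `codes` property appears to be backed by this getter in this version of the file (an assumption from the surrounding source); the read-only view it returns can be observed directly:

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'])
codes = cat.codes
print(codes, codes.flags.writeable)   # [0 1 0] False
try:
    codes[0] = 1                      # NumPy rejects writes to the view
except ValueError as err:
    print(err)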
19,804
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._set_categories
def _set_categories(self, categories, fastpath=False):
    """
    Sets new categories inplace

    Parameters
    ----------
    fastpath : bool, default False
       Don't perform validation of the categories for uniqueness or nulls

    Examples
    --------
    >>> c = pd.Categorical(['a', 'b'])
    >>> c
    [a, b]
    Categories (2, object): [a, b]

    >>> c._set_categories(pd.Index(['a', 'c']))
    >>> c
    [a, c]
    Categories (2, object): [a, c]
    """
    if fastpath:
        new_dtype = CategoricalDtype._from_fastpath(categories,
                                                    self.ordered)
    else:
        new_dtype = CategoricalDtype(categories, ordered=self.ordered)
    if (not fastpath and self.dtype.categories is not None and
            len(new_dtype.categories) != len(self.dtype.categories)):
        raise ValueError("new categories need to have the same number of "
                         "items than the old categories!")

    self._dtype = new_dtype
python
def _set_categories(self, categories, fastpath=False):
    """
    Sets new categories inplace

    Parameters
    ----------
    fastpath : bool, default False
       Don't perform validation of the categories for uniqueness or nulls

    Examples
    --------
    >>> c = pd.Categorical(['a', 'b'])
    >>> c
    [a, b]
    Categories (2, object): [a, b]

    >>> c._set_categories(pd.Index(['a', 'c']))
    >>> c
    [a, c]
    Categories (2, object): [a, c]
    """
    if fastpath:
        new_dtype = CategoricalDtype._from_fastpath(categories,
                                                    self.ordered)
    else:
        new_dtype = CategoricalDtype(categories, ordered=self.ordered)
    if (not fastpath and self.dtype.categories is not None and
            len(new_dtype.categories) != len(self.dtype.categories)):
        raise ValueError("new categories need to have the same number of "
                         "items than the old categories!")

    self._dtype = new_dtype
[ "def", "_set_categories", "(", "self", ",", "categories", ",", "fastpath", "=", "False", ")", ":", "if", "fastpath", ":", "new_dtype", "=", "CategoricalDtype", ".", "_from_fastpath", "(", "categories", ",", "self", ".", "ordered", ")", "else", ":", "new_dtype", "=", "CategoricalDtype", "(", "categories", ",", "ordered", "=", "self", ".", "ordered", ")", "if", "(", "not", "fastpath", "and", "self", ".", "dtype", ".", "categories", "is", "not", "None", "and", "len", "(", "new_dtype", ".", "categories", ")", "!=", "len", "(", "self", ".", "dtype", ".", "categories", ")", ")", ":", "raise", "ValueError", "(", "\"new categories need to have the same number of \"", "\"items than the old categories!\"", ")", "self", ".", "_dtype", "=", "new_dtype" ]
Sets new categories inplace

Parameters
----------
fastpath : bool, default False
   Don't perform validation of the categories for uniqueness or nulls

Examples
--------
>>> c = pd.Categorical(['a', 'b'])
>>> c
[a, b]
Categories (2, object): [a, b]

>>> c._set_categories(pd.Index(['a', 'c']))
>>> c
[a, c]
Categories (2, object): [a, c]
[ "Sets", "new", "categories", "inplace" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L680-L712
19,805
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._set_dtype
def _set_dtype(self, dtype):
    """
    Internal method for directly updating the CategoricalDtype

    Parameters
    ----------
    dtype : CategoricalDtype

    Notes
    -----
    We don't do any validation here. It's assumed that the dtype is
    a (valid) instance of `CategoricalDtype`.
    """
    codes = _recode_for_categories(self.codes, self.categories,
                                   dtype.categories)
    return type(self)(codes, dtype=dtype, fastpath=True)
python
def _set_dtype(self, dtype):
    """
    Internal method for directly updating the CategoricalDtype

    Parameters
    ----------
    dtype : CategoricalDtype

    Notes
    -----
    We don't do any validation here. It's assumed that the dtype is
    a (valid) instance of `CategoricalDtype`.
    """
    codes = _recode_for_categories(self.codes, self.categories,
                                   dtype.categories)
    return type(self)(codes, dtype=dtype, fastpath=True)
[ "def", "_set_dtype", "(", "self", ",", "dtype", ")", ":", "codes", "=", "_recode_for_categories", "(", "self", ".", "codes", ",", "self", ".", "categories", ",", "dtype", ".", "categories", ")", "return", "type", "(", "self", ")", "(", "codes", ",", "dtype", "=", "dtype", ",", "fastpath", "=", "True", ")" ]
Internal method for directly updating the CategoricalDtype

Parameters
----------
dtype : CategoricalDtype

Notes
-----
We don't do any validation here. It's assumed that the dtype is
a (valid) instance of `CategoricalDtype`.
[ "Internal", "method", "for", "directly", "updating", "the", "CategoricalDtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L714-L729
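A sketch of the recoding contract, calling the private method directly with a dtype that reorders the categories (private API, so subject to change):

import pandas as pd
from pandas.api.types import CategoricalDtype

cat = pd.Categorical(['a', 'b', 'a'])               # codes [0, 1, 0]
new = cat._set_dtype(CategoricalDtype(['b', 'a']))
print(list(new))    # ['a', 'b', 'a'] -- same values
print(new.codes)    # [1 0 1]        -- recoded for the new category order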
19,806
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.set_ordered
def set_ordered(self, value, inplace=False):
    """
    Set the ordered attribute to the boolean value.

    Parameters
    ----------
    value : bool
       Set whether this categorical is ordered (True) or not (False).
    inplace : bool, default False
       Whether or not to set the ordered attribute in-place or return
       a copy of this categorical with ordered set to the value.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    new_dtype = CategoricalDtype(self.categories, ordered=value)
    cat = self if inplace else self.copy()
    cat._dtype = new_dtype
    if not inplace:
        return cat
python
def set_ordered(self, value, inplace=False):
    """
    Set the ordered attribute to the boolean value.

    Parameters
    ----------
    value : bool
       Set whether this categorical is ordered (True) or not (False).
    inplace : bool, default False
       Whether or not to set the ordered attribute in-place or return
       a copy of this categorical with ordered set to the value.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    new_dtype = CategoricalDtype(self.categories, ordered=value)
    cat = self if inplace else self.copy()
    cat._dtype = new_dtype
    if not inplace:
        return cat
[ "def", "set_ordered", "(", "self", ",", "value", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "new_dtype", "=", "CategoricalDtype", "(", "self", ".", "categories", ",", "ordered", "=", "value", ")", "cat", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "cat", ".", "_dtype", "=", "new_dtype", "if", "not", "inplace", ":", "return", "cat" ]
Set the ordered attribute to the boolean value.

Parameters
----------
value : bool
   Set whether this categorical is ordered (True) or not (False).
inplace : bool, default False
   Whether or not to set the ordered attribute in-place or return
   a copy of this categorical with ordered set to the value.
[ "Set", "the", "ordered", "attribute", "to", "the", "boolean", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L731-L748
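A short sketch of the copy-versus-inplace contract above (note that `inplace` was deprecated and removed in modern pandas):

import pandas as pd

cat = pd.Categorical(['a', 'b'])
ordered = cat.set_ordered(True)          # returns an ordered copy
print(ordered.ordered, cat.ordered)      # True False

out = cat.set_ordered(True, inplace=True)
print(out, cat.ordered)                  # None True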
19,807
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.as_ordered
def as_ordered(self, inplace=False):
    """
    Set the Categorical to be ordered.

    Parameters
    ----------
    inplace : bool, default False
       Whether or not to set the ordered attribute in-place or return
       a copy of this categorical with ordered set to True.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    return self.set_ordered(True, inplace=inplace)
python
def as_ordered(self, inplace=False):
    """
    Set the Categorical to be ordered.

    Parameters
    ----------
    inplace : bool, default False
       Whether or not to set the ordered attribute in-place or return
       a copy of this categorical with ordered set to True.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    return self.set_ordered(True, inplace=inplace)
[ "def", "as_ordered", "(", "self", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "return", "self", ".", "set_ordered", "(", "True", ",", "inplace", "=", "inplace", ")" ]
Set the Categorical to be ordered.

Parameters
----------
inplace : bool, default False
   Whether or not to set the ordered attribute in-place or return
   a copy of this categorical with ordered set to True.
[ "Set", "the", "Categorical", "to", "be", "ordered", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L750-L761
19,808
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.as_unordered
def as_unordered(self, inplace=False):
    """
    Set the Categorical to be unordered.

    Parameters
    ----------
    inplace : bool, default False
       Whether or not to set the ordered attribute in-place or return
       a copy of this categorical with ordered set to False.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    return self.set_ordered(False, inplace=inplace)
python
def as_unordered(self, inplace=False):
    """
    Set the Categorical to be unordered.

    Parameters
    ----------
    inplace : bool, default False
       Whether or not to set the ordered attribute in-place or return
       a copy of this categorical with ordered set to False.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    return self.set_ordered(False, inplace=inplace)
[ "def", "as_unordered", "(", "self", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "return", "self", ".", "set_ordered", "(", "False", ",", "inplace", "=", "inplace", ")" ]
Set the Categorical to be unordered.

Parameters
----------
inplace : bool, default False
   Whether or not to set the ordered attribute in-place or return
   a copy of this categorical with ordered set to False.
[ "Set", "the", "Categorical", "to", "be", "unordered", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L763-L774
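Both wrappers above (as_ordered and as_unordered) delegate to set_ordered; a one-line check of each, under the same pandas-era assumption:

import pandas as pd

cat = pd.Categorical(['a', 'b'])
print(cat.as_ordered().ordered)                   # True
print(cat.as_ordered().as_unordered().ordered)    # False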
19,809
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.set_categories
def set_categories(self, new_categories, ordered=None, rename=False,
                   inplace=False):
    """
    Set the categories to the specified new_categories.

    `new_categories` can include new categories (which will result in
    unused categories) or remove old categories (which results in values
    set to NaN). If `rename==True`, the categories will simply be renamed
    (fewer or more items than in the old categories will result in values
    set to NaN or in unused categories, respectively).

    This method can be used to perform more than one action of adding,
    removing, and reordering simultaneously and is therefore faster than
    performing the individual steps via the more specialised methods.

    On the other hand this method does not do checks (e.g., whether the
    old categories are included in the new categories on a reorder),
    which can result in surprising changes, for example when using special
    string dtypes on python3, which does not consider an S1 string equal
    to a single char python string.

    Parameters
    ----------
    new_categories : Index-like
       The categories in new order.
    ordered : bool, optional
       Whether or not the categorical is treated as an ordered categorical.
       If not given, do not change the ordered information.
    rename : bool, default False
       Whether or not the new_categories should be considered as a rename
       of the old categories or as reordered categories.
    inplace : bool, default False
       Whether or not to reorder the categories in-place or return a copy
       of this categorical with reordered categories.

    Returns
    -------
    Categorical with reordered categories or None if inplace.

    Raises
    ------
    ValueError
        If new_categories does not validate as categories

    See Also
    --------
    rename_categories
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if ordered is None:
        ordered = self.dtype.ordered
    new_dtype = CategoricalDtype(new_categories, ordered=ordered)

    cat = self if inplace else self.copy()
    if rename:
        if (cat.dtype.categories is not None and
                len(new_dtype.categories) < len(cat.dtype.categories)):
            # remove all _codes which are larger and set to -1/NaN
            cat._codes[cat._codes >= len(new_dtype.categories)] = -1
    else:
        codes = _recode_for_categories(cat.codes, cat.categories,
                                       new_dtype.categories)
        cat._codes = codes
    cat._dtype = new_dtype

    if not inplace:
        return cat
python
def set_categories(self, new_categories, ordered=None, rename=False,
                   inplace=False):
    """
    Set the categories to the specified new_categories.

    `new_categories` can include new categories (which will result in
    unused categories) or remove old categories (which results in values
    set to NaN). If `rename==True`, the categories will simply be renamed
    (fewer or more items than in the old categories will result in values
    set to NaN or in unused categories, respectively).

    This method can be used to perform more than one action of adding,
    removing, and reordering simultaneously and is therefore faster than
    performing the individual steps via the more specialised methods.

    On the other hand this method does not do checks (e.g., whether the
    old categories are included in the new categories on a reorder),
    which can result in surprising changes, for example when using special
    string dtypes on python3, which does not consider an S1 string equal
    to a single char python string.

    Parameters
    ----------
    new_categories : Index-like
       The categories in new order.
    ordered : bool, optional
       Whether or not the categorical is treated as an ordered categorical.
       If not given, do not change the ordered information.
    rename : bool, default False
       Whether or not the new_categories should be considered as a rename
       of the old categories or as reordered categories.
    inplace : bool, default False
       Whether or not to reorder the categories in-place or return a copy
       of this categorical with reordered categories.

    Returns
    -------
    Categorical with reordered categories or None if inplace.

    Raises
    ------
    ValueError
        If new_categories does not validate as categories

    See Also
    --------
    rename_categories
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if ordered is None:
        ordered = self.dtype.ordered
    new_dtype = CategoricalDtype(new_categories, ordered=ordered)

    cat = self if inplace else self.copy()
    if rename:
        if (cat.dtype.categories is not None and
                len(new_dtype.categories) < len(cat.dtype.categories)):
            # remove all _codes which are larger and set to -1/NaN
            cat._codes[cat._codes >= len(new_dtype.categories)] = -1
    else:
        codes = _recode_for_categories(cat.codes, cat.categories,
                                       new_dtype.categories)
        cat._codes = codes
    cat._dtype = new_dtype

    if not inplace:
        return cat
[ "def", "set_categories", "(", "self", ",", "new_categories", ",", "ordered", "=", "None", ",", "rename", "=", "False", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "ordered", "is", "None", ":", "ordered", "=", "self", ".", "dtype", ".", "ordered", "new_dtype", "=", "CategoricalDtype", "(", "new_categories", ",", "ordered", "=", "ordered", ")", "cat", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "if", "rename", ":", "if", "(", "cat", ".", "dtype", ".", "categories", "is", "not", "None", "and", "len", "(", "new_dtype", ".", "categories", ")", "<", "len", "(", "cat", ".", "dtype", ".", "categories", ")", ")", ":", "# remove all _codes which are larger and set to -1/NaN", "cat", ".", "_codes", "[", "cat", ".", "_codes", ">=", "len", "(", "new_dtype", ".", "categories", ")", "]", "=", "-", "1", "else", ":", "codes", "=", "_recode_for_categories", "(", "cat", ".", "codes", ",", "cat", ".", "categories", ",", "new_dtype", ".", "categories", ")", "cat", ".", "_codes", "=", "codes", "cat", ".", "_dtype", "=", "new_dtype", "if", "not", "inplace", ":", "return", "cat" ]
Set the categories to the specified new_categories.

`new_categories` can include new categories (which will result in
unused categories) or remove old categories (which results in values
set to NaN). If `rename==True`, the categories will simply be renamed
(fewer or more items than in the old categories will result in values
set to NaN or in unused categories, respectively).

This method can be used to perform more than one action of adding,
removing, and reordering simultaneously and is therefore faster than
performing the individual steps via the more specialised methods.

On the other hand this method does not do checks (e.g., whether the
old categories are included in the new categories on a reorder),
which can result in surprising changes, for example when using special
string dtypes on python3, which does not consider an S1 string equal
to a single char python string.

Parameters
----------
new_categories : Index-like
   The categories in new order.
ordered : bool, optional
   Whether or not the categorical is treated as an ordered categorical.
   If not given, do not change the ordered information.
rename : bool, default False
   Whether or not the new_categories should be considered as a rename
   of the old categories or as reordered categories.
inplace : bool, default False
   Whether or not to reorder the categories in-place or return a copy
   of this categorical with reordered categories.

Returns
-------
Categorical with reordered categories or None if inplace.

Raises
------
ValueError
    If new_categories does not validate as categories

See Also
--------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
[ "Set", "the", "categories", "to", "the", "specified", "new_categories", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L776-L846
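A sketch contrasting the two code paths above, recoding (the default) versus positional relabeling (rename=True), same pandas-era assumption:

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b'])

# Default: values are recoded against the new list; 'c' ends up unused.
print(list(cat.set_categories(['b', 'a', 'c'])))       # ['a', 'b', 'a']

# rename=True keeps the codes and relabels positionally; shrinking the
# list turns now-invalid codes into -1 (NaN).
print(list(cat.set_categories(['x'], rename=True)))    # ['x', nan, 'x']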
19,810
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.rename_categories
def rename_categories(self, new_categories, inplace=False):
    """
    Rename categories.

    Parameters
    ----------
    new_categories : list-like, dict-like or callable

       * list-like: all items must be unique and the number of items in
         the new categories must match the existing number of categories.

       * dict-like: specifies a mapping from old categories to new.
         Categories not contained in the mapping are passed through and
         extra categories in the mapping are ignored.

         .. versionadded:: 0.21.0

       * callable : a callable that is called on all items in the old
         categories and whose return values comprise the new categories.

         .. versionadded:: 0.23.0

       .. warning::

          Currently, Series are considered list-like. In a future version
          of pandas they'll be considered dict-like.

    inplace : bool, default False
       Whether or not to rename the categories inplace or return a copy of
       this categorical with renamed categories.

    Returns
    -------
    cat : Categorical or None
       With ``inplace=False``, the new categorical is returned.
       With ``inplace=True``, there is no return value.

    Raises
    ------
    ValueError
        If new categories are list-like and do not have the same number of
        items as the current categories or do not validate as categories

    See Also
    --------
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories

    Examples
    --------
    >>> c = pd.Categorical(['a', 'a', 'b'])
    >>> c.rename_categories([0, 1])
    [0, 0, 1]
    Categories (2, int64): [0, 1]

    For dict-like ``new_categories``, extra keys are ignored and
    categories not in the dictionary are passed through

    >>> c.rename_categories({'a': 'A', 'c': 'C'})
    [A, A, b]
    Categories (2, object): [A, b]

    You may also provide a callable to create the new categories

    >>> c.rename_categories(lambda x: x.upper())
    [A, A, B]
    Categories (2, object): [A, B]
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    cat = self if inplace else self.copy()

    if isinstance(new_categories, ABCSeries):
        msg = ("Treating Series 'new_categories' as a list-like and using "
               "the values. In a future version, 'rename_categories' will "
               "treat Series like a dictionary.\n"
               "For dict-like, use 'new_categories.to_dict()'\n"
               "For list-like, use 'new_categories.values'.")
        warn(msg, FutureWarning, stacklevel=2)
        new_categories = list(new_categories)

    if is_dict_like(new_categories):
        cat.categories = [new_categories.get(item, item)
                          for item in cat.categories]
    elif callable(new_categories):
        cat.categories = [new_categories(item) for item in cat.categories]
    else:
        cat.categories = new_categories
    if not inplace:
        return cat
python
def rename_categories(self, new_categories, inplace=False):
    """
    Rename categories.

    Parameters
    ----------
    new_categories : list-like, dict-like or callable

       * list-like: all items must be unique and the number of items in
         the new categories must match the existing number of categories.

       * dict-like: specifies a mapping from old categories to new.
         Categories not contained in the mapping are passed through and
         extra categories in the mapping are ignored.

         .. versionadded:: 0.21.0

       * callable : a callable that is called on all items in the old
         categories and whose return values comprise the new categories.

         .. versionadded:: 0.23.0

       .. warning::

          Currently, Series are considered list-like. In a future version
          of pandas they'll be considered dict-like.

    inplace : bool, default False
       Whether or not to rename the categories inplace or return a copy of
       this categorical with renamed categories.

    Returns
    -------
    cat : Categorical or None
       With ``inplace=False``, the new categorical is returned.
       With ``inplace=True``, there is no return value.

    Raises
    ------
    ValueError
        If new categories are list-like and do not have the same number of
        items as the current categories or do not validate as categories

    See Also
    --------
    reorder_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories

    Examples
    --------
    >>> c = pd.Categorical(['a', 'a', 'b'])
    >>> c.rename_categories([0, 1])
    [0, 0, 1]
    Categories (2, int64): [0, 1]

    For dict-like ``new_categories``, extra keys are ignored and
    categories not in the dictionary are passed through

    >>> c.rename_categories({'a': 'A', 'c': 'C'})
    [A, A, b]
    Categories (2, object): [A, b]

    You may also provide a callable to create the new categories

    >>> c.rename_categories(lambda x: x.upper())
    [A, A, B]
    Categories (2, object): [A, B]
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    cat = self if inplace else self.copy()

    if isinstance(new_categories, ABCSeries):
        msg = ("Treating Series 'new_categories' as a list-like and using "
               "the values. In a future version, 'rename_categories' will "
               "treat Series like a dictionary.\n"
               "For dict-like, use 'new_categories.to_dict()'\n"
               "For list-like, use 'new_categories.values'.")
        warn(msg, FutureWarning, stacklevel=2)
        new_categories = list(new_categories)

    if is_dict_like(new_categories):
        cat.categories = [new_categories.get(item, item)
                          for item in cat.categories]
    elif callable(new_categories):
        cat.categories = [new_categories(item) for item in cat.categories]
    else:
        cat.categories = new_categories
    if not inplace:
        return cat
[ "def", "rename_categories", "(", "self", ",", "new_categories", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "cat", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "if", "isinstance", "(", "new_categories", ",", "ABCSeries", ")", ":", "msg", "=", "(", "\"Treating Series 'new_categories' as a list-like and using \"", "\"the values. In a future version, 'rename_categories' will \"", "\"treat Series like a dictionary.\\n\"", "\"For dict-like, use 'new_categories.to_dict()'\\n\"", "\"For list-like, use 'new_categories.values'.\"", ")", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "new_categories", "=", "list", "(", "new_categories", ")", "if", "is_dict_like", "(", "new_categories", ")", ":", "cat", ".", "categories", "=", "[", "new_categories", ".", "get", "(", "item", ",", "item", ")", "for", "item", "in", "cat", ".", "categories", "]", "elif", "callable", "(", "new_categories", ")", ":", "cat", ".", "categories", "=", "[", "new_categories", "(", "item", ")", "for", "item", "in", "cat", ".", "categories", "]", "else", ":", "cat", ".", "categories", "=", "new_categories", "if", "not", "inplace", ":", "return", "cat" ]
Rename categories.

Parameters
----------
new_categories : list-like, dict-like or callable

   * list-like: all items must be unique and the number of items in
     the new categories must match the existing number of categories.

   * dict-like: specifies a mapping from old categories to new.
     Categories not contained in the mapping are passed through and
     extra categories in the mapping are ignored.

     .. versionadded:: 0.21.0

   * callable : a callable that is called on all items in the old
     categories and whose return values comprise the new categories.

     .. versionadded:: 0.23.0

   .. warning::

      Currently, Series are considered list-like. In a future version
      of pandas they'll be considered dict-like.

inplace : bool, default False
   Whether or not to rename the categories inplace or return a copy of
   this categorical with renamed categories.

Returns
-------
cat : Categorical or None
   With ``inplace=False``, the new categorical is returned.
   With ``inplace=True``, there is no return value.

Raises
------
ValueError
    If new categories are list-like and do not have the same number of
    items as the current categories or do not validate as categories

See Also
--------
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories

Examples
--------
>>> c = pd.Categorical(['a', 'a', 'b'])
>>> c.rename_categories([0, 1])
[0, 0, 1]
Categories (2, int64): [0, 1]

For dict-like ``new_categories``, extra keys are ignored and
categories not in the dictionary are passed through

>>> c.rename_categories({'a': 'A', 'c': 'C'})
[A, A, b]
Categories (2, object): [A, b]

You may also provide a callable to create the new categories

>>> c.rename_categories(lambda x: x.upper())
[A, A, B]
Categories (2, object): [A, B]
[ "Rename", "categories", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L848-L940
19,811
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.reorder_categories
def reorder_categories(self, new_categories, ordered=None, inplace=False):
    """
    Reorder categories as specified in new_categories.

    `new_categories` need to include all old categories and no new
    category items.

    Parameters
    ----------
    new_categories : Index-like
       The categories in new order.
    ordered : bool, optional
       Whether or not the categorical is treated as an ordered
       categorical. If not given, do not change the ordered information.
    inplace : bool, default False
       Whether or not to reorder the categories inplace or return a copy
       of this categorical with reordered categories.

    Returns
    -------
    cat : Categorical with reordered categories or None if inplace.

    Raises
    ------
    ValueError
        If the new categories do not contain all old category items or
        any new ones

    See Also
    --------
    rename_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if set(self.dtype.categories) != set(new_categories):
        raise ValueError("items in new_categories are not the same as in "
                         "old categories")
    return self.set_categories(new_categories, ordered=ordered,
                               inplace=inplace)
python
def reorder_categories(self, new_categories, ordered=None, inplace=False):
    """
    Reorder categories as specified in new_categories.

    `new_categories` need to include all old categories and no new
    category items.

    Parameters
    ----------
    new_categories : Index-like
       The categories in new order.
    ordered : bool, optional
       Whether or not the categorical is treated as an ordered
       categorical. If not given, do not change the ordered information.
    inplace : bool, default False
       Whether or not to reorder the categories inplace or return a copy
       of this categorical with reordered categories.

    Returns
    -------
    cat : Categorical with reordered categories or None if inplace.

    Raises
    ------
    ValueError
        If the new categories do not contain all old category items or
        any new ones

    See Also
    --------
    rename_categories
    add_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if set(self.dtype.categories) != set(new_categories):
        raise ValueError("items in new_categories are not the same as in "
                         "old categories")
    return self.set_categories(new_categories, ordered=ordered,
                               inplace=inplace)
[ "def", "reorder_categories", "(", "self", ",", "new_categories", ",", "ordered", "=", "None", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "set", "(", "self", ".", "dtype", ".", "categories", ")", "!=", "set", "(", "new_categories", ")", ":", "raise", "ValueError", "(", "\"items in new_categories are not the same as in \"", "\"old categories\"", ")", "return", "self", ".", "set_categories", "(", "new_categories", ",", "ordered", "=", "ordered", ",", "inplace", "=", "inplace", ")" ]
Reorder categories as specified in new_categories.

`new_categories` need to include all old categories and no new
category items.

Parameters
----------
new_categories : Index-like
   The categories in new order.
ordered : bool, optional
   Whether or not the categorical is treated as an ordered
   categorical. If not given, do not change the ordered information.
inplace : bool, default False
   Whether or not to reorder the categories inplace or return a copy
   of this categorical with reordered categories.

Returns
-------
cat : Categorical with reordered categories or None if inplace.

Raises
------
ValueError
    If the new categories do not contain all old category items or
    any new ones

See Also
--------
rename_categories
add_categories
remove_categories
remove_unused_categories
set_categories
[ "Reorder", "categories", "as", "specified", "in", "new_categories", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L942-L983
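A minimal sketch of the same-set check and the reordering above:

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'])
new = cat.reorder_categories(['b', 'a'], ordered=True)
print(new.categories)    # Index(['b', 'a'], dtype='object')
print(new.ordered)       # True

# A different category *set* is rejected:
# cat.reorder_categories(['a', 'c'])   # ValueError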
19,812
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.add_categories
def add_categories(self, new_categories, inplace=False):
    """
    Add new categories.

    `new_categories` will be included at the last/highest place in the
    categories and will be unused directly after this call.

    Parameters
    ----------
    new_categories : category or list-like of category
       The new categories to be included.
    inplace : bool, default False
       Whether or not to add the categories inplace or return a copy of
       this categorical with added categories.

    Returns
    -------
    cat : Categorical with new categories added or None if inplace.

    Raises
    ------
    ValueError
        If the new categories include old categories or do not validate as
        categories

    See Also
    --------
    rename_categories
    reorder_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if not is_list_like(new_categories):
        new_categories = [new_categories]
    already_included = set(new_categories) & set(self.dtype.categories)
    if len(already_included) != 0:
        msg = ("new categories must not include old categories: "
               "{already_included!s}")
        raise ValueError(msg.format(already_included=already_included))
    new_categories = list(self.dtype.categories) + list(new_categories)
    new_dtype = CategoricalDtype(new_categories, self.ordered)

    cat = self if inplace else self.copy()
    cat._dtype = new_dtype
    cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
    if not inplace:
        return cat
python
def add_categories(self, new_categories, inplace=False):
    """
    Add new categories.

    `new_categories` will be included at the last/highest place in the
    categories and will be unused directly after this call.

    Parameters
    ----------
    new_categories : category or list-like of category
       The new categories to be included.
    inplace : bool, default False
       Whether or not to add the categories inplace or return a copy of
       this categorical with added categories.

    Returns
    -------
    cat : Categorical with new categories added or None if inplace.

    Raises
    ------
    ValueError
        If the new categories include old categories or do not validate as
        categories

    See Also
    --------
    rename_categories
    reorder_categories
    remove_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if not is_list_like(new_categories):
        new_categories = [new_categories]
    already_included = set(new_categories) & set(self.dtype.categories)
    if len(already_included) != 0:
        msg = ("new categories must not include old categories: "
               "{already_included!s}")
        raise ValueError(msg.format(already_included=already_included))
    new_categories = list(self.dtype.categories) + list(new_categories)
    new_dtype = CategoricalDtype(new_categories, self.ordered)

    cat = self if inplace else self.copy()
    cat._dtype = new_dtype
    cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
    if not inplace:
        return cat
[ "def", "add_categories", "(", "self", ",", "new_categories", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "not", "is_list_like", "(", "new_categories", ")", ":", "new_categories", "=", "[", "new_categories", "]", "already_included", "=", "set", "(", "new_categories", ")", "&", "set", "(", "self", ".", "dtype", ".", "categories", ")", "if", "len", "(", "already_included", ")", "!=", "0", ":", "msg", "=", "(", "\"new categories must not include old categories: \"", "\"{already_included!s}\"", ")", "raise", "ValueError", "(", "msg", ".", "format", "(", "already_included", "=", "already_included", ")", ")", "new_categories", "=", "list", "(", "self", ".", "dtype", ".", "categories", ")", "+", "list", "(", "new_categories", ")", "new_dtype", "=", "CategoricalDtype", "(", "new_categories", ",", "self", ".", "ordered", ")", "cat", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "cat", ".", "_dtype", "=", "new_dtype", "cat", ".", "_codes", "=", "coerce_indexer_dtype", "(", "cat", ".", "_codes", ",", "new_dtype", ".", "categories", ")", "if", "not", "inplace", ":", "return", "cat" ]
Add new categories.

`new_categories` will be included at the last/highest place in the
categories and will be unused directly after this call.

Parameters
----------
new_categories : category or list-like of category
   The new categories to be included.
inplace : bool, default False
   Whether or not to add the categories inplace or return a copy of
   this categorical with added categories.

Returns
-------
cat : Categorical with new categories added or None if inplace.

Raises
------
ValueError
    If the new categories include old categories or do not validate as
    categories

See Also
--------
rename_categories
reorder_categories
remove_categories
remove_unused_categories
set_categories
[ "Add", "new", "categories", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L985-L1033
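A short sketch of the append-at-the-end behavior above, same assumptions:

import pandas as pd

cat = pd.Categorical(['a', 'b'])
ext = cat.add_categories('c')    # a scalar is wrapped into a list
print(ext.categories)            # Index(['a', 'b', 'c'], dtype='object')
print(list(ext))                 # ['a', 'b'] -- 'c' is unused for now

# Re-adding an existing category raises:
# cat.add_categories('a')        # ValueError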
19,813
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.remove_categories
def remove_categories(self, removals, inplace=False):
    """
    Remove the specified categories.

    `removals` must be included in the old categories. Values which were in
    the removed categories will be set to NaN

    Parameters
    ----------
    removals : category or list of categories
       The categories which should be removed.
    inplace : bool, default False
       Whether or not to remove the categories inplace or return a copy of
       this categorical with removed categories.

    Returns
    -------
    cat : Categorical with removed categories or None if inplace.

    Raises
    ------
    ValueError
        If the removals are not contained in the categories

    See Also
    --------
    rename_categories
    reorder_categories
    add_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if not is_list_like(removals):
        removals = [removals]

    removal_set = set(list(removals))
    not_included = removal_set - set(self.dtype.categories)
    new_categories = [c for c in self.dtype.categories
                      if c not in removal_set]

    # GH 10156
    if any(isna(removals)):
        not_included = [x for x in not_included if notna(x)]
        new_categories = [x for x in new_categories if notna(x)]

    if len(not_included) != 0:
        msg = "removals must all be in old categories: {not_included!s}"
        raise ValueError(msg.format(not_included=not_included))

    return self.set_categories(new_categories, ordered=self.ordered,
                               rename=False, inplace=inplace)
python
def remove_categories(self, removals, inplace=False):
    """
    Remove the specified categories.

    `removals` must be included in the old categories. Values which were in
    the removed categories will be set to NaN

    Parameters
    ----------
    removals : category or list of categories
       The categories which should be removed.
    inplace : bool, default False
       Whether or not to remove the categories inplace or return a copy of
       this categorical with removed categories.

    Returns
    -------
    cat : Categorical with removed categories or None if inplace.

    Raises
    ------
    ValueError
        If the removals are not contained in the categories

    See Also
    --------
    rename_categories
    reorder_categories
    add_categories
    remove_unused_categories
    set_categories
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if not is_list_like(removals):
        removals = [removals]

    removal_set = set(list(removals))
    not_included = removal_set - set(self.dtype.categories)
    new_categories = [c for c in self.dtype.categories
                      if c not in removal_set]

    # GH 10156
    if any(isna(removals)):
        not_included = [x for x in not_included if notna(x)]
        new_categories = [x for x in new_categories if notna(x)]

    if len(not_included) != 0:
        msg = "removals must all be in old categories: {not_included!s}"
        raise ValueError(msg.format(not_included=not_included))

    return self.set_categories(new_categories, ordered=self.ordered,
                               rename=False, inplace=inplace)
[ "def", "remove_categories", "(", "self", ",", "removals", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "not", "is_list_like", "(", "removals", ")", ":", "removals", "=", "[", "removals", "]", "removal_set", "=", "set", "(", "list", "(", "removals", ")", ")", "not_included", "=", "removal_set", "-", "set", "(", "self", ".", "dtype", ".", "categories", ")", "new_categories", "=", "[", "c", "for", "c", "in", "self", ".", "dtype", ".", "categories", "if", "c", "not", "in", "removal_set", "]", "# GH 10156", "if", "any", "(", "isna", "(", "removals", ")", ")", ":", "not_included", "=", "[", "x", "for", "x", "in", "not_included", "if", "notna", "(", "x", ")", "]", "new_categories", "=", "[", "x", "for", "x", "in", "new_categories", "if", "notna", "(", "x", ")", "]", "if", "len", "(", "not_included", ")", "!=", "0", ":", "msg", "=", "\"removals must all be in old categories: {not_included!s}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "not_included", "=", "not_included", ")", ")", "return", "self", ".", "set_categories", "(", "new_categories", ",", "ordered", "=", "self", ".", "ordered", ",", "rename", "=", "False", ",", "inplace", "=", "inplace", ")" ]
Remove the specified categories.

`removals` must be included in the old categories. Values which were in
the removed categories will be set to NaN

Parameters
----------
removals : category or list of categories
   The categories which should be removed.
inplace : bool, default False
   Whether or not to remove the categories inplace or return a copy of
   this categorical with removed categories.

Returns
-------
cat : Categorical with removed categories or None if inplace.

Raises
------
ValueError
    If the removals are not contained in the categories

See Also
--------
rename_categories
reorder_categories
add_categories
remove_unused_categories
set_categories
[ "Remove", "the", "specified", "categories", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1035-L1086
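A minimal usage sketch for `Categorical.remove_categories` as documented in the record above, assuming the pandas API at the recorded sha; the commented outputs are illustrative.

import pandas as pd

cat = pd.Categorical(['a', 'b', 'c', 'a'], categories=['a', 'b', 'c'])
trimmed = cat.remove_categories(['b'])
# trimmed: [a, NaN, c, a]; Categories (2, object): [a, c]
# cat.remove_categories(['d']) would raise ValueError: 'd' is not an old category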
19,814
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.remove_unused_categories
def remove_unused_categories(self, inplace=False): """ Remove categories which are not used. Parameters ---------- inplace : bool, default False Whether or not to drop unused categories inplace or return a copy of this categorical with unused categories dropped. Returns ------- cat : Categorical with unused categories dropped or None if inplace. See Also -------- rename_categories reorder_categories add_categories remove_categories set_categories """ inplace = validate_bool_kwarg(inplace, 'inplace') cat = self if inplace else self.copy() idx, inv = np.unique(cat._codes, return_inverse=True) if idx.size != 0 and idx[0] == -1: # na sentinel idx, inv = idx[1:], inv - 1 new_categories = cat.dtype.categories.take(idx) new_dtype = CategoricalDtype._from_fastpath(new_categories, ordered=self.ordered) cat._dtype = new_dtype cat._codes = coerce_indexer_dtype(inv, new_dtype.categories) if not inplace: return cat
python
def remove_unused_categories(self, inplace=False): """ Remove categories which are not used. Parameters ---------- inplace : bool, default False Whether or not to drop unused categories inplace or return a copy of this categorical with unused categories dropped. Returns ------- cat : Categorical with unused categories dropped or None if inplace. See Also -------- rename_categories reorder_categories add_categories remove_categories set_categories """ inplace = validate_bool_kwarg(inplace, 'inplace') cat = self if inplace else self.copy() idx, inv = np.unique(cat._codes, return_inverse=True) if idx.size != 0 and idx[0] == -1: # na sentinel idx, inv = idx[1:], inv - 1 new_categories = cat.dtype.categories.take(idx) new_dtype = CategoricalDtype._from_fastpath(new_categories, ordered=self.ordered) cat._dtype = new_dtype cat._codes = coerce_indexer_dtype(inv, new_dtype.categories) if not inplace: return cat
[ "def", "remove_unused_categories", "(", "self", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "cat", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "idx", ",", "inv", "=", "np", ".", "unique", "(", "cat", ".", "_codes", ",", "return_inverse", "=", "True", ")", "if", "idx", ".", "size", "!=", "0", "and", "idx", "[", "0", "]", "==", "-", "1", ":", "# na sentinel", "idx", ",", "inv", "=", "idx", "[", "1", ":", "]", ",", "inv", "-", "1", "new_categories", "=", "cat", ".", "dtype", ".", "categories", ".", "take", "(", "idx", ")", "new_dtype", "=", "CategoricalDtype", ".", "_from_fastpath", "(", "new_categories", ",", "ordered", "=", "self", ".", "ordered", ")", "cat", ".", "_dtype", "=", "new_dtype", "cat", ".", "_codes", "=", "coerce_indexer_dtype", "(", "inv", ",", "new_dtype", ".", "categories", ")", "if", "not", "inplace", ":", "return", "cat" ]
Remove categories which are not used. Parameters ---------- inplace : bool, default False Whether or not to drop unused categories inplace or return a copy of this categorical with unused categories dropped. Returns ------- cat : Categorical with unused categories dropped or None if inplace. See Also -------- rename_categories reorder_categories add_categories remove_categories set_categories
[ "Remove", "categories", "which", "are", "not", "used", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1088-L1124
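A short sketch of `remove_unused_categories`, under the same assumptions: unused categories are dropped and the codes are recomputed.

import pandas as pd

cat = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c', 'd'])
slim = cat.remove_unused_categories()
# slim.categories: Index(['a', 'b'], dtype='object')
cat.remove_unused_categories(inplace=True)  # mutates cat in place and returns None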
19,815
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.shift
def shift(self, periods, fill_value=None): """ Shift Categorical by desired number of periods. Parameters ---------- periods : int Number of periods to move, can be positive or negative fill_value : object, optional The scalar value to use for newly introduced missing values. .. versionadded:: 0.24.0 Returns ------- shifted : Categorical """ # since categoricals always have ndim == 1, an axis parameter # doesn't make any sense here. codes = self.codes if codes.ndim > 1: raise NotImplementedError("Categorical with ndim > 1.") if np.prod(codes.shape) and (periods != 0): codes = np.roll(codes, ensure_platform_int(periods), axis=0) if isna(fill_value): fill_value = -1 elif fill_value in self.categories: fill_value = self.categories.get_loc(fill_value) else: raise ValueError("'fill_value={}' is not present " "in this Categorical's " "categories".format(fill_value)) if periods > 0: codes[:periods] = fill_value else: codes[periods:] = fill_value return self.from_codes(codes, dtype=self.dtype)
python
def shift(self, periods, fill_value=None): """ Shift Categorical by desired number of periods. Parameters ---------- periods : int Number of periods to move, can be positive or negative fill_value : object, optional The scalar value to use for newly introduced missing values. .. versionadded:: 0.24.0 Returns ------- shifted : Categorical """ # since categoricals always have ndim == 1, an axis parameter # doesn't make any sense here. codes = self.codes if codes.ndim > 1: raise NotImplementedError("Categorical with ndim > 1.") if np.prod(codes.shape) and (periods != 0): codes = np.roll(codes, ensure_platform_int(periods), axis=0) if isna(fill_value): fill_value = -1 elif fill_value in self.categories: fill_value = self.categories.get_loc(fill_value) else: raise ValueError("'fill_value={}' is not present " "in this Categorical's " "categories".format(fill_value)) if periods > 0: codes[:periods] = fill_value else: codes[periods:] = fill_value return self.from_codes(codes, dtype=self.dtype)
[ "def", "shift", "(", "self", ",", "periods", ",", "fill_value", "=", "None", ")", ":", "# since categoricals always have ndim == 1, an axis parameter", "# doesn't make any sense here.", "codes", "=", "self", ".", "codes", "if", "codes", ".", "ndim", ">", "1", ":", "raise", "NotImplementedError", "(", "\"Categorical with ndim > 1.\"", ")", "if", "np", ".", "prod", "(", "codes", ".", "shape", ")", "and", "(", "periods", "!=", "0", ")", ":", "codes", "=", "np", ".", "roll", "(", "codes", ",", "ensure_platform_int", "(", "periods", ")", ",", "axis", "=", "0", ")", "if", "isna", "(", "fill_value", ")", ":", "fill_value", "=", "-", "1", "elif", "fill_value", "in", "self", ".", "categories", ":", "fill_value", "=", "self", ".", "categories", ".", "get_loc", "(", "fill_value", ")", "else", ":", "raise", "ValueError", "(", "\"'fill_value={}' is not present \"", "\"in this Categorical's \"", "\"categories\"", ".", "format", "(", "fill_value", ")", ")", "if", "periods", ">", "0", ":", "codes", "[", ":", "periods", "]", "=", "fill_value", "else", ":", "codes", "[", "periods", ":", "]", "=", "fill_value", "return", "self", ".", "from_codes", "(", "codes", ",", "dtype", "=", "self", ".", "dtype", ")" ]
Shift Categorical by desired number of periods. Parameters ---------- periods : int Number of periods to move, can be positive or negative fill_value : object, optional The scalar value to use for newly introduced missing values. .. versionadded:: 0.24.0 Returns ------- shifted : Categorical
[ "Shift", "Categorical", "by", "desired", "number", "of", "periods", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1230-L1267
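A hedged sketch of `shift`: vacated positions are filled with NaN unless `fill_value` names an existing category (the `fill_value` parameter exists from 0.24.0 per the versionadded note above).

import pandas as pd

cat = pd.Categorical(['a', 'b', 'c'])
cat.shift(1)                   # [NaN, a, b]
cat.shift(-1, fill_value='a')  # [b, c, a]
# cat.shift(1, fill_value='z') would raise ValueError: 'z' is not a category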
19,816
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.memory_usage
def memory_usage(self, deep=False): """ Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes """ return self._codes.nbytes + self.dtype.categories.memory_usage( deep=deep)
python
def memory_usage(self, deep=False): """ Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes """ return self._codes.nbytes + self.dtype.categories.memory_usage( deep=deep)
[ "def", "memory_usage", "(", "self", ",", "deep", "=", "False", ")", ":", "return", "self", ".", "_codes", ".", "nbytes", "+", "self", ".", "dtype", ".", "categories", ".", "memory_usage", "(", "deep", "=", "deep", ")" ]
Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes
[ "Memory", "usage", "of", "my", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1331-L1355
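A quick illustration of `memory_usage`: `deep=True` additionally introspects object-dtype categories, so it is never smaller than the shallow figure.

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'] * 1000)
shallow = cat.memory_usage()        # codes bytes + categories index bytes
deep = cat.memory_usage(deep=True)  # also counts the Python string objects
assert deep >= shallow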
19,817
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.value_counts
def value_counts(self, dropna=True): """ Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : bool, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts """ from numpy import bincount from pandas import Series, CategoricalIndex code, cat = self._codes, self.categories ncat, mask = len(cat), 0 <= code ix, clean = np.arange(ncat), mask.all() if dropna or clean: obs = code if clean else code[mask] count = bincount(obs, minlength=ncat or None) else: count = bincount(np.where(mask, code, ncat)) ix = np.append(ix, -1) ix = self._constructor(ix, dtype=self.dtype, fastpath=True) return Series(count, index=CategoricalIndex(ix), dtype='int64')
python
def value_counts(self, dropna=True): """ Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : bool, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts """ from numpy import bincount from pandas import Series, CategoricalIndex code, cat = self._codes, self.categories ncat, mask = len(cat), 0 <= code ix, clean = np.arange(ncat), mask.all() if dropna or clean: obs = code if clean else code[mask] count = bincount(obs, minlength=ncat or None) else: count = bincount(np.where(mask, code, ncat)) ix = np.append(ix, -1) ix = self._constructor(ix, dtype=self.dtype, fastpath=True) return Series(count, index=CategoricalIndex(ix), dtype='int64')
[ "def", "value_counts", "(", "self", ",", "dropna", "=", "True", ")", ":", "from", "numpy", "import", "bincount", "from", "pandas", "import", "Series", ",", "CategoricalIndex", "code", ",", "cat", "=", "self", ".", "_codes", ",", "self", ".", "categories", "ncat", ",", "mask", "=", "len", "(", "cat", ")", ",", "0", "<=", "code", "ix", ",", "clean", "=", "np", ".", "arange", "(", "ncat", ")", ",", "mask", ".", "all", "(", ")", "if", "dropna", "or", "clean", ":", "obs", "=", "code", "if", "clean", "else", "code", "[", "mask", "]", "count", "=", "bincount", "(", "obs", ",", "minlength", "=", "ncat", "or", "None", ")", "else", ":", "count", "=", "bincount", "(", "np", ".", "where", "(", "mask", ",", "code", ",", "ncat", ")", ")", "ix", "=", "np", ".", "append", "(", "ix", ",", "-", "1", ")", "ix", "=", "self", ".", "_constructor", "(", "ix", ",", "dtype", "=", "self", ".", "dtype", ",", "fastpath", "=", "True", ")", "return", "Series", "(", "count", ",", "index", "=", "CategoricalIndex", "(", "ix", ")", ",", "dtype", "=", "'int64'", ")" ]
Return a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : bool, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts
[ "Return", "a", "Series", "containing", "counts", "of", "each", "category", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1438-L1475
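A sketch of `value_counts` showing that every category gets a row, even with a zero count, and that `dropna` controls the NaN row.

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'b', 'a', np.nan], categories=['a', 'b', 'c'])
cat.value_counts()              # a: 2, b: 1, c: 0 (NaN excluded)
cat.value_counts(dropna=False)  # adds a NaN row with count 1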
19,818
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.get_values
def get_values(self): """ Return the values. For internal compatibility with pandas formatting. Returns ------- numpy.array A numpy array of the same dtype as categorical.categories.dtype or Index if datetime / periods. """ # if we are a datetime and period index, return Index to keep metadata if is_datetimelike(self.categories): return self.categories.take(self._codes, fill_value=np.nan) elif is_integer_dtype(self.categories) and -1 in self._codes: return self.categories.astype("object").take(self._codes, fill_value=np.nan) return np.array(self)
python
def get_values(self): """ Return the values. For internal compatibility with pandas formatting. Returns ------- numpy.array A numpy array of the same dtype as categorical.categories.dtype or Index if datetime / periods. """ # if we are a datetime and period index, return Index to keep metadata if is_datetimelike(self.categories): return self.categories.take(self._codes, fill_value=np.nan) elif is_integer_dtype(self.categories) and -1 in self._codes: return self.categories.astype("object").take(self._codes, fill_value=np.nan) return np.array(self)
[ "def", "get_values", "(", "self", ")", ":", "# if we are a datetime and period index, return Index to keep metadata", "if", "is_datetimelike", "(", "self", ".", "categories", ")", ":", "return", "self", ".", "categories", ".", "take", "(", "self", ".", "_codes", ",", "fill_value", "=", "np", ".", "nan", ")", "elif", "is_integer_dtype", "(", "self", ".", "categories", ")", "and", "-", "1", "in", "self", ".", "_codes", ":", "return", "self", ".", "categories", ".", "astype", "(", "\"object\"", ")", ".", "take", "(", "self", ".", "_codes", ",", "fill_value", "=", "np", ".", "nan", ")", "return", "np", ".", "array", "(", "self", ")" ]
Return the values. For internal compatibility with pandas formatting. Returns ------- numpy.array A numpy array of the same dtype as categorical.categories.dtype or Index if datetime / periods.
[ "Return", "the", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1477-L1495
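An illustrative call to `get_values`: with integer categories and a missing code, values come back as objects with NaN standing in for the missing entry (the exact return type follows the docstring above).

import numpy as np
import pandas as pd

cat = pd.Categorical([1, 2, np.nan], categories=[1, 2])
vals = cat.get_values()
# vals holds [1, 2, nan]; the integer categories are upcast to object to fit NaN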
19,819
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.sort_values
def sort_values(self, inplace=False, ascending=True, na_position='last'): """ Sort the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : bool, default False Do operation in place. ascending : bool, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] Inplace sorting can be done as well: >>> c.sort_values(inplace=True) >>> c [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2.0, 2.0, NaN, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values() [2.0, 2.0, 5.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5.0, 2.0, 2.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2.0, 2.0, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5.0, 2.0, 2.0] Categories (2, int64): [2, 5] """ inplace = validate_bool_kwarg(inplace, 'inplace') if na_position not in ['last', 'first']: msg = 'invalid na_position: {na_position!r}' raise ValueError(msg.format(na_position=na_position)) sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) if inplace: self._codes = self._codes[sorted_idx] else: return self._constructor(values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True)
python
def sort_values(self, inplace=False, ascending=True, na_position='last'): """ Sort the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : bool, default False Do operation in place. ascending : bool, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] Inplace sorting can be done as well: >>> c.sort_values(inplace=True) >>> c [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2.0, 2.0, NaN, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values() [2.0, 2.0, 5.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5.0, 2.0, 2.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2.0, 2.0, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5.0, 2.0, 2.0] Categories (2, int64): [2, 5] """ inplace = validate_bool_kwarg(inplace, 'inplace') if na_position not in ['last', 'first']: msg = 'invalid na_position: {na_position!r}' raise ValueError(msg.format(na_position=na_position)) sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) if inplace: self._codes = self._codes[sorted_idx] else: return self._constructor(values=self._codes[sorted_idx], dtype=self.dtype, fastpath=True)
[ "def", "sort_values", "(", "self", ",", "inplace", "=", "False", ",", "ascending", "=", "True", ",", "na_position", "=", "'last'", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "na_position", "not", "in", "[", "'last'", ",", "'first'", "]", ":", "msg", "=", "'invalid na_position: {na_position!r}'", "raise", "ValueError", "(", "msg", ".", "format", "(", "na_position", "=", "na_position", ")", ")", "sorted_idx", "=", "nargsort", "(", "self", ",", "ascending", "=", "ascending", ",", "na_position", "=", "na_position", ")", "if", "inplace", ":", "self", ".", "_codes", "=", "self", ".", "_codes", "[", "sorted_idx", "]", "else", ":", "return", "self", ".", "_constructor", "(", "values", "=", "self", ".", "_codes", "[", "sorted_idx", "]", ",", "dtype", "=", "self", ".", "dtype", ",", "fastpath", "=", "True", ")" ]
Sort the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : bool, default False Do operation in place. ascending : bool, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] Inplace sorting can be done as well: >>> c.sort_values(inplace=True) >>> c [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2.0, 2.0, NaN, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values() [2.0, 2.0, 5.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5.0, 2.0, 2.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2.0, 2.0, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5.0, 2.0, 2.0] Categories (2, int64): [2, 5]
[ "Sort", "the", "Categorical", "by", "category", "value", "returning", "a", "new", "Categorical", "by", "default", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1554-L1642
19,820
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.take_nd
def take_nd(self, indexer, allow_fill=None, fill_value=None): """ Take elements from the Categorical. Parameters ---------- indexer : sequence of int The indices in `self` to take. The meaning of negative values in `indexer` depends on the value of `allow_fill`. allow_fill : bool, default None How to handle negative values in `indexer`. * False: negative values in `indices` indicate positional indices from the right. This is similar to :func:`numpy.take`. * True: negative values in `indices` indicate missing values (the default). These values are set to `fill_value`. Any other negative values raise a ``ValueError``. .. versionchanged:: 0.23.0 Deprecated the default value of `allow_fill`. The deprecated default is ``True``. In the future, this will change to ``False``. fill_value : object The value to use for `indices` that are missing (-1), when ``allow_fill=True``. This should be the category, i.e. a value in ``self.categories``, not a code. Returns ------- Categorical This Categorical will have the same categories and ordered as `self`. See Also -------- Series.take : Similar method for Series. numpy.ndarray.take : Similar method for NumPy arrays. Examples -------- >>> cat = pd.Categorical(['a', 'a', 'b']) >>> cat [a, a, b] Categories (2, object): [a, b] Specify ``allow_fill==False`` to have negative indices mean indexing from the right. >>> cat.take([0, -1, -2], allow_fill=False) [a, b, a] Categories (2, object): [a, b] With ``allow_fill=True``, indices equal to ``-1`` mean "missing" values that should be filled with the `fill_value`, which is ``np.nan`` by default. >>> cat.take([0, -1, -1], allow_fill=True) [a, NaN, NaN] Categories (2, object): [a, b] The fill value can be specified. >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a') [a, a, a] Categories (3, object): [a, b] Specifying a fill value that's not in ``self.categories`` will raise a ``TypeError``. """ indexer = np.asarray(indexer, dtype=np.intp) if allow_fill is None: if (indexer < 0).any(): warn(_take_msg, FutureWarning, stacklevel=2) allow_fill = True dtype = self.dtype if isna(fill_value): fill_value = -1 elif allow_fill: # convert user-provided `fill_value` to codes if fill_value in self.categories: fill_value = self.categories.get_loc(fill_value) else: msg = ( "'fill_value' ('{}') is not in this Categorical's " "categories." ) raise TypeError(msg.format(fill_value)) codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value) result = type(self).from_codes(codes, dtype=dtype) return result
python
def take_nd(self, indexer, allow_fill=None, fill_value=None): """ Take elements from the Categorical. Parameters ---------- indexer : sequence of int The indices in `self` to take. The meaning of negative values in `indexer` depends on the value of `allow_fill`. allow_fill : bool, default None How to handle negative values in `indexer`. * False: negative values in `indices` indicate positional indices from the right. This is similar to :func:`numpy.take`. * True: negative values in `indices` indicate missing values (the default). These values are set to `fill_value`. Any other negative values raise a ``ValueError``. .. versionchanged:: 0.23.0 Deprecated the default value of `allow_fill`. The deprecated default is ``True``. In the future, this will change to ``False``. fill_value : object The value to use for `indices` that are missing (-1), when ``allow_fill=True``. This should be the category, i.e. a value in ``self.categories``, not a code. Returns ------- Categorical This Categorical will have the same categories and ordered as `self`. See Also -------- Series.take : Similar method for Series. numpy.ndarray.take : Similar method for NumPy arrays. Examples -------- >>> cat = pd.Categorical(['a', 'a', 'b']) >>> cat [a, a, b] Categories (2, object): [a, b] Specify ``allow_fill==False`` to have negative indices mean indexing from the right. >>> cat.take([0, -1, -2], allow_fill=False) [a, b, a] Categories (2, object): [a, b] With ``allow_fill=True``, indices equal to ``-1`` mean "missing" values that should be filled with the `fill_value`, which is ``np.nan`` by default. >>> cat.take([0, -1, -1], allow_fill=True) [a, NaN, NaN] Categories (2, object): [a, b] The fill value can be specified. >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a') [a, a, a] Categories (3, object): [a, b] Specifying a fill value that's not in ``self.categories`` will raise a ``TypeError``. """ indexer = np.asarray(indexer, dtype=np.intp) if allow_fill is None: if (indexer < 0).any(): warn(_take_msg, FutureWarning, stacklevel=2) allow_fill = True dtype = self.dtype if isna(fill_value): fill_value = -1 elif allow_fill: # convert user-provided `fill_value` to codes if fill_value in self.categories: fill_value = self.categories.get_loc(fill_value) else: msg = ( "'fill_value' ('{}') is not in this Categorical's " "categories." ) raise TypeError(msg.format(fill_value)) codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value) result = type(self).from_codes(codes, dtype=dtype) return result
[ "def", "take_nd", "(", "self", ",", "indexer", ",", "allow_fill", "=", "None", ",", "fill_value", "=", "None", ")", ":", "indexer", "=", "np", ".", "asarray", "(", "indexer", ",", "dtype", "=", "np", ".", "intp", ")", "if", "allow_fill", "is", "None", ":", "if", "(", "indexer", "<", "0", ")", ".", "any", "(", ")", ":", "warn", "(", "_take_msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "allow_fill", "=", "True", "dtype", "=", "self", ".", "dtype", "if", "isna", "(", "fill_value", ")", ":", "fill_value", "=", "-", "1", "elif", "allow_fill", ":", "# convert user-provided `fill_value` to codes", "if", "fill_value", "in", "self", ".", "categories", ":", "fill_value", "=", "self", ".", "categories", ".", "get_loc", "(", "fill_value", ")", "else", ":", "msg", "=", "(", "\"'fill_value' ('{}') is not in this Categorical's \"", "\"categories.\"", ")", "raise", "TypeError", "(", "msg", ".", "format", "(", "fill_value", ")", ")", "codes", "=", "take", "(", "self", ".", "_codes", ",", "indexer", ",", "allow_fill", "=", "allow_fill", ",", "fill_value", "=", "fill_value", ")", "result", "=", "type", "(", "self", ")", ".", "from_codes", "(", "codes", ",", "dtype", "=", "dtype", ")", "return", "result" ]
Take elements from the Categorical. Parameters ---------- indexer : sequence of int The indices in `self` to take. The meaning of negative values in `indexer` depends on the value of `allow_fill`. allow_fill : bool, default None How to handle negative values in `indexer`. * False: negative values in `indices` indicate positional indices from the right. This is similar to :func:`numpy.take`. * True: negative values in `indices` indicate missing values (the default). These values are set to `fill_value`. Any other negative values raise a ``ValueError``. .. versionchanged:: 0.23.0 Deprecated the default value of `allow_fill`. The deprecated default is ``True``. In the future, this will change to ``False``. fill_value : object The value to use for `indices` that are missing (-1), when ``allow_fill=True``. This should be the category, i.e. a value in ``self.categories``, not a code. Returns ------- Categorical This Categorical will have the same categories and ordered as `self`. See Also -------- Series.take : Similar method for Series. numpy.ndarray.take : Similar method for NumPy arrays. Examples -------- >>> cat = pd.Categorical(['a', 'a', 'b']) >>> cat [a, a, b] Categories (2, object): [a, b] Specify ``allow_fill==False`` to have negative indices mean indexing from the right. >>> cat.take([0, -1, -2], allow_fill=False) [a, b, a] Categories (2, object): [a, b] With ``allow_fill=True``, indices equal to ``-1`` mean "missing" values that should be filled with the `fill_value`, which is ``np.nan`` by default. >>> cat.take([0, -1, -1], allow_fill=True) [a, NaN, NaN] Categories (2, object): [a, b] The fill value can be specified. >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a') [a, a, a] Categories (3, object): [a, b] Specifying a fill value that's not in ``self.categories`` will raise a ``TypeError``.
[ "Take", "elements", "from", "the", "Categorical", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1792-L1889
19,821
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._slice
def _slice(self, slicer): """ Return a slice of myself. For internal compatibility with numpy arrays. """ # only allow 1 dimensional slicing, but can # in a 2-d case be passed (slice(None),....) if isinstance(slicer, tuple) and len(slicer) == 2: if not com.is_null_slice(slicer[0]): raise AssertionError("invalid slicing for a 1-ndim " "categorical") slicer = slicer[1] codes = self._codes[slicer] return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
python
def _slice(self, slicer): """ Return a slice of myself. For internal compatibility with numpy arrays. """ # only allow 1 dimensional slicing, but can # in a 2-d case be passed (slice(None),....) if isinstance(slicer, tuple) and len(slicer) == 2: if not com.is_null_slice(slicer[0]): raise AssertionError("invalid slicing for a 1-ndim " "categorical") slicer = slicer[1] codes = self._codes[slicer] return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
[ "def", "_slice", "(", "self", ",", "slicer", ")", ":", "# only allow 1 dimensional slicing, but can", "# in a 2-d case be passd (slice(None),....)", "if", "isinstance", "(", "slicer", ",", "tuple", ")", "and", "len", "(", "slicer", ")", "==", "2", ":", "if", "not", "com", ".", "is_null_slice", "(", "slicer", "[", "0", "]", ")", ":", "raise", "AssertionError", "(", "\"invalid slicing for a 1-ndim \"", "\"categorical\"", ")", "slicer", "=", "slicer", "[", "1", "]", "codes", "=", "self", ".", "_codes", "[", "slicer", "]", "return", "self", ".", "_constructor", "(", "values", "=", "codes", ",", "dtype", "=", "self", ".", "dtype", ",", "fastpath", "=", "True", ")" ]
Return a slice of myself. For internal compatibility with numpy arrays.
[ "Return", "a", "slice", "of", "myself", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1893-L1909
19,822
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._repr_categories
def _repr_categories(self): """ return the base repr for the categories """ max_categories = (10 if get_option("display.max_categories") == 0 else get_option("display.max_categories")) from pandas.io.formats import format as fmt if len(self.categories) > max_categories: num = max_categories // 2 head = fmt.format_array(self.categories[:num], None) tail = fmt.format_array(self.categories[-num:], None) category_strs = head + ["..."] + tail else: category_strs = fmt.format_array(self.categories, None) # Strip all leading spaces, which format_array adds for columns... category_strs = [x.strip() for x in category_strs] return category_strs
python
def _repr_categories(self): """ return the base repr for the categories """ max_categories = (10 if get_option("display.max_categories") == 0 else get_option("display.max_categories")) from pandas.io.formats import format as fmt if len(self.categories) > max_categories: num = max_categories // 2 head = fmt.format_array(self.categories[:num], None) tail = fmt.format_array(self.categories[-num:], None) category_strs = head + ["..."] + tail else: category_strs = fmt.format_array(self.categories, None) # Strip all leading spaces, which format_array adds for columns... category_strs = [x.strip() for x in category_strs] return category_strs
[ "def", "_repr_categories", "(", "self", ")", ":", "max_categories", "=", "(", "10", "if", "get_option", "(", "\"display.max_categories\"", ")", "==", "0", "else", "get_option", "(", "\"display.max_categories\"", ")", ")", "from", "pandas", ".", "io", ".", "formats", "import", "format", "as", "fmt", "if", "len", "(", "self", ".", "categories", ")", ">", "max_categories", ":", "num", "=", "max_categories", "//", "2", "head", "=", "fmt", ".", "format_array", "(", "self", ".", "categories", "[", ":", "num", "]", ",", "None", ")", "tail", "=", "fmt", ".", "format_array", "(", "self", ".", "categories", "[", "-", "num", ":", "]", ",", "None", ")", "category_strs", "=", "head", "+", "[", "\"...\"", "]", "+", "tail", "else", ":", "category_strs", "=", "fmt", ".", "format_array", "(", "self", ".", "categories", ",", "None", ")", "# Strip all leading spaces, which format_array adds for columns...", "category_strs", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "category_strs", "]", "return", "category_strs" ]
return the base repr for the categories
[ "return", "the", "base", "repr", "for", "the", "categories" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1948-L1965
19,823
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._repr_categories_info
def _repr_categories_info(self): """ Returns a string representation of the footer. """ category_strs = self._repr_categories() dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype)) levheader = "Categories ({length}, {dtype}): ".format( length=len(self.categories), dtype=dtype) width, height = get_terminal_size() max_width = get_option("display.width") or width if console.in_ipython_frontend(): # 0 = no breaks max_width = 0 levstring = "" start = True cur_col_len = len(levheader) # header sep_len, sep = (3, " < ") if self.ordered else (2, ", ") linesep = sep.rstrip() + "\n" # remove whitespace for val in category_strs: if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: levstring += linesep + (" " * (len(levheader) + 1)) cur_col_len = len(levheader) + 1 # header + a whitespace elif not start: levstring += sep cur_col_len += len(val) levstring += val start = False # replace to save space return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
python
def _repr_categories_info(self): """ Returns a string representation of the footer. """ category_strs = self._repr_categories() dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype)) levheader = "Categories ({length}, {dtype}): ".format( length=len(self.categories), dtype=dtype) width, height = get_terminal_size() max_width = get_option("display.width") or width if console.in_ipython_frontend(): # 0 = no breaks max_width = 0 levstring = "" start = True cur_col_len = len(levheader) # header sep_len, sep = (3, " < ") if self.ordered else (2, ", ") linesep = sep.rstrip() + "\n" # remove whitespace for val in category_strs: if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: levstring += linesep + (" " * (len(levheader) + 1)) cur_col_len = len(levheader) + 1 # header + a whitespace elif not start: levstring += sep cur_col_len += len(val) levstring += val start = False # replace to save space return levheader + "[" + levstring.replace(" < ... < ", " ... ") + "]"
[ "def", "_repr_categories_info", "(", "self", ")", ":", "category_strs", "=", "self", ".", "_repr_categories", "(", ")", "dtype", "=", "getattr", "(", "self", ".", "categories", ",", "'dtype_str'", ",", "str", "(", "self", ".", "categories", ".", "dtype", ")", ")", "levheader", "=", "\"Categories ({length}, {dtype}): \"", ".", "format", "(", "length", "=", "len", "(", "self", ".", "categories", ")", ",", "dtype", "=", "dtype", ")", "width", ",", "height", "=", "get_terminal_size", "(", ")", "max_width", "=", "get_option", "(", "\"display.width\"", ")", "or", "width", "if", "console", ".", "in_ipython_frontend", "(", ")", ":", "# 0 = no breaks", "max_width", "=", "0", "levstring", "=", "\"\"", "start", "=", "True", "cur_col_len", "=", "len", "(", "levheader", ")", "# header", "sep_len", ",", "sep", "=", "(", "3", ",", "\" < \"", ")", "if", "self", ".", "ordered", "else", "(", "2", ",", "\", \"", ")", "linesep", "=", "sep", ".", "rstrip", "(", ")", "+", "\"\\n\"", "# remove whitespace", "for", "val", "in", "category_strs", ":", "if", "max_width", "!=", "0", "and", "cur_col_len", "+", "sep_len", "+", "len", "(", "val", ")", ">", "max_width", ":", "levstring", "+=", "linesep", "+", "(", "\" \"", "*", "(", "len", "(", "levheader", ")", "+", "1", ")", ")", "cur_col_len", "=", "len", "(", "levheader", ")", "+", "1", "# header + a whitespace", "elif", "not", "start", ":", "levstring", "+=", "sep", "cur_col_len", "+=", "len", "(", "val", ")", "levstring", "+=", "val", "start", "=", "False", "# replace to simple save space by", "return", "levheader", "+", "\"[\"", "+", "levstring", ".", "replace", "(", "\" < ... < \"", ",", "\" ... \"", ")", "+", "\"]\"" ]
Returns a string representation of the footer.
[ "Returns", "a", "string", "representation", "of", "the", "footer", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L1967-L1998
19,824
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._maybe_coerce_indexer
def _maybe_coerce_indexer(self, indexer): """ return an indexer coerced to the codes dtype """ if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i': indexer = indexer.astype(self._codes.dtype) return indexer
python
def _maybe_coerce_indexer(self, indexer): """ return an indexer coerced to the codes dtype """ if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i': indexer = indexer.astype(self._codes.dtype) return indexer
[ "def", "_maybe_coerce_indexer", "(", "self", ",", "indexer", ")", ":", "if", "isinstance", "(", "indexer", ",", "np", ".", "ndarray", ")", "and", "indexer", ".", "dtype", ".", "kind", "==", "'i'", ":", "indexer", "=", "indexer", ".", "astype", "(", "self", ".", "_codes", ".", "dtype", ")", "return", "indexer" ]
return an indexer coerced to the codes dtype
[ "return", "an", "indexer", "coerced", "to", "the", "codes", "dtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2031-L2037
19,825
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical._reverse_indexer
def _reverse_indexer(self): """ Compute the inverse of a categorical, returning a dict of categories -> indexers. *This is an internal function* Returns ------- dict of categories -> indexers Example ------- In [1]: c = pd.Categorical(list('aabca')) In [2]: c Out[2]: [a, a, b, c, a] Categories (3, object): [a, b, c] In [3]: c.categories Out[3]: Index(['a', 'b', 'c'], dtype='object') In [4]: c.codes Out[4]: array([0, 0, 1, 2, 0], dtype=int8) In [5]: c._reverse_indexer() Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} """ categories = self.categories r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'), categories.size) counts = counts.cumsum() result = (r[start:end] for start, end in zip(counts, counts[1:])) result = dict(zip(categories, result)) return result
python
def _reverse_indexer(self): """ Compute the inverse of a categorical, returning a dict of categories -> indexers. *This is an internal function* Returns ------- dict of categories -> indexers Example ------- In [1]: c = pd.Categorical(list('aabca')) In [2]: c Out[2]: [a, a, b, c, a] Categories (3, object): [a, b, c] In [3]: c.categories Out[3]: Index(['a', 'b', 'c'], dtype='object') In [4]: c.codes Out[4]: array([0, 0, 1, 2, 0], dtype=int8) In [5]: c._reverse_indexer() Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} """ categories = self.categories r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'), categories.size) counts = counts.cumsum() result = (r[start:end] for start, end in zip(counts, counts[1:])) result = dict(zip(categories, result)) return result
[ "def", "_reverse_indexer", "(", "self", ")", ":", "categories", "=", "self", ".", "categories", "r", ",", "counts", "=", "libalgos", ".", "groupsort_indexer", "(", "self", ".", "codes", ".", "astype", "(", "'int64'", ")", ",", "categories", ".", "size", ")", "counts", "=", "counts", ".", "cumsum", "(", ")", "result", "=", "(", "r", "[", "start", ":", "end", "]", "for", "start", ",", "end", "in", "zip", "(", "counts", ",", "counts", "[", "1", ":", "]", ")", ")", "result", "=", "dict", "(", "zip", "(", "categories", ",", "result", ")", ")", "return", "result" ]
Compute the inverse of a categorical, returning a dict of categories -> indexers. *This is an internal function* Returns ------- dict of categories -> indexers Example ------- In [1]: c = pd.Categorical(list('aabca')) In [2]: c Out[2]: [a, a, b, c, a] Categories (3, object): [a, b, c] In [3]: c.categories Out[3]: Index(['a', 'b', 'c'], dtype='object') In [4]: c.codes Out[4]: array([0, 0, 1, 2, 0], dtype=int8) In [5]: c._reverse_indexer() Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
[ "Compute", "the", "inverse", "of", "a", "categorical", "returning", "a", "dict", "of", "categories", "-", ">", "indexers", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2119-L2155
19,826
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.min
def min(self, numeric_only=None, **kwargs): """ The minimum value of the object. Only ordered `Categoricals` have a minimum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- min : the minimum of this `Categorical` """ self.check_for_ordered('min') if numeric_only: good = self._codes != -1 pointer = self._codes[good].min(**kwargs) else: pointer = self._codes.min(**kwargs) if pointer == -1: return np.nan else: return self.categories[pointer]
python
def min(self, numeric_only=None, **kwargs): """ The minimum value of the object. Only ordered `Categoricals` have a minimum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- min : the minimum of this `Categorical` """ self.check_for_ordered('min') if numeric_only: good = self._codes != -1 pointer = self._codes[good].min(**kwargs) else: pointer = self._codes.min(**kwargs) if pointer == -1: return np.nan else: return self.categories[pointer]
[ "def", "min", "(", "self", ",", "numeric_only", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "check_for_ordered", "(", "'min'", ")", "if", "numeric_only", ":", "good", "=", "self", ".", "_codes", "!=", "-", "1", "pointer", "=", "self", ".", "_codes", "[", "good", "]", ".", "min", "(", "*", "*", "kwargs", ")", "else", ":", "pointer", "=", "self", ".", "_codes", ".", "min", "(", "*", "*", "kwargs", ")", "if", "pointer", "==", "-", "1", ":", "return", "np", ".", "nan", "else", ":", "return", "self", ".", "categories", "[", "pointer", "]" ]
The minimum value of the object. Only ordered `Categoricals` have a minimum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- min : the minimum of this `Categorical`
[ "The", "minimum", "value", "of", "the", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2165-L2189
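A sketch of `min`, which is only defined for ordered categoricals, as the docstring above states.

import pandas as pd

cat = pd.Categorical(['b', 'a', 'c'], categories=['a', 'b', 'c'], ordered=True)
cat.min()  # 'a'
# pd.Categorical(['a', 'b']).min() would raise TypeError: not an ordered Categorical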
19,827
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.unique
def unique(self): """ Return the ``Categorical`` whose ``categories`` and ``codes`` are unique. Unused categories are NOT returned. - unordered category: values and categories are sorted by appearance order. - ordered category: values are sorted by appearance order, categories keep existing order. Returns ------- unique values : ``Categorical`` Examples -------- An unordered Categorical will return categories in the order of appearance. >>> pd.Categorical(list('baabc')) [b, a, c] Categories (3, object): [b, a, c] >>> pd.Categorical(list('baabc'), categories=list('abc')) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True) [b, a, c] Categories (3, object): [a < b < c] See Also -------- unique CategoricalIndex.unique Series.unique """ # unlike np.unique, unique1d does not sort unique_codes = unique1d(self.codes) cat = self.copy() # keep nan in codes cat._codes = unique_codes # exclude nan from indexer for categories take_codes = unique_codes[unique_codes != -1] if self.ordered: take_codes = np.sort(take_codes) return cat.set_categories(cat.categories.take(take_codes))
python
def unique(self): """ Return the ``Categorical`` whose ``categories`` and ``codes`` are unique. Unused categories are NOT returned. - unordered category: values and categories are sorted by appearance order. - ordered category: values are sorted by appearance order, categories keep existing order. Returns ------- unique values : ``Categorical`` Examples -------- An unordered Categorical will return categories in the order of appearance. >>> pd.Categorical(list('baabc')) [b, a, c] Categories (3, object): [b, a, c] >>> pd.Categorical(list('baabc'), categories=list('abc')) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True) [b, a, c] Categories (3, object): [a < b < c] See Also -------- unique CategoricalIndex.unique Series.unique """ # unlike np.unique, unique1d does not sort unique_codes = unique1d(self.codes) cat = self.copy() # keep nan in codes cat._codes = unique_codes # exclude nan from indexer for categories take_codes = unique_codes[unique_codes != -1] if self.ordered: take_codes = np.sort(take_codes) return cat.set_categories(cat.categories.take(take_codes))
[ "def", "unique", "(", "self", ")", ":", "# unlike np.unique, unique1d does not sort", "unique_codes", "=", "unique1d", "(", "self", ".", "codes", ")", "cat", "=", "self", ".", "copy", "(", ")", "# keep nan in codes", "cat", ".", "_codes", "=", "unique_codes", "# exclude nan from indexer for categories", "take_codes", "=", "unique_codes", "[", "unique_codes", "!=", "-", "1", "]", "if", "self", ".", "ordered", ":", "take_codes", "=", "np", ".", "sort", "(", "take_codes", ")", "return", "cat", ".", "set_categories", "(", "cat", ".", "categories", ".", "take", "(", "take_codes", ")", ")" ]
Return the ``Categorical`` whose ``categories`` and ``codes`` are unique. Unused categories are NOT returned. - unordered category: values and categories are sorted by appearance order. - ordered category: values are sorted by appearance order, categories keep existing order. Returns ------- unique values : ``Categorical`` Examples -------- An unordered Categorical will return categories in the order of appearance. >>> pd.Categorical(list('baabc')) [b, a, c] Categories (3, object): [b, a, c] >>> pd.Categorical(list('baabc'), categories=list('abc')) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True) [b, a, c] Categories (3, object): [a < b < c] See Also -------- unique CategoricalIndex.unique Series.unique
[ "Return", "the", "Categorical", "which", "categories", "and", "codes", "are", "unique", ".", "Unused", "categories", "are", "NOT", "returned", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2243-L2297
19,828
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.equals
def equals(self, other): """ Returns True if categorical arrays are equal. Parameters ---------- other : `Categorical` Returns ------- bool """ if self.is_dtype_equal(other): if self.categories.equals(other.categories): # fastpath to avoid re-coding other_codes = other._codes else: other_codes = _recode_for_categories(other.codes, other.categories, self.categories) return np.array_equal(self._codes, other_codes) return False
python
def equals(self, other): """ Returns True if categorical arrays are equal. Parameters ---------- other : `Categorical` Returns ------- bool """ if self.is_dtype_equal(other): if self.categories.equals(other.categories): # fastpath to avoid re-coding other_codes = other._codes else: other_codes = _recode_for_categories(other.codes, other.categories, self.categories) return np.array_equal(self._codes, other_codes) return False
[ "def", "equals", "(", "self", ",", "other", ")", ":", "if", "self", ".", "is_dtype_equal", "(", "other", ")", ":", "if", "self", ".", "categories", ".", "equals", "(", "other", ".", "categories", ")", ":", "# fastpath to avoid re-coding", "other_codes", "=", "other", ".", "_codes", "else", ":", "other_codes", "=", "_recode_for_categories", "(", "other", ".", "codes", ",", "other", ".", "categories", ",", "self", ".", "categories", ")", "return", "np", ".", "array_equal", "(", "self", ".", "_codes", ",", "other_codes", ")", "return", "False" ]
Returns True if categorical arrays are equal. Parameters ---------- other : `Categorical` Returns ------- bool
[ "Returns", "True", "if", "categorical", "arrays", "are", "equal", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2308-L2329
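A sketch of `equals`: the dtype gate runs first, so identical values with different category sets still compare unequal.

import pandas as pd

a = pd.Categorical(['x', 'y'], categories=['x', 'y', 'z'])
b = pd.Categorical(['x', 'y'], categories=['x', 'y', 'z'])
a.equals(b)                                                  # True
a.equals(pd.Categorical(['x', 'y'], categories=['x', 'y']))  # False: category sets differ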
19,829
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.is_dtype_equal
def is_dtype_equal(self, other): """ Returns True if categoricals are the same dtype, same categories, and same ordered Parameters ---------- other : Categorical Returns ------- bool """ try: return hash(self.dtype) == hash(other.dtype) except (AttributeError, TypeError): return False
python
def is_dtype_equal(self, other): """ Returns True if categoricals are the same dtype, same categories, and same ordered Parameters ---------- other : Categorical Returns ------- bool """ try: return hash(self.dtype) == hash(other.dtype) except (AttributeError, TypeError): return False
[ "def", "is_dtype_equal", "(", "self", ",", "other", ")", ":", "try", ":", "return", "hash", "(", "self", ".", "dtype", ")", "==", "hash", "(", "other", ".", "dtype", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "return", "False" ]
Returns True if categoricals are the same dtype, same categories, and same ordered Parameters ---------- other : Categorical Returns ------- bool
[ "Returns", "True", "if", "categoricals", "are", "the", "same", "dtype", "same", "categories", "and", "same", "ordered" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2331-L2348
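A sketch of `is_dtype_equal`: only the dtype (categories plus orderedness) is compared, never the values, and non-categorical inputs fall through to False.

import pandas as pd

c1 = pd.Categorical(['a'], categories=['a', 'b'])
c2 = pd.Categorical(['b'], categories=['a', 'b'])
c1.is_dtype_equal(c2)     # True despite different values
c1.is_dtype_equal(['a'])  # False: a list has no .dtype, the AttributeError is caught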
19,830
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.describe
def describe(self): """ Describes this Categorical Returns ------- description: `DataFrame` A dataframe with frequency and counts by category. """ counts = self.value_counts(dropna=False) freqs = counts / float(counts.sum()) from pandas.core.reshape.concat import concat result = concat([counts, freqs], axis=1) result.columns = ['counts', 'freqs'] result.index.name = 'categories' return result
python
def describe(self): """ Describes this Categorical Returns ------- description: `DataFrame` A dataframe with frequency and counts by category. """ counts = self.value_counts(dropna=False) freqs = counts / float(counts.sum()) from pandas.core.reshape.concat import concat result = concat([counts, freqs], axis=1) result.columns = ['counts', 'freqs'] result.index.name = 'categories' return result
[ "def", "describe", "(", "self", ")", ":", "counts", "=", "self", ".", "value_counts", "(", "dropna", "=", "False", ")", "freqs", "=", "counts", "/", "float", "(", "counts", ".", "sum", "(", ")", ")", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "result", "=", "concat", "(", "[", "counts", ",", "freqs", "]", ",", "axis", "=", "1", ")", "result", ".", "columns", "=", "[", "'counts'", ",", "'freqs'", "]", "result", ".", "index", ".", "name", "=", "'categories'", "return", "result" ]
Describes this Categorical Returns ------- description: `DataFrame` A dataframe with frequency and counts by category.
[ "Describes", "this", "Categorical" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2350-L2367
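An illustrative `describe` call; the commented frame is what the implementation above would produce, NaN row included since it counts with dropna=False.

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'a', 'b', np.nan])
cat.describe()
#             counts  freqs
# categories
# a                2   0.50
# b                1   0.25
# NaN              1   0.25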
19,831
pandas-dev/pandas
pandas/core/arrays/categorical.py
Categorical.isin
def isin(self, values): """ Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : set or list-like The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- isin : numpy.ndarray (bool dtype) Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : Equivalent method on Series. Examples -------- >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', ... 'hippo']) >>> s.isin(['cow', 'lama']) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('lama')`` will raise an error. Use a list of one element instead: >>> s.isin(['lama']) array([ True, False, True, False, True, False]) """ from pandas.core.internals.construction import sanitize_array if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) values = sanitize_array(values, None, None) null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer(values) code_values = code_values[null_mask | (code_values >= 0)] return algorithms.isin(self.codes, code_values)
python
def isin(self, values): """ Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : set or list-like The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- isin : numpy.ndarray (bool dtype) Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : Equivalent method on Series. Examples -------- >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', ... 'hippo']) >>> s.isin(['cow', 'lama']) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('lama')`` will raise an error. Use a list of one element instead: >>> s.isin(['lama']) array([ True, False, True, False, True, False]) """ from pandas.core.internals.construction import sanitize_array if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) values = sanitize_array(values, None, None) null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer(values) code_values = code_values[null_mask | (code_values >= 0)] return algorithms.isin(self.codes, code_values)
[ "def", "isin", "(", "self", ",", "values", ")", ":", "from", "pandas", ".", "core", ".", "internals", ".", "construction", "import", "sanitize_array", "if", "not", "is_list_like", "(", "values", ")", ":", "raise", "TypeError", "(", "\"only list-like objects are allowed to be passed\"", "\" to isin(), you passed a [{values_type}]\"", ".", "format", "(", "values_type", "=", "type", "(", "values", ")", ".", "__name__", ")", ")", "values", "=", "sanitize_array", "(", "values", ",", "None", ",", "None", ")", "null_mask", "=", "np", ".", "asarray", "(", "isna", "(", "values", ")", ")", "code_values", "=", "self", ".", "categories", ".", "get_indexer", "(", "values", ")", "code_values", "=", "code_values", "[", "null_mask", "|", "(", "code_values", ">=", "0", ")", "]", "return", "algorithms", ".", "isin", "(", "self", ".", "codes", ",", "code_values", ")" ]
Check whether `values` are contained in Categorical.

Return a boolean NumPy Array showing whether each element in
the Categorical matches an element in the passed sequence of
`values` exactly.

Parameters
----------
values : set or list-like
    The sequence of values to test. Passing in a single string will
    raise a ``TypeError``. Instead, turn a single string into a
    list of one element.

Returns
-------
isin : numpy.ndarray (bool dtype)

Raises
------
TypeError
  * If `values` is not a set or list-like

See Also
--------
pandas.Series.isin : Equivalent method on Series.

Examples
--------
>>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',
...                    'hippo'])
>>> s.isin(['cow', 'lama'])
array([ True,  True,  True, False,  True, False])

Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:

>>> s.isin(['lama'])
array([ True, False,  True, False,  True, False])
[ "Check", "whether", "values", "are", "contained", "in", "Categorical", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2387-L2438
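The doctest examples cover plain membership; a small extra sketch of the NaN handling implied by the `null_mask` branch (values illustrative):

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'b', np.nan, 'a'])
# NaN in `values` survives via null_mask, so missing entries in the
# Categorical (code -1) can match a NaN passed in `values`.
print(cat.isin(['a', np.nan]))  # expected: [ True False  True  True]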
19,832
pandas-dev/pandas
pandas/core/tools/timedeltas.py
to_timedelta
def to_timedelta(arg, unit='ns', box=True, errors='raise'): """ Convert argument to timedelta. Timedeltas are absolute differences in times, expressed in difference units (e.g. days, hours, minutes, seconds). This method converts an argument from a recognized timedelta format / value into a Timedelta type. Parameters ---------- arg : str, timedelta, list-like or Series The data to be converted to timedelta. unit : str, default 'ns' Denotes the unit of the arg. Possible values: ('Y', 'M', 'W', 'D', 'days', 'day', 'hours', hour', 'hr', 'h', 'm', 'minute', 'min', 'minutes', 'T', 'S', 'seconds', 'sec', 'second', 'ms', 'milliseconds', 'millisecond', 'milli', 'millis', 'L', 'us', 'microseconds', 'microsecond', 'micro', 'micros', 'U', 'ns', 'nanoseconds', 'nano', 'nanos', 'nanosecond', 'N'). box : bool, default True - If True returns a Timedelta/TimedeltaIndex of the results. - If False returns a numpy.timedelta64 or numpy.darray of values of dtype timedelta64[ns]. .. deprecated:: 0.25.0 Use :meth:`.to_numpy` or :meth:`Timedelta.to_timedelta64` instead to get an ndarray of values or numpy.timedelta64, respectively. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. - If 'ignore', then invalid parsing will return the input. Returns ------- timedelta64 or numpy.array of timedelta64 Output type returned if parsing succeeded. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. Examples -------- Parsing a single string to a Timedelta: >>> pd.to_timedelta('1 days 06:05:01.00003') Timedelta('1 days 06:05:01.000030') >>> pd.to_timedelta('15.5us') Timedelta('0 days 00:00:00.000015') Parsing a list or array of strings: >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT], dtype='timedelta64[ns]', freq=None) Converting numbers by specifying the `unit` keyword argument: >>> pd.to_timedelta(np.arange(5), unit='s') TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02', '00:00:03', '00:00:04'], dtype='timedelta64[ns]', freq=None) >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) Returning an ndarray by using the 'box' keyword argument: >>> pd.to_timedelta(np.arange(5), box=False) array([0, 1, 2, 3, 4], dtype='timedelta64[ns]') """ unit = parse_timedelta_unit(unit) if errors not in ('ignore', 'raise', 'coerce'): raise ValueError("errors must be one of 'ignore', " "'raise', or 'coerce'}") if unit in {'Y', 'y', 'M'}: warnings.warn("M and Y units are deprecated and " "will be removed in a future version.", FutureWarning, stacklevel=2) if arg is None: return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, unit=unit, box=False, errors=errors) return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, unit=unit, box=box, errors=errors, name=arg.name) elif isinstance(arg, np.ndarray) and arg.ndim == 0: # extract array scalar and process below arg = arg.item() elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1: return _convert_listlike(arg, unit=unit, box=box, errors=errors) elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, timedelta, list, tuple, ' '1-d array, or Series') # ...so it must be a scalar value. Return scalar. 
return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box, errors=errors)
python
def to_timedelta(arg, unit='ns', box=True, errors='raise'): """ Convert argument to timedelta. Timedeltas are absolute differences in times, expressed in difference units (e.g. days, hours, minutes, seconds). This method converts an argument from a recognized timedelta format / value into a Timedelta type. Parameters ---------- arg : str, timedelta, list-like or Series The data to be converted to timedelta. unit : str, default 'ns' Denotes the unit of the arg. Possible values: ('Y', 'M', 'W', 'D', 'days', 'day', 'hours', hour', 'hr', 'h', 'm', 'minute', 'min', 'minutes', 'T', 'S', 'seconds', 'sec', 'second', 'ms', 'milliseconds', 'millisecond', 'milli', 'millis', 'L', 'us', 'microseconds', 'microsecond', 'micro', 'micros', 'U', 'ns', 'nanoseconds', 'nano', 'nanos', 'nanosecond', 'N'). box : bool, default True - If True returns a Timedelta/TimedeltaIndex of the results. - If False returns a numpy.timedelta64 or numpy.darray of values of dtype timedelta64[ns]. .. deprecated:: 0.25.0 Use :meth:`.to_numpy` or :meth:`Timedelta.to_timedelta64` instead to get an ndarray of values or numpy.timedelta64, respectively. errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception. - If 'coerce', then invalid parsing will be set as NaT. - If 'ignore', then invalid parsing will return the input. Returns ------- timedelta64 or numpy.array of timedelta64 Output type returned if parsing succeeded. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. Examples -------- Parsing a single string to a Timedelta: >>> pd.to_timedelta('1 days 06:05:01.00003') Timedelta('1 days 06:05:01.000030') >>> pd.to_timedelta('15.5us') Timedelta('0 days 00:00:00.000015') Parsing a list or array of strings: >>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT], dtype='timedelta64[ns]', freq=None) Converting numbers by specifying the `unit` keyword argument: >>> pd.to_timedelta(np.arange(5), unit='s') TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02', '00:00:03', '00:00:04'], dtype='timedelta64[ns]', freq=None) >>> pd.to_timedelta(np.arange(5), unit='d') TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) Returning an ndarray by using the 'box' keyword argument: >>> pd.to_timedelta(np.arange(5), box=False) array([0, 1, 2, 3, 4], dtype='timedelta64[ns]') """ unit = parse_timedelta_unit(unit) if errors not in ('ignore', 'raise', 'coerce'): raise ValueError("errors must be one of 'ignore', " "'raise', or 'coerce'}") if unit in {'Y', 'y', 'M'}: warnings.warn("M and Y units are deprecated and " "will be removed in a future version.", FutureWarning, stacklevel=2) if arg is None: return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, unit=unit, box=False, errors=errors) return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, unit=unit, box=box, errors=errors, name=arg.name) elif isinstance(arg, np.ndarray) and arg.ndim == 0: # extract array scalar and process below arg = arg.item() elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1: return _convert_listlike(arg, unit=unit, box=box, errors=errors) elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, timedelta, list, tuple, ' '1-d array, or Series') # ...so it must be a scalar value. Return scalar. 
return _coerce_scalar_to_timedelta_type(arg, unit=unit, box=box, errors=errors)
[ "def", "to_timedelta", "(", "arg", ",", "unit", "=", "'ns'", ",", "box", "=", "True", ",", "errors", "=", "'raise'", ")", ":", "unit", "=", "parse_timedelta_unit", "(", "unit", ")", "if", "errors", "not", "in", "(", "'ignore'", ",", "'raise'", ",", "'coerce'", ")", ":", "raise", "ValueError", "(", "\"errors must be one of 'ignore', \"", "\"'raise', or 'coerce'}\"", ")", "if", "unit", "in", "{", "'Y'", ",", "'y'", ",", "'M'", "}", ":", "warnings", ".", "warn", "(", "\"M and Y units are deprecated and \"", "\"will be removed in a future version.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "if", "arg", "is", "None", ":", "return", "arg", "elif", "isinstance", "(", "arg", ",", "ABCSeries", ")", ":", "values", "=", "_convert_listlike", "(", "arg", ".", "_values", ",", "unit", "=", "unit", ",", "box", "=", "False", ",", "errors", "=", "errors", ")", "return", "arg", ".", "_constructor", "(", "values", ",", "index", "=", "arg", ".", "index", ",", "name", "=", "arg", ".", "name", ")", "elif", "isinstance", "(", "arg", ",", "ABCIndexClass", ")", ":", "return", "_convert_listlike", "(", "arg", ",", "unit", "=", "unit", ",", "box", "=", "box", ",", "errors", "=", "errors", ",", "name", "=", "arg", ".", "name", ")", "elif", "isinstance", "(", "arg", ",", "np", ".", "ndarray", ")", "and", "arg", ".", "ndim", "==", "0", ":", "# extract array scalar and process below", "arg", "=", "arg", ".", "item", "(", ")", "elif", "is_list_like", "(", "arg", ")", "and", "getattr", "(", "arg", ",", "'ndim'", ",", "1", ")", "==", "1", ":", "return", "_convert_listlike", "(", "arg", ",", "unit", "=", "unit", ",", "box", "=", "box", ",", "errors", "=", "errors", ")", "elif", "getattr", "(", "arg", ",", "'ndim'", ",", "1", ")", ">", "1", ":", "raise", "TypeError", "(", "'arg must be a string, timedelta, list, tuple, '", "'1-d array, or Series'", ")", "# ...so it must be a scalar value. Return scalar.", "return", "_coerce_scalar_to_timedelta_type", "(", "arg", ",", "unit", "=", "unit", ",", "box", "=", "box", ",", "errors", "=", "errors", ")" ]
Convert argument to timedelta.

Timedeltas are absolute differences in times, expressed in different
units (e.g. days, hours, minutes, seconds). This method converts
an argument from a recognized timedelta format / value into
a Timedelta type.

Parameters
----------
arg : str, timedelta, list-like or Series
    The data to be converted to timedelta.
unit : str, default 'ns'
    Denotes the unit of the arg. Possible values:
    ('Y', 'M', 'W', 'D', 'days', 'day', 'hours', 'hour', 'hr',
    'h', 'm', 'minute', 'min', 'minutes', 'T', 'S', 'seconds',
    'sec', 'second', 'ms', 'milliseconds', 'millisecond',
    'milli', 'millis', 'L', 'us', 'microseconds', 'microsecond',
    'micro', 'micros', 'U', 'ns', 'nanoseconds', 'nano', 'nanos',
    'nanosecond', 'N').
box : bool, default True
    - If True returns a Timedelta/TimedeltaIndex of the results.
    - If False returns a numpy.timedelta64 or numpy.ndarray of values of
      dtype timedelta64[ns].

    .. deprecated:: 0.25.0
        Use :meth:`.to_numpy` or :meth:`Timedelta.to_timedelta64`
        instead to get an ndarray of values or numpy.timedelta64,
        respectively.

errors : {'ignore', 'raise', 'coerce'}, default 'raise'
    - If 'raise', then invalid parsing will raise an exception.
    - If 'coerce', then invalid parsing will be set as NaT.
    - If 'ignore', then invalid parsing will return the input.

Returns
-------
timedelta64 or numpy.array of timedelta64
    Output type returned if parsing succeeded.

See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.

Examples
--------
Parsing a single string to a Timedelta:

>>> pd.to_timedelta('1 days 06:05:01.00003')
Timedelta('1 days 06:05:01.000030')
>>> pd.to_timedelta('15.5us')
Timedelta('0 days 00:00:00.000015')

Parsing a list or array of strings:

>>> pd.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan'])
TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015', NaT],
               dtype='timedelta64[ns]', freq=None)

Converting numbers by specifying the `unit` keyword argument:

>>> pd.to_timedelta(np.arange(5), unit='s')
TimedeltaIndex(['00:00:00', '00:00:01', '00:00:02',
                '00:00:03', '00:00:04'],
               dtype='timedelta64[ns]', freq=None)
>>> pd.to_timedelta(np.arange(5), unit='d')
TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],
               dtype='timedelta64[ns]', freq=None)

Returning an ndarray by using the 'box' keyword argument:

>>> pd.to_timedelta(np.arange(5), box=False)
array([0, 1, 2, 3, 4], dtype='timedelta64[ns]')
[ "Convert", "argument", "to", "timedelta", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/timedeltas.py#L20-L128
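The docstring shows the happy paths; a short sketch of the `errors='coerce'` branch described above (inputs illustrative):

import pandas as pd

# Invalid entries become NaT with errors='coerce' instead of raising.
print(pd.to_timedelta(['1 days', 'not a timedelta'], errors='coerce'))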
19,833
pandas-dev/pandas
pandas/core/tools/timedeltas.py
_coerce_scalar_to_timedelta_type
def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): """Convert string 'r' to a timedelta object.""" try: result = Timedelta(r, unit) if not box: # explicitly view as timedelta64 for case when result is pd.NaT result = result.asm8.view('timedelta64[ns]') except ValueError: if errors == 'raise': raise elif errors == 'ignore': return r # coerce result = NaT return result
python
def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'): """Convert string 'r' to a timedelta object.""" try: result = Timedelta(r, unit) if not box: # explicitly view as timedelta64 for case when result is pd.NaT result = result.asm8.view('timedelta64[ns]') except ValueError: if errors == 'raise': raise elif errors == 'ignore': return r # coerce result = NaT return result
[ "def", "_coerce_scalar_to_timedelta_type", "(", "r", ",", "unit", "=", "'ns'", ",", "box", "=", "True", ",", "errors", "=", "'raise'", ")", ":", "try", ":", "result", "=", "Timedelta", "(", "r", ",", "unit", ")", "if", "not", "box", ":", "# explicitly view as timedelta64 for case when result is pd.NaT", "result", "=", "result", ".", "asm8", ".", "view", "(", "'timedelta64[ns]'", ")", "except", "ValueError", ":", "if", "errors", "==", "'raise'", ":", "raise", "elif", "errors", "==", "'ignore'", ":", "return", "r", "# coerce", "result", "=", "NaT", "return", "result" ]
Convert string 'r' to a timedelta object.
[ "Convert", "string", "r", "to", "a", "timedelta", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/timedeltas.py#L131-L148
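A sketch of the scalar behavior this helper implements, exercised through the public `pd.to_timedelta` entry point (inputs illustrative):

import pandas as pd

print(pd.to_timedelta('2 hours'))               # Timedelta('0 days 02:00:00')
print(pd.to_timedelta('bad', errors='coerce'))  # NaT
print(pd.to_timedelta('bad', errors='ignore'))  # input returned unchanged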
19,834
pandas-dev/pandas
pandas/core/tools/timedeltas.py
_convert_listlike
def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): """Convert a list of objects to a timedelta index object.""" if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'): # This is needed only to ensure that in the case where we end up # returning arg (errors == "ignore"), and where the input is a # generator, we return a useful list-like instead of a # used-up generator arg = np.array(list(arg), dtype=object) try: value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0] except ValueError: if errors == 'ignore': return arg else: # This else-block accounts for the cases when errors='raise' # and errors='coerce'. If errors == 'raise', these errors # should be raised. If errors == 'coerce', we shouldn't # expect any errors to be raised, since all parsing errors # cause coercion to pd.NaT. However, if an error / bug is # introduced that causes an Exception to be raised, we would # like to surface it. raise if box: from pandas import TimedeltaIndex value = TimedeltaIndex(value, unit='ns', name=name) return value
python
def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): """Convert a list of objects to a timedelta index object.""" if isinstance(arg, (list, tuple)) or not hasattr(arg, 'dtype'): # This is needed only to ensure that in the case where we end up # returning arg (errors == "ignore"), and where the input is a # generator, we return a useful list-like instead of a # used-up generator arg = np.array(list(arg), dtype=object) try: value = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0] except ValueError: if errors == 'ignore': return arg else: # This else-block accounts for the cases when errors='raise' # and errors='coerce'. If errors == 'raise', these errors # should be raised. If errors == 'coerce', we shouldn't # expect any errors to be raised, since all parsing errors # cause coercion to pd.NaT. However, if an error / bug is # introduced that causes an Exception to be raised, we would # like to surface it. raise if box: from pandas import TimedeltaIndex value = TimedeltaIndex(value, unit='ns', name=name) return value
[ "def", "_convert_listlike", "(", "arg", ",", "unit", "=", "'ns'", ",", "box", "=", "True", ",", "errors", "=", "'raise'", ",", "name", "=", "None", ")", ":", "if", "isinstance", "(", "arg", ",", "(", "list", ",", "tuple", ")", ")", "or", "not", "hasattr", "(", "arg", ",", "'dtype'", ")", ":", "# This is needed only to ensure that in the case where we end up", "# returning arg (errors == \"ignore\"), and where the input is a", "# generator, we return a useful list-like instead of a", "# used-up generator", "arg", "=", "np", ".", "array", "(", "list", "(", "arg", ")", ",", "dtype", "=", "object", ")", "try", ":", "value", "=", "sequence_to_td64ns", "(", "arg", ",", "unit", "=", "unit", ",", "errors", "=", "errors", ",", "copy", "=", "False", ")", "[", "0", "]", "except", "ValueError", ":", "if", "errors", "==", "'ignore'", ":", "return", "arg", "else", ":", "# This else-block accounts for the cases when errors='raise'", "# and errors='coerce'. If errors == 'raise', these errors", "# should be raised. If errors == 'coerce', we shouldn't", "# expect any errors to be raised, since all parsing errors", "# cause coercion to pd.NaT. However, if an error / bug is", "# introduced that causes an Exception to be raised, we would", "# like to surface it.", "raise", "if", "box", ":", "from", "pandas", "import", "TimedeltaIndex", "value", "=", "TimedeltaIndex", "(", "value", ",", "unit", "=", "'ns'", ",", "name", "=", "name", ")", "return", "value" ]
Convert a list of objects to a timedelta index object.
[ "Convert", "a", "list", "of", "objects", "to", "a", "timedelta", "index", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/timedeltas.py#L151-L180
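The generator materialization noted in the comments can be seen through the public API (a minimal sketch):

import pandas as pd

# Generators are copied into an object ndarray up front, so
# errors='ignore' can return a useful list-like, not a spent generator.
gen = (s for s in ['1 days', '2 days'])
print(pd.to_timedelta(gen))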
19,835
pandas-dev/pandas
pandas/tseries/offsets.py
generate_range
def generate_range(start=None, end=None, periods=None, offset=BDay()): """ Generates a sequence of dates corresponding to the specified time offset. Similar to dateutil.rrule except uses pandas DateOffset objects to represent time increments. Parameters ---------- start : datetime (default None) end : datetime (default None) periods : int, (default None) offset : DateOffset, (default BDay()) Notes ----- * This method is faster for generating weekdays than dateutil.rrule * At least two of (start, end, periods) must be specified. * If both start and end are specified, the returned dates will satisfy start <= date <= end. Returns ------- dates : generator object """ from pandas.tseries.frequencies import to_offset offset = to_offset(offset) start = to_datetime(start) end = to_datetime(end) if start and not offset.onOffset(start): start = offset.rollforward(start) elif end and not offset.onOffset(end): end = offset.rollback(end) if periods is None and end < start and offset.n >= 0: end = None periods = 0 if end is None: end = start + (periods - 1) * offset if start is None: start = end - (periods - 1) * offset cur = start if offset.n >= 0: while cur <= end: yield cur # faster than cur + offset next_date = offset.apply(cur) if next_date <= cur: raise ValueError('Offset {offset} did not increment date' .format(offset=offset)) cur = next_date else: while cur >= end: yield cur # faster than cur + offset next_date = offset.apply(cur) if next_date >= cur: raise ValueError('Offset {offset} did not decrement date' .format(offset=offset)) cur = next_date
python
def generate_range(start=None, end=None, periods=None, offset=BDay()): """ Generates a sequence of dates corresponding to the specified time offset. Similar to dateutil.rrule except uses pandas DateOffset objects to represent time increments. Parameters ---------- start : datetime (default None) end : datetime (default None) periods : int, (default None) offset : DateOffset, (default BDay()) Notes ----- * This method is faster for generating weekdays than dateutil.rrule * At least two of (start, end, periods) must be specified. * If both start and end are specified, the returned dates will satisfy start <= date <= end. Returns ------- dates : generator object """ from pandas.tseries.frequencies import to_offset offset = to_offset(offset) start = to_datetime(start) end = to_datetime(end) if start and not offset.onOffset(start): start = offset.rollforward(start) elif end and not offset.onOffset(end): end = offset.rollback(end) if periods is None and end < start and offset.n >= 0: end = None periods = 0 if end is None: end = start + (periods - 1) * offset if start is None: start = end - (periods - 1) * offset cur = start if offset.n >= 0: while cur <= end: yield cur # faster than cur + offset next_date = offset.apply(cur) if next_date <= cur: raise ValueError('Offset {offset} did not increment date' .format(offset=offset)) cur = next_date else: while cur >= end: yield cur # faster than cur + offset next_date = offset.apply(cur) if next_date >= cur: raise ValueError('Offset {offset} did not decrement date' .format(offset=offset)) cur = next_date
[ "def", "generate_range", "(", "start", "=", "None", ",", "end", "=", "None", ",", "periods", "=", "None", ",", "offset", "=", "BDay", "(", ")", ")", ":", "from", "pandas", ".", "tseries", ".", "frequencies", "import", "to_offset", "offset", "=", "to_offset", "(", "offset", ")", "start", "=", "to_datetime", "(", "start", ")", "end", "=", "to_datetime", "(", "end", ")", "if", "start", "and", "not", "offset", ".", "onOffset", "(", "start", ")", ":", "start", "=", "offset", ".", "rollforward", "(", "start", ")", "elif", "end", "and", "not", "offset", ".", "onOffset", "(", "end", ")", ":", "end", "=", "offset", ".", "rollback", "(", "end", ")", "if", "periods", "is", "None", "and", "end", "<", "start", "and", "offset", ".", "n", ">=", "0", ":", "end", "=", "None", "periods", "=", "0", "if", "end", "is", "None", ":", "end", "=", "start", "+", "(", "periods", "-", "1", ")", "*", "offset", "if", "start", "is", "None", ":", "start", "=", "end", "-", "(", "periods", "-", "1", ")", "*", "offset", "cur", "=", "start", "if", "offset", ".", "n", ">=", "0", ":", "while", "cur", "<=", "end", ":", "yield", "cur", "# faster than cur + offset", "next_date", "=", "offset", ".", "apply", "(", "cur", ")", "if", "next_date", "<=", "cur", ":", "raise", "ValueError", "(", "'Offset {offset} did not increment date'", ".", "format", "(", "offset", "=", "offset", ")", ")", "cur", "=", "next_date", "else", ":", "while", "cur", ">=", "end", ":", "yield", "cur", "# faster than cur + offset", "next_date", "=", "offset", ".", "apply", "(", "cur", ")", "if", "next_date", ">=", "cur", ":", "raise", "ValueError", "(", "'Offset {offset} did not decrement date'", ".", "format", "(", "offset", "=", "offset", ")", ")", "cur", "=", "next_date" ]
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments.

Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, (default None)
offset : DateOffset, (default BDay())

Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
  satisfy start <= date <= end.

Returns
-------
dates : generator object
[ "Generates", "a", "sequence", "of", "dates", "corresponding", "to", "the", "specified", "time", "offset", ".", "Similar", "to", "dateutil", ".", "rrule", "except", "uses", "pandas", "DateOffset", "objects", "to", "represent", "time", "increments", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L2412-L2478
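A minimal usage sketch of `generate_range` with the default `BDay` offset (dates illustrative):

from pandas.tseries.offsets import BDay, generate_range

# Friday 2019-01-04 through Wednesday 2019-01-09 yields the business
# days Jan 4, 7, 8 and 9 (the weekend is skipped).
print(list(generate_range(start='2019-01-04', end='2019-01-09',
                          offset=BDay())))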
19,836
pandas-dev/pandas
pandas/tseries/offsets.py
DateOffset.apply_index
def apply_index(self, i): """ Vectorized apply of DateOffset to DatetimeIndex, raises NotImplentedError for offsets without a vectorized implementation. Parameters ---------- i : DatetimeIndex Returns ------- y : DatetimeIndex """ if type(self) is not DateOffset: raise NotImplementedError("DateOffset subclass {name} " "does not have a vectorized " "implementation".format( name=self.__class__.__name__)) kwds = self.kwds relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'microseconds'} # relativedelta/_offset path only valid for base DateOffset if (self._use_relativedelta and set(kwds).issubset(relativedelta_fast)): months = ((kwds.get('years', 0) * 12 + kwds.get('months', 0)) * self.n) if months: shifted = liboffsets.shift_months(i.asi8, months) i = type(i)(shifted, freq=i.freq, dtype=i.dtype) weeks = (kwds.get('weeks', 0)) * self.n if weeks: # integer addition on PeriodIndex is deprecated, # so we directly use _time_shift instead asper = i.to_period('W') if not isinstance(asper._data, np.ndarray): # unwrap PeriodIndex --> PeriodArray asper = asper._data shifted = asper._time_shift(weeks) i = shifted.to_timestamp() + i.to_perioddelta('W') timedelta_kwds = {k: v for k, v in kwds.items() if k in ['days', 'hours', 'minutes', 'seconds', 'microseconds']} if timedelta_kwds: delta = Timedelta(**timedelta_kwds) i = i + (self.n * delta) return i elif not self._use_relativedelta and hasattr(self, '_offset'): # timedelta return i + (self._offset * self.n) else: # relativedelta with other keywords kwd = set(kwds) - relativedelta_fast raise NotImplementedError("DateOffset with relativedelta " "keyword(s) {kwd} not able to be " "applied vectorized".format(kwd=kwd))
python
def apply_index(self, i): """ Vectorized apply of DateOffset to DatetimeIndex, raises NotImplentedError for offsets without a vectorized implementation. Parameters ---------- i : DatetimeIndex Returns ------- y : DatetimeIndex """ if type(self) is not DateOffset: raise NotImplementedError("DateOffset subclass {name} " "does not have a vectorized " "implementation".format( name=self.__class__.__name__)) kwds = self.kwds relativedelta_fast = {'years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'microseconds'} # relativedelta/_offset path only valid for base DateOffset if (self._use_relativedelta and set(kwds).issubset(relativedelta_fast)): months = ((kwds.get('years', 0) * 12 + kwds.get('months', 0)) * self.n) if months: shifted = liboffsets.shift_months(i.asi8, months) i = type(i)(shifted, freq=i.freq, dtype=i.dtype) weeks = (kwds.get('weeks', 0)) * self.n if weeks: # integer addition on PeriodIndex is deprecated, # so we directly use _time_shift instead asper = i.to_period('W') if not isinstance(asper._data, np.ndarray): # unwrap PeriodIndex --> PeriodArray asper = asper._data shifted = asper._time_shift(weeks) i = shifted.to_timestamp() + i.to_perioddelta('W') timedelta_kwds = {k: v for k, v in kwds.items() if k in ['days', 'hours', 'minutes', 'seconds', 'microseconds']} if timedelta_kwds: delta = Timedelta(**timedelta_kwds) i = i + (self.n * delta) return i elif not self._use_relativedelta and hasattr(self, '_offset'): # timedelta return i + (self._offset * self.n) else: # relativedelta with other keywords kwd = set(kwds) - relativedelta_fast raise NotImplementedError("DateOffset with relativedelta " "keyword(s) {kwd} not able to be " "applied vectorized".format(kwd=kwd))
[ "def", "apply_index", "(", "self", ",", "i", ")", ":", "if", "type", "(", "self", ")", "is", "not", "DateOffset", ":", "raise", "NotImplementedError", "(", "\"DateOffset subclass {name} \"", "\"does not have a vectorized \"", "\"implementation\"", ".", "format", "(", "name", "=", "self", ".", "__class__", ".", "__name__", ")", ")", "kwds", "=", "self", ".", "kwds", "relativedelta_fast", "=", "{", "'years'", ",", "'months'", ",", "'weeks'", ",", "'days'", ",", "'hours'", ",", "'minutes'", ",", "'seconds'", ",", "'microseconds'", "}", "# relativedelta/_offset path only valid for base DateOffset", "if", "(", "self", ".", "_use_relativedelta", "and", "set", "(", "kwds", ")", ".", "issubset", "(", "relativedelta_fast", ")", ")", ":", "months", "=", "(", "(", "kwds", ".", "get", "(", "'years'", ",", "0", ")", "*", "12", "+", "kwds", ".", "get", "(", "'months'", ",", "0", ")", ")", "*", "self", ".", "n", ")", "if", "months", ":", "shifted", "=", "liboffsets", ".", "shift_months", "(", "i", ".", "asi8", ",", "months", ")", "i", "=", "type", "(", "i", ")", "(", "shifted", ",", "freq", "=", "i", ".", "freq", ",", "dtype", "=", "i", ".", "dtype", ")", "weeks", "=", "(", "kwds", ".", "get", "(", "'weeks'", ",", "0", ")", ")", "*", "self", ".", "n", "if", "weeks", ":", "# integer addition on PeriodIndex is deprecated,", "# so we directly use _time_shift instead", "asper", "=", "i", ".", "to_period", "(", "'W'", ")", "if", "not", "isinstance", "(", "asper", ".", "_data", ",", "np", ".", "ndarray", ")", ":", "# unwrap PeriodIndex --> PeriodArray", "asper", "=", "asper", ".", "_data", "shifted", "=", "asper", ".", "_time_shift", "(", "weeks", ")", "i", "=", "shifted", ".", "to_timestamp", "(", ")", "+", "i", ".", "to_perioddelta", "(", "'W'", ")", "timedelta_kwds", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwds", ".", "items", "(", ")", "if", "k", "in", "[", "'days'", ",", "'hours'", ",", "'minutes'", ",", "'seconds'", ",", "'microseconds'", "]", "}", "if", "timedelta_kwds", ":", "delta", "=", "Timedelta", "(", "*", "*", "timedelta_kwds", ")", "i", "=", "i", "+", "(", "self", ".", "n", "*", "delta", ")", "return", "i", "elif", "not", "self", ".", "_use_relativedelta", "and", "hasattr", "(", "self", ",", "'_offset'", ")", ":", "# timedelta", "return", "i", "+", "(", "self", ".", "_offset", "*", "self", ".", "n", ")", "else", ":", "# relativedelta with other keywords", "kwd", "=", "set", "(", "kwds", ")", "-", "relativedelta_fast", "raise", "NotImplementedError", "(", "\"DateOffset with relativedelta \"", "\"keyword(s) {kwd} not able to be \"", "\"applied vectorized\"", ".", "format", "(", "kwd", "=", "kwd", ")", ")" ]
Vectorized apply of DateOffset to DatetimeIndex,
raises NotImplementedError for offsets without a
vectorized implementation.

Parameters
----------
i : DatetimeIndex

Returns
-------
y : DatetimeIndex
[ "Vectorized", "apply", "of", "DateOffset", "to", "DatetimeIndex", "raises", "NotImplentedError", "for", "offsets", "without", "a", "vectorized", "implementation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L245-L304
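A short sketch of the vectorized path, triggered by adding a base `DateOffset` to a `DatetimeIndex` (dates illustrative):

import pandas as pd

idx = pd.date_range('2019-01-31', periods=3, freq='D')
# Months are applied via liboffsets.shift_months, which clamps
# 2019-01-31 to the end of February.
print(idx + pd.DateOffset(months=1))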
19,837
pandas-dev/pandas
pandas/tseries/offsets.py
BusinessHourMixin.next_bday
def next_bday(self): """ Used for moving to next business day. """ if self.n >= 0: nb_offset = 1 else: nb_offset = -1 if self._prefix.startswith('C'): # CustomBusinessHour return CustomBusinessDay(n=nb_offset, weekmask=self.weekmask, holidays=self.holidays, calendar=self.calendar) else: return BusinessDay(n=nb_offset)
python
def next_bday(self): """ Used for moving to next business day. """ if self.n >= 0: nb_offset = 1 else: nb_offset = -1 if self._prefix.startswith('C'): # CustomBusinessHour return CustomBusinessDay(n=nb_offset, weekmask=self.weekmask, holidays=self.holidays, calendar=self.calendar) else: return BusinessDay(n=nb_offset)
[ "def", "next_bday", "(", "self", ")", ":", "if", "self", ".", "n", ">=", "0", ":", "nb_offset", "=", "1", "else", ":", "nb_offset", "=", "-", "1", "if", "self", ".", "_prefix", ".", "startswith", "(", "'C'", ")", ":", "# CustomBusinessHour", "return", "CustomBusinessDay", "(", "n", "=", "nb_offset", ",", "weekmask", "=", "self", ".", "weekmask", ",", "holidays", "=", "self", ".", "holidays", ",", "calendar", "=", "self", ".", "calendar", ")", "else", ":", "return", "BusinessDay", "(", "n", "=", "nb_offset", ")" ]
Used for moving to next business day.
[ "Used", "for", "moving", "to", "next", "business", "day", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L579-L594
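A sketch of the 'C'-prefixed branch, where the day-step returned by this property carries the holiday calendar (dates and holiday illustrative):

import pandas as pd
from pandas.tseries.offsets import CustomBusinessHour

# Monday 2019-01-07 is listed as a holiday, so rolling past Friday's
# close lands on Tuesday's opening time instead.
cbh = CustomBusinessHour(1, holidays=['2019-01-07'])
print(pd.Timestamp('2019-01-04 16:30') + cbh)  # 2019-01-08 09:30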
19,838
pandas-dev/pandas
pandas/tseries/offsets.py
BusinessHourMixin._next_opening_time
def _next_opening_time(self, other): """ If n is positive, return tomorrow's business day opening time. Otherwise yesterday's business day's opening time. Opening time always locates on BusinessDay. Otherwise, closing time may not if business hour extends over midnight. """ if not self.next_bday.onOffset(other): other = other + self.next_bday else: if self.n >= 0 and self.start < other.time(): other = other + self.next_bday elif self.n < 0 and other.time() < self.start: other = other + self.next_bday return datetime(other.year, other.month, other.day, self.start.hour, self.start.minute)
python
def _next_opening_time(self, other): """ If n is positive, return tomorrow's business day opening time. Otherwise yesterday's business day's opening time. Opening time always locates on BusinessDay. Otherwise, closing time may not if business hour extends over midnight. """ if not self.next_bday.onOffset(other): other = other + self.next_bday else: if self.n >= 0 and self.start < other.time(): other = other + self.next_bday elif self.n < 0 and other.time() < self.start: other = other + self.next_bday return datetime(other.year, other.month, other.day, self.start.hour, self.start.minute)
[ "def", "_next_opening_time", "(", "self", ",", "other", ")", ":", "if", "not", "self", ".", "next_bday", ".", "onOffset", "(", "other", ")", ":", "other", "=", "other", "+", "self", ".", "next_bday", "else", ":", "if", "self", ".", "n", ">=", "0", "and", "self", ".", "start", "<", "other", ".", "time", "(", ")", ":", "other", "=", "other", "+", "self", ".", "next_bday", "elif", "self", ".", "n", "<", "0", "and", "other", ".", "time", "(", ")", "<", "self", ".", "start", ":", "other", "=", "other", "+", "self", ".", "next_bday", "return", "datetime", "(", "other", ".", "year", ",", "other", ".", "month", ",", "other", ".", "day", ",", "self", ".", "start", ".", "hour", ",", "self", ".", "start", ".", "minute", ")" ]
If n is positive, return tomorrow's business day opening time.
Otherwise, return yesterday's business day opening time.

Opening time always falls on a BusinessDay; closing time may not,
if the business hours extend over midnight.
[ "If", "n", "is", "positive", "return", "tomorrow", "s", "business", "day", "opening", "time", ".", "Otherwise", "yesterday", "s", "business", "day", "s", "opening", "time", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L605-L621
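The rollover to the next opening time can be seen by stepping past closing time (assuming the default start='09:00'; dates illustrative):

import pandas as pd
from pandas.tseries.offsets import BusinessHour

# Friday 16:30 + 1 business hour: 30 minutes remain today, the rest
# is counted from Monday's opening time.
print(pd.Timestamp('2019-01-04 16:30') + BusinessHour(1))  # Mon 09:30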
19,839
pandas-dev/pandas
pandas/tseries/offsets.py
BusinessHourMixin._get_business_hours_by_sec
def _get_business_hours_by_sec(self): """ Return business hours in a day by seconds. """ if self._get_daytime_flag: # create dummy datetime to calculate businesshours in a day dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute) until = datetime(2014, 4, 1, self.end.hour, self.end.minute) return (until - dtstart).total_seconds() else: dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute) until = datetime(2014, 4, 2, self.end.hour, self.end.minute) return (until - dtstart).total_seconds()
python
def _get_business_hours_by_sec(self): """ Return business hours in a day by seconds. """ if self._get_daytime_flag: # create dummy datetime to calculate businesshours in a day dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute) until = datetime(2014, 4, 1, self.end.hour, self.end.minute) return (until - dtstart).total_seconds() else: dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute) until = datetime(2014, 4, 2, self.end.hour, self.end.minute) return (until - dtstart).total_seconds()
[ "def", "_get_business_hours_by_sec", "(", "self", ")", ":", "if", "self", ".", "_get_daytime_flag", ":", "# create dummy datetime to calculate businesshours in a day", "dtstart", "=", "datetime", "(", "2014", ",", "4", ",", "1", ",", "self", ".", "start", ".", "hour", ",", "self", ".", "start", ".", "minute", ")", "until", "=", "datetime", "(", "2014", ",", "4", ",", "1", ",", "self", ".", "end", ".", "hour", ",", "self", ".", "end", ".", "minute", ")", "return", "(", "until", "-", "dtstart", ")", ".", "total_seconds", "(", ")", "else", ":", "dtstart", "=", "datetime", "(", "2014", ",", "4", ",", "1", ",", "self", ".", "start", ".", "hour", ",", "self", ".", "start", ".", "minute", ")", "until", "=", "datetime", "(", "2014", ",", "4", ",", "2", ",", "self", ".", "end", ".", "hour", ",", "self", ".", "end", ".", "minute", ")", "return", "(", "until", "-", "dtstart", ")", ".", "total_seconds", "(", ")" ]
Return the number of business-hour seconds in a day.
[ "Return", "business", "hours", "in", "a", "day", "by", "seconds", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L639-L651
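A worked check of the day-time branch's arithmetic, using the same dummy date as the implementation:

from datetime import datetime

# Default 09:00-17:00 window: 8 hours * 3600 = 28800.0 seconds.
dtstart = datetime(2014, 4, 1, 9, 0)
until = datetime(2014, 4, 1, 17, 0)
print((until - dtstart).total_seconds())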
19,840
pandas-dev/pandas
pandas/tseries/offsets.py
BusinessHourMixin._onOffset
def _onOffset(self, dt, businesshours): """ Slight speedups using calculated values. """ # if self.normalize and not _is_normalized(dt): # return False # Valid BH can be on the different BusinessDay during midnight # Distinguish by the time spent from previous opening time if self.n >= 0: op = self._prev_opening_time(dt) else: op = self._next_opening_time(dt) span = (dt - op).total_seconds() if span <= businesshours: return True else: return False
python
def _onOffset(self, dt, businesshours): """ Slight speedups using calculated values. """ # if self.normalize and not _is_normalized(dt): # return False # Valid BH can be on the different BusinessDay during midnight # Distinguish by the time spent from previous opening time if self.n >= 0: op = self._prev_opening_time(dt) else: op = self._next_opening_time(dt) span = (dt - op).total_seconds() if span <= businesshours: return True else: return False
[ "def", "_onOffset", "(", "self", ",", "dt", ",", "businesshours", ")", ":", "# if self.normalize and not _is_normalized(dt):", "# return False", "# Valid BH can be on the different BusinessDay during midnight", "# Distinguish by the time spent from previous opening time", "if", "self", ".", "n", ">=", "0", ":", "op", "=", "self", ".", "_prev_opening_time", "(", "dt", ")", "else", ":", "op", "=", "self", ".", "_next_opening_time", "(", "dt", ")", "span", "=", "(", "dt", "-", "op", ")", ".", "total_seconds", "(", ")", "if", "span", "<=", "businesshours", ":", "return", "True", "else", ":", "return", "False" ]
Faster onOffset check that reuses the precomputed ``businesshours`` value.
[ "Slight", "speedups", "using", "calculated", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L767-L783
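A sketch of the behavior through the public `onOffset` (assuming the default 09:00-17:00 window; timestamps illustrative):

import pandas as pd
from pandas.tseries.offsets import BusinessHour

bh = BusinessHour()
print(bh.onOffset(pd.Timestamp('2019-01-04 10:00')))  # True
print(bh.onOffset(pd.Timestamp('2019-01-04 08:00')))  # False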
19,841
pandas-dev/pandas
pandas/tseries/offsets.py
SemiMonthBegin._apply_index_days
def _apply_index_days(self, i, roll): """ Add days portion of offset to DatetimeIndex i. Parameters ---------- i : DatetimeIndex roll : ndarray[int64_t] Returns ------- result : DatetimeIndex """ nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value return i + nanos.astype('timedelta64[ns]')
python
def _apply_index_days(self, i, roll): """ Add days portion of offset to DatetimeIndex i. Parameters ---------- i : DatetimeIndex roll : ndarray[int64_t] Returns ------- result : DatetimeIndex """ nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value return i + nanos.astype('timedelta64[ns]')
[ "def", "_apply_index_days", "(", "self", ",", "i", ",", "roll", ")", ":", "nanos", "=", "(", "roll", "%", "2", ")", "*", "Timedelta", "(", "days", "=", "self", ".", "day_of_month", "-", "1", ")", ".", "value", "return", "i", "+", "nanos", ".", "astype", "(", "'timedelta64[ns]'", ")" ]
Add days portion of offset to DatetimeIndex i.

Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]

Returns
-------
result : DatetimeIndex
[ "Add", "days", "portion", "of", "offset", "to", "DatetimeIndex", "i", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1279-L1293
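A minimal sketch of the vectorized path this helper serves, via `SemiMonthBegin` (dates illustrative):

import pandas as pd
from pandas.tseries.offsets import SemiMonthBegin

idx = pd.date_range('2019-01-01', periods=3, freq='D')
# Each date rolls forward onto the next anchor: the 1st or the
# default day_of_month, the 15th.
print(idx + SemiMonthBegin())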
19,842
pandas-dev/pandas
pandas/tseries/offsets.py
Week._end_apply_index
def _end_apply_index(self, dtindex): """ Add self to the given DatetimeIndex, specialized for case where self.weekday is non-null. Parameters ---------- dtindex : DatetimeIndex Returns ------- result : DatetimeIndex """ off = dtindex.to_perioddelta('D') base, mult = libfrequencies.get_freq_code(self.freqstr) base_period = dtindex.to_period(base) if not isinstance(base_period._data, np.ndarray): # unwrap PeriodIndex --> PeriodArray base_period = base_period._data if self.n > 0: # when adding, dates on end roll to next normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns') roll = np.where(base_period.to_timestamp(how='end') == normed, self.n, self.n - 1) # integer-array addition on PeriodIndex is deprecated, # so we use _addsub_int_array directly shifted = base_period._addsub_int_array(roll, operator.add) base = shifted.to_timestamp(how='end') else: # integer addition on PeriodIndex is deprecated, # so we use _time_shift directly roll = self.n base = base_period._time_shift(roll).to_timestamp(how='end') return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D')
python
def _end_apply_index(self, dtindex): """ Add self to the given DatetimeIndex, specialized for case where self.weekday is non-null. Parameters ---------- dtindex : DatetimeIndex Returns ------- result : DatetimeIndex """ off = dtindex.to_perioddelta('D') base, mult = libfrequencies.get_freq_code(self.freqstr) base_period = dtindex.to_period(base) if not isinstance(base_period._data, np.ndarray): # unwrap PeriodIndex --> PeriodArray base_period = base_period._data if self.n > 0: # when adding, dates on end roll to next normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns') roll = np.where(base_period.to_timestamp(how='end') == normed, self.n, self.n - 1) # integer-array addition on PeriodIndex is deprecated, # so we use _addsub_int_array directly shifted = base_period._addsub_int_array(roll, operator.add) base = shifted.to_timestamp(how='end') else: # integer addition on PeriodIndex is deprecated, # so we use _time_shift directly roll = self.n base = base_period._time_shift(roll).to_timestamp(how='end') return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D')
[ "def", "_end_apply_index", "(", "self", ",", "dtindex", ")", ":", "off", "=", "dtindex", ".", "to_perioddelta", "(", "'D'", ")", "base", ",", "mult", "=", "libfrequencies", ".", "get_freq_code", "(", "self", ".", "freqstr", ")", "base_period", "=", "dtindex", ".", "to_period", "(", "base", ")", "if", "not", "isinstance", "(", "base_period", ".", "_data", ",", "np", ".", "ndarray", ")", ":", "# unwrap PeriodIndex --> PeriodArray", "base_period", "=", "base_period", ".", "_data", "if", "self", ".", "n", ">", "0", ":", "# when adding, dates on end roll to next", "normed", "=", "dtindex", "-", "off", "+", "Timedelta", "(", "1", ",", "'D'", ")", "-", "Timedelta", "(", "1", ",", "'ns'", ")", "roll", "=", "np", ".", "where", "(", "base_period", ".", "to_timestamp", "(", "how", "=", "'end'", ")", "==", "normed", ",", "self", ".", "n", ",", "self", ".", "n", "-", "1", ")", "# integer-array addition on PeriodIndex is deprecated,", "# so we use _addsub_int_array directly", "shifted", "=", "base_period", ".", "_addsub_int_array", "(", "roll", ",", "operator", ".", "add", ")", "base", "=", "shifted", ".", "to_timestamp", "(", "how", "=", "'end'", ")", "else", ":", "# integer addition on PeriodIndex is deprecated,", "# so we use _time_shift directly", "roll", "=", "self", ".", "n", "base", "=", "base_period", ".", "_time_shift", "(", "roll", ")", ".", "to_timestamp", "(", "how", "=", "'end'", ")", "return", "base", "+", "off", "+", "Timedelta", "(", "1", ",", "'ns'", ")", "-", "Timedelta", "(", "1", ",", "'D'", ")" ]
Add self to the given DatetimeIndex, specialized for case where
self.weekday is non-null.

Parameters
----------
dtindex : DatetimeIndex

Returns
-------
result : DatetimeIndex
[ "Add", "self", "to", "the", "given", "DatetimeIndex", "specialized", "for", "case", "where", "self", ".", "weekday", "is", "non", "-", "null", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1354-L1390
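A minimal sketch triggering this path: adding a `Week` with a non-null weekday to a `DatetimeIndex` (dates illustrative):

import pandas as pd
from pandas.tseries.offsets import Week

idx = pd.date_range('2019-01-01', periods=3, freq='D')
# Each date rolls forward to the following Sunday (weekday=6).
print(idx + Week(weekday=6))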
19,843
pandas-dev/pandas
pandas/tseries/offsets.py
WeekOfMonth._get_offset_day
def _get_offset_day(self, other): """ Find the day in the same month as other that has the same weekday as self.weekday and is the self.week'th such day in the month. Parameters ---------- other : datetime Returns ------- day : int """ mstart = datetime(other.year, other.month, 1) wday = mstart.weekday() shift_days = (self.weekday - wday) % 7 return 1 + shift_days + self.week * 7
python
def _get_offset_day(self, other): """ Find the day in the same month as other that has the same weekday as self.weekday and is the self.week'th such day in the month. Parameters ---------- other : datetime Returns ------- day : int """ mstart = datetime(other.year, other.month, 1) wday = mstart.weekday() shift_days = (self.weekday - wday) % 7 return 1 + shift_days + self.week * 7
[ "def", "_get_offset_day", "(", "self", ",", "other", ")", ":", "mstart", "=", "datetime", "(", "other", ".", "year", ",", "other", ".", "month", ",", "1", ")", "wday", "=", "mstart", ".", "weekday", "(", ")", "shift_days", "=", "(", "self", ".", "weekday", "-", "wday", ")", "%", "7", "return", "1", "+", "shift_days", "+", "self", ".", "week", "*", "7" ]
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.

Parameters
----------
other : datetime

Returns
-------
day : int
[ "Find", "the", "day", "in", "the", "same", "month", "as", "other", "that", "has", "the", "same", "weekday", "as", "self", ".", "weekday", "and", "is", "the", "self", ".", "week", "th", "such", "day", "in", "the", "month", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1474-L1490
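A worked instance of the day formula for week=0, weekday=0, i.e. the first Monday (month chosen for illustration):

from datetime import datetime

# March 2019 starts on a Friday (weekday 4).
mstart = datetime(2019, 3, 1)
shift_days = (0 - mstart.weekday()) % 7  # (0 - 4) % 7 == 3
print(1 + shift_days + 0 * 7)            # 4 -> Monday 2019-03-04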
19,844
pandas-dev/pandas
pandas/tseries/offsets.py
LastWeekOfMonth._get_offset_day
def _get_offset_day(self, other): """ Find the day in the same month as other that has the same weekday as self.weekday and is the last such day in the month. Parameters ---------- other: datetime Returns ------- day: int """ dim = ccalendar.get_days_in_month(other.year, other.month) mend = datetime(other.year, other.month, dim) wday = mend.weekday() shift_days = (wday - self.weekday) % 7 return dim - shift_days
python
def _get_offset_day(self, other): """ Find the day in the same month as other that has the same weekday as self.weekday and is the last such day in the month. Parameters ---------- other: datetime Returns ------- day: int """ dim = ccalendar.get_days_in_month(other.year, other.month) mend = datetime(other.year, other.month, dim) wday = mend.weekday() shift_days = (wday - self.weekday) % 7 return dim - shift_days
[ "def", "_get_offset_day", "(", "self", ",", "other", ")", ":", "dim", "=", "ccalendar", ".", "get_days_in_month", "(", "other", ".", "year", ",", "other", ".", "month", ")", "mend", "=", "datetime", "(", "other", ".", "year", ",", "other", ".", "month", ",", "dim", ")", "wday", "=", "mend", ".", "weekday", "(", ")", "shift_days", "=", "(", "wday", "-", "self", ".", "weekday", ")", "%", "7", "return", "dim", "-", "shift_days" ]
Find the day in the same month as other that has the same
weekday as self.weekday and is the last such day in the month.

Parameters
----------
other: datetime

Returns
-------
day: int
[ "Find", "the", "day", "in", "the", "same", "month", "as", "other", "that", "has", "the", "same", "weekday", "as", "self", ".", "weekday", "and", "is", "the", "last", "such", "day", "in", "the", "month", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1543-L1560
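A worked instance of the formula for the last Friday (weekday=4) of a month; `calendar.monthrange` stands in here for `ccalendar.get_days_in_month`:

import calendar
from datetime import datetime

dim = calendar.monthrange(2019, 3)[1]  # 31 days in March 2019
mend = datetime(2019, 3, dim)          # Sunday, weekday() == 6
shift_days = (mend.weekday() - 4) % 7  # (6 - 4) % 7 == 2
print(dim - shift_days)                # 29 -> Friday 2019-03-29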
19,845
pandas-dev/pandas
pandas/tseries/offsets.py
FY5253Quarter._rollback_to_year
def _rollback_to_year(self, other): """ Roll `other` back to the most recent date that was on a fiscal year end. Return the date of that year-end, the number of full quarters elapsed between that year-end and other, and the remaining Timedelta since the most recent quarter-end. Parameters ---------- other : datetime or Timestamp Returns ------- tuple of prev_year_end : Timestamp giving most recent fiscal year end num_qtrs : int tdelta : Timedelta """ num_qtrs = 0 norm = Timestamp(other).tz_localize(None) start = self._offset.rollback(norm) # Note: start <= norm and self._offset.onOffset(start) if start < norm: # roll adjustment qtr_lens = self.get_weeks(norm) # check thet qtr_lens is consistent with self._offset addition end = liboffsets.shift_day(start, days=7 * sum(qtr_lens)) assert self._offset.onOffset(end), (start, end, qtr_lens) tdelta = norm - start for qlen in qtr_lens: if qlen * 7 <= tdelta.days: num_qtrs += 1 tdelta -= Timedelta(days=qlen * 7) else: break else: tdelta = Timedelta(0) # Note: we always have tdelta.value >= 0 return start, num_qtrs, tdelta
python
def _rollback_to_year(self, other): """ Roll `other` back to the most recent date that was on a fiscal year end. Return the date of that year-end, the number of full quarters elapsed between that year-end and other, and the remaining Timedelta since the most recent quarter-end. Parameters ---------- other : datetime or Timestamp Returns ------- tuple of prev_year_end : Timestamp giving most recent fiscal year end num_qtrs : int tdelta : Timedelta """ num_qtrs = 0 norm = Timestamp(other).tz_localize(None) start = self._offset.rollback(norm) # Note: start <= norm and self._offset.onOffset(start) if start < norm: # roll adjustment qtr_lens = self.get_weeks(norm) # check thet qtr_lens is consistent with self._offset addition end = liboffsets.shift_day(start, days=7 * sum(qtr_lens)) assert self._offset.onOffset(end), (start, end, qtr_lens) tdelta = norm - start for qlen in qtr_lens: if qlen * 7 <= tdelta.days: num_qtrs += 1 tdelta -= Timedelta(days=qlen * 7) else: break else: tdelta = Timedelta(0) # Note: we always have tdelta.value >= 0 return start, num_qtrs, tdelta
[ "def", "_rollback_to_year", "(", "self", ",", "other", ")", ":", "num_qtrs", "=", "0", "norm", "=", "Timestamp", "(", "other", ")", ".", "tz_localize", "(", "None", ")", "start", "=", "self", ".", "_offset", ".", "rollback", "(", "norm", ")", "# Note: start <= norm and self._offset.onOffset(start)", "if", "start", "<", "norm", ":", "# roll adjustment", "qtr_lens", "=", "self", ".", "get_weeks", "(", "norm", ")", "# check thet qtr_lens is consistent with self._offset addition", "end", "=", "liboffsets", ".", "shift_day", "(", "start", ",", "days", "=", "7", "*", "sum", "(", "qtr_lens", ")", ")", "assert", "self", ".", "_offset", ".", "onOffset", "(", "end", ")", ",", "(", "start", ",", "end", ",", "qtr_lens", ")", "tdelta", "=", "norm", "-", "start", "for", "qlen", "in", "qtr_lens", ":", "if", "qlen", "*", "7", "<=", "tdelta", ".", "days", ":", "num_qtrs", "+=", "1", "tdelta", "-=", "Timedelta", "(", "days", "=", "qlen", "*", "7", ")", "else", ":", "break", "else", ":", "tdelta", "=", "Timedelta", "(", "0", ")", "# Note: we always have tdelta.value >= 0", "return", "start", ",", "num_qtrs", ",", "tdelta" ]
Roll `other` back to the most recent date that was on a fiscal year
end.

Return the date of that year-end, the number of full quarters
elapsed between that year-end and other, and the remaining Timedelta
since the most recent quarter-end.

Parameters
----------
other : datetime or Timestamp

Returns
-------
tuple of
prev_year_end : Timestamp giving most recent fiscal year end
num_qtrs : int
tdelta : Timedelta
[ "Roll", "other", "back", "to", "the", "most", "recent", "date", "that", "was", "on", "a", "fiscal", "year", "end", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L2054-L2099
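A minimal sketch of the rollback this helper supports, through the public `rollback` (offset parameters illustrative):

import pandas as pd
from pandas.tseries.offsets import FY5253Quarter

# Snap a mid-quarter date back onto the 52-53 week quarter grid.
off = FY5253Quarter(startingMonth=1, weekday=3, qtr_with_extra_week=1,
                    variation='last')
print(off.rollback(pd.Timestamp('2019-06-15')))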
19,846
pandas-dev/pandas
pandas/core/reshape/concat.py
concat
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=None, copy=True): """ Concatenate pandas objects along a particular axis with optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ---------- objs : a sequence or mapping of Series, DataFrame, or Panel objects If a dict is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). join_axes : list of Index objects Specific indexes to use for the other n - 1 axes instead of performing inner/outer set logic. ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default None Sort non-concatenation axis if it is not already aligned when `join` is 'outer'. The current default of sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. This has no effect when ``join='inner'``, which already preserves the order of the non-concatenation axis. .. versionadded:: 0.23.0 copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- Series.append : Concatenate Series. DataFrame.append : Concatenate DataFrames. DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. 
>>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] """ op = _Concatenator(objs, axis=axis, join_axes=join_axes, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, copy=copy, sort=sort) return op.get_result()
python
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=None, copy=True): """ Concatenate pandas objects along a particular axis with optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ---------- objs : a sequence or mapping of Series, DataFrame, or Panel objects If a dict is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). join_axes : list of Index objects Specific indexes to use for the other n - 1 axes instead of performing inner/outer set logic. ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default None Sort non-concatenation axis if it is not already aligned when `join` is 'outer'. The current default of sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. This has no effect when ``join='inner'``, which already preserves the order of the non-concatenation axis. .. versionadded:: 0.23.0 copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- Series.append : Concatenate Series. DataFrame.append : Concatenate DataFrames. DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. 
>>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. >>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a'] """ op = _Concatenator(objs, axis=axis, join_axes=join_axes, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, copy=copy, sort=sort) return op.get_result()
[ "def", "concat", "(", "objs", ",", "axis", "=", "0", ",", "join", "=", "'outer'", ",", "join_axes", "=", "None", ",", "ignore_index", "=", "False", ",", "keys", "=", "None", ",", "levels", "=", "None", ",", "names", "=", "None", ",", "verify_integrity", "=", "False", ",", "sort", "=", "None", ",", "copy", "=", "True", ")", ":", "op", "=", "_Concatenator", "(", "objs", ",", "axis", "=", "axis", ",", "join_axes", "=", "join_axes", ",", "ignore_index", "=", "ignore_index", ",", "join", "=", "join", ",", "keys", "=", "keys", ",", "levels", "=", "levels", ",", "names", "=", "names", ",", "verify_integrity", "=", "verify_integrity", ",", "copy", "=", "copy", ",", "sort", "=", "sort", ")", "return", "op", ".", "get_result", "(", ")" ]
Concatenate pandas objects along a particular axis with optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ---------- objs : a sequence or mapping of Series, DataFrame, or Panel objects If a dict is passed, the sorted keys will be used as the `keys` argument, unless it is passed, in which case the values will be selected (see below). Any None objects will be dropped silently unless they are all None in which case a ValueError will be raised. axis : {0/'index', 1/'columns'}, default 0 The axis to concatenate along. join : {'inner', 'outer'}, default 'outer' How to handle indexes on other axis (or axes). join_axes : list of Index objects Specific indexes to use for the other n - 1 axes instead of performing inner/outer set logic. ignore_index : bool, default False If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n - 1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information. Note the index values on the other axes are still respected in the join. keys : sequence, default None If multiple levels passed, should contain tuples. Construct hierarchical index using the passed keys as the outermost level. levels : list of sequences, default None Specific levels (unique values) to use for constructing a MultiIndex. Otherwise they will be inferred from the keys. names : list, default None Names for the levels in the resulting hierarchical index. verify_integrity : bool, default False Check whether the new concatenated axis contains duplicates. This can be very expensive relative to the actual data concatenation. sort : bool, default None Sort non-concatenation axis if it is not already aligned when `join` is 'outer'. The current default of sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. This has no effect when ``join='inner'``, which already preserves the order of the non-concatenation axis. .. versionadded:: 0.23.0 copy : bool, default True If False, do not copy data unnecessarily. Returns ------- object, type of objs When concatenating all ``Series`` along the index (axis=0), a ``Series`` is returned. When ``objs`` contains at least one ``DataFrame``, a ``DataFrame`` is returned. When concatenating along the columns (axis=1), a ``DataFrame`` is returned. See Also -------- Series.append : Concatenate Series. DataFrame.append : Concatenate DataFrames. DataFrame.join : Join DataFrames using indexes. DataFrame.merge : Merge DataFrames by indexes or columns. Notes ----- The keys, levels, and names arguments are all optional. A walkthrough of how this method fits in with other tools for combining pandas objects can be found `here <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__. Examples -------- Combine two ``Series``. >>> s1 = pd.Series(['a', 'b']) >>> s2 = pd.Series(['c', 'd']) >>> pd.concat([s1, s2]) 0 a 1 b 0 c 1 d dtype: object Clear the existing index and reset it in the result by setting the ``ignore_index`` option to ``True``. >>> pd.concat([s1, s2], ignore_index=True) 0 a 1 b 2 c 3 d dtype: object Add a hierarchical index at the outermost level of the data with the ``keys`` option. 
>>> pd.concat([s1, s2], keys=['s1', 's2']) s1 0 a 1 b s2 0 c 1 d dtype: object Label the index keys you create with the ``names`` option. >>> pd.concat([s1, s2], keys=['s1', 's2'], ... names=['Series name', 'Row ID']) Series name Row ID s1 0 a 1 b s2 0 c 1 d dtype: object Combine two ``DataFrame`` objects with identical columns. >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], ... columns=['letter', 'number']) >>> df1 letter number 0 a 1 1 b 2 >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], ... columns=['letter', 'number']) >>> df2 letter number 0 c 3 1 d 4 >>> pd.concat([df1, df2]) letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects with overlapping columns and return everything. Columns outside the intersection will be filled with ``NaN`` values. >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], ... columns=['letter', 'number', 'animal']) >>> df3 letter number animal 0 c 3 cat 1 d 4 dog >>> pd.concat([df1, df3], sort=False) letter number animal 0 a 1 NaN 1 b 2 NaN 0 c 3 cat 1 d 4 dog Combine ``DataFrame`` objects with overlapping columns and return only those that are shared by passing ``inner`` to the ``join`` keyword argument. >>> pd.concat([df1, df3], join="inner") letter number 0 a 1 1 b 2 0 c 3 1 d 4 Combine ``DataFrame`` objects horizontally along the x axis by passing in ``axis=1``. >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], ... columns=['animal', 'name']) >>> pd.concat([df1, df4], axis=1) letter number animal name 0 a 1 bird polly 1 b 2 monkey george Prevent the result from including duplicate index values with the ``verify_integrity`` option. >>> df5 = pd.DataFrame([1], index=['a']) >>> df5 0 a 1 >>> df6 = pd.DataFrame([2], index=['a']) >>> df6 0 a 2 >>> pd.concat([df5, df6], verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: ['a']
[ "Concatenate", "pandas", "objects", "along", "a", "particular", "axis", "with", "optional", "set", "logic", "along", "the", "other", "axes", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/concat.py#L24-L229
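A minimal usage sketch for the `concat` entry above, assuming a pandas of roughly this vintage (0.23+, where the `sort` keyword exists); the frame names `df_a`/`df_b` are illustrative, not part of the dataset. It exercises the documented `join` and `keys` parameters.

import pandas as pd

df_a = pd.DataFrame({'x': [1, 2]}, index=['r0', 'r1'])
df_b = pd.DataFrame({'x': [3, 4], 'y': [5, 6]}, index=['r1', 'r2'])

# 'outer' (the default) keeps the union of columns; missing cells become NaN
outer = pd.concat([df_a, df_b], sort=False)

# 'inner' keeps only the columns shared by every input ('x' here)
inner = pd.concat([df_a, df_b], join='inner')

# keys add an outermost index level recording which input each row came from
keyed = pd.concat([df_a, df_b], keys=['a', 'b'], sort=False)
assert keyed.index.nlevels == 2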
19,847
pandas-dev/pandas
pandas/core/reshape/concat.py
_Concatenator._get_concat_axis
def _get_concat_axis(self): """ Return index to be used along concatenation axis. """ if self._is_series: if self.axis == 0: indexes = [x.index for x in self.objs] elif self.ignore_index: idx = ibase.default_index(len(self.objs)) return idx elif self.keys is None: names = [None] * len(self.objs) num = 0 has_names = False for i, x in enumerate(self.objs): if not isinstance(x, Series): raise TypeError("Cannot concatenate type 'Series' " "with object of type {type!r}" .format(type=type(x).__name__)) if x.name is not None: names[i] = x.name has_names = True else: names[i] = num num += 1 if has_names: return Index(names) else: return ibase.default_index(len(self.objs)) else: return ensure_index(self.keys).set_names(self.names) else: indexes = [x._data.axes[self.axis] for x in self.objs] if self.ignore_index: idx = ibase.default_index(sum(len(i) for i in indexes)) return idx if self.keys is None: concat_axis = _concat_indexes(indexes) else: concat_axis = _make_concat_multiindex(indexes, self.keys, self.levels, self.names) self._maybe_check_integrity(concat_axis) return concat_axis
python
def _get_concat_axis(self): """ Return index to be used along concatenation axis. """ if self._is_series: if self.axis == 0: indexes = [x.index for x in self.objs] elif self.ignore_index: idx = ibase.default_index(len(self.objs)) return idx elif self.keys is None: names = [None] * len(self.objs) num = 0 has_names = False for i, x in enumerate(self.objs): if not isinstance(x, Series): raise TypeError("Cannot concatenate type 'Series' " "with object of type {type!r}" .format(type=type(x).__name__)) if x.name is not None: names[i] = x.name has_names = True else: names[i] = num num += 1 if has_names: return Index(names) else: return ibase.default_index(len(self.objs)) else: return ensure_index(self.keys).set_names(self.names) else: indexes = [x._data.axes[self.axis] for x in self.objs] if self.ignore_index: idx = ibase.default_index(sum(len(i) for i in indexes)) return idx if self.keys is None: concat_axis = _concat_indexes(indexes) else: concat_axis = _make_concat_multiindex(indexes, self.keys, self.levels, self.names) self._maybe_check_integrity(concat_axis) return concat_axis
[ "def", "_get_concat_axis", "(", "self", ")", ":", "if", "self", ".", "_is_series", ":", "if", "self", ".", "axis", "==", "0", ":", "indexes", "=", "[", "x", ".", "index", "for", "x", "in", "self", ".", "objs", "]", "elif", "self", ".", "ignore_index", ":", "idx", "=", "ibase", ".", "default_index", "(", "len", "(", "self", ".", "objs", ")", ")", "return", "idx", "elif", "self", ".", "keys", "is", "None", ":", "names", "=", "[", "None", "]", "*", "len", "(", "self", ".", "objs", ")", "num", "=", "0", "has_names", "=", "False", "for", "i", ",", "x", "in", "enumerate", "(", "self", ".", "objs", ")", ":", "if", "not", "isinstance", "(", "x", ",", "Series", ")", ":", "raise", "TypeError", "(", "\"Cannot concatenate type 'Series' \"", "\"with object of type {type!r}\"", ".", "format", "(", "type", "=", "type", "(", "x", ")", ".", "__name__", ")", ")", "if", "x", ".", "name", "is", "not", "None", ":", "names", "[", "i", "]", "=", "x", ".", "name", "has_names", "=", "True", "else", ":", "names", "[", "i", "]", "=", "num", "num", "+=", "1", "if", "has_names", ":", "return", "Index", "(", "names", ")", "else", ":", "return", "ibase", ".", "default_index", "(", "len", "(", "self", ".", "objs", ")", ")", "else", ":", "return", "ensure_index", "(", "self", ".", "keys", ")", ".", "set_names", "(", "self", ".", "names", ")", "else", ":", "indexes", "=", "[", "x", ".", "_data", ".", "axes", "[", "self", ".", "axis", "]", "for", "x", "in", "self", ".", "objs", "]", "if", "self", ".", "ignore_index", ":", "idx", "=", "ibase", ".", "default_index", "(", "sum", "(", "len", "(", "i", ")", "for", "i", "in", "indexes", ")", ")", "return", "idx", "if", "self", ".", "keys", "is", "None", ":", "concat_axis", "=", "_concat_indexes", "(", "indexes", ")", "else", ":", "concat_axis", "=", "_make_concat_multiindex", "(", "indexes", ",", "self", ".", "keys", ",", "self", ".", "levels", ",", "self", ".", "names", ")", "self", ".", "_maybe_check_integrity", "(", "concat_axis", ")", "return", "concat_axis" ]
Return index to be used along concatenation axis.
[ "Return", "index", "to", "be", "used", "along", "concatenation", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/concat.py#L475-L521
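`_get_concat_axis` is internal, but its Series-naming branch is observable through public API: with ``axis=1`` and no `keys`, named Series keep their names and unnamed ones fall back to a running integer counter (the `has_names` path). A small sketch with illustrative variable names:

import pandas as pd

s_named = pd.Series([1, 2], name='a')
s_unnamed = pd.Series([3, 4])

# Mixed named/unnamed inputs: columns come from names where available,
# positional integers otherwise.
wide = pd.concat([s_named, s_unnamed], axis=1)
print(list(wide.columns))  # ['a', 0]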
19,848
pandas-dev/pandas
pandas/core/computation/ops.py
_in
def _in(x, y): """Compute the vectorized membership of ``x in y`` if possible, otherwise use Python. """ try: return x.isin(y) except AttributeError: if is_list_like(x): try: return y.isin(x) except AttributeError: pass return x in y
python
def _in(x, y): """Compute the vectorized membership of ``x in y`` if possible, otherwise use Python. """ try: return x.isin(y) except AttributeError: if is_list_like(x): try: return y.isin(x) except AttributeError: pass return x in y
[ "def", "_in", "(", "x", ",", "y", ")", ":", "try", ":", "return", "x", ".", "isin", "(", "y", ")", "except", "AttributeError", ":", "if", "is_list_like", "(", "x", ")", ":", "try", ":", "return", "y", ".", "isin", "(", "x", ")", "except", "AttributeError", ":", "pass", "return", "x", "in", "y" ]
Compute the vectorized membership of ``x in y`` if possible, otherwise use Python.
[ "Compute", "the", "vectorized", "membership", "of", "x", "in", "y", "if", "possible", "otherwise", "use", "Python", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L234-L246
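A quick sketch of the behavior `_in` implements, exercised through the public `DataFrame.query` interface (the ``in`` operator in the computation engine resolves through this helper):

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})

# 'in' inside query() takes the vectorized Series.isin path
print(df.query('a in [1, 3]')['a'].tolist())  # [1, 3]

# the scalar fallback is ordinary Python membership
print(2 in [1, 2, 3])  # True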
19,849
pandas-dev/pandas
pandas/core/computation/ops.py
_not_in
def _not_in(x, y): """Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python. """ try: return ~x.isin(y) except AttributeError: if is_list_like(x): try: return ~y.isin(x) except AttributeError: pass return x not in y
python
def _not_in(x, y): """Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python. """ try: return ~x.isin(y) except AttributeError: if is_list_like(x): try: return ~y.isin(x) except AttributeError: pass return x not in y
[ "def", "_not_in", "(", "x", ",", "y", ")", ":", "try", ":", "return", "~", "x", ".", "isin", "(", "y", ")", "except", "AttributeError", ":", "if", "is_list_like", "(", "x", ")", ":", "try", ":", "return", "~", "y", ".", "isin", "(", "x", ")", "except", "AttributeError", ":", "pass", "return", "x", "not", "in", "y" ]
Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python.
[ "Compute", "the", "vectorized", "membership", "of", "x", "not", "in", "y", "if", "possible", "otherwise", "use", "Python", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L249-L261
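`_not_in` mirrors `_in` with the boolean mask inverted via ``~``; the complementary query reads:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
print(df.query('a not in [1, 3]')['a'].tolist())  # [2]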
19,850
pandas-dev/pandas
pandas/core/computation/ops.py
_cast_inplace
def _cast_inplace(terms, acceptable_dtypes, dtype): """Cast an expression inplace. Parameters ---------- terms : Op The expression that should be cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype is in this list. .. versionadded:: 0.19.0 dtype : str or numpy.dtype The dtype to cast to. """ dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value)
python
def _cast_inplace(terms, acceptable_dtypes, dtype): """Cast an expression inplace. Parameters ---------- terms : Op The expression that should be cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype is in this list. .. versionadded:: 0.19.0 dtype : str or numpy.dtype The dtype to cast to. """ dt = np.dtype(dtype) for term in terms: if term.type in acceptable_dtypes: continue try: new_value = term.value.astype(dt) except AttributeError: new_value = dt.type(term.value) term.update(new_value)
[ "def", "_cast_inplace", "(", "terms", ",", "acceptable_dtypes", ",", "dtype", ")", ":", "dt", "=", "np", ".", "dtype", "(", "dtype", ")", "for", "term", "in", "terms", ":", "if", "term", ".", "type", "in", "acceptable_dtypes", ":", "continue", "try", ":", "new_value", "=", "term", ".", "value", ".", "astype", "(", "dt", ")", "except", "AttributeError", ":", "new_value", "=", "dt", ".", "type", "(", "term", ".", "value", ")", "term", ".", "update", "(", "new_value", ")" ]
Cast an expression inplace. Parameters ---------- terms : Op The expression that should be cast. acceptable_dtypes : list of acceptable numpy.dtype Will not cast if term's dtype is in this list. .. versionadded:: 0.19.0 dtype : str or numpy.dtype The dtype to cast to.
[ "Cast", "an", "expression", "inplace", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L288-L312
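The try/except in `_cast_inplace` amounts to: prefer ``.astype`` when the value supports it, otherwise use the dtype's scalar constructor. A standalone sketch of the same pattern, outside the Term machinery:

import numpy as np

dt = np.dtype('float64')

array_value = np.array([1, 2, 3])
scalar_value = 5  # plain int has no .astype, so it raises AttributeError

casted_array = array_value.astype(dt)   # ndarray path
casted_scalar = dt.type(scalar_value)   # scalar path via dt.type
print(casted_array.dtype, type(casted_scalar))  # float64 <class 'numpy.float64'>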
19,851
pandas-dev/pandas
pandas/core/computation/ops.py
BinOp.convert_values
def convert_values(self): """Convert datetimes to a comparable value in an expression. """ def stringify(value): if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) lhs, rhs = self.lhs, self.rhs if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: v = rhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.rhs.update(v) if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: v = lhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.lhs.update(v)
python
def convert_values(self): """Convert datetimes to a comparable value in an expression. """ def stringify(value): if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) lhs, rhs = self.lhs, self.rhs if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: v = rhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.rhs.update(v) if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: v = lhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(_ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.lhs.update(v)
[ "def", "convert_values", "(", "self", ")", ":", "def", "stringify", "(", "value", ")", ":", "if", "self", ".", "encoding", "is", "not", "None", ":", "encoder", "=", "partial", "(", "pprint_thing_encoded", ",", "encoding", "=", "self", ".", "encoding", ")", "else", ":", "encoder", "=", "pprint_thing", "return", "encoder", "(", "value", ")", "lhs", ",", "rhs", "=", "self", ".", "lhs", ",", "self", ".", "rhs", "if", "is_term", "(", "lhs", ")", "and", "lhs", ".", "is_datetime", "and", "is_term", "(", "rhs", ")", "and", "rhs", ".", "is_scalar", ":", "v", "=", "rhs", ".", "value", "if", "isinstance", "(", "v", ",", "(", "int", ",", "float", ")", ")", ":", "v", "=", "stringify", "(", "v", ")", "v", "=", "Timestamp", "(", "_ensure_decoded", "(", "v", ")", ")", "if", "v", ".", "tz", "is", "not", "None", ":", "v", "=", "v", ".", "tz_convert", "(", "'UTC'", ")", "self", ".", "rhs", ".", "update", "(", "v", ")", "if", "is_term", "(", "rhs", ")", "and", "rhs", ".", "is_datetime", "and", "is_term", "(", "lhs", ")", "and", "lhs", ".", "is_scalar", ":", "v", "=", "lhs", ".", "value", "if", "isinstance", "(", "v", ",", "(", "int", ",", "float", ")", ")", ":", "v", "=", "stringify", "(", "v", ")", "v", "=", "Timestamp", "(", "_ensure_decoded", "(", "v", ")", ")", "if", "v", ".", "tz", "is", "not", "None", ":", "v", "=", "v", ".", "tz_convert", "(", "'UTC'", ")", "self", ".", "lhs", ".", "update", "(", "v", ")" ]
Convert datetimes to a comparable value in an expression.
[ "Convert", "datetimes", "to", "a", "comparable", "value", "in", "an", "expression", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L407-L436
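The datetime normalization performed by `convert_values` can be approximated with public API: parse the scalar into a `Timestamp`, then convert tz-aware values to UTC before comparison:

import pandas as pd

v = pd.Timestamp('2019-01-01 00:00', tz='US/Eastern')
print(v.tz_convert('UTC'))  # 2019-01-01 05:00:00+00:00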
19,852
pandas-dev/pandas
pandas/util/_doctools.py
TablePlotter._shape
def _shape(self, df): """ Calculate table shape considering index levels. """ row, col = df.shape return row + df.columns.nlevels, col + df.index.nlevels
python
def _shape(self, df): """ Calculate table shape considering index levels. """ row, col = df.shape return row + df.columns.nlevels, col + df.index.nlevels
[ "def", "_shape", "(", "self", ",", "df", ")", ":", "row", ",", "col", "=", "df", ".", "shape", "return", "row", "+", "df", ".", "columns", ".", "nlevels", ",", "col", "+", "df", ".", "index", ".", "nlevels" ]
Calculate table shape considering index levels.
[ "Calculate", "table", "chape", "considering", "index", "levels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L17-L23
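The `_shape` arithmetic in plain terms: one extra plotted row per column-header level and one extra plotted column per index level. For a flat frame both corrections equal 1:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
row, col = df.shape  # (2, 2)
print(row + df.columns.nlevels, col + df.index.nlevels)  # 3 3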
19,853
pandas-dev/pandas
pandas/util/_doctools.py
TablePlotter._get_cells
def _get_cells(self, left, right, vertical): """ Calculate appropriate figure size based on left and right data. """ if vertical: # calculate required number of cells vcells = max(sum(self._shape(l)[0] for l in left), self._shape(right)[0]) hcells = (max(self._shape(l)[1] for l in left) + self._shape(right)[1]) else: vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]]) hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]]) return hcells, vcells
python
def _get_cells(self, left, right, vertical): """ Calculate appropriate figure size based on left and right data. """ if vertical: # calculate required number of cells vcells = max(sum(self._shape(l)[0] for l in left), self._shape(right)[0]) hcells = (max(self._shape(l)[1] for l in left) + self._shape(right)[1]) else: vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]]) hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]]) return hcells, vcells
[ "def", "_get_cells", "(", "self", ",", "left", ",", "right", ",", "vertical", ")", ":", "if", "vertical", ":", "# calculate required number of cells", "vcells", "=", "max", "(", "sum", "(", "self", ".", "_shape", "(", "l", ")", "[", "0", "]", "for", "l", "in", "left", ")", ",", "self", ".", "_shape", "(", "right", ")", "[", "0", "]", ")", "hcells", "=", "(", "max", "(", "self", ".", "_shape", "(", "l", ")", "[", "1", "]", "for", "l", "in", "left", ")", "+", "self", ".", "_shape", "(", "right", ")", "[", "1", "]", ")", "else", ":", "vcells", "=", "max", "(", "[", "self", ".", "_shape", "(", "l", ")", "[", "0", "]", "for", "l", "in", "left", "]", "+", "[", "self", ".", "_shape", "(", "right", ")", "[", "0", "]", "]", ")", "hcells", "=", "sum", "(", "[", "self", ".", "_shape", "(", "l", ")", "[", "1", "]", "for", "l", "in", "left", "]", "+", "[", "self", ".", "_shape", "(", "right", ")", "[", "1", "]", "]", ")", "return", "hcells", ",", "vcells" ]
Calculate appropriate figure size based on left and right data.
[ "Calculate", "appropriate", "figure", "size", "based", "on", "left", "and", "right", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L25-L41
19,854
pandas-dev/pandas
pandas/util/_doctools.py
TablePlotter._conv
def _conv(self, data): """Convert each input to a format appropriate for table plotting""" if isinstance(data, pd.Series): if data.name is None: data = data.to_frame(name='') else: data = data.to_frame() data = data.fillna('NaN') return data
python
def _conv(self, data): """Convert each input to a format appropriate for table plotting""" if isinstance(data, pd.Series): if data.name is None: data = data.to_frame(name='') else: data = data.to_frame() data = data.fillna('NaN') return data
[ "def", "_conv", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "pd", ".", "Series", ")", ":", "if", "data", ".", "name", "is", "None", ":", "data", "=", "data", ".", "to_frame", "(", "name", "=", "''", ")", "else", ":", "data", "=", "data", ".", "to_frame", "(", ")", "data", "=", "data", ".", "fillna", "(", "'NaN'", ")", "return", "data" ]
Convert each input to a format appropriate for table plotting
[ "Convert", "each", "input", "to", "appropriate", "for", "table", "outplot" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L103-L111
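The two steps inside `_conv` are ordinary pandas calls; a sketch with an unnamed Series (variable names are illustrative):

import pandas as pd

s = pd.Series([1, None])
# An unnamed Series becomes a one-column frame with an empty column label,
# and missing values are replaced by the string 'NaN' for rendering.
out = s.to_frame(name='').fillna('NaN')
print(list(out.columns), out.iloc[1, 0])  # [''] NaN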
19,855
pandas-dev/pandas
pandas/core/reshape/tile.py
cut
def cut(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False, duplicates='raise'): """ Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.23.0 Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... 
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) [bad, good, medium, medium, good, bad] Categories (3, object): [bad < medium < good] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 4.0 dtype: float64, array([0, 2, 4, 6, 8])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 3.0 dtype: float64, array([0, 2, 4, 6, 8])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. 
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0, 1], NaN, (2, 3], (4, 5]] Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]] """ # NOTE: this binning code is changed a bit from histogram for var(x) == 0 # for handling the cut for datetime and timedelta objects x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if not np.iterable(bins): if is_scalar(bins) and bins < 1: raise ValueError("`bins` should be a positive integer.") try: # for array-like sz = x.size except AttributeError: x = np.asarray(x) sz = x.size if sz == 0: raise ValueError('Cannot cut empty array') rng = (nanops.nanmin(x), nanops.nanmax(x)) mn, mx = [mi + 0.0 for mi in rng] if np.isinf(mn) or np.isinf(mx): # GH 24314 raise ValueError('cannot specify integer `bins` when input data ' 'contains infinity') elif mn == mx: # adjust end points before binning mn -= .001 * abs(mn) if mn != 0 else .001 mx += .001 * abs(mx) if mx != 0 else .001 bins = np.linspace(mn, mx, bins + 1, endpoint=True) else: # adjust end points after binning bins = np.linspace(mn, mx, bins + 1, endpoint=True) adj = (mx - mn) * 0.001 # 0.1% of the range if right: bins[0] -= adj else: bins[-1] += adj elif isinstance(bins, IntervalIndex): if bins.is_overlapping: raise ValueError('Overlapping IntervalIndex is not accepted.') else: if is_datetime64tz_dtype(bins): bins = np.asarray(bins, dtype=_NS_DTYPE) else: bins = np.asarray(bins) bins = _convert_bin_to_numeric_type(bins, dtype) # GH 26045: cast to float64 to avoid an overflow if (np.diff(bins.astype('float64')) < 0).any(): raise ValueError('bins must increase monotonically.') fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
python
def cut(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False, duplicates='raise'): """ Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.23.0 Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... 
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) [bad, good, medium, medium, good, bad] Categories (3, object): [bad < medium < good] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 4.0 dtype: float64, array([0, 2, 4, 6, 8])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 3.0 dtype: float64, array([0, 2, 4, 6, 8])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. 
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0, 1], NaN, (2, 3], (4, 5]] Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]] """ # NOTE: this binning code is changed a bit from histogram for var(x) == 0 # for handling the cut for datetime and timedelta objects x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if not np.iterable(bins): if is_scalar(bins) and bins < 1: raise ValueError("`bins` should be a positive integer.") try: # for array-like sz = x.size except AttributeError: x = np.asarray(x) sz = x.size if sz == 0: raise ValueError('Cannot cut empty array') rng = (nanops.nanmin(x), nanops.nanmax(x)) mn, mx = [mi + 0.0 for mi in rng] if np.isinf(mn) or np.isinf(mx): # GH 24314 raise ValueError('cannot specify integer `bins` when input data ' 'contains infinity') elif mn == mx: # adjust end points before binning mn -= .001 * abs(mn) if mn != 0 else .001 mx += .001 * abs(mx) if mx != 0 else .001 bins = np.linspace(mn, mx, bins + 1, endpoint=True) else: # adjust end points after binning bins = np.linspace(mn, mx, bins + 1, endpoint=True) adj = (mx - mn) * 0.001 # 0.1% of the range if right: bins[0] -= adj else: bins[-1] += adj elif isinstance(bins, IntervalIndex): if bins.is_overlapping: raise ValueError('Overlapping IntervalIndex is not accepted.') else: if is_datetime64tz_dtype(bins): bins = np.asarray(bins, dtype=_NS_DTYPE) else: bins = np.asarray(bins) bins = _convert_bin_to_numeric_type(bins, dtype) # GH 26045: cast to float64 to avoid an overflow if (np.diff(bins.astype('float64')) < 0).any(): raise ValueError('bins must increase monotonically.') fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
[ "def", "cut", "(", "x", ",", "bins", ",", "right", "=", "True", ",", "labels", "=", "None", ",", "retbins", "=", "False", ",", "precision", "=", "3", ",", "include_lowest", "=", "False", ",", "duplicates", "=", "'raise'", ")", ":", "# NOTE: this binning code is changed a bit from histogram for var(x) == 0", "# for handling the cut for datetime and timedelta objects", "x_is_series", ",", "series_index", ",", "name", ",", "x", "=", "_preprocess_for_cut", "(", "x", ")", "x", ",", "dtype", "=", "_coerce_to_type", "(", "x", ")", "if", "not", "np", ".", "iterable", "(", "bins", ")", ":", "if", "is_scalar", "(", "bins", ")", "and", "bins", "<", "1", ":", "raise", "ValueError", "(", "\"`bins` should be a positive integer.\"", ")", "try", ":", "# for array-like", "sz", "=", "x", ".", "size", "except", "AttributeError", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "sz", "=", "x", ".", "size", "if", "sz", "==", "0", ":", "raise", "ValueError", "(", "'Cannot cut empty array'", ")", "rng", "=", "(", "nanops", ".", "nanmin", "(", "x", ")", ",", "nanops", ".", "nanmax", "(", "x", ")", ")", "mn", ",", "mx", "=", "[", "mi", "+", "0.0", "for", "mi", "in", "rng", "]", "if", "np", ".", "isinf", "(", "mn", ")", "or", "np", ".", "isinf", "(", "mx", ")", ":", "# GH 24314", "raise", "ValueError", "(", "'cannot specify integer `bins` when input data '", "'contains infinity'", ")", "elif", "mn", "==", "mx", ":", "# adjust end points before binning", "mn", "-=", ".001", "*", "abs", "(", "mn", ")", "if", "mn", "!=", "0", "else", ".001", "mx", "+=", ".001", "*", "abs", "(", "mx", ")", "if", "mx", "!=", "0", "else", ".001", "bins", "=", "np", ".", "linspace", "(", "mn", ",", "mx", ",", "bins", "+", "1", ",", "endpoint", "=", "True", ")", "else", ":", "# adjust end points after binning", "bins", "=", "np", ".", "linspace", "(", "mn", ",", "mx", ",", "bins", "+", "1", ",", "endpoint", "=", "True", ")", "adj", "=", "(", "mx", "-", "mn", ")", "*", "0.001", "# 0.1% of the range", "if", "right", ":", "bins", "[", "0", "]", "-=", "adj", "else", ":", "bins", "[", "-", "1", "]", "+=", "adj", "elif", "isinstance", "(", "bins", ",", "IntervalIndex", ")", ":", "if", "bins", ".", "is_overlapping", ":", "raise", "ValueError", "(", "'Overlapping IntervalIndex is not accepted.'", ")", "else", ":", "if", "is_datetime64tz_dtype", "(", "bins", ")", ":", "bins", "=", "np", ".", "asarray", "(", "bins", ",", "dtype", "=", "_NS_DTYPE", ")", "else", ":", "bins", "=", "np", ".", "asarray", "(", "bins", ")", "bins", "=", "_convert_bin_to_numeric_type", "(", "bins", ",", "dtype", ")", "# GH 26045: cast to float64 to avoid an overflow", "if", "(", "np", ".", "diff", "(", "bins", ".", "astype", "(", "'float64'", ")", ")", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'bins must increase monotonically.'", ")", "fac", ",", "bins", "=", "_bins_to_cuts", "(", "x", ",", "bins", ",", "right", "=", "right", ",", "labels", "=", "labels", ",", "precision", "=", "precision", ",", "include_lowest", "=", "include_lowest", ",", "dtype", "=", "dtype", ",", "duplicates", "=", "duplicates", ")", "return", "_postprocess_for_cut", "(", "fac", ",", "bins", ",", "retbins", ",", "x_is_series", ",", "series_index", ",", "name", ",", "dtype", ")" ]
Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.23.0 Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... 
# doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) [bad, good, medium, medium, good, bad] Categories (3, object): [bad < medium < good] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 4.0 dtype: float64, array([0, 2, 4, 6, 8])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 3.0 dtype: float64, array([0, 2, 4, 6, 8])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0, 1], NaN, (2, 3], (4, 5]] Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
[ "Bin", "values", "into", "discrete", "intervals", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L23-L245
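Complementing the doctests embedded in the entry, a compact `cut` call with explicit edges; the bin labels here are invented for illustration:

import pandas as pd

ages = pd.Series([3, 17, 25, 64, 80])
# right=True (the default): bins are (0, 18], (18, 65], (65, 100]
groups = pd.cut(ages, bins=[0, 18, 65, 100],
                labels=['minor', 'adult', 'senior'])
print(groups.tolist())  # ['minor', 'minor', 'adult', 'adult', 'senior']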
19,856
pandas-dev/pandas
pandas/core/reshape/tile.py
qcut
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): """ Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.20.0 Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3]) """ x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if is_integer(q): quantiles = np.linspace(0, 1, q + 1) else: quantiles = q bins = algos.quantile(x, quantiles) fac, bins = _bins_to_cuts(x, bins, labels=labels, precision=precision, include_lowest=True, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
python
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): """ Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.20.0 Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3]) """ x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if is_integer(q): quantiles = np.linspace(0, 1, q + 1) else: quantiles = q bins = algos.quantile(x, quantiles) fac, bins = _bins_to_cuts(x, bins, labels=labels, precision=precision, include_lowest=True, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
[ "def", "qcut", "(", "x", ",", "q", ",", "labels", "=", "None", ",", "retbins", "=", "False", ",", "precision", "=", "3", ",", "duplicates", "=", "'raise'", ")", ":", "x_is_series", ",", "series_index", ",", "name", ",", "x", "=", "_preprocess_for_cut", "(", "x", ")", "x", ",", "dtype", "=", "_coerce_to_type", "(", "x", ")", "if", "is_integer", "(", "q", ")", ":", "quantiles", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "q", "+", "1", ")", "else", ":", "quantiles", "=", "q", "bins", "=", "algos", ".", "quantile", "(", "x", ",", "quantiles", ")", "fac", ",", "bins", "=", "_bins_to_cuts", "(", "x", ",", "bins", ",", "labels", "=", "labels", ",", "precision", "=", "precision", ",", "include_lowest", "=", "True", ",", "dtype", "=", "dtype", ",", "duplicates", "=", "duplicates", ")", "return", "_postprocess_for_cut", "(", "fac", ",", "bins", ",", "retbins", ",", "x_is_series", ",", "series_index", ",", "name", ",", "dtype", ")" ]
Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.20.0 Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3])
[ "Quantile", "-", "based", "discretization", "function", ".", "Discretize", "variable", "into", "equal", "-", "sized", "buckets", "based", "on", "rank", "or", "based", "on", "sample", "quantiles", ".", "For", "example", "1000", "values", "for", "10", "quantiles", "would", "produce", "a", "Categorical", "object", "indicating", "quantile", "membership", "for", "each", "data", "point", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L248-L317
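A minimal usage sketch of the `qcut` API above (sample data invented for illustration; assumes pandas at roughly this revision):

import numpy as np
import pandas as pd

# Quartile membership; `retbins=True` also returns the quantile-based
# bin edges as an ndarray of floats.
data = np.arange(10)
cats, edges = pd.qcut(data, 4, retbins=True)

# Heavily tied data can produce duplicate quantile edges; 'drop'
# collapses them instead of raising ValueError.
tied = pd.Series([0, 0, 0, 0, 1, 2])
pd.qcut(tied, 4, duplicates='drop')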
19,857
pandas-dev/pandas
pandas/core/reshape/tile.py
_convert_bin_to_datelike_type
def _convert_bin_to_datelike_type(bins, dtype): """ Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is datelike Parameters ---------- bins : list-like of bins dtype : dtype of data Returns ------- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike """ if is_datetime64tz_dtype(dtype): bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz) elif is_datetime_or_timedelta_dtype(dtype): bins = Index(bins.astype(np.int64), dtype=dtype) return bins
python
def _convert_bin_to_datelike_type(bins, dtype): """ Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is datelike Parameters ---------- bins : list-like of bins dtype : dtype of data Returns ------- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike """ if is_datetime64tz_dtype(dtype): bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz) elif is_datetime_or_timedelta_dtype(dtype): bins = Index(bins.astype(np.int64), dtype=dtype) return bins
[ "def", "_convert_bin_to_datelike_type", "(", "bins", ",", "dtype", ")", ":", "if", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "bins", "=", "to_datetime", "(", "bins", ".", "astype", "(", "np", ".", "int64", ")", ",", "utc", "=", "True", ")", ".", "tz_convert", "(", "dtype", ".", "tz", ")", "elif", "is_datetime_or_timedelta_dtype", "(", "dtype", ")", ":", "bins", "=", "Index", "(", "bins", ".", "astype", "(", "np", ".", "int64", ")", ",", "dtype", "=", "dtype", ")", "return", "bins" ]
Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is datelike Parameters ---------- bins : list-like of bins dtype : dtype of data Returns ------- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike
[ "Convert", "bins", "to", "a", "DatetimeIndex", "or", "TimedeltaIndex", "if", "the", "orginal", "dtype", "is", "datelike" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L430-L450
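The effect of this helper is visible through the public API: cutting datelike data with `retbins=True` hands the edges back as a DatetimeIndex rather than raw int64 nanoseconds. A hedged sketch (sample data invented; behavior assumed for pandas at roughly this revision):

import pandas as pd

# Internally the edges are computed on int64 nanoseconds, then
# converted back to a datelike index before being returned.
ts = pd.Series(pd.date_range('2000-01-01', periods=6, freq='D'))
cats, edges = pd.cut(ts, 2, retbins=True)
print(type(edges))  # expected: DatetimeIndex, not an integer array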
19,858
pandas-dev/pandas
pandas/core/reshape/tile.py
_format_labels
def _format_labels(bins, precision, right=True, include_lowest=False, dtype=None): """ based on the dtype, return our labels """ closed = 'right' if right else 'left' if is_datetime64tz_dtype(dtype): formatter = partial(Timestamp, tz=dtype.tz) adjust = lambda x: x - Timedelta('1ns') elif is_datetime64_dtype(dtype): formatter = Timestamp adjust = lambda x: x - Timedelta('1ns') elif is_timedelta64_dtype(dtype): formatter = Timedelta adjust = lambda x: x - Timedelta('1ns') else: precision = _infer_precision(precision, bins) formatter = lambda x: _round_frac(x, precision) adjust = lambda x: x - 10 ** (-precision) breaks = [formatter(b) for b in bins] labels = IntervalIndex.from_breaks(breaks, closed=closed) if right and include_lowest: # we will adjust the left hand side by precision to # account that we are all right closed v = adjust(labels[0].left) i = IntervalIndex([Interval(v, labels[0].right, closed='right')]) labels = i.append(labels[1:]) return labels
python
def _format_labels(bins, precision, right=True, include_lowest=False, dtype=None): """ based on the dtype, return our labels """ closed = 'right' if right else 'left' if is_datetime64tz_dtype(dtype): formatter = partial(Timestamp, tz=dtype.tz) adjust = lambda x: x - Timedelta('1ns') elif is_datetime64_dtype(dtype): formatter = Timestamp adjust = lambda x: x - Timedelta('1ns') elif is_timedelta64_dtype(dtype): formatter = Timedelta adjust = lambda x: x - Timedelta('1ns') else: precision = _infer_precision(precision, bins) formatter = lambda x: _round_frac(x, precision) adjust = lambda x: x - 10 ** (-precision) breaks = [formatter(b) for b in bins] labels = IntervalIndex.from_breaks(breaks, closed=closed) if right and include_lowest: # we will adjust the left hand side by precision to # account that we are all right closed v = adjust(labels[0].left) i = IntervalIndex([Interval(v, labels[0].right, closed='right')]) labels = i.append(labels[1:]) return labels
[ "def", "_format_labels", "(", "bins", ",", "precision", ",", "right", "=", "True", ",", "include_lowest", "=", "False", ",", "dtype", "=", "None", ")", ":", "closed", "=", "'right'", "if", "right", "else", "'left'", "if", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "formatter", "=", "partial", "(", "Timestamp", ",", "tz", "=", "dtype", ".", "tz", ")", "adjust", "=", "lambda", "x", ":", "x", "-", "Timedelta", "(", "'1ns'", ")", "elif", "is_datetime64_dtype", "(", "dtype", ")", ":", "formatter", "=", "Timestamp", "adjust", "=", "lambda", "x", ":", "x", "-", "Timedelta", "(", "'1ns'", ")", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "formatter", "=", "Timedelta", "adjust", "=", "lambda", "x", ":", "x", "-", "Timedelta", "(", "'1ns'", ")", "else", ":", "precision", "=", "_infer_precision", "(", "precision", ",", "bins", ")", "formatter", "=", "lambda", "x", ":", "_round_frac", "(", "x", ",", "precision", ")", "adjust", "=", "lambda", "x", ":", "x", "-", "10", "**", "(", "-", "precision", ")", "breaks", "=", "[", "formatter", "(", "b", ")", "for", "b", "in", "bins", "]", "labels", "=", "IntervalIndex", ".", "from_breaks", "(", "breaks", ",", "closed", "=", "closed", ")", "if", "right", "and", "include_lowest", ":", "# we will adjust the left hand side by precision to", "# account that we are all right closed", "v", "=", "adjust", "(", "labels", "[", "0", "]", ".", "left", ")", "i", "=", "IntervalIndex", "(", "[", "Interval", "(", "v", ",", "labels", "[", "0", "]", ".", "right", ",", "closed", "=", "'right'", ")", "]", ")", "labels", "=", "i", ".", "append", "(", "labels", "[", "1", ":", "]", ")", "return", "labels" ]
based on the dtype, return our labels
[ "based", "on", "the", "dtype", "return", "our", "labels" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L453-L484
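The core of the labeling step is `IntervalIndex.from_breaks` plus the left-edge adjustment for `include_lowest`. A minimal standalone sketch of that idea (the fixed `10 ** -3` adjustment stands in for the precision-based `adjust` lambda above):

import pandas as pd
from pandas import Interval, IntervalIndex

# Labels are built from the rounded breaks; the closed side follows
# the `right` flag.
labels = IntervalIndex.from_breaks([0.994, 3.0, 5.0, 7.0], closed='right')

# With include_lowest=True the first interval's left edge is nudged
# down so the minimum value falls inside a right-closed bin.
v = labels[0].left - 10 ** (-3)
first = IntervalIndex([Interval(v, labels[0].right, closed='right')])
labels = first.append(labels[1:])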
19,859
pandas-dev/pandas
pandas/core/reshape/tile.py
_preprocess_for_cut
def _preprocess_for_cut(x): """ handles preprocessing for cut where we convert passed input to array, strip the index information and store it separately """ x_is_series = isinstance(x, Series) series_index = None name = None if x_is_series: series_index = x.index name = x.name # Check that the passed array is a Pandas or Numpy object # We don't want to strip away a Pandas data-type here (e.g. datetimetz) ndim = getattr(x, 'ndim', None) if ndim is None: x = np.asarray(x) if x.ndim != 1: raise ValueError("Input array must be 1 dimensional") return x_is_series, series_index, name, x
python
def _preprocess_for_cut(x): """ handles preprocessing for cut where we convert passed input to array, strip the index information and store it separately """ x_is_series = isinstance(x, Series) series_index = None name = None if x_is_series: series_index = x.index name = x.name # Check that the passed array is a Pandas or Numpy object # We don't want to strip away a Pandas data-type here (e.g. datetimetz) ndim = getattr(x, 'ndim', None) if ndim is None: x = np.asarray(x) if x.ndim != 1: raise ValueError("Input array must be 1 dimensional") return x_is_series, series_index, name, x
[ "def", "_preprocess_for_cut", "(", "x", ")", ":", "x_is_series", "=", "isinstance", "(", "x", ",", "Series", ")", "series_index", "=", "None", "name", "=", "None", "if", "x_is_series", ":", "series_index", "=", "x", ".", "index", "name", "=", "x", ".", "name", "# Check that the passed array is a Pandas or Numpy object", "# We don't want to strip away a Pandas data-type here (e.g. datetimetz)", "ndim", "=", "getattr", "(", "x", ",", "'ndim'", ",", "None", ")", "if", "ndim", "is", "None", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "if", "x", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"Input array must be 1 dimensional\"", ")", "return", "x_is_series", ",", "series_index", ",", "name", ",", "x" ]
handles preprocessing for cut where we convert passed input to array, strip the index information and store it separately
[ "handles", "preprocessing", "for", "cut", "where", "we", "convert", "passed", "input", "to", "array", "strip", "the", "index", "information", "and", "store", "it", "separately" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L487-L509
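A standalone sketch of the same preprocessing contract (the helper name `preprocess` is invented for illustration):

import numpy as np
import pandas as pd

def preprocess(x):
    # Mirrors _preprocess_for_cut: remember Series metadata, coerce
    # plain sequences to ndarray, and insist on 1-D input.
    is_series = isinstance(x, pd.Series)
    index = x.index if is_series else None
    name = x.name if is_series else None
    if getattr(x, 'ndim', None) is None:
        x = np.asarray(x)
    if x.ndim != 1:
        raise ValueError("Input array must be 1 dimensional")
    return is_series, index, name, x

preprocess([1, 2, 3])          # plain list -> ndarray
preprocess(pd.Series([1, 2]))  # index and name are preserved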
19,860
pandas-dev/pandas
pandas/core/reshape/tile.py
_postprocess_for_cut
def _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype): """ handles post processing for the cut method where we combine the index information if the originally passed datatype was a series """ if x_is_series: fac = Series(fac, index=series_index, name=name) if not retbins: return fac bins = _convert_bin_to_datelike_type(bins, dtype) return fac, bins
python
def _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype): """ handles post processing for the cut method where we combine the index information if the originally passed datatype was a series """ if x_is_series: fac = Series(fac, index=series_index, name=name) if not retbins: return fac bins = _convert_bin_to_datelike_type(bins, dtype) return fac, bins
[ "def", "_postprocess_for_cut", "(", "fac", ",", "bins", ",", "retbins", ",", "x_is_series", ",", "series_index", ",", "name", ",", "dtype", ")", ":", "if", "x_is_series", ":", "fac", "=", "Series", "(", "fac", ",", "index", "=", "series_index", ",", "name", "=", "name", ")", "if", "not", "retbins", ":", "return", "fac", "bins", "=", "_convert_bin_to_datelike_type", "(", "bins", ",", "dtype", ")", "return", "fac", ",", "bins" ]
handles post processing for the cut method where we combine the index information if the originally passed datatype was a series
[ "handles", "post", "processing", "for", "the", "cut", "method", "where", "we", "combine", "the", "index", "information", "if", "the", "originally", "passed", "datatype", "was", "a", "series" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L512-L527
19,861
pandas-dev/pandas
pandas/core/reshape/tile.py
_round_frac
def _round_frac(x, precision): """ Round the fractional part of the given number """ if not np.isfinite(x) or x == 0: return x else: frac, whole = np.modf(x) if whole == 0: digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision else: digits = precision return np.around(x, digits)
python
def _round_frac(x, precision): """ Round the fractional part of the given number """ if not np.isfinite(x) or x == 0: return x else: frac, whole = np.modf(x) if whole == 0: digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision else: digits = precision return np.around(x, digits)
[ "def", "_round_frac", "(", "x", ",", "precision", ")", ":", "if", "not", "np", ".", "isfinite", "(", "x", ")", "or", "x", "==", "0", ":", "return", "x", "else", ":", "frac", ",", "whole", "=", "np", ".", "modf", "(", "x", ")", "if", "whole", "==", "0", ":", "digits", "=", "-", "int", "(", "np", ".", "floor", "(", "np", ".", "log10", "(", "abs", "(", "frac", ")", ")", ")", ")", "-", "1", "+", "precision", "else", ":", "digits", "=", "precision", "return", "np", ".", "around", "(", "x", ",", "digits", ")" ]
Round the fractional part of the given number
[ "Round", "the", "fractional", "part", "of", "the", "given", "number" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L530-L542
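A standalone sketch of the same rounding rule (the name `round_frac` is invented): for a pure fraction it keeps `precision` significant digits of the fraction, otherwise it rounds to `precision` decimal places.

import numpy as np

def round_frac(x, precision):
    # Same rule as the pandas helper above.
    if not np.isfinite(x) or x == 0:
        return x
    frac, whole = np.modf(x)
    if whole == 0:
        digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision
    else:
        digits = precision
    return np.around(x, digits)

print(round_frac(0.000123456, 3))  # 0.000123  (3 significant digits)
print(round_frac(12.3456789, 3))   # 12.346    (3 decimal places)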
19,862
pandas-dev/pandas
pandas/core/reshape/tile.py
_infer_precision
def _infer_precision(base_precision, bins): """Infer an appropriate precision for _round_frac """ for precision in range(base_precision, 20): levels = [_round_frac(b, precision) for b in bins] if algos.unique(levels).size == bins.size: return precision return base_precision
python
def _infer_precision(base_precision, bins): """Infer an appropriate precision for _round_frac """ for precision in range(base_precision, 20): levels = [_round_frac(b, precision) for b in bins] if algos.unique(levels).size == bins.size: return precision return base_precision
[ "def", "_infer_precision", "(", "base_precision", ",", "bins", ")", ":", "for", "precision", "in", "range", "(", "base_precision", ",", "20", ")", ":", "levels", "=", "[", "_round_frac", "(", "b", ",", "precision", ")", "for", "b", "in", "bins", "]", "if", "algos", ".", "unique", "(", "levels", ")", ".", "size", "==", "bins", ".", "size", ":", "return", "precision", "return", "base_precision" ]
Infer an appropriate precision for _round_frac
[ "Infer", "an", "appropriate", "precision", "for", "_round_frac" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L545-L552
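A simplified standalone sketch of the search (the name `infer_precision` is invented, and plain decimal rounding stands in for the significant-digit rule of `_round_frac`): raise the precision until rounding no longer collapses two distinct bin edges into the same label.

import numpy as np

def infer_precision(base_precision, bins):
    for precision in range(base_precision, 20):
        levels = np.around(bins, precision)
        if np.unique(levels).size == bins.size:
            return precision
    return base_precision

# 3 decimals cannot distinguish the first two edges, so 4 is inferred.
print(infer_precision(3, np.array([0.1231, 0.1234, 0.2])))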
19,863
pandas-dev/pandas
pandas/_config/display.py
detect_console_encoding
def detect_console_encoding(): """ Try to find the most capable encoding supported by the console. Slightly modified from the way IPython handles the same issue. """ global _initial_defencoding encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding except (AttributeError, IOError): pass # try again for something better if not encoding or 'ascii' in encoding.lower(): try: encoding = locale.getpreferredencoding() except Exception: pass # when all else fails, this will usually be "ascii" if not encoding or 'ascii' in encoding.lower(): encoding = sys.getdefaultencoding() # GH#3360, save the reported defencoding at import time # MPL backends may change it. Make available for debugging. if not _initial_defencoding: _initial_defencoding = sys.getdefaultencoding() return encoding
python
def detect_console_encoding(): """ Try to find the most capable encoding supported by the console. Slightly modified from the way IPython handles the same issue. """ global _initial_defencoding encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding except (AttributeError, IOError): pass # try again for something better if not encoding or 'ascii' in encoding.lower(): try: encoding = locale.getpreferredencoding() except Exception: pass # when all else fails, this will usually be "ascii" if not encoding or 'ascii' in encoding.lower(): encoding = sys.getdefaultencoding() # GH#3360, save the reported defencoding at import time # MPL backends may change it. Make available for debugging. if not _initial_defencoding: _initial_defencoding = sys.getdefaultencoding() return encoding
[ "def", "detect_console_encoding", "(", ")", ":", "global", "_initial_defencoding", "encoding", "=", "None", "try", ":", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "or", "sys", ".", "stdin", ".", "encoding", "except", "(", "AttributeError", ",", "IOError", ")", ":", "pass", "# try again for something better", "if", "not", "encoding", "or", "'ascii'", "in", "encoding", ".", "lower", "(", ")", ":", "try", ":", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "except", "Exception", ":", "pass", "# when all else fails. this will usually be \"ascii\"", "if", "not", "encoding", "or", "'ascii'", "in", "encoding", ".", "lower", "(", ")", ":", "encoding", "=", "sys", ".", "getdefaultencoding", "(", ")", "# GH#3360, save the reported defencoding at import time", "# MPL backends may change it. Make available for debugging.", "if", "not", "_initial_defencoding", ":", "_initial_defencoding", "=", "sys", ".", "getdefaultencoding", "(", ")", "return", "encoding" ]
Try to find the most capable encoding supported by the console. Slightly modified from the way IPython handles the same issue.
[ "Try", "to", "find", "the", "most", "capable", "encoding", "supported", "by", "the", "console", ".", "slightly", "modified", "from", "the", "way", "IPython", "handles", "the", "same", "issue", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/display.py#L14-L43
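The fallback chain, stripped of the module-level cache, as a pure-stdlib sketch: stdout/stdin encoding first, then the locale's preferred encoding, then the interpreter default.

import locale
import sys

encoding = None
try:
    encoding = sys.stdout.encoding or sys.stdin.encoding
except (AttributeError, IOError):
    pass  # e.g. stdout replaced by an object without .encoding
if not encoding or 'ascii' in encoding.lower():
    encoding = locale.getpreferredencoding()
if not encoding or 'ascii' in encoding.lower():
    encoding = sys.getdefaultencoding()
print(encoding)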
19,864
pandas-dev/pandas
pandas/util/_validators.py
_check_arg_length
def _check_arg_length(fname, args, max_fname_arg_count, compat_args): """ Checks whether 'args' has length of at most 'compat_args'. Raises a TypeError if that is not the case, similar to in Python when a function is called with too many arguments. """ if max_fname_arg_count < 0: raise ValueError("'max_fname_arg_count' must be non-negative") if len(args) > len(compat_args): max_arg_count = len(compat_args) + max_fname_arg_count actual_arg_count = len(args) + max_fname_arg_count argument = 'argument' if max_arg_count == 1 else 'arguments' raise TypeError( "{fname}() takes at most {max_arg} {argument} " "({given_arg} given)".format( fname=fname, max_arg=max_arg_count, argument=argument, given_arg=actual_arg_count))
python
def _check_arg_length(fname, args, max_fname_arg_count, compat_args): """ Checks whether 'args' has length of at most 'compat_args'. Raises a TypeError if that is not the case, similar to in Python when a function is called with too many arguments. """ if max_fname_arg_count < 0: raise ValueError("'max_fname_arg_count' must be non-negative") if len(args) > len(compat_args): max_arg_count = len(compat_args) + max_fname_arg_count actual_arg_count = len(args) + max_fname_arg_count argument = 'argument' if max_arg_count == 1 else 'arguments' raise TypeError( "{fname}() takes at most {max_arg} {argument} " "({given_arg} given)".format( fname=fname, max_arg=max_arg_count, argument=argument, given_arg=actual_arg_count))
[ "def", "_check_arg_length", "(", "fname", ",", "args", ",", "max_fname_arg_count", ",", "compat_args", ")", ":", "if", "max_fname_arg_count", "<", "0", ":", "raise", "ValueError", "(", "\"'max_fname_arg_count' must be non-negative\"", ")", "if", "len", "(", "args", ")", ">", "len", "(", "compat_args", ")", ":", "max_arg_count", "=", "len", "(", "compat_args", ")", "+", "max_fname_arg_count", "actual_arg_count", "=", "len", "(", "args", ")", "+", "max_fname_arg_count", "argument", "=", "'argument'", "if", "max_arg_count", "==", "1", "else", "'arguments'", "raise", "TypeError", "(", "\"{fname}() takes at most {max_arg} {argument} \"", "\"({given_arg} given)\"", ".", "format", "(", "fname", "=", "fname", ",", "max_arg", "=", "max_arg_count", ",", "argument", "=", "argument", ",", "given_arg", "=", "actual_arg_count", ")", ")" ]
Checks whether 'args' has length of at most 'compat_args'. Raises a TypeError if that is not the case, similar to in Python when a function is called with too many arguments.
[ "Checks", "whether", "args", "has", "length", "of", "at", "most", "compat_args", ".", "Raises", "a", "TypeError", "if", "that", "is", "not", "the", "case", "similar", "to", "in", "Python", "when", "a", "function", "is", "called", "with", "too", "many", "arguments", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L10-L29
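A standalone mimic of the count arithmetic (the name `check_arg_length` is invented): `max_fname_arg_count` accounts for arguments the pandas method itself already consumed (e.g. `self`), so the reported counts read like CPython's own error.

def check_arg_length(fname, args, max_fname_arg_count, compat_args):
    if len(args) > len(compat_args):
        max_arg_count = len(compat_args) + max_fname_arg_count
        actual_arg_count = len(args) + max_fname_arg_count
        raise TypeError(
            "{}() takes at most {} arguments ({} given)".format(
                fname, max_arg_count, actual_arg_count))

# A numpy-compat signature with one pandas-level argument already used:
try:
    check_arg_length('take', (1, 2, 3), 1, {'axis': 0, 'out': None})
except TypeError as err:
    print(err)  # take() takes at most 3 arguments (4 given)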
19,865
pandas-dev/pandas
pandas/util/_validators.py
_check_for_default_values
def _check_for_default_values(fname, arg_val_dict, compat_args): """ Check that the keys in `arg_val_dict` are mapped to their default values as specified in `compat_args`. Note that this function is to be called only when it has been checked that arg_val_dict.keys() is a subset of compat_args """ for key in arg_val_dict: # try checking equality directly with '==' operator, # as comparison may have been overridden for the left # hand object try: v1 = arg_val_dict[key] v2 = compat_args[key] # check for None-ness otherwise we could end up # comparing a numpy array vs None if (v1 is not None and v2 is None) or \ (v1 is None and v2 is not None): match = False else: match = (v1 == v2) if not is_bool(match): raise ValueError("'match' is not a boolean") # could not compare them directly, so try comparison # using the 'is' operator except ValueError: match = (arg_val_dict[key] is compat_args[key]) if not match: raise ValueError(("the '{arg}' parameter is not " "supported in the pandas " "implementation of {fname}()". format(fname=fname, arg=key)))
python
def _check_for_default_values(fname, arg_val_dict, compat_args): """ Check that the keys in `arg_val_dict` are mapped to their default values as specified in `compat_args`. Note that this function is to be called only when it has been checked that arg_val_dict.keys() is a subset of compat_args """ for key in arg_val_dict: # try checking equality directly with '==' operator, # as comparison may have been overridden for the left # hand object try: v1 = arg_val_dict[key] v2 = compat_args[key] # check for None-ness otherwise we could end up # comparing a numpy array vs None if (v1 is not None and v2 is None) or \ (v1 is None and v2 is not None): match = False else: match = (v1 == v2) if not is_bool(match): raise ValueError("'match' is not a boolean") # could not compare them directly, so try comparison # using the 'is' operator except ValueError: match = (arg_val_dict[key] is compat_args[key]) if not match: raise ValueError(("the '{arg}' parameter is not " "supported in the pandas " "implementation of {fname}()". format(fname=fname, arg=key)))
[ "def", "_check_for_default_values", "(", "fname", ",", "arg_val_dict", ",", "compat_args", ")", ":", "for", "key", "in", "arg_val_dict", ":", "# try checking equality directly with '=' operator,", "# as comparison may have been overridden for the left", "# hand object", "try", ":", "v1", "=", "arg_val_dict", "[", "key", "]", "v2", "=", "compat_args", "[", "key", "]", "# check for None-ness otherwise we could end up", "# comparing a numpy array vs None", "if", "(", "v1", "is", "not", "None", "and", "v2", "is", "None", ")", "or", "(", "v1", "is", "None", "and", "v2", "is", "not", "None", ")", ":", "match", "=", "False", "else", ":", "match", "=", "(", "v1", "==", "v2", ")", "if", "not", "is_bool", "(", "match", ")", ":", "raise", "ValueError", "(", "\"'match' is not a boolean\"", ")", "# could not compare them directly, so try comparison", "# using the 'is' operator", "except", "ValueError", ":", "match", "=", "(", "arg_val_dict", "[", "key", "]", "is", "compat_args", "[", "key", "]", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "(", "\"the '{arg}' parameter is not \"", "\"supported in the pandas \"", "\"implementation of {fname}()\"", ".", "format", "(", "fname", "=", "fname", ",", "arg", "=", "key", ")", ")", ")" ]
Check that the keys in `arg_val_dict` are mapped to their default values as specified in `compat_args`. Note that this function is to be called only when it has been checked that arg_val_dict.keys() is a subset of compat_args
[ "Check", "that", "the", "keys", "in", "arg_val_dict", "are", "mapped", "to", "their", "default", "values", "as", "specified", "in", "compat_args", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L32-L69
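The interesting wrinkle is that `==` can return a non-bool (a numpy array, for instance), in which case the check falls back to identity. A simplified standalone sketch of just that fallback (the name `matches_default` is invented; the real helper also special-cases None first):

import numpy as np

def matches_default(value, default):
    try:
        match = (value == default)
        if not isinstance(match, bool):
            raise ValueError("'match' is not a boolean")
    except ValueError:
        match = value is default  # identity fallback
    return match

print(matches_default(np.array([1, 2]), None))  # array == None -> fallback -> False
print(matches_default(0, 0))                    # plain equality -> True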
19,866
pandas-dev/pandas
pandas/util/_validators.py
_check_for_invalid_keys
def _check_for_invalid_keys(fname, kwargs, compat_args): """ Checks whether 'kwargs' contains any keys that are not in 'compat_args' and raises a TypeError if there is one. """ # set(dict) --> set of the dictionary's keys diff = set(kwargs) - set(compat_args) if diff: bad_arg = list(diff)[0] raise TypeError(("{fname}() got an unexpected " "keyword argument '{arg}'". format(fname=fname, arg=bad_arg)))
python
def _check_for_invalid_keys(fname, kwargs, compat_args): """ Checks whether 'kwargs' contains any keys that are not in 'compat_args' and raises a TypeError if there is one. """ # set(dict) --> set of the dictionary's keys diff = set(kwargs) - set(compat_args) if diff: bad_arg = list(diff)[0] raise TypeError(("{fname}() got an unexpected " "keyword argument '{arg}'". format(fname=fname, arg=bad_arg)))
[ "def", "_check_for_invalid_keys", "(", "fname", ",", "kwargs", ",", "compat_args", ")", ":", "# set(dict) --> set of the dictionary's keys", "diff", "=", "set", "(", "kwargs", ")", "-", "set", "(", "compat_args", ")", "if", "diff", ":", "bad_arg", "=", "list", "(", "diff", ")", "[", "0", "]", "raise", "TypeError", "(", "(", "\"{fname}() got an unexpected \"", "\"keyword argument '{arg}'\"", ".", "format", "(", "fname", "=", "fname", ",", "arg", "=", "bad_arg", ")", ")", ")" ]
Checks whether 'kwargs' contains any keys that are not in 'compat_args' and raises a TypeError if there is one.
[ "Checks", "whether", "kwargs", "contains", "any", "keys", "that", "are", "not", "in", "compat_args", "and", "raises", "a", "TypeError", "if", "there", "is", "one", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L114-L127
19,867
pandas-dev/pandas
pandas/util/_validators.py
validate_bool_kwarg
def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): raise ValueError('For argument "{arg}" expected type bool, received ' 'type {typ}.'.format(arg=arg_name, typ=type(value).__name__)) return value
python
def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): raise ValueError('For argument "{arg}" expected type bool, received ' 'type {typ}.'.format(arg=arg_name, typ=type(value).__name__)) return value
[ "def", "validate_bool_kwarg", "(", "value", ",", "arg_name", ")", ":", "if", "not", "(", "is_bool", "(", "value", ")", "or", "value", "is", "None", ")", ":", "raise", "ValueError", "(", "'For argument \"{arg}\" expected type bool, received '", "'type {typ}.'", ".", "format", "(", "arg", "=", "arg_name", ",", "typ", "=", "type", "(", "value", ")", ".", "__name__", ")", ")", "return", "value" ]
Ensures that argument passed in arg_name is of type bool.
[ "Ensures", "that", "argument", "passed", "in", "arg_name", "is", "of", "type", "bool", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L221-L227
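A usage sketch, assuming the private module path is importable at this revision (it is the `path` field of this record):

from pandas.util._validators import validate_bool_kwarg

validate_bool_kwarg(True, 'inplace')   # returns True
validate_bool_kwarg(None, 'inplace')   # None is allowed through
try:
    validate_bool_kwarg('yes', 'inplace')
except ValueError as err:
    print(err)  # expected type bool, received type str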
19,868
pandas-dev/pandas
pandas/util/_validators.py
validate_fillna_kwargs
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True): """Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object """ from pandas.core.missing import clean_fill_method if value is None and method is None: raise ValueError("Must specify a fill 'value' or 'method'.") elif value is None and method is not None: method = clean_fill_method(method) elif value is not None and method is None: if validate_scalar_dict_value and isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__)) elif value is not None and method is not None: raise ValueError("Cannot specify both 'value' and 'method'.") return value, method
python
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True): """Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object """ from pandas.core.missing import clean_fill_method if value is None and method is None: raise ValueError("Must specify a fill 'value' or 'method'.") elif value is None and method is not None: method = clean_fill_method(method) elif value is not None and method is None: if validate_scalar_dict_value and isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__)) elif value is not None and method is not None: raise ValueError("Cannot specify both 'value' and 'method'.") return value, method
[ "def", "validate_fillna_kwargs", "(", "value", ",", "method", ",", "validate_scalar_dict_value", "=", "True", ")", ":", "from", "pandas", ".", "core", ".", "missing", "import", "clean_fill_method", "if", "value", "is", "None", "and", "method", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify a fill 'value' or 'method'.\"", ")", "elif", "value", "is", "None", "and", "method", "is", "not", "None", ":", "method", "=", "clean_fill_method", "(", "method", ")", "elif", "value", "is", "not", "None", "and", "method", "is", "None", ":", "if", "validate_scalar_dict_value", "and", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'\"value\" parameter must be a scalar or dict, but '", "'you passed a \"{0}\"'", ".", "format", "(", "type", "(", "value", ")", ".", "__name__", ")", ")", "elif", "value", "is", "not", "None", "and", "method", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot specify both 'value' and 'method'.\"", ")", "return", "value", ",", "method" ]
Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object
[ "Validate", "the", "keyword", "arguments", "to", "fillna", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L325-L358
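A usage sketch of the mutual-exclusion rule, again assuming the private module is importable at this revision:

from pandas.util._validators import validate_fillna_kwargs

value, method = validate_fillna_kwargs(0, None)        # scalar value is fine
value, method = validate_fillna_kwargs(None, 'ffill')  # method is normalized
try:
    validate_fillna_kwargs(0, 'ffill')  # both given -> ValueError
except ValueError as err:
    print(err)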
19,869
pandas-dev/pandas
pandas/core/resample.py
_maybe_process_deprecations
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None): """ We might have a deprecation warning; show it, but call the appropriate methods anyway. """ if how is not None: # .resample(..., how='sum') if isinstance(how, str): method = "{0}()".format(how) # .resample(..., how=lambda x: ....) else: method = ".apply(<func>)" # if we have both a how and fill_method, then show # the following warning if fill_method is None: warnings.warn("how in .resample() is deprecated\n" "the new syntax is " ".resample(...).{method}".format( method=method), FutureWarning, stacklevel=3) r = r.aggregate(how) if fill_method is not None: # show the prior function call method = '.' + method if how is not None else '' args = "limit={0}".format(limit) if limit is not None else "" warnings.warn("fill_method is deprecated to .resample()\n" "the new syntax is .resample(...){method}" ".{fill_method}({args})".format( method=method, fill_method=fill_method, args=args), FutureWarning, stacklevel=3) if how is not None: r = getattr(r, fill_method)(limit=limit) else: r = r.aggregate(fill_method, limit=limit) return r
python
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None): """ We might have a deprecation warning; show it, but call the appropriate methods anyway. """ if how is not None: # .resample(..., how='sum') if isinstance(how, str): method = "{0}()".format(how) # .resample(..., how=lambda x: ....) else: method = ".apply(<func>)" # if we have both a how and fill_method, then show # the following warning if fill_method is None: warnings.warn("how in .resample() is deprecated\n" "the new syntax is " ".resample(...).{method}".format( method=method), FutureWarning, stacklevel=3) r = r.aggregate(how) if fill_method is not None: # show the prior function call method = '.' + method if how is not None else '' args = "limit={0}".format(limit) if limit is not None else "" warnings.warn("fill_method is deprecated to .resample()\n" "the new syntax is .resample(...){method}" ".{fill_method}({args})".format( method=method, fill_method=fill_method, args=args), FutureWarning, stacklevel=3) if how is not None: r = getattr(r, fill_method)(limit=limit) else: r = r.aggregate(fill_method, limit=limit) return r
[ "def", "_maybe_process_deprecations", "(", "r", ",", "how", "=", "None", ",", "fill_method", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "how", "is", "not", "None", ":", "# .resample(..., how='sum')", "if", "isinstance", "(", "how", ",", "str", ")", ":", "method", "=", "\"{0}()\"", ".", "format", "(", "how", ")", "# .resample(..., how=lambda x: ....)", "else", ":", "method", "=", "\".apply(<func>)\"", "# if we have both a how and fill_method, then show", "# the following warning", "if", "fill_method", "is", "None", ":", "warnings", ".", "warn", "(", "\"how in .resample() is deprecated\\n\"", "\"the new syntax is \"", "\".resample(...).{method}\"", ".", "format", "(", "method", "=", "method", ")", ",", "FutureWarning", ",", "stacklevel", "=", "3", ")", "r", "=", "r", ".", "aggregate", "(", "how", ")", "if", "fill_method", "is", "not", "None", ":", "# show the prior function call", "method", "=", "'.'", "+", "method", "if", "how", "is", "not", "None", "else", "''", "args", "=", "\"limit={0}\"", ".", "format", "(", "limit", ")", "if", "limit", "is", "not", "None", "else", "\"\"", "warnings", ".", "warn", "(", "\"fill_method is deprecated to .resample()\\n\"", "\"the new syntax is .resample(...){method}\"", "\".{fill_method}({args})\"", ".", "format", "(", "method", "=", "method", ",", "fill_method", "=", "fill_method", ",", "args", "=", "args", ")", ",", "FutureWarning", ",", "stacklevel", "=", "3", ")", "if", "how", "is", "not", "None", ":", "r", "=", "getattr", "(", "r", ",", "fill_method", ")", "(", "limit", "=", "limit", ")", "else", ":", "r", "=", "r", ".", "aggregate", "(", "fill_method", ",", "limit", "=", "limit", ")", "return", "r" ]
We might have a deprecation warning; show it, but call the appropriate methods anyway.
[ "Potentially", "we", "might", "have", "a", "deprecation", "warning", "show", "it", "but", "call", "the", "appropriate", "methods", "anyhow", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L877-L922
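The spellings this shim steers users between, as a sketch (sample data invented; the deprecated calls are left commented out so the snippet runs without warnings):

import numpy as np
import pandas as pd

s = pd.Series(np.arange(4),
              index=pd.date_range('2000-01-01', periods=4, freq='12H'))
# Deprecated spellings (routed through _maybe_process_deprecations):
#   s.resample('D', how='sum')            # FutureWarning
#   s.resample('D', fill_method='ffill')  # FutureWarning
# Current spellings:
s.resample('D').sum()
s.resample('D').ffill()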
19,870
pandas-dev/pandas
pandas/core/resample.py
resample
def resample(obj, kind=None, **kwds): """ Create a TimeGrouper and return our resampler. """ tg = TimeGrouper(**kwds) return tg._get_resampler(obj, kind=kind)
python
def resample(obj, kind=None, **kwds): """ Create a TimeGrouper and return our resampler. """ tg = TimeGrouper(**kwds) return tg._get_resampler(obj, kind=kind)
[ "def", "resample", "(", "obj", ",", "kind", "=", "None", ",", "*", "*", "kwds", ")", ":", "tg", "=", "TimeGrouper", "(", "*", "*", "kwds", ")", "return", "tg", ".", "_get_resampler", "(", "obj", ",", "kind", "=", "kind", ")" ]
Create a TimeGrouper and return our resampler.
[ "Create", "a", "TimeGrouper", "and", "return", "our", "resampler", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1238-L1243
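A minimal sketch through the public entry point, which builds the same TimeGrouper under the hood (sample data invented):

import numpy as np
import pandas as pd

s = pd.Series(np.arange(6),
              index=pd.date_range('2000-01-01', periods=6, freq='12H'))
print(s.resample('D').sum())  # two 12H observations folded per day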
19,871
pandas-dev/pandas
pandas/core/resample.py
get_resampler_for_grouping
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs): """ Return our appropriate resampler when grouping as well. """ # .resample uses 'on' similar to how .groupby uses 'key' kwargs['key'] = kwargs.pop('on', None) tg = TimeGrouper(freq=rule, **kwargs) resampler = tg._get_resampler(groupby.obj, kind=kind) r = resampler._get_resampler_for_grouping(groupby=groupby) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
python
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs): """ Return our appropriate resampler when grouping as well. """ # .resample uses 'on' similar to how .groupby uses 'key' kwargs['key'] = kwargs.pop('on', None) tg = TimeGrouper(freq=rule, **kwargs) resampler = tg._get_resampler(groupby.obj, kind=kind) r = resampler._get_resampler_for_grouping(groupby=groupby) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
[ "def", "get_resampler_for_grouping", "(", "groupby", ",", "rule", ",", "how", "=", "None", ",", "fill_method", "=", "None", ",", "limit", "=", "None", ",", "kind", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# .resample uses 'on' similar to how .groupby uses 'key'", "kwargs", "[", "'key'", "]", "=", "kwargs", ".", "pop", "(", "'on'", ",", "None", ")", "tg", "=", "TimeGrouper", "(", "freq", "=", "rule", ",", "*", "*", "kwargs", ")", "resampler", "=", "tg", ".", "_get_resampler", "(", "groupby", ".", "obj", ",", "kind", "=", "kind", ")", "r", "=", "resampler", ".", "_get_resampler_for_grouping", "(", "groupby", "=", "groupby", ")", "return", "_maybe_process_deprecations", "(", "r", ",", "how", "=", "how", ",", "fill_method", "=", "fill_method", ",", "limit", "=", "limit", ")" ]
Return our appropriate resampler when grouping as well.
[ "Return", "our", "appropriate", "resampler", "when", "grouping", "as", "well", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1249-L1264
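Resampling within each group is the public path into this helper. A hedged sketch (sample data invented; assumes groupby-resample behavior at roughly this revision):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b', 'b'],
                   'val': [1, 2, 3, 4]},
                  index=pd.date_range('2000-01-01', periods=4, freq='12H'))
# .groupby(...).resample(...) routes through get_resampler_for_grouping.
print(df.groupby('key').resample('D').sum())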
19,872
pandas-dev/pandas
pandas/core/resample.py
_get_timestamp_range_edges
def _get_timestamp_range_edges(first, last, offset, closed='left', base=0): """ Adjust the `first` Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default 'left' Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Timestamps. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. """ if isinstance(offset, Tick): if isinstance(offset, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). # So "pretend" the dates are naive when adjusting the endpoints tz = first.tz first = first.tz_localize(None) last = last.tz_localize(None) first, last = _adjust_dates_anchored(first, last, offset, closed=closed, base=base) if isinstance(offset, Day): first = first.tz_localize(tz) last = last.tz_localize(tz) return first, last else: first = first.normalize() last = last.normalize() if closed == 'left': first = Timestamp(offset.rollback(first)) else: first = Timestamp(first - offset) last = Timestamp(last + offset) return first, last
python
def _get_timestamp_range_edges(first, last, offset, closed='left', base=0): """ Adjust the `first` Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default 'left' Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Timestamps. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. """ if isinstance(offset, Tick): if isinstance(offset, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). # So "pretend" the dates are naive when adjusting the endpoints tz = first.tz first = first.tz_localize(None) last = last.tz_localize(None) first, last = _adjust_dates_anchored(first, last, offset, closed=closed, base=base) if isinstance(offset, Day): first = first.tz_localize(tz) last = last.tz_localize(tz) return first, last else: first = first.normalize() last = last.normalize() if closed == 'left': first = Timestamp(offset.rollback(first)) else: first = Timestamp(first - offset) last = Timestamp(last + offset) return first, last
[ "def", "_get_timestamp_range_edges", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "'left'", ",", "base", "=", "0", ")", ":", "if", "isinstance", "(", "offset", ",", "Tick", ")", ":", "if", "isinstance", "(", "offset", ",", "Day", ")", ":", "# _adjust_dates_anchored assumes 'D' means 24H, but first/last", "# might contain a DST transition (23H, 24H, or 25H).", "# So \"pretend\" the dates are naive when adjusting the endpoints", "tz", "=", "first", ".", "tz", "first", "=", "first", ".", "tz_localize", "(", "None", ")", "last", "=", "last", ".", "tz_localize", "(", "None", ")", "first", ",", "last", "=", "_adjust_dates_anchored", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "closed", ",", "base", "=", "base", ")", "if", "isinstance", "(", "offset", ",", "Day", ")", ":", "first", "=", "first", ".", "tz_localize", "(", "tz", ")", "last", "=", "last", ".", "tz_localize", "(", "tz", ")", "return", "first", ",", "last", "else", ":", "first", "=", "first", ".", "normalize", "(", ")", "last", "=", "last", ".", "normalize", "(", ")", "if", "closed", "==", "'left'", ":", "first", "=", "Timestamp", "(", "offset", ".", "rollback", "(", "first", ")", ")", "else", ":", "first", "=", "Timestamp", "(", "first", "-", "offset", ")", "last", "=", "Timestamp", "(", "last", "+", "offset", ")", "return", "first", ",", "last" ]
Adjust the `first` Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default 'left' Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Timestamps. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects.
[ "Adjust", "the", "first", "Timestamp", "to", "the", "preceeding", "Timestamp", "that", "resides", "on", "the", "provided", "offset", ".", "Adjust", "the", "last", "Timestamp", "to", "the", "following", "Timestamp", "that", "resides", "on", "the", "provided", "offset", ".", "Input", "Timestamps", "that", "already", "reside", "on", "the", "offset", "will", "be", "adjusted", "depending", "on", "the", "type", "of", "offset", "and", "the", "closed", "parameter", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1582-L1634
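A loose illustration of the two anchoring regimes (simplified: the real helper also handles `base` and the `closed` side):

import pandas as pd

# Fixed-frequency (Tick) offsets: the first edge snaps onto the
# offset grid, much like flooring.
first = pd.Timestamp('2000-01-01 00:03:00')
print(first.floor('5T'))  # 2000-01-01 00:00:00

# Calendar offsets: the dates are normalized and rolled back to the
# previous on-offset date.
offset = pd.offsets.MonthEnd()
print(offset.rollback(pd.Timestamp('2000-01-15').normalize()))  # 1999-12-31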
19,873
pandas-dev/pandas
pandas/core/resample.py
_get_period_range_edges
def _get_period_range_edges(first, last, offset, closed='left', base=0): """ Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Periods will be adjusted. closed : {'right', 'left'}, default 'left' Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Periods. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects. """ if not all(isinstance(obj, pd.Period) for obj in [first, last]): raise TypeError("'first' and 'last' must be instances of type Period") # GH 23882 first = first.to_timestamp() last = last.to_timestamp() adjust_first = not offset.onOffset(first) adjust_last = offset.onOffset(last) first, last = _get_timestamp_range_edges(first, last, offset, closed=closed, base=base) first = (first + adjust_first * offset).to_period(offset) last = (last - adjust_last * offset).to_period(offset) return first, last
python
def _get_period_range_edges(first, last, offset, closed='left', base=0): """ Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Periods will be adjusted. closed : {'right', 'left'}, default 'left' Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Periods. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects. """ if not all(isinstance(obj, pd.Period) for obj in [first, last]): raise TypeError("'first' and 'last' must be instances of type Period") # GH 23882 first = first.to_timestamp() last = last.to_timestamp() adjust_first = not offset.onOffset(first) adjust_last = offset.onOffset(last) first, last = _get_timestamp_range_edges(first, last, offset, closed=closed, base=base) first = (first + adjust_first * offset).to_period(offset) last = (last - adjust_last * offset).to_period(offset) return first, last
[ "def", "_get_period_range_edges", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "'left'", ",", "base", "=", "0", ")", ":", "if", "not", "all", "(", "isinstance", "(", "obj", ",", "pd", ".", "Period", ")", "for", "obj", "in", "[", "first", ",", "last", "]", ")", ":", "raise", "TypeError", "(", "\"'first' and 'last' must be instances of type Period\"", ")", "# GH 23882", "first", "=", "first", ".", "to_timestamp", "(", ")", "last", "=", "last", ".", "to_timestamp", "(", ")", "adjust_first", "=", "not", "offset", ".", "onOffset", "(", "first", ")", "adjust_last", "=", "offset", ".", "onOffset", "(", "last", ")", "first", ",", "last", "=", "_get_timestamp_range_edges", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "closed", ",", "base", "=", "base", ")", "first", "=", "(", "first", "+", "adjust_first", "*", "offset", ")", ".", "to_period", "(", "offset", ")", "last", "=", "(", "last", "-", "adjust_last", "*", "offset", ")", ".", "to_period", "(", "offset", ")", "return", "first", ",", "last" ]
Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Periods will be adjusted. closed : {'right', 'left'}, default 'left' Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Periods. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects.
[ "Adjust", "the", "provided", "first", "and", "last", "Periods", "to", "the", "respective", "Period", "of", "the", "given", "offset", "that", "encompasses", "them", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1637-L1673
19,874
pandas-dev/pandas
pandas/core/resample.py
Resampler._from_selection
def _from_selection(self): """ Is the resampling from a DataFrame column or MultiIndex level. """ # upsampling and PeriodIndex resampling do not work # with selection; this state is used to catch and raise an error return (self.groupby is not None and (self.groupby.key is not None or self.groupby.level is not None))
python
def _from_selection(self): """ Is the resampling from a DataFrame column or MultiIndex level. """ # upsampling and PeriodIndex resampling do not work # with selection; this state is used to catch and raise an error return (self.groupby is not None and (self.groupby.key is not None or self.groupby.level is not None))
[ "def", "_from_selection", "(", "self", ")", ":", "# upsampling and PeriodIndex resampling do not work", "# with selection, this state used to catch and raise an error", "return", "(", "self", ".", "groupby", "is", "not", "None", "and", "(", "self", ".", "groupby", ".", "key", "is", "not", "None", "or", "self", ".", "groupby", ".", "level", "is", "not", "None", ")", ")" ]
Is the resampling from a DataFrame column or MultiIndex level.
[ "Is", "the", "resampling", "from", "a", "DataFrame", "column", "or", "MultiIndex", "level", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L135-L143
19,875
pandas-dev/pandas
pandas/core/resample.py
Resampler._set_binner
def _set_binner(self): """ Set up our binners. Cache these, as we are an immutable object """ if self.binner is None: self.binner, self.grouper = self._get_binner()
python
def _set_binner(self): """ Set up our binners. Cache these, as we are an immutable object """ if self.binner is None: self.binner, self.grouper = self._get_binner()
[ "def", "_set_binner", "(", "self", ")", ":", "if", "self", ".", "binner", "is", "None", ":", "self", ".", "binner", ",", "self", ".", "grouper", "=", "self", ".", "_get_binner", "(", ")" ]
Set up our binners. Cache these, as we are an immutable object
[ "Setup", "our", "binners", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L163-L170
19,876
pandas-dev/pandas
pandas/core/resample.py
Resampler.transform
def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group and return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. Returns ------- transformed : Series Examples -------- >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) """ return self._selected_obj.groupby(self.groupby).transform( arg, *args, **kwargs)
python
def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group and return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. Returns ------- transformed : Series Examples -------- >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) """ return self._selected_obj.groupby(self.groupby).transform( arg, *args, **kwargs)
[ "def", "transform", "(", "self", ",", "arg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_selected_obj", ".", "groupby", "(", "self", ".", "groupby", ")", ".", "transform", "(", "arg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Call function producing a like-indexed Series on each group and
        return a Series with the transformed values.

        Parameters
        ----------
        arg : function
            To apply to each group. Should return a Series with the same index

        Returns
        -------
        transformed : Series

        Examples
        --------
        >>> resampled.transform(lambda x: (x - x.mean()) / x.std())
[ "Call", "function", "producing", "a", "like", "-", "indexed", "Series", "on", "each", "group", "and", "return", "a", "Series", "with", "the", "transformed", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L280-L299
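A runnable version of the docstring example above, with an invented series:

import numpy as np
import pandas as pd

idx = pd.date_range('2018-01-01', periods=6, freq='12H')
s = pd.Series(np.arange(6, dtype='float64'), index=idx)

# z-score within each daily bin; the result keeps the original 12H index
resampled = s.resample('D')
print(resampled.transform(lambda x: (x - x.mean()) / x.std()))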
19,877
pandas-dev/pandas
pandas/core/resample.py
Resampler._groupby_and_aggregate
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
        """
        Re-evaluate the obj with a groupby aggregation.
        """
        if grouper is None:
            self._set_binner()
            grouper = self.grouper

        obj = self._selected_obj

        grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)

        try:
            if isinstance(obj, ABCDataFrame) and callable(how):
                # Check if the function is reducing or not.
                result = grouped._aggregate_item_by_item(how, *args, **kwargs)
            else:
                result = grouped.aggregate(how, *args, **kwargs)
        except Exception:

            # we have a non-reducing function
            # try to evaluate
            result = grouped.apply(how, *args, **kwargs)

        result = self._apply_loffset(result)
        return self._wrap_result(result)
python
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
        """
        Re-evaluate the obj with a groupby aggregation.
        """
        if grouper is None:
            self._set_binner()
            grouper = self.grouper

        obj = self._selected_obj

        grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)

        try:
            if isinstance(obj, ABCDataFrame) and callable(how):
                # Check if the function is reducing or not.
                result = grouped._aggregate_item_by_item(how, *args, **kwargs)
            else:
                result = grouped.aggregate(how, *args, **kwargs)
        except Exception:

            # we have a non-reducing function
            # try to evaluate
            result = grouped.apply(how, *args, **kwargs)

        result = self._apply_loffset(result)
        return self._wrap_result(result)
[ "def", "_groupby_and_aggregate", "(", "self", ",", "how", ",", "grouper", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "grouper", "is", "None", ":", "self", ".", "_set_binner", "(", ")", "grouper", "=", "self", ".", "grouper", "obj", "=", "self", ".", "_selected_obj", "grouped", "=", "groupby", "(", "obj", ",", "by", "=", "None", ",", "grouper", "=", "grouper", ",", "axis", "=", "self", ".", "axis", ")", "try", ":", "if", "isinstance", "(", "obj", ",", "ABCDataFrame", ")", "and", "callable", "(", "how", ")", ":", "# Check if the function is reducing or not.", "result", "=", "grouped", ".", "_aggregate_item_by_item", "(", "how", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "result", "=", "grouped", ".", "aggregate", "(", "how", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "# we have a non-reducing function", "# try to evaluate", "result", "=", "grouped", ".", "apply", "(", "how", ",", "*", "args", ",", "*", "*", "kwargs", ")", "result", "=", "self", ".", "_apply_loffset", "(", "result", ")", "return", "self", ".", "_wrap_result", "(", "result", ")" ]
Re-evaluate the obj with a groupby aggregation.
[ "Re", "-", "evaluate", "the", "obj", "with", "a", "groupby", "aggregation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L331-L357
19,878
pandas-dev/pandas
pandas/core/resample.py
Resampler._apply_loffset
def _apply_loffset(self, result):
        """
        If loffset is set, offset the result index.

        This is NOT an idempotent routine, it will be applied
        exactly once to the result.

        Parameters
        ----------
        result : Series or DataFrame
            the result of resample
        """
        needs_offset = (
            isinstance(self.loffset, (DateOffset, timedelta,
                                      np.timedelta64)) and
            isinstance(result.index, DatetimeIndex) and
            len(result.index) > 0
        )

        if needs_offset:
            result.index = result.index + self.loffset

        self.loffset = None
        return result
python
def _apply_loffset(self, result):
        """
        If loffset is set, offset the result index.

        This is NOT an idempotent routine, it will be applied
        exactly once to the result.

        Parameters
        ----------
        result : Series or DataFrame
            the result of resample
        """
        needs_offset = (
            isinstance(self.loffset, (DateOffset, timedelta,
                                      np.timedelta64)) and
            isinstance(result.index, DatetimeIndex) and
            len(result.index) > 0
        )

        if needs_offset:
            result.index = result.index + self.loffset

        self.loffset = None
        return result
[ "def", "_apply_loffset", "(", "self", ",", "result", ")", ":", "needs_offset", "=", "(", "isinstance", "(", "self", ".", "loffset", ",", "(", "DateOffset", ",", "timedelta", ",", "np", ".", "timedelta64", ")", ")", "and", "isinstance", "(", "result", ".", "index", ",", "DatetimeIndex", ")", "and", "len", "(", "result", ".", "index", ")", ">", "0", ")", "if", "needs_offset", ":", "result", ".", "index", "=", "result", ".", "index", "+", "self", ".", "loffset", "self", ".", "loffset", "=", "None", "return", "result" ]
If loffset is set, offset the result index.

        This is NOT an idempotent routine, it will be applied
        exactly once to the result.

        Parameters
        ----------
        result : Series or DataFrame
            the result of resample
[ "If", "loffset", "is", "set", "offset", "the", "result", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L359-L383
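For context, `loffset` is the user-facing resample keyword that ends up in this hook; a small sketch with invented data:

import pandas as pd
from datetime import timedelta

idx = pd.date_range('2018-01-01', periods=4, freq='6H')
s = pd.Series(range(4), index=idx)

# the aggregation labels are shifted from midnight to noon, exactly once
print(s.resample('D', loffset=timedelta(hours=12)).mean().index)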
19,879
pandas-dev/pandas
pandas/core/resample.py
Resampler._get_resampler_for_grouping
def _get_resampler_for_grouping(self, groupby, **kwargs):
        """
        Return the correct class for resampling with groupby.
        """
        return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
python
def _get_resampler_for_grouping(self, groupby, **kwargs):
        """
        Return the correct class for resampling with groupby.
        """
        return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
[ "def", "_get_resampler_for_grouping", "(", "self", ",", "groupby", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resampler_for_grouping", "(", "self", ",", "groupby", "=", "groupby", ",", "*", "*", "kwargs", ")" ]
Return the correct class for resampling with groupby.
[ "Return", "the", "correct", "class", "for", "resampling", "with", "groupby", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L385-L389
19,880
pandas-dev/pandas
pandas/core/resample.py
Resampler._wrap_result
def _wrap_result(self, result):
        """
        Potentially wrap any results.
        """
        if isinstance(result, ABCSeries) and self._selection is not None:
            result.name = self._selection

        if isinstance(result, ABCSeries) and result.empty:
            obj = self.obj
            if isinstance(obj.index, PeriodIndex):
                result.index = obj.index.asfreq(self.freq)
            else:
                result.index = obj.index._shallow_copy(freq=self.freq)
            result.name = getattr(obj, 'name', None)

        return result
python
def _wrap_result(self, result):
        """
        Potentially wrap any results.
        """
        if isinstance(result, ABCSeries) and self._selection is not None:
            result.name = self._selection

        if isinstance(result, ABCSeries) and result.empty:
            obj = self.obj
            if isinstance(obj.index, PeriodIndex):
                result.index = obj.index.asfreq(self.freq)
            else:
                result.index = obj.index._shallow_copy(freq=self.freq)
            result.name = getattr(obj, 'name', None)

        return result
[ "def", "_wrap_result", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "ABCSeries", ")", "and", "self", ".", "_selection", "is", "not", "None", ":", "result", ".", "name", "=", "self", ".", "_selection", "if", "isinstance", "(", "result", ",", "ABCSeries", ")", "and", "result", ".", "empty", ":", "obj", "=", "self", ".", "obj", "if", "isinstance", "(", "obj", ".", "index", ",", "PeriodIndex", ")", ":", "result", ".", "index", "=", "obj", ".", "index", ".", "asfreq", "(", "self", ".", "freq", ")", "else", ":", "result", ".", "index", "=", "obj", ".", "index", ".", "_shallow_copy", "(", "freq", "=", "self", ".", "freq", ")", "result", ".", "name", "=", "getattr", "(", "obj", ",", "'name'", ",", "None", ")", "return", "result" ]
Potentially wrap any results.
[ "Potentially", "wrap", "any", "results", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L391-L406
19,881
pandas-dev/pandas
pandas/core/resample.py
_GroupByMixin._apply
def _apply(self, f, grouper=None, *args, **kwargs):
        """
        Dispatch to _upsample; we are stripping all of the _upsample kwargs and
        performing the original function call on the grouped object.
        """

        def func(x):
            x = self._shallow_copy(x, groupby=self.groupby)

            if isinstance(f, str):
                return getattr(x, f)(**kwargs)

            return x.apply(f, *args, **kwargs)

        result = self._groupby.apply(func)
        return self._wrap_result(result)
python
def _apply(self, f, grouper=None, *args, **kwargs):
        """
        Dispatch to _upsample; we are stripping all of the _upsample kwargs and
        performing the original function call on the grouped object.
        """

        def func(x):
            x = self._shallow_copy(x, groupby=self.groupby)

            if isinstance(f, str):
                return getattr(x, f)(**kwargs)

            return x.apply(f, *args, **kwargs)

        result = self._groupby.apply(func)
        return self._wrap_result(result)
[ "def", "_apply", "(", "self", ",", "f", ",", "grouper", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "func", "(", "x", ")", ":", "x", "=", "self", ".", "_shallow_copy", "(", "x", ",", "groupby", "=", "self", ".", "groupby", ")", "if", "isinstance", "(", "f", ",", "str", ")", ":", "return", "getattr", "(", "x", ",", "f", ")", "(", "*", "*", "kwargs", ")", "return", "x", ".", "apply", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", "result", "=", "self", ".", "_groupby", ".", "apply", "(", "func", ")", "return", "self", ".", "_wrap_result", "(", "result", ")" ]
Dispatch to _upsample; we are stripping all of the _upsample kwargs and performing the original function call on the grouped object.
[ "Dispatch", "to", "_upsample", ";", "we", "are", "stripping", "all", "of", "the", "_upsample", "kwargs", "and", "performing", "the", "original", "function", "call", "on", "the", "grouped", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L947-L962
19,882
pandas-dev/pandas
pandas/core/resample.py
DatetimeIndexResampler._adjust_binner_for_upsample
def _adjust_binner_for_upsample(self, binner):
        """
        Adjust our binner when upsampling.

        The range of a new index should not be outside specified range
        """
        if self.closed == 'right':
            binner = binner[1:]
        else:
            binner = binner[:-1]
        return binner
python
def _adjust_binner_for_upsample(self, binner):
        """
        Adjust our binner when upsampling.

        The range of a new index should not be outside specified range
        """
        if self.closed == 'right':
            binner = binner[1:]
        else:
            binner = binner[:-1]
        return binner
[ "def", "_adjust_binner_for_upsample", "(", "self", ",", "binner", ")", ":", "if", "self", ".", "closed", "==", "'right'", ":", "binner", "=", "binner", "[", "1", ":", "]", "else", ":", "binner", "=", "binner", "[", ":", "-", "1", "]", "return", "binner" ]
Adjust our binner when upsampling.

        The range of a new index should not be outside specified range
[ "Adjust", "our", "binner", "when", "upsampling", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1018-L1028
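The trimming above is what keeps an upsampled index inside the original range; a quick check with invented data:

import pandas as pd

idx = pd.date_range('2018-01-01', periods=3, freq='D')
s = pd.Series([1, 2, 3], index=idx)

up = s.resample('12H').asfreq()
# no label falls outside the original [2018-01-01, 2018-01-03] range
assert up.index.min() >= idx.min() and up.index.max() <= idx.max()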
19,883
pandas-dev/pandas
pandas/core/resample.py
TimeGrouper._get_resampler
def _get_resampler(self, obj, kind=None):
        """
        Return my resampler or raise if we have an invalid axis.

        Parameters
        ----------
        obj : input object
        kind : string, optional
            'period','timestamp','timedelta' are valid

        Returns
        -------
        a Resampler

        Raises
        ------
        TypeError if incompatible axis

        """
        self._set_grouper(obj)

        ax = self.ax
        if isinstance(ax, DatetimeIndex):
            return DatetimeIndexResampler(obj,
                                          groupby=self,
                                          kind=kind,
                                          axis=self.axis)
        elif isinstance(ax, PeriodIndex) or kind == 'period':
            return PeriodIndexResampler(obj,
                                        groupby=self,
                                        kind=kind,
                                        axis=self.axis)
        elif isinstance(ax, TimedeltaIndex):
            return TimedeltaIndexResampler(obj,
                                           groupby=self,
                                           axis=self.axis)

        raise TypeError("Only valid with DatetimeIndex, "
                        "TimedeltaIndex or PeriodIndex, "
                        "but got an instance of %r" % type(ax).__name__)
python
def _get_resampler(self, obj, kind=None):
        """
        Return my resampler or raise if we have an invalid axis.

        Parameters
        ----------
        obj : input object
        kind : string, optional
            'period','timestamp','timedelta' are valid

        Returns
        -------
        a Resampler

        Raises
        ------
        TypeError if incompatible axis

        """
        self._set_grouper(obj)

        ax = self.ax
        if isinstance(ax, DatetimeIndex):
            return DatetimeIndexResampler(obj,
                                          groupby=self,
                                          kind=kind,
                                          axis=self.axis)
        elif isinstance(ax, PeriodIndex) or kind == 'period':
            return PeriodIndexResampler(obj,
                                        groupby=self,
                                        kind=kind,
                                        axis=self.axis)
        elif isinstance(ax, TimedeltaIndex):
            return TimedeltaIndexResampler(obj,
                                           groupby=self,
                                           axis=self.axis)

        raise TypeError("Only valid with DatetimeIndex, "
                        "TimedeltaIndex or PeriodIndex, "
                        "but got an instance of %r" % type(ax).__name__)
[ "def", "_get_resampler", "(", "self", ",", "obj", ",", "kind", "=", "None", ")", ":", "self", ".", "_set_grouper", "(", "obj", ")", "ax", "=", "self", ".", "ax", "if", "isinstance", "(", "ax", ",", "DatetimeIndex", ")", ":", "return", "DatetimeIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "kind", "=", "kind", ",", "axis", "=", "self", ".", "axis", ")", "elif", "isinstance", "(", "ax", ",", "PeriodIndex", ")", "or", "kind", "==", "'period'", ":", "return", "PeriodIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "kind", "=", "kind", ",", "axis", "=", "self", ".", "axis", ")", "elif", "isinstance", "(", "ax", ",", "TimedeltaIndex", ")", ":", "return", "TimedeltaIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "axis", "=", "self", ".", "axis", ")", "raise", "TypeError", "(", "\"Only valid with DatetimeIndex, \"", "\"TimedeltaIndex or PeriodIndex, \"", "\"but got an instance of %r\"", "%", "type", "(", "ax", ")", ".", "__name__", ")" ]
Return my resampler or raise if we have an invalid axis.

        Parameters
        ----------
        obj : input object
        kind : string, optional
            'period','timestamp','timedelta' are valid

        Returns
        -------
        a Resampler

        Raises
        ------
        TypeError if incompatible axis
[ "Return", "my", "resampler", "or", "raise", "if", "we", "have", "an", "invalid", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1334-L1373
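The dispatch above is observable from the public API; `kind='period'` forces the PeriodIndex path even on a DatetimeIndex (class names as defined in this file):

import pandas as pd

s = pd.Series(range(4),
              index=pd.date_range('2018-01-01', periods=4, freq='D'))

print(type(s.resample('M')).__name__)                 # DatetimeIndexResampler
print(type(s.resample('M', kind='period')).__name__)  # PeriodIndexResampler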
19,884
pandas-dev/pandas
pandas/core/util/hashing.py
hash_tuple
def hash_tuple(val, encoding='utf8', hash_key=None):
    """
    Hash a single tuple efficiently

    Parameters
    ----------
    val : single tuple
    encoding : string, default 'utf8'
    hash_key : string key to encode, default to _default_hash_key

    Returns
    -------
    hash

    """
    hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key)
              for v in val)

    h = _combine_hash_arrays(hashes, len(val))[0]

    return h
python
def hash_tuple(val, encoding='utf8', hash_key=None):
    """
    Hash a single tuple efficiently

    Parameters
    ----------
    val : single tuple
    encoding : string, default 'utf8'
    hash_key : string key to encode, default to _default_hash_key

    Returns
    -------
    hash

    """
    hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key)
              for v in val)

    h = _combine_hash_arrays(hashes, len(val))[0]

    return h
[ "def", "hash_tuple", "(", "val", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ")", ":", "hashes", "=", "(", "_hash_scalar", "(", "v", ",", "encoding", "=", "encoding", ",", "hash_key", "=", "hash_key", ")", "for", "v", "in", "val", ")", "h", "=", "_combine_hash_arrays", "(", "hashes", ",", "len", "(", "val", ")", ")", "[", "0", "]", "return", "h" ]
Hash a single tuple efficiently

    Parameters
    ----------
    val : single tuple
    encoding : string, default 'utf8'
    hash_key : string key to encode, default to _default_hash_key

    Returns
    -------
    hash
[ "Hash", "a", "single", "tuple", "efficiently" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L167-L187
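A consistency sketch for the helper above: hashing a tuple should agree with hashing the same tuple as a MultiIndex entry (private imports, assumed valid at the sha in these records):

import pandas as pd
from pandas.core.util.hashing import hash_pandas_object, hash_tuple

tup = (1, 'a', pd.Timestamp('2018-01-01'))
mi = pd.MultiIndex.from_tuples([tup])

# element-wise agreement between the scalar-tuple and MultiIndex paths
assert hash_tuple(tup) == hash_pandas_object(mi).iloc[0]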
19,885
pandas-dev/pandas
pandas/core/util/hashing.py
_hash_categorical
def _hash_categorical(c, encoding, hash_key):
    """
    Hash a Categorical by hashing its categories, and then mapping the codes
    to the hashes

    Parameters
    ----------
    c : Categorical
    encoding : string, default 'utf8'
    hash_key : string key to encode, default to _default_hash_key

    Returns
    -------
    ndarray of hashed values array, same size as len(c)
    """
    # Convert ExtensionArrays to ndarrays
    values = np.asarray(c.categories.values)
    hashed = hash_array(values, encoding, hash_key,
                        categorize=False)

    # we have uint64, as we don't directly support missing values
    # we don't want to use take_nd which will coerce to float
    # instead, directly construct the result with a
    # max(np.uint64) as the missing value indicator
    #
    # TODO: GH 15362

    mask = c.isna()
    if len(hashed):
        result = hashed.take(c.codes)
    else:
        result = np.zeros(len(mask), dtype='uint64')

    if mask.any():
        result[mask] = np.iinfo(np.uint64).max

    return result
python
def _hash_categorical(c, encoding, hash_key):
    """
    Hash a Categorical by hashing its categories, and then mapping the codes
    to the hashes

    Parameters
    ----------
    c : Categorical
    encoding : string, default 'utf8'
    hash_key : string key to encode, default to _default_hash_key

    Returns
    -------
    ndarray of hashed values array, same size as len(c)
    """
    # Convert ExtensionArrays to ndarrays
    values = np.asarray(c.categories.values)
    hashed = hash_array(values, encoding, hash_key,
                        categorize=False)

    # we have uint64, as we don't directly support missing values
    # we don't want to use take_nd which will coerce to float
    # instead, directly construct the result with a
    # max(np.uint64) as the missing value indicator
    #
    # TODO: GH 15362

    mask = c.isna()
    if len(hashed):
        result = hashed.take(c.codes)
    else:
        result = np.zeros(len(mask), dtype='uint64')

    if mask.any():
        result[mask] = np.iinfo(np.uint64).max

    return result
[ "def", "_hash_categorical", "(", "c", ",", "encoding", ",", "hash_key", ")", ":", "# Convert ExtensionArrays to ndarrays", "values", "=", "np", ".", "asarray", "(", "c", ".", "categories", ".", "values", ")", "hashed", "=", "hash_array", "(", "values", ",", "encoding", ",", "hash_key", ",", "categorize", "=", "False", ")", "# we have uint64, as we don't directly support missing values", "# we don't want to use take_nd which will coerce to float", "# instead, directly construct the result with a", "# max(np.uint64) as the missing value indicator", "#", "# TODO: GH 15362", "mask", "=", "c", ".", "isna", "(", ")", "if", "len", "(", "hashed", ")", ":", "result", "=", "hashed", ".", "take", "(", "c", ".", "codes", ")", "else", ":", "result", "=", "np", ".", "zeros", "(", "len", "(", "mask", ")", ",", "dtype", "=", "'uint64'", ")", "if", "mask", ".", "any", "(", ")", ":", "result", "[", "mask", "]", "=", "np", ".", "iinfo", "(", "np", ".", "uint64", ")", ".", "max", "return", "result" ]
Hash a Categorical by hashing its categories, and then mapping the codes
    to the hashes

    Parameters
    ----------
    c : Categorical
    encoding : string, default 'utf8'
    hash_key : string key to encode, default to _default_hash_key

    Returns
    -------
    ndarray of hashed values array, same size as len(c)
[ "Hash", "a", "Categorical", "by", "hashing", "its", "categories", "and", "then", "mapping", "the", "codes", "to", "the", "hashes" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L190-L226
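The missing-value sentinel described in the comments can be seen through the public entry point (example data invented):

import numpy as np
import pandas as pd
from pandas.util import hash_array

cat = pd.Categorical(['a', 'b', 'a', None])
hashed = hash_array(cat)

assert hashed[0] == hashed[2]                 # equal categories, equal hash
assert hashed[3] == np.iinfo(np.uint64).max   # missing value sentinel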
19,886
pandas-dev/pandas
pandas/core/util/hashing.py
hash_array
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
    """
    Given a 1d array, return an array of deterministic integers.

    .. versionadded:: 0.19.2

    Parameters
    ----------
    vals : ndarray, Categorical
    encoding : string, default 'utf8'
        encoding for data & key when strings
    hash_key : string key to encode, default to _default_hash_key
    categorize : bool, default True
        Whether to first categorize object arrays before hashing. This is more
        efficient when the array contains duplicate values.

        .. versionadded:: 0.20.0

    Returns
    -------
    1d uint64 numpy array of hash values, same length as the vals

    """
    if not hasattr(vals, 'dtype'):
        raise TypeError("must pass a ndarray-like")
    dtype = vals.dtype

    if hash_key is None:
        hash_key = _default_hash_key

    # For categoricals, we hash the categories, then remap the codes to the
    # hash values. (This check is above the complex check so that we don't ask
    # numpy if categorical is a subdtype of complex, as it will choke).
    if is_categorical_dtype(dtype):
        return _hash_categorical(vals, encoding, hash_key)
    elif is_extension_array_dtype(dtype):
        vals, _ = vals._values_for_factorize()
        dtype = vals.dtype

    # we'll be working with everything as 64-bit values, so handle this
    # 128-bit value early
    if np.issubdtype(dtype, np.complex128):
        return hash_array(vals.real) + 23 * hash_array(vals.imag)

    # First, turn whatever array this is into unsigned 64-bit ints, if we can
    # manage it.
    elif isinstance(dtype, np.bool):
        vals = vals.astype('u8')
    elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
        vals = vals.view('i8').astype('u8', copy=False)
    elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
        vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
    else:
        # With repeated values, its MUCH faster to categorize object dtypes,
        # then hash and rename categories. We allow skipping the categorization
        # when the values are known/likely to be unique.
        if categorize:
            from pandas import factorize, Categorical, Index
            codes, categories = factorize(vals, sort=False)
            cat = Categorical(codes, Index(categories),
                              ordered=False, fastpath=True)
            return _hash_categorical(cat, encoding, hash_key)

        try:
            vals = hashing.hash_object_array(vals, hash_key, encoding)
        except TypeError:
            # we have mixed types
            vals = hashing.hash_object_array(vals.astype(str).astype(object),
                                             hash_key, encoding)

    # Then, redistribute these 64-bit ints within the space of 64-bit ints
    vals ^= vals >> 30
    vals *= np.uint64(0xbf58476d1ce4e5b9)
    vals ^= vals >> 27
    vals *= np.uint64(0x94d049bb133111eb)
    vals ^= vals >> 31
    return vals
python
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True):
    """
    Given a 1d array, return an array of deterministic integers.

    .. versionadded:: 0.19.2

    Parameters
    ----------
    vals : ndarray, Categorical
    encoding : string, default 'utf8'
        encoding for data & key when strings
    hash_key : string key to encode, default to _default_hash_key
    categorize : bool, default True
        Whether to first categorize object arrays before hashing. This is more
        efficient when the array contains duplicate values.

        .. versionadded:: 0.20.0

    Returns
    -------
    1d uint64 numpy array of hash values, same length as the vals

    """
    if not hasattr(vals, 'dtype'):
        raise TypeError("must pass a ndarray-like")
    dtype = vals.dtype

    if hash_key is None:
        hash_key = _default_hash_key

    # For categoricals, we hash the categories, then remap the codes to the
    # hash values. (This check is above the complex check so that we don't ask
    # numpy if categorical is a subdtype of complex, as it will choke).
    if is_categorical_dtype(dtype):
        return _hash_categorical(vals, encoding, hash_key)
    elif is_extension_array_dtype(dtype):
        vals, _ = vals._values_for_factorize()
        dtype = vals.dtype

    # we'll be working with everything as 64-bit values, so handle this
    # 128-bit value early
    if np.issubdtype(dtype, np.complex128):
        return hash_array(vals.real) + 23 * hash_array(vals.imag)

    # First, turn whatever array this is into unsigned 64-bit ints, if we can
    # manage it.
    elif isinstance(dtype, np.bool):
        vals = vals.astype('u8')
    elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
        vals = vals.view('i8').astype('u8', copy=False)
    elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
        vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
    else:
        # With repeated values, its MUCH faster to categorize object dtypes,
        # then hash and rename categories. We allow skipping the categorization
        # when the values are known/likely to be unique.
        if categorize:
            from pandas import factorize, Categorical, Index
            codes, categories = factorize(vals, sort=False)
            cat = Categorical(codes, Index(categories),
                              ordered=False, fastpath=True)
            return _hash_categorical(cat, encoding, hash_key)

        try:
            vals = hashing.hash_object_array(vals, hash_key, encoding)
        except TypeError:
            # we have mixed types
            vals = hashing.hash_object_array(vals.astype(str).astype(object),
                                             hash_key, encoding)

    # Then, redistribute these 64-bit ints within the space of 64-bit ints
    vals ^= vals >> 30
    vals *= np.uint64(0xbf58476d1ce4e5b9)
    vals ^= vals >> 27
    vals *= np.uint64(0x94d049bb133111eb)
    vals ^= vals >> 31
    return vals
[ "def", "hash_array", "(", "vals", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ",", "categorize", "=", "True", ")", ":", "if", "not", "hasattr", "(", "vals", ",", "'dtype'", ")", ":", "raise", "TypeError", "(", "\"must pass a ndarray-like\"", ")", "dtype", "=", "vals", ".", "dtype", "if", "hash_key", "is", "None", ":", "hash_key", "=", "_default_hash_key", "# For categoricals, we hash the categories, then remap the codes to the", "# hash values. (This check is above the complex check so that we don't ask", "# numpy if categorical is a subdtype of complex, as it will choke).", "if", "is_categorical_dtype", "(", "dtype", ")", ":", "return", "_hash_categorical", "(", "vals", ",", "encoding", ",", "hash_key", ")", "elif", "is_extension_array_dtype", "(", "dtype", ")", ":", "vals", ",", "_", "=", "vals", ".", "_values_for_factorize", "(", ")", "dtype", "=", "vals", ".", "dtype", "# we'll be working with everything as 64-bit values, so handle this", "# 128-bit value early", "if", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "complex128", ")", ":", "return", "hash_array", "(", "vals", ".", "real", ")", "+", "23", "*", "hash_array", "(", "vals", ".", "imag", ")", "# First, turn whatever array this is into unsigned 64-bit ints, if we can", "# manage it.", "elif", "isinstance", "(", "dtype", ",", "np", ".", "bool", ")", ":", "vals", "=", "vals", ".", "astype", "(", "'u8'", ")", "elif", "issubclass", "(", "dtype", ".", "type", ",", "(", "np", ".", "datetime64", ",", "np", ".", "timedelta64", ")", ")", ":", "vals", "=", "vals", ".", "view", "(", "'i8'", ")", ".", "astype", "(", "'u8'", ",", "copy", "=", "False", ")", "elif", "issubclass", "(", "dtype", ".", "type", ",", "np", ".", "number", ")", "and", "dtype", ".", "itemsize", "<=", "8", ":", "vals", "=", "vals", ".", "view", "(", "'u{}'", ".", "format", "(", "vals", ".", "dtype", ".", "itemsize", ")", ")", ".", "astype", "(", "'u8'", ")", "else", ":", "# With repeated values, its MUCH faster to categorize object dtypes,", "# then hash and rename categories. We allow skipping the categorization", "# when the values are known/likely to be unique.", "if", "categorize", ":", "from", "pandas", "import", "factorize", ",", "Categorical", ",", "Index", "codes", ",", "categories", "=", "factorize", "(", "vals", ",", "sort", "=", "False", ")", "cat", "=", "Categorical", "(", "codes", ",", "Index", "(", "categories", ")", ",", "ordered", "=", "False", ",", "fastpath", "=", "True", ")", "return", "_hash_categorical", "(", "cat", ",", "encoding", ",", "hash_key", ")", "try", ":", "vals", "=", "hashing", ".", "hash_object_array", "(", "vals", ",", "hash_key", ",", "encoding", ")", "except", "TypeError", ":", "# we have mixed types", "vals", "=", "hashing", ".", "hash_object_array", "(", "vals", ".", "astype", "(", "str", ")", ".", "astype", "(", "object", ")", ",", "hash_key", ",", "encoding", ")", "# Then, redistribute these 64-bit ints within the space of 64-bit ints", "vals", "^=", "vals", ">>", "30", "vals", "*=", "np", ".", "uint64", "(", "0xbf58476d1ce4e5b9", ")", "vals", "^=", "vals", ">>", "27", "vals", "*=", "np", ".", "uint64", "(", "0x94d049bb133111eb", ")", "vals", "^=", "vals", ">>", "31", "return", "vals" ]
Given a 1d array, return an array of deterministic integers.

    .. versionadded:: 0.19.2

    Parameters
    ----------
    vals : ndarray, Categorical
    encoding : string, default 'utf8'
        encoding for data & key when strings
    hash_key : string key to encode, default to _default_hash_key
    categorize : bool, default True
        Whether to first categorize object arrays before hashing. This is more
        efficient when the array contains duplicate values.

        .. versionadded:: 0.20.0

    Returns
    -------
    1d uint64 numpy array of hash values, same length as the vals
[ "Given", "a", "1d", "array", "return", "an", "array", "of", "deterministic", "integers", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L229-L305
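A short demonstration of the determinism promised above (unlike Python's per-process randomized string hashing, the result is stable across runs):

import numpy as np
from pandas.util import hash_array

vals = np.array(['a', 'b', 'a'], dtype=object)
h = hash_array(vals)

assert h.dtype == np.uint64 and len(h) == len(vals)
assert h[0] == h[2]   # same value -> same hash, run after run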
19,887
pandas-dev/pandas
pandas/core/util/hashing.py
_hash_scalar
def _hash_scalar(val, encoding='utf8', hash_key=None):
    """
    Hash scalar value

    Returns
    -------
    1d uint64 numpy array of hash value, of length 1
    """

    if isna(val):
        # this is to be consistent with the _hash_categorical implementation
        return np.array([np.iinfo(np.uint64).max], dtype='u8')

    if getattr(val, 'tzinfo', None) is not None:
        # for tz-aware datetimes, we need the underlying naive UTC value and
        # not the tz aware object or pd extension type (as
        # infer_dtype_from_scalar would do)
        if not isinstance(val, tslibs.Timestamp):
            val = tslibs.Timestamp(val)
        val = val.tz_convert(None)

    dtype, val = infer_dtype_from_scalar(val)
    vals = np.array([val], dtype=dtype)

    return hash_array(vals, hash_key=hash_key, encoding=encoding,
                      categorize=False)
python
def _hash_scalar(val, encoding='utf8', hash_key=None):
    """
    Hash scalar value

    Returns
    -------
    1d uint64 numpy array of hash value, of length 1
    """

    if isna(val):
        # this is to be consistent with the _hash_categorical implementation
        return np.array([np.iinfo(np.uint64).max], dtype='u8')

    if getattr(val, 'tzinfo', None) is not None:
        # for tz-aware datetimes, we need the underlying naive UTC value and
        # not the tz aware object or pd extension type (as
        # infer_dtype_from_scalar would do)
        if not isinstance(val, tslibs.Timestamp):
            val = tslibs.Timestamp(val)
        val = val.tz_convert(None)

    dtype, val = infer_dtype_from_scalar(val)
    vals = np.array([val], dtype=dtype)

    return hash_array(vals, hash_key=hash_key, encoding=encoding,
                      categorize=False)
[ "def", "_hash_scalar", "(", "val", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ")", ":", "if", "isna", "(", "val", ")", ":", "# this is to be consistent with the _hash_categorical implementation", "return", "np", ".", "array", "(", "[", "np", ".", "iinfo", "(", "np", ".", "uint64", ")", ".", "max", "]", ",", "dtype", "=", "'u8'", ")", "if", "getattr", "(", "val", ",", "'tzinfo'", ",", "None", ")", "is", "not", "None", ":", "# for tz-aware datetimes, we need the underlying naive UTC value and", "# not the tz aware object or pd extension type (as", "# infer_dtype_from_scalar would do)", "if", "not", "isinstance", "(", "val", ",", "tslibs", ".", "Timestamp", ")", ":", "val", "=", "tslibs", ".", "Timestamp", "(", "val", ")", "val", "=", "val", ".", "tz_convert", "(", "None", ")", "dtype", ",", "val", "=", "infer_dtype_from_scalar", "(", "val", ")", "vals", "=", "np", ".", "array", "(", "[", "val", "]", ",", "dtype", "=", "dtype", ")", "return", "hash_array", "(", "vals", ",", "hash_key", "=", "hash_key", ",", "encoding", "=", "encoding", ",", "categorize", "=", "False", ")" ]
Hash scalar value

    Returns
    -------
    1d uint64 numpy array of hash value, of length 1
[ "Hash", "scalar", "value" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L308-L333
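The scalar path is meant to agree element-wise with the array path; a quick check using the private helpers assumed available at this sha:

import numpy as np
from pandas.core.util.hashing import _hash_scalar, hash_array

# scalar and one-element-array hashes coincide; NaN maps to the sentinel
assert _hash_scalar(1.5)[0] == hash_array(np.array([1.5]))[0]
assert _hash_scalar(np.nan)[0] == np.iinfo(np.uint64).max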
19,888
pandas-dev/pandas
doc/make.py
DocBuilder._run_os
def _run_os(*args):
        """
        Execute a command as a OS terminal.

        Parameters
        ----------
        *args : list of str
            Command and parameters to be executed

        Examples
        --------
        >>> DocBuilder()._run_os('python', '--version')
        """
        subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
python
def _run_os(*args):
        """
        Execute a command as a OS terminal.

        Parameters
        ----------
        *args : list of str
            Command and parameters to be executed

        Examples
        --------
        >>> DocBuilder()._run_os('python', '--version')
        """
        subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
[ "def", "_run_os", "(", "*", "args", ")", ":", "subprocess", ".", "check_call", "(", "args", ",", "stdout", "=", "sys", ".", "stdout", ",", "stderr", "=", "sys", ".", "stderr", ")" ]
Execute a command as a OS terminal.

        Parameters
        ----------
        *args : list of str
            Command and parameters to be executed

        Examples
        --------
        >>> DocBuilder()._run_os('python', '--version')
[ "Execute", "a", "command", "as", "a", "OS", "terminal", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L91-L104
19,889
pandas-dev/pandas
doc/make.py
DocBuilder._sphinx_build
def _sphinx_build(self, kind):
        """
        Call sphinx to build documentation.

        Attribute `num_jobs` from the class is used.

        Parameters
        ----------
        kind : {'html', 'latex'}

        Examples
        --------
        >>> DocBuilder(num_jobs=4)._sphinx_build('html')
        """
        if kind not in ('html', 'latex'):
            raise ValueError('kind must be html or latex, '
                             'not {}'.format(kind))

        cmd = ['sphinx-build', '-b', kind]
        if self.num_jobs:
            cmd += ['-j', str(self.num_jobs)]
        if self.warnings_are_errors:
            cmd += ['-W', '--keep-going']
        if self.verbosity:
            cmd.append('-{}'.format('v' * self.verbosity))
        cmd += ['-d', os.path.join(BUILD_PATH, 'doctrees'),
                SOURCE_PATH, os.path.join(BUILD_PATH, kind)]
        return subprocess.call(cmd)
python
def _sphinx_build(self, kind):
        """
        Call sphinx to build documentation.

        Attribute `num_jobs` from the class is used.

        Parameters
        ----------
        kind : {'html', 'latex'}

        Examples
        --------
        >>> DocBuilder(num_jobs=4)._sphinx_build('html')
        """
        if kind not in ('html', 'latex'):
            raise ValueError('kind must be html or latex, '
                             'not {}'.format(kind))

        cmd = ['sphinx-build', '-b', kind]
        if self.num_jobs:
            cmd += ['-j', str(self.num_jobs)]
        if self.warnings_are_errors:
            cmd += ['-W', '--keep-going']
        if self.verbosity:
            cmd.append('-{}'.format('v' * self.verbosity))
        cmd += ['-d', os.path.join(BUILD_PATH, 'doctrees'),
                SOURCE_PATH, os.path.join(BUILD_PATH, kind)]
        return subprocess.call(cmd)
[ "def", "_sphinx_build", "(", "self", ",", "kind", ")", ":", "if", "kind", "not", "in", "(", "'html'", ",", "'latex'", ")", ":", "raise", "ValueError", "(", "'kind must be html or latex, '", "'not {}'", ".", "format", "(", "kind", ")", ")", "cmd", "=", "[", "'sphinx-build'", ",", "'-b'", ",", "kind", "]", "if", "self", ".", "num_jobs", ":", "cmd", "+=", "[", "'-j'", ",", "str", "(", "self", ".", "num_jobs", ")", "]", "if", "self", ".", "warnings_are_errors", ":", "cmd", "+=", "[", "'-W'", ",", "'--keep-going'", "]", "if", "self", ".", "verbosity", ":", "cmd", ".", "append", "(", "'-{}'", ".", "format", "(", "'v'", "*", "self", ".", "verbosity", ")", ")", "cmd", "+=", "[", "'-d'", ",", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'doctrees'", ")", ",", "SOURCE_PATH", ",", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "kind", ")", "]", "return", "subprocess", ".", "call", "(", "cmd", ")" ]
Call sphinx to build documentation.

        Attribute `num_jobs` from the class is used.

        Parameters
        ----------
        kind : {'html', 'latex'}

        Examples
        --------
        >>> DocBuilder(num_jobs=4)._sphinx_build('html')
[ "Call", "sphinx", "to", "build", "documentation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L106-L133
19,890
pandas-dev/pandas
doc/make.py
DocBuilder._open_browser
def _open_browser(self, single_doc_html):
        """
        Open a browser tab showing the single doc page.
        """
        url = os.path.join('file://', DOC_PATH, 'build', 'html',
                           single_doc_html)
        webbrowser.open(url, new=2)
python
def _open_browser(self, single_doc_html):
        """
        Open a browser tab showing the single doc page.
        """
        url = os.path.join('file://', DOC_PATH, 'build', 'html',
                           single_doc_html)
        webbrowser.open(url, new=2)
[ "def", "_open_browser", "(", "self", ",", "single_doc_html", ")", ":", "url", "=", "os", ".", "path", ".", "join", "(", "'file://'", ",", "DOC_PATH", ",", "'build'", ",", "'html'", ",", "single_doc_html", ")", "webbrowser", ".", "open", "(", "url", ",", "new", "=", "2", ")" ]
Open a browser tab showing the single doc page.
[ "Open", "a", "browser", "tab", "showing", "single" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L135-L141
19,891
pandas-dev/pandas
doc/make.py
DocBuilder._get_page_title
def _get_page_title(self, page):
        """
        Open the rst file `page` and extract its title.
        """
        fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page))
        option_parser = docutils.frontend.OptionParser(
            components=(docutils.parsers.rst.Parser,))
        doc = docutils.utils.new_document(
            '<doc>',
            option_parser.get_default_values())
        with open(fname) as f:
            data = f.read()

        parser = docutils.parsers.rst.Parser()
        # do not generate any warning when parsing the rst
        with open(os.devnull, 'a') as f:
            doc.reporter.stream = f
            parser.parse(data, doc)

        section = next(node for node in doc.children
                       if isinstance(node, docutils.nodes.section))
        title = next(node for node in section.children
                     if isinstance(node, docutils.nodes.title))

        return title.astext()
python
def _get_page_title(self, page):
        """
        Open the rst file `page` and extract its title.
        """
        fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page))
        option_parser = docutils.frontend.OptionParser(
            components=(docutils.parsers.rst.Parser,))
        doc = docutils.utils.new_document(
            '<doc>',
            option_parser.get_default_values())
        with open(fname) as f:
            data = f.read()

        parser = docutils.parsers.rst.Parser()
        # do not generate any warning when parsing the rst
        with open(os.devnull, 'a') as f:
            doc.reporter.stream = f
            parser.parse(data, doc)

        section = next(node for node in doc.children
                       if isinstance(node, docutils.nodes.section))
        title = next(node for node in section.children
                     if isinstance(node, docutils.nodes.title))

        return title.astext()
[ "def", "_get_page_title", "(", "self", ",", "page", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "SOURCE_PATH", ",", "'{}.rst'", ".", "format", "(", "page", ")", ")", "option_parser", "=", "docutils", ".", "frontend", ".", "OptionParser", "(", "components", "=", "(", "docutils", ".", "parsers", ".", "rst", ".", "Parser", ",", ")", ")", "doc", "=", "docutils", ".", "utils", ".", "new_document", "(", "'<doc>'", ",", "option_parser", ".", "get_default_values", "(", ")", ")", "with", "open", "(", "fname", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "parser", "=", "docutils", ".", "parsers", ".", "rst", ".", "Parser", "(", ")", "# do not generate any warning when parsing the rst", "with", "open", "(", "os", ".", "devnull", ",", "'a'", ")", "as", "f", ":", "doc", ".", "reporter", ".", "stream", "=", "f", "parser", ".", "parse", "(", "data", ",", "doc", ")", "section", "=", "next", "(", "node", "for", "node", "in", "doc", ".", "children", "if", "isinstance", "(", "node", ",", "docutils", ".", "nodes", ".", "section", ")", ")", "title", "=", "next", "(", "node", "for", "node", "in", "section", ".", "children", "if", "isinstance", "(", "node", ",", "docutils", ".", "nodes", ".", "title", ")", ")", "return", "title", ".", "astext", "(", ")" ]
Open the rst file `page` and extract its title.
[ "Open", "the", "rst", "file", "page", "and", "extract", "its", "title", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L143-L167
19,892
pandas-dev/pandas
doc/make.py
DocBuilder.html
def html(self):
        """
        Build HTML documentation.
        """
        ret_code = self._sphinx_build('html')
        zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
        if os.path.exists(zip_fname):
            os.remove(zip_fname)

        if self.single_doc_html is not None:
            self._open_browser(self.single_doc_html)
        else:
            self._add_redirects()
        return ret_code
python
def html(self):
        """
        Build HTML documentation.
        """
        ret_code = self._sphinx_build('html')
        zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
        if os.path.exists(zip_fname):
            os.remove(zip_fname)

        if self.single_doc_html is not None:
            self._open_browser(self.single_doc_html)
        else:
            self._add_redirects()
        return ret_code
[ "def", "html", "(", "self", ")", ":", "ret_code", "=", "self", ".", "_sphinx_build", "(", "'html'", ")", "zip_fname", "=", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'html'", ",", "'pandas.zip'", ")", "if", "os", ".", "path", ".", "exists", "(", "zip_fname", ")", ":", "os", ".", "remove", "(", "zip_fname", ")", "if", "self", ".", "single_doc_html", "is", "not", "None", ":", "self", ".", "_open_browser", "(", "self", ".", "single_doc_html", ")", "else", ":", "self", ".", "_add_redirects", "(", ")", "return", "ret_code" ]
Build HTML documentation.
[ "Build", "HTML", "documentation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L214-L227
19,893
pandas-dev/pandas
doc/make.py
DocBuilder.latex
def latex(self, force=False):
        """
        Build PDF documentation.
        """
        if sys.platform == 'win32':
            sys.stderr.write('latex build has not been tested on windows\n')
        else:
            ret_code = self._sphinx_build('latex')
            os.chdir(os.path.join(BUILD_PATH, 'latex'))
            if force:
                for i in range(3):
                    self._run_os('pdflatex',
                                 '-interaction=nonstopmode',
                                 'pandas.tex')
                raise SystemExit('You should check the file '
                                 '"build/latex/pandas.pdf" for problems.')
            else:
                self._run_os('make')
            return ret_code
python
def latex(self, force=False):
        """
        Build PDF documentation.
        """
        if sys.platform == 'win32':
            sys.stderr.write('latex build has not been tested on windows\n')
        else:
            ret_code = self._sphinx_build('latex')
            os.chdir(os.path.join(BUILD_PATH, 'latex'))
            if force:
                for i in range(3):
                    self._run_os('pdflatex',
                                 '-interaction=nonstopmode',
                                 'pandas.tex')
                raise SystemExit('You should check the file '
                                 '"build/latex/pandas.pdf" for problems.')
            else:
                self._run_os('make')
            return ret_code
[ "def", "latex", "(", "self", ",", "force", "=", "False", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "sys", ".", "stderr", ".", "write", "(", "'latex build has not been tested on windows\\n'", ")", "else", ":", "ret_code", "=", "self", ".", "_sphinx_build", "(", "'latex'", ")", "os", ".", "chdir", "(", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'latex'", ")", ")", "if", "force", ":", "for", "i", "in", "range", "(", "3", ")", ":", "self", ".", "_run_os", "(", "'pdflatex'", ",", "'-interaction=nonstopmode'", ",", "'pandas.tex'", ")", "raise", "SystemExit", "(", "'You should check the file '", "'\"build/latex/pandas.pdf\" for problems.'", ")", "else", ":", "self", ".", "_run_os", "(", "'make'", ")", "return", "ret_code" ]
Build PDF documentation.
[ "Build", "PDF", "documentation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L229-L247
19,894
pandas-dev/pandas
doc/make.py
DocBuilder.clean
def clean():
        """
        Clean documentation generated files.
        """
        shutil.rmtree(BUILD_PATH, ignore_errors=True)
        shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'),
                      ignore_errors=True)
python
def clean():
        """
        Clean documentation generated files.
        """
        shutil.rmtree(BUILD_PATH, ignore_errors=True)
        shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'),
                      ignore_errors=True)
[ "def", "clean", "(", ")", ":", "shutil", ".", "rmtree", "(", "BUILD_PATH", ",", "ignore_errors", "=", "True", ")", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "SOURCE_PATH", ",", "'reference'", ",", "'api'", ")", ",", "ignore_errors", "=", "True", ")" ]
Clean documentation generated files.
[ "Clean", "documentation", "generated", "files", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L256-L262
19,895
pandas-dev/pandas
doc/make.py
DocBuilder.zip_html
def zip_html(self):
        """
        Compress HTML documentation into a zip file.
        """
        zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
        if os.path.exists(zip_fname):
            os.remove(zip_fname)
        dirname = os.path.join(BUILD_PATH, 'html')
        fnames = os.listdir(dirname)
        os.chdir(dirname)
        self._run_os('zip',
                     zip_fname,
                     '-r',
                     '-q',
                     *fnames)
python
def zip_html(self):
        """
        Compress HTML documentation into a zip file.
        """
        zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
        if os.path.exists(zip_fname):
            os.remove(zip_fname)
        dirname = os.path.join(BUILD_PATH, 'html')
        fnames = os.listdir(dirname)
        os.chdir(dirname)
        self._run_os('zip',
                     zip_fname,
                     '-r',
                     '-q',
                     *fnames)
[ "def", "zip_html", "(", "self", ")", ":", "zip_fname", "=", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'html'", ",", "'pandas.zip'", ")", "if", "os", ".", "path", ".", "exists", "(", "zip_fname", ")", ":", "os", ".", "remove", "(", "zip_fname", ")", "dirname", "=", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'html'", ")", "fnames", "=", "os", ".", "listdir", "(", "dirname", ")", "os", ".", "chdir", "(", "dirname", ")", "self", ".", "_run_os", "(", "'zip'", ",", "zip_fname", ",", "'-r'", ",", "'-q'", ",", "*", "fnames", ")" ]
Compress HTML documentation into a zip file.
[ "Compress", "HTML", "documentation", "into", "a", "zip", "file", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L264-L278
19,896
pandas-dev/pandas
pandas/io/formats/latex.py
LatexFormatter._format_multicolumn
def _format_multicolumn(self, row, ilevels):
        r"""
        Combine columns belonging to a group to a single multicolumn entry
        according to self.multicolumn_format

        e.g.:
        a &  &  & b & c &
        will become
        \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
        """
        row2 = list(row[:ilevels])
        ncol = 1
        coltext = ''

        def append_col():
            # write multicolumn if needed
            if ncol > 1:
                row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
                            .format(ncol=ncol, fmt=self.multicolumn_format,
                                    txt=coltext.strip()))
            # don't modify where not needed
            else:
                row2.append(coltext)
        for c in row[ilevels:]:
            # if next col has text, write the previous
            if c.strip():
                if coltext:
                    append_col()
                coltext = c
                ncol = 1
            # if not, add it to the previous multicolumn
            else:
                ncol += 1
        # write last column name
        if coltext:
            append_col()
        return row2
python
def _format_multicolumn(self, row, ilevels):
        r"""
        Combine columns belonging to a group to a single multicolumn entry
        according to self.multicolumn_format

        e.g.:
        a &  &  & b & c &
        will become
        \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
        """
        row2 = list(row[:ilevels])
        ncol = 1
        coltext = ''

        def append_col():
            # write multicolumn if needed
            if ncol > 1:
                row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
                            .format(ncol=ncol, fmt=self.multicolumn_format,
                                    txt=coltext.strip()))
            # don't modify where not needed
            else:
                row2.append(coltext)
        for c in row[ilevels:]:
            # if next col has text, write the previous
            if c.strip():
                if coltext:
                    append_col()
                coltext = c
                ncol = 1
            # if not, add it to the previous multicolumn
            else:
                ncol += 1
        # write last column name
        if coltext:
            append_col()
        return row2
[ "def", "_format_multicolumn", "(", "self", ",", "row", ",", "ilevels", ")", ":", "row2", "=", "list", "(", "row", "[", ":", "ilevels", "]", ")", "ncol", "=", "1", "coltext", "=", "''", "def", "append_col", "(", ")", ":", "# write multicolumn if needed", "if", "ncol", ">", "1", ":", "row2", ".", "append", "(", "'\\\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'", ".", "format", "(", "ncol", "=", "ncol", ",", "fmt", "=", "self", ".", "multicolumn_format", ",", "txt", "=", "coltext", ".", "strip", "(", ")", ")", ")", "# don't modify where not needed", "else", ":", "row2", ".", "append", "(", "coltext", ")", "for", "c", "in", "row", "[", "ilevels", ":", "]", ":", "# if next col has text, write the previous", "if", "c", ".", "strip", "(", ")", ":", "if", "coltext", ":", "append_col", "(", ")", "coltext", "=", "c", "ncol", "=", "1", "# if not, add it to the previous multicolumn", "else", ":", "ncol", "+=", "1", "# write last column name", "if", "coltext", ":", "append_col", "(", ")", "return", "row2" ]
r""" Combine columns belonging to a group to a single multicolumn entry according to self.multicolumn_format e.g.: a & & & b & c & will become \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
[ "r", "Combine", "columns", "belonging", "to", "a", "group", "to", "a", "single", "multicolumn", "entry", "according", "to", "self", ".", "multicolumn_format" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L165-L201
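The multicolumn grouping above is driven through `DataFrame.to_latex`; a sketch with invented data:

import pandas as pd

cols = pd.MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
df = pd.DataFrame([[1, 2, 3]], columns=cols)

# the header row contains \multicolumn{2}{c}{a} for the grouped columns
print(df.to_latex(multicolumn=True, multicolumn_format='c'))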
19,897
pandas-dev/pandas
pandas/io/formats/latex.py
LatexFormatter._format_multirow
def _format_multirow(self, row, ilevels, i, rows):
        r"""
        Check following rows, whether row should be a multirow

        e.g.:     becomes:
        a & 0 &   \multirow{2}{*}{a} & 0 &
          & 1 &     & 1 &
        b & 0 &   \cline{1-2} b & 0 &
        """
        for j in range(ilevels):
            if row[j].strip():
                nrow = 1
                for r in rows[i + 1:]:
                    if not r[j].strip():
                        nrow += 1
                    else:
                        break
                if nrow > 1:
                    # overwrite non-multirow entry
                    row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
                        nrow=nrow, row=row[j].strip())
                    # save when to end the current block with \cline
                    self.clinebuf.append([i + nrow - 1, j + 1])
        return row
python
def _format_multirow(self, row, ilevels, i, rows):
        r"""
        Check following rows, whether row should be a multirow

        e.g.:     becomes:
        a & 0 &   \multirow{2}{*}{a} & 0 &
          & 1 &     & 1 &
        b & 0 &   \cline{1-2} b & 0 &
        """
        for j in range(ilevels):
            if row[j].strip():
                nrow = 1
                for r in rows[i + 1:]:
                    if not r[j].strip():
                        nrow += 1
                    else:
                        break
                if nrow > 1:
                    # overwrite non-multirow entry
                    row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
                        nrow=nrow, row=row[j].strip())
                    # save when to end the current block with \cline
                    self.clinebuf.append([i + nrow - 1, j + 1])
        return row
[ "def", "_format_multirow", "(", "self", ",", "row", ",", "ilevels", ",", "i", ",", "rows", ")", ":", "for", "j", "in", "range", "(", "ilevels", ")", ":", "if", "row", "[", "j", "]", ".", "strip", "(", ")", ":", "nrow", "=", "1", "for", "r", "in", "rows", "[", "i", "+", "1", ":", "]", ":", "if", "not", "r", "[", "j", "]", ".", "strip", "(", ")", ":", "nrow", "+=", "1", "else", ":", "break", "if", "nrow", ">", "1", ":", "# overwrite non-multirow entry", "row", "[", "j", "]", "=", "'\\\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'", ".", "format", "(", "nrow", "=", "nrow", ",", "row", "=", "row", "[", "j", "]", ".", "strip", "(", ")", ")", "# save when to end the current block with \\cline", "self", ".", "clinebuf", ".", "append", "(", "[", "i", "+", "nrow", "-", "1", ",", "j", "+", "1", "]", ")", "return", "row" ]
r""" Check following rows, whether row should be a multirow e.g.: becomes: a & 0 & \multirow{2}{*}{a} & 0 & & 1 & & 1 & b & 0 & \cline{1-2} b & 0 &
[ "r", "Check", "following", "rows", "whether", "row", "should", "be", "a", "multirow" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L203-L227
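Likewise, `multirow=True` exercises the method above (invented data):

import pandas as pd

idx = pd.MultiIndex.from_tuples([('a', 0), ('a', 1), ('b', 0)])
df = pd.DataFrame({'val': [0, 1, 0]}, index=idx)

# 'a' is emitted once as \multirow{2}{*}{a}, closed by \cline{1-2}
print(df.to_latex(multirow=True))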
19,898
pandas-dev/pandas
pandas/io/formats/latex.py
LatexFormatter._print_cline
def _print_cline(self, buf, i, icol):
        """
        Print clines after multirow-blocks are finished
        """
        for cl in self.clinebuf:
            if cl[0] == i:
                buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
                          .format(cl=cl[1], icol=icol))
        # remove entries that have been written to buffer
        self.clinebuf = [x for x in self.clinebuf if x[0] != i]
python
def _print_cline(self, buf, i, icol):
        """
        Print clines after multirow-blocks are finished
        """
        for cl in self.clinebuf:
            if cl[0] == i:
                buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
                          .format(cl=cl[1], icol=icol))
        # remove entries that have been written to buffer
        self.clinebuf = [x for x in self.clinebuf if x[0] != i]
[ "def", "_print_cline", "(", "self", ",", "buf", ",", "i", ",", "icol", ")", ":", "for", "cl", "in", "self", ".", "clinebuf", ":", "if", "cl", "[", "0", "]", "==", "i", ":", "buf", ".", "write", "(", "'\\\\cline{{{cl:d}-{icol:d}}}\\n'", ".", "format", "(", "cl", "=", "cl", "[", "1", "]", ",", "icol", "=", "icol", ")", ")", "# remove entries that have been written to buffer", "self", ".", "clinebuf", "=", "[", "x", "for", "x", "in", "self", ".", "clinebuf", "if", "x", "[", "0", "]", "!=", "i", "]" ]
Print clines after multirow-blocks are finished
[ "Print", "clines", "after", "multirow", "-", "blocks", "are", "finished" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L229-L238
19,899
pandas-dev/pandas
pandas/io/parsers.py
_validate_integer
def _validate_integer(name, val, min_val=0):
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : string
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)
    """
    msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
                                                               min_val=min_val)

    if val is not None:
        if is_float(val):
            if int(val) != val:
                raise ValueError(msg)
            val = int(val)
        elif not (is_integer(val) and val >= min_val):
            raise ValueError(msg)

    return val
python
def _validate_integer(name, val, min_val=0):
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : string
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)
    """
    msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
                                                               min_val=min_val)

    if val is not None:
        if is_float(val):
            if int(val) != val:
                raise ValueError(msg)
            val = int(val)
        elif not (is_integer(val) and val >= min_val):
            raise ValueError(msg)

    return val
[ "def", "_validate_integer", "(", "name", ",", "val", ",", "min_val", "=", "0", ")", ":", "msg", "=", "\"'{name:s}' must be an integer >={min_val:d}\"", ".", "format", "(", "name", "=", "name", ",", "min_val", "=", "min_val", ")", "if", "val", "is", "not", "None", ":", "if", "is_float", "(", "val", ")", ":", "if", "int", "(", "val", ")", "!=", "val", ":", "raise", "ValueError", "(", "msg", ")", "val", "=", "int", "(", "val", ")", "elif", "not", "(", "is_integer", "(", "val", ")", "and", "val", ">=", "min_val", ")", ":", "raise", "ValueError", "(", "msg", ")", "return", "val" ]
Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : string
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)
[ "Checks", "whether", "the", "name", "parameter", "for", "parsing", "is", "either", "an", "integer", "OR", "float", "that", "can", "SAFELY", "be", "cast", "to", "an", "integer", "without", "losing", "accuracy", ".", "Raises", "a", "ValueError", "if", "that", "is", "not", "the", "case", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L349-L376
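A behaviour sketch for the validator above (private import, assumed valid at this sha):

from pandas.io.parsers import _validate_integer

assert _validate_integer('nrows', 5.0) == 5   # integral float is cast
try:
    _validate_integer('nrows', 5.5)           # lossy float -> ValueError
except ValueError as err:
    print(err)                                # 'nrows' must be an integer >=0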