column            type            range
partition         stringclasses   3 values
func_name         stringlengths   1 to 134
docstring         stringlengths   1 to 46.9k
path              stringlengths   4 to 223
original_string   stringlengths   75 to 104k
code              stringlengths   75 to 104k
docstring_tokens  listlengths     1 to 1.97k
repo              stringlengths   7 to 55
language          stringclasses   1 value
url               stringlengths   87 to 315
code_tokens       listlengths     19 to 28.4k
sha               stringlengths   40 to 40
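Editor's note: a hedged sketch of consuming rows with this schema via the Hugging Face datasets library. The dataset identifier below is a hypothetical placeholder; the dump does not name the dataset.

>>> from datasets import load_dataset
>>> ds = load_dataset('org/code-corpus', split='train')  # hypothetical id, not given in this dump
>>> row = ds[0]
>>> print(row['func_name'], row['path'])  # each row pairs a function with its docstring and tokens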
train
Index.slice_indexer
For an ordered or unique index, compute the slice indexer for input labels and step. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, default None kind : string, default None Returns ------- indexer : slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples --------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3)
pandas/core/indexes/base.py
def slice_indexer(self, start=None, end=None, step=None, kind=None): """ For an ordered or unique index, compute the slice indexer for input labels and step. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, default None kind : string, default None Returns ------- indexer : slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples --------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3) """ start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step)
def slice_indexer(self, start=None, end=None, step=None, kind=None): """ For an ordered or unique index, compute the slice indexer for input labels and step. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, default None kind : string, default None Returns ------- indexer : slice Raises ------ KeyError : If key does not exist, or key is not unique and index is not ordered. Notes ----- This function assumes that the data is sorted, so use at your own peril Examples --------- This is a method on all index types. For example you can do: >>> idx = pd.Index(list('abcd')) >>> idx.slice_indexer(start='b', end='c') slice(1, 3) >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')]) >>> idx.slice_indexer(start='b', end=('c', 'g')) slice(1, 3) """ start_slice, end_slice = self.slice_locs(start, end, step=step, kind=kind) # return a slice if not is_scalar(start_slice): raise AssertionError("Start slice bound is non-scalar") if not is_scalar(end_slice): raise AssertionError("End slice bound is non-scalar") return slice(start_slice, end_slice, step)
[ "For", "an", "ordered", "or", "unique", "index", "compute", "the", "slice", "indexer", "for", "input", "labels", "and", "step", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4625-L4673
[ "def", "slice_indexer", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "step", "=", "None", ",", "kind", "=", "None", ")", ":", "start_slice", ",", "end_slice", "=", "self", ".", "slice_locs", "(", "start", ",", "end", ",", "step", "=", "step", ",", "kind", "=", "kind", ")", "# return a slice", "if", "not", "is_scalar", "(", "start_slice", ")", ":", "raise", "AssertionError", "(", "\"Start slice bound is non-scalar\"", ")", "if", "not", "is_scalar", "(", "end_slice", ")", ":", "raise", "AssertionError", "(", "\"End slice bound is non-scalar\"", ")", "return", "slice", "(", "start_slice", ",", "end_slice", ",", "step", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
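Editor's sketch (not part of the row): the non-default step argument, which the row's own doctests do not cover. Assumes pandas at the commit referenced above, with `import pandas as pd` in scope.

>>> idx = pd.Index(list('abcd'))
>>> idx.slice_indexer(start='b', end='c', step=1)  # step is passed straight through to the slice
slice(1, 3, 1)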
train
Index._maybe_cast_indexer
If we have a float key and are not a floating index, then try to cast to an int if equivalent.
pandas/core/indexes/base.py
def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ if is_float(key) and not self.is_floating(): try: ckey = int(key) if ckey == key: key = ckey except (OverflowError, ValueError, TypeError): pass return key
def _maybe_cast_indexer(self, key): """ If we have a float key and are not a floating index, then try to cast to an int if equivalent. """ if is_float(key) and not self.is_floating(): try: ckey = int(key) if ckey == key: key = ckey except (OverflowError, ValueError, TypeError): pass return key
[ "If", "we", "have", "a", "float", "key", "and", "are", "not", "a", "floating", "index", "then", "try", "to", "cast", "to", "an", "int", "if", "equivalent", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4675-L4688
[ "def", "_maybe_cast_indexer", "(", "self", ",", "key", ")", ":", "if", "is_float", "(", "key", ")", "and", "not", "self", ".", "is_floating", "(", ")", ":", "try", ":", "ckey", "=", "int", "(", "key", ")", "if", "ckey", "==", "key", ":", "key", "=", "ckey", "except", "(", "OverflowError", ",", "ValueError", ",", "TypeError", ")", ":", "pass", "return", "key" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
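Editor's sketch of the cast described above. `_maybe_cast_indexer` is private, so this is illustrative only; it assumes pandas at the commit referenced above.

>>> idx = pd.Index([10, 20, 30])  # integer index, not floating
>>> idx._maybe_cast_indexer(1.0)  # float equal to an int -> cast to int
1
>>> idx._maybe_cast_indexer(1.5)  # no equivalent int -> returned unchanged
1.5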
train
Index._validate_indexer
If we are positional indexer, validate that we have appropriate typed bounds must be an integer.
pandas/core/indexes/base.py
def _validate_indexer(self, form, key, kind): """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ assert kind in ['ix', 'loc', 'getitem', 'iloc'] if key is None: pass elif is_integer(key): pass elif kind in ['iloc', 'getitem']: self._invalid_indexer(form, key) return key
def _validate_indexer(self, form, key, kind): """ If we are positional indexer, validate that we have appropriate typed bounds must be an integer. """ assert kind in ['ix', 'loc', 'getitem', 'iloc'] if key is None: pass elif is_integer(key): pass elif kind in ['iloc', 'getitem']: self._invalid_indexer(form, key) return key
[ "If", "we", "are", "positional", "indexer", "validate", "that", "we", "have", "appropriate", "typed", "bounds", "must", "be", "an", "integer", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4690-L4703
[ "def", "_validate_indexer", "(", "self", ",", "form", ",", "key", ",", "kind", ")", ":", "assert", "kind", "in", "[", "'ix'", ",", "'loc'", ",", "'getitem'", ",", "'iloc'", "]", "if", "key", "is", "None", ":", "pass", "elif", "is_integer", "(", "key", ")", ":", "pass", "elif", "kind", "in", "[", "'iloc'", ",", "'getitem'", "]", ":", "self", ".", "_invalid_indexer", "(", "form", ",", "key", ")", "return", "key" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
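Editor's sketch (private API, illustrative only): integers and None pass through the validation; other keys are rejected for the positional kinds.

>>> idx = pd.Index(list('abcd'))
>>> idx._validate_indexer('slice', 3, 'iloc')  # integer bound is accepted as-is
3
>>> idx._validate_indexer('slice', None, 'iloc') is None  # None bound is accepted too
True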
train
Index.get_slice_bound
Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'}
pandas/core/indexes/base.py
def get_slice_bound(self, label, side, kind): """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} """ assert kind in ['ix', 'loc', 'getitem', None] if side not in ('left', 'right'): raise ValueError("Invalid value for side kwarg," " must be either 'left' or 'right': %s" % (side, )) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. label = self._maybe_cast_slice_bound(label, side, kind) # we need to look up the label try: slc = self._get_loc_only_exact_matches(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array or an array of indices, which # is OK as long as they are representable by a slice. if is_bool_dtype(slc): slc = lib.maybe_booleans_to_slice(slc.view('u1')) else: slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self)) if isinstance(slc, np.ndarray): raise KeyError("Cannot get %s slice bound for non-unique " "label: %r" % (side, original_label)) if isinstance(slc, slice): if side == 'left': return slc.start else: return slc.stop else: if side == 'right': return slc + 1 else: return slc
def get_slice_bound(self, label, side, kind): """ Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if ``side=='right'``) position of given label. Parameters ---------- label : object side : {'left', 'right'} kind : {'ix', 'loc', 'getitem'} """ assert kind in ['ix', 'loc', 'getitem', None] if side not in ('left', 'right'): raise ValueError("Invalid value for side kwarg," " must be either 'left' or 'right': %s" % (side, )) original_label = label # For datetime indices label may be a string that has to be converted # to datetime boundary according to its resolution. label = self._maybe_cast_slice_bound(label, side, kind) # we need to look up the label try: slc = self._get_loc_only_exact_matches(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: # raise the original KeyError raise err if isinstance(slc, np.ndarray): # get_loc may return a boolean array or an array of indices, which # is OK as long as they are representable by a slice. if is_bool_dtype(slc): slc = lib.maybe_booleans_to_slice(slc.view('u1')) else: slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self)) if isinstance(slc, np.ndarray): raise KeyError("Cannot get %s slice bound for non-unique " "label: %r" % (side, original_label)) if isinstance(slc, slice): if side == 'left': return slc.start else: return slc.stop else: if side == 'right': return slc + 1 else: return slc
[ "Calculate", "slice", "bound", "that", "corresponds", "to", "given", "label", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4766-L4822
[ "def", "get_slice_bound", "(", "self", ",", "label", ",", "side", ",", "kind", ")", ":", "assert", "kind", "in", "[", "'ix'", ",", "'loc'", ",", "'getitem'", ",", "None", "]", "if", "side", "not", "in", "(", "'left'", ",", "'right'", ")", ":", "raise", "ValueError", "(", "\"Invalid value for side kwarg,\"", "\" must be either 'left' or 'right': %s\"", "%", "(", "side", ",", ")", ")", "original_label", "=", "label", "# For datetime indices label may be a string that has to be converted", "# to datetime boundary according to its resolution.", "label", "=", "self", ".", "_maybe_cast_slice_bound", "(", "label", ",", "side", ",", "kind", ")", "# we need to look up the label", "try", ":", "slc", "=", "self", ".", "_get_loc_only_exact_matches", "(", "label", ")", "except", "KeyError", "as", "err", ":", "try", ":", "return", "self", ".", "_searchsorted_monotonic", "(", "label", ",", "side", ")", "except", "ValueError", ":", "# raise the original KeyError", "raise", "err", "if", "isinstance", "(", "slc", ",", "np", ".", "ndarray", ")", ":", "# get_loc may return a boolean array or an array of indices, which", "# is OK as long as they are representable by a slice.", "if", "is_bool_dtype", "(", "slc", ")", ":", "slc", "=", "lib", ".", "maybe_booleans_to_slice", "(", "slc", ".", "view", "(", "'u1'", ")", ")", "else", ":", "slc", "=", "lib", ".", "maybe_indices_to_slice", "(", "slc", ".", "astype", "(", "'i8'", ")", ",", "len", "(", "self", ")", ")", "if", "isinstance", "(", "slc", ",", "np", ".", "ndarray", ")", ":", "raise", "KeyError", "(", "\"Cannot get %s slice bound for non-unique \"", "\"label: %r\"", "%", "(", "side", ",", "original_label", ")", ")", "if", "isinstance", "(", "slc", ",", "slice", ")", ":", "if", "side", "==", "'left'", ":", "return", "slc", ".", "start", "else", ":", "return", "slc", ".", "stop", "else", ":", "if", "side", "==", "'right'", ":", "return", "slc", "+", "1", "else", ":", "return", "slc" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
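Editor's sketch of the left/right bound semantics described above, assuming pandas at the referenced commit:

>>> idx = pd.Index(list('abcd'))
>>> idx.get_slice_bound('b', 'left', 'loc')  # leftmost position of 'b'
1
>>> idx.get_slice_bound('b', 'right', 'loc')  # one past the rightmost match
2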
train
Index.slice_locs
Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, defaults None If None, defaults to 1 kind : {'ix', 'loc', 'getitem'} or None Returns ------- start, end : int See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples --------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3)
pandas/core/indexes/base.py
def slice_locs(self, start=None, end=None, step=None, kind=None): """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, defaults None If None, defaults to 1 kind : {'ix', 'loc', 'getitem'} or None Returns ------- start, end : int See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples --------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = (step is None or step >= 0) if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if (isinstance(start, (str, datetime)) and isinstance(end, (str, datetime))): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the " "same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, 'left', kind) if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, 'right', kind) if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice
def slice_locs(self, start=None, end=None, step=None, kind=None): """ Compute slice locations for input labels. Parameters ---------- start : label, default None If None, defaults to the beginning end : label, default None If None, defaults to the end step : int, defaults None If None, defaults to 1 kind : {'ix', 'loc', 'getitem'} or None Returns ------- start, end : int See Also -------- Index.get_loc : Get location for a single label. Notes ----- This method only works if the index is monotonic or unique. Examples --------- >>> idx = pd.Index(list('abcd')) >>> idx.slice_locs(start='b', end='c') (1, 3) """ inc = (step is None or step >= 0) if not inc: # If it's a reverse slice, temporarily swap bounds. start, end = end, start # GH 16785: If start and end happen to be date strings with UTC offsets # attempt to parse and check that the offsets are the same if (isinstance(start, (str, datetime)) and isinstance(end, (str, datetime))): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError("Both dates must have the " "same UTC offset") start_slice = None if start is not None: start_slice = self.get_slice_bound(start, 'left', kind) if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, 'right', kind) if end_slice is None: end_slice = len(self) if not inc: # Bounds at this moment are swapped, swap them back and shift by 1. # # slice_locs('B', 'A', step=-1): s='B', e='A' # # s='A' e='B' # AFTER SWAP: | | # v ------------------> V # ----------------------------------- # | | |A|A|A|A| | | | | |B|B| | | | | # ----------------------------------- # ^ <------------------ ^ # SHOULD BE: | | # end=s-1 start=e-1 # end_slice, start_slice = start_slice - 1, end_slice - 1 # i == -1 triggers ``len(self) + i`` selection that points to the # last element, not before-the-first one, subtracting len(self) # compensates that. if end_slice == -1: end_slice -= len(self) if start_slice == -1: start_slice -= len(self) return start_slice, end_slice
[ "Compute", "slice", "locations", "for", "input", "labels", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4824-L4913
[ "def", "slice_locs", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "step", "=", "None", ",", "kind", "=", "None", ")", ":", "inc", "=", "(", "step", "is", "None", "or", "step", ">=", "0", ")", "if", "not", "inc", ":", "# If it's a reverse slice, temporarily swap bounds.", "start", ",", "end", "=", "end", ",", "start", "# GH 16785: If start and end happen to be date strings with UTC offsets", "# attempt to parse and check that the offsets are the same", "if", "(", "isinstance", "(", "start", ",", "(", "str", ",", "datetime", ")", ")", "and", "isinstance", "(", "end", ",", "(", "str", ",", "datetime", ")", ")", ")", ":", "try", ":", "ts_start", "=", "Timestamp", "(", "start", ")", "ts_end", "=", "Timestamp", "(", "end", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "else", ":", "if", "not", "tz_compare", "(", "ts_start", ".", "tzinfo", ",", "ts_end", ".", "tzinfo", ")", ":", "raise", "ValueError", "(", "\"Both dates must have the \"", "\"same UTC offset\"", ")", "start_slice", "=", "None", "if", "start", "is", "not", "None", ":", "start_slice", "=", "self", ".", "get_slice_bound", "(", "start", ",", "'left'", ",", "kind", ")", "if", "start_slice", "is", "None", ":", "start_slice", "=", "0", "end_slice", "=", "None", "if", "end", "is", "not", "None", ":", "end_slice", "=", "self", ".", "get_slice_bound", "(", "end", ",", "'right'", ",", "kind", ")", "if", "end_slice", "is", "None", ":", "end_slice", "=", "len", "(", "self", ")", "if", "not", "inc", ":", "# Bounds at this moment are swapped, swap them back and shift by 1.", "#", "# slice_locs('B', 'A', step=-1): s='B', e='A'", "#", "# s='A' e='B'", "# AFTER SWAP: | |", "# v ------------------> V", "# -----------------------------------", "# | | |A|A|A|A| | | | | |B|B| | | | |", "# -----------------------------------", "# ^ <------------------ ^", "# SHOULD BE: | |", "# end=s-1 start=e-1", "#", "end_slice", ",", "start_slice", "=", "start_slice", "-", "1", ",", "end_slice", "-", "1", "# i == -1 triggers ``len(self) + i`` selection that points to the", "# last element, not before-the-first one, subtracting len(self)", "# compensates that.", "if", "end_slice", "==", "-", "1", ":", "end_slice", "-=", "len", "(", "self", ")", "if", "start_slice", "==", "-", "1", ":", "start_slice", "-=", "len", "(", "self", ")", "return", "start_slice", ",", "end_slice" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
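Editor's sketch of the reverse-slice branch (negative step), which the row's doctest does not cover; assumes pandas at the referenced commit:

>>> idx = pd.Index(list('abcd'))
>>> idx.slice_locs(start='c', end='b', step=-1)  # bounds are swapped and shifted by 1
(2, 0)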
train
Index.delete
Make new Index with passed location(-s) deleted. Returns ------- new_index : Index
pandas/core/indexes/base.py
def delete(self, loc): """ Make new Index with passed location(-s) deleted. Returns ------- new_index : Index """ return self._shallow_copy(np.delete(self._data, loc))
def delete(self, loc): """ Make new Index with passed location(-s) deleted. Returns ------- new_index : Index """ return self._shallow_copy(np.delete(self._data, loc))
[ "Make", "new", "Index", "with", "passed", "location", "(", "-", "s", ")", "deleted", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4915-L4923
[ "def", "delete", "(", "self", ",", "loc", ")", ":", "return", "self", ".", "_shallow_copy", "(", "np", ".", "delete", "(", "self", ".", "_data", ",", "loc", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
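Editor's sketch: `loc` may be a single position or several, mirroring `np.delete`; assumes pandas at the referenced commit.

>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.delete(1)
Index(['a', 'c'], dtype='object')
>>> idx.delete([0, 2])  # multiple locations at once
Index(['b'], dtype='object')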
train
Index.insert
Make new Index inserting new item at location. Follows Python list.append semantics for negative values. Parameters ---------- loc : int item : object Returns ------- new_index : Index
pandas/core/indexes/base.py
def insert(self, loc, item): """ Make new Index inserting new item at location. Follows Python list.append semantics for negative values. Parameters ---------- loc : int item : object Returns ------- new_index : Index """ _self = np.asarray(self) item = self._coerce_scalar_to_index(item)._ndarray_values idx = np.concatenate((_self[:loc], item, _self[loc:])) return self._shallow_copy_with_infer(idx)
def insert(self, loc, item): """ Make new Index inserting new item at location. Follows Python list.append semantics for negative values. Parameters ---------- loc : int item : object Returns ------- new_index : Index """ _self = np.asarray(self) item = self._coerce_scalar_to_index(item)._ndarray_values idx = np.concatenate((_self[:loc], item, _self[loc:])) return self._shallow_copy_with_infer(idx)
[ "Make", "new", "Index", "inserting", "new", "item", "at", "location", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4925-L4943
[ "def", "insert", "(", "self", ",", "loc", ",", "item", ")", ":", "_self", "=", "np", ".", "asarray", "(", "self", ")", "item", "=", "self", ".", "_coerce_scalar_to_index", "(", "item", ")", ".", "_ndarray_values", "idx", "=", "np", ".", "concatenate", "(", "(", "_self", "[", ":", "loc", "]", ",", "item", ",", "_self", "[", "loc", ":", "]", ")", ")", "return", "self", ".", "_shallow_copy_with_infer", "(", "idx", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
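Editor's sketch, including the negative-location behavior the docstring mentions; assumes pandas at the referenced commit:

>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.insert(1, 'x')
Index(['a', 'x', 'b', 'c'], dtype='object')
>>> idx.insert(-1, 'x')  # negative loc follows Python list semantics
Index(['a', 'b', 'x', 'c'], dtype='object')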
train
Index.drop
Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- dropped : Index Raises ------ KeyError If not all of the labels are found in the selected axis
pandas/core/indexes/base.py
def drop(self, labels, errors='raise'): """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- dropped : Index Raises ------ KeyError If not all of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): if errors != 'ignore': raise KeyError( '{} not found in axis'.format(labels[mask])) indexer = indexer[~mask] return self.delete(indexer)
def drop(self, labels, errors='raise'): """ Make new Index with passed list of labels deleted. Parameters ---------- labels : array-like errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and existing labels are dropped. Returns ------- dropped : Index Raises ------ KeyError If not all of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): if errors != 'ignore': raise KeyError( '{} not found in axis'.format(labels[mask])) indexer = indexer[~mask] return self.delete(indexer)
[ "Make", "new", "Index", "with", "passed", "list", "of", "labels", "deleted", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L4945-L4973
[ "def", "drop", "(", "self", ",", "labels", ",", "errors", "=", "'raise'", ")", ":", "arr_dtype", "=", "'object'", "if", "self", ".", "dtype", "==", "'object'", "else", "None", "labels", "=", "com", ".", "index_labels_to_array", "(", "labels", ",", "dtype", "=", "arr_dtype", ")", "indexer", "=", "self", ".", "get_indexer", "(", "labels", ")", "mask", "=", "indexer", "==", "-", "1", "if", "mask", ".", "any", "(", ")", ":", "if", "errors", "!=", "'ignore'", ":", "raise", "KeyError", "(", "'{} not found in axis'", ".", "format", "(", "labels", "[", "mask", "]", ")", ")", "indexer", "=", "indexer", "[", "~", "mask", "]", "return", "self", ".", "delete", "(", "indexer", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
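Editor's sketch of both error modes described above; assumes pandas at the referenced commit:

>>> idx = pd.Index(['a', 'b', 'c'])
>>> idx.drop(['a'])
Index(['b', 'c'], dtype='object')
>>> idx.drop(['z'], errors='ignore')  # missing label suppressed instead of raising KeyError
Index(['a', 'b', 'c'], dtype='object')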
train
Index._add_comparison_methods
Add in comparison methods.
pandas/core/indexes/base.py
def _add_comparison_methods(cls): """ Add in comparison methods. """ cls.__eq__ = _make_comparison_op(operator.eq, cls) cls.__ne__ = _make_comparison_op(operator.ne, cls) cls.__lt__ = _make_comparison_op(operator.lt, cls) cls.__gt__ = _make_comparison_op(operator.gt, cls) cls.__le__ = _make_comparison_op(operator.le, cls) cls.__ge__ = _make_comparison_op(operator.ge, cls)
def _add_comparison_methods(cls): """ Add in comparison methods. """ cls.__eq__ = _make_comparison_op(operator.eq, cls) cls.__ne__ = _make_comparison_op(operator.ne, cls) cls.__lt__ = _make_comparison_op(operator.lt, cls) cls.__gt__ = _make_comparison_op(operator.gt, cls) cls.__le__ = _make_comparison_op(operator.le, cls) cls.__ge__ = _make_comparison_op(operator.ge, cls)
[ "Add", "in", "comparison", "methods", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5004-L5013
[ "def", "_add_comparison_methods", "(", "cls", ")", ":", "cls", ".", "__eq__", "=", "_make_comparison_op", "(", "operator", ".", "eq", ",", "cls", ")", "cls", ".", "__ne__", "=", "_make_comparison_op", "(", "operator", ".", "ne", ",", "cls", ")", "cls", ".", "__lt__", "=", "_make_comparison_op", "(", "operator", ".", "lt", ",", "cls", ")", "cls", ".", "__gt__", "=", "_make_comparison_op", "(", "operator", ".", "gt", ",", "cls", ")", "cls", ".", "__le__", "=", "_make_comparison_op", "(", "operator", ".", "le", ",", "cls", ")", "cls", ".", "__ge__", "=", "_make_comparison_op", "(", "operator", ".", "ge", ",", "cls", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
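Editor's sketch: the generated dunders make comparisons element-wise, returning a boolean ndarray rather than a single bool; assumes pandas at the referenced commit.

>>> idx = pd.Index([1, 2, 3])
>>> idx == 2
array([False,  True, False])
>>> idx > 1
array([False,  True,  True])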
train
Index._add_numeric_methods_add_sub_disabled
Add in the numeric add/sub methods to disable.
pandas/core/indexes/base.py
def _add_numeric_methods_add_sub_disabled(cls): """ Add in the numeric add/sub methods to disable. """ cls.__add__ = make_invalid_op('__add__') cls.__radd__ = make_invalid_op('__radd__') cls.__iadd__ = make_invalid_op('__iadd__') cls.__sub__ = make_invalid_op('__sub__') cls.__rsub__ = make_invalid_op('__rsub__') cls.__isub__ = make_invalid_op('__isub__')
def _add_numeric_methods_add_sub_disabled(cls): """ Add in the numeric add/sub methods to disable. """ cls.__add__ = make_invalid_op('__add__') cls.__radd__ = make_invalid_op('__radd__') cls.__iadd__ = make_invalid_op('__iadd__') cls.__sub__ = make_invalid_op('__sub__') cls.__rsub__ = make_invalid_op('__rsub__') cls.__isub__ = make_invalid_op('__isub__')
[ "Add", "in", "the", "numeric", "add", "/", "sub", "methods", "to", "disable", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5016-L5025
[ "def", "_add_numeric_methods_add_sub_disabled", "(", "cls", ")", ":", "cls", ".", "__add__", "=", "make_invalid_op", "(", "'__add__'", ")", "cls", ".", "__radd__", "=", "make_invalid_op", "(", "'__radd__'", ")", "cls", ".", "__iadd__", "=", "make_invalid_op", "(", "'__iadd__'", ")", "cls", ".", "__sub__", "=", "make_invalid_op", "(", "'__sub__'", ")", "cls", ".", "__rsub__", "=", "make_invalid_op", "(", "'__rsub__'", ")", "cls", ".", "__isub__", "=", "make_invalid_op", "(", "'__isub__'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
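Editor's sketch of the resulting behavior on an index class that disables add/sub; the assumption that CategoricalIndex applies this hook in this pandas version is the editor's, not stated in the row.

>>> ci = pd.CategoricalIndex(['a', 'b'])
>>> ci + 1  # __add__ was replaced by make_invalid_op('__add__')
Traceback (most recent call last):
    ...
TypeError: cannot perform __add__ with this index type: CategoricalIndex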
train
Index._add_numeric_methods_disabled
Add in numeric methods to disable other than add/sub.
pandas/core/indexes/base.py
def _add_numeric_methods_disabled(cls): """ Add in numeric methods to disable other than add/sub. """ cls.__pow__ = make_invalid_op('__pow__') cls.__rpow__ = make_invalid_op('__rpow__') cls.__mul__ = make_invalid_op('__mul__') cls.__rmul__ = make_invalid_op('__rmul__') cls.__floordiv__ = make_invalid_op('__floordiv__') cls.__rfloordiv__ = make_invalid_op('__rfloordiv__') cls.__truediv__ = make_invalid_op('__truediv__') cls.__rtruediv__ = make_invalid_op('__rtruediv__') cls.__mod__ = make_invalid_op('__mod__') cls.__divmod__ = make_invalid_op('__divmod__') cls.__neg__ = make_invalid_op('__neg__') cls.__pos__ = make_invalid_op('__pos__') cls.__abs__ = make_invalid_op('__abs__') cls.__inv__ = make_invalid_op('__inv__')
def _add_numeric_methods_disabled(cls): """ Add in numeric methods to disable other than add/sub. """ cls.__pow__ = make_invalid_op('__pow__') cls.__rpow__ = make_invalid_op('__rpow__') cls.__mul__ = make_invalid_op('__mul__') cls.__rmul__ = make_invalid_op('__rmul__') cls.__floordiv__ = make_invalid_op('__floordiv__') cls.__rfloordiv__ = make_invalid_op('__rfloordiv__') cls.__truediv__ = make_invalid_op('__truediv__') cls.__rtruediv__ = make_invalid_op('__rtruediv__') cls.__mod__ = make_invalid_op('__mod__') cls.__divmod__ = make_invalid_op('__divmod__') cls.__neg__ = make_invalid_op('__neg__') cls.__pos__ = make_invalid_op('__pos__') cls.__abs__ = make_invalid_op('__abs__') cls.__inv__ = make_invalid_op('__inv__')
[ "Add", "in", "numeric", "methods", "to", "disable", "other", "than", "add", "/", "sub", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5028-L5045
[ "def", "_add_numeric_methods_disabled", "(", "cls", ")", ":", "cls", ".", "__pow__", "=", "make_invalid_op", "(", "'__pow__'", ")", "cls", ".", "__rpow__", "=", "make_invalid_op", "(", "'__rpow__'", ")", "cls", ".", "__mul__", "=", "make_invalid_op", "(", "'__mul__'", ")", "cls", ".", "__rmul__", "=", "make_invalid_op", "(", "'__rmul__'", ")", "cls", ".", "__floordiv__", "=", "make_invalid_op", "(", "'__floordiv__'", ")", "cls", ".", "__rfloordiv__", "=", "make_invalid_op", "(", "'__rfloordiv__'", ")", "cls", ".", "__truediv__", "=", "make_invalid_op", "(", "'__truediv__'", ")", "cls", ".", "__rtruediv__", "=", "make_invalid_op", "(", "'__rtruediv__'", ")", "cls", ".", "__mod__", "=", "make_invalid_op", "(", "'__mod__'", ")", "cls", ".", "__divmod__", "=", "make_invalid_op", "(", "'__divmod__'", ")", "cls", ".", "__neg__", "=", "make_invalid_op", "(", "'__neg__'", ")", "cls", ".", "__pos__", "=", "make_invalid_op", "(", "'__pos__'", ")", "cls", ".", "__abs__", "=", "make_invalid_op", "(", "'__abs__'", ")", "cls", ".", "__inv__", "=", "make_invalid_op", "(", "'__inv__'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
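Editor's sketch, under the same assumption as above (CategoricalIndex disables these ops in this pandas version):

>>> ci = pd.CategoricalIndex(['a', 'b'])
>>> ci * 2  # __mul__ is one of the non-add/sub methods disabled here
Traceback (most recent call last):
    ...
TypeError: cannot perform __mul__ with this index type: CategoricalIndex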
train
Index._validate_for_numeric_unaryop
Validate if we can perform a numeric unary operation.
pandas/core/indexes/base.py
def _validate_for_numeric_unaryop(self, op, opstr): """ Validate if we can perform a numeric unary operation. """ if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " "{opstr} for type: {typ}" .format(opstr=opstr, typ=type(self).__name__))
def _validate_for_numeric_unaryop(self, op, opstr): """ Validate if we can perform a numeric unary operation. """ if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " "{opstr} for type: {typ}" .format(opstr=opstr, typ=type(self).__name__))
[ "Validate", "if", "we", "can", "perform", "a", "numeric", "unary", "operation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5053-L5060
[ "def", "_validate_for_numeric_unaryop", "(", "self", ",", "op", ",", "opstr", ")", ":", "if", "not", "self", ".", "_is_numeric_dtype", ":", "raise", "TypeError", "(", "\"cannot evaluate a numeric op \"", "\"{opstr} for type: {typ}\"", ".", "format", "(", "opstr", "=", "opstr", ",", "typ", "=", "type", "(", "self", ")", ".", "__name__", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
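Editor's sketch calling the private validator directly, to show the exact error it raises for a non-numeric index; assumes pandas at the referenced commit.

>>> import operator
>>> idx = pd.Index(['a', 'b'])  # object dtype, so _is_numeric_dtype is False
>>> idx._validate_for_numeric_unaryop(operator.neg, '__neg__')
Traceback (most recent call last):
    ...
TypeError: cannot evaluate a numeric op __neg__ for type: Index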
train
Index._validate_for_numeric_binop
Return valid other; evaluate or raise TypeError if we are not of the appropriate type. Notes ----- This is an internal method called by ops.
pandas/core/indexes/base.py
def _validate_for_numeric_binop(self, other, op): """ Return valid other; evaluate or raise TypeError if we are not of the appropriate type. Notes ----- This is an internal method called by ops. """ opstr = '__{opname}__'.format(opname=op.__name__) # if we are an inheritor of numeric, # but not actually numeric (e.g. DatetimeIndex/PeriodIndex) if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op {opstr} " "for type: {typ}" .format(opstr=opstr, typ=type(self).__name__)) if isinstance(other, Index): if not other._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " "{opstr} with type: {typ}" .format(opstr=opstr, typ=type(other))) elif isinstance(other, np.ndarray) and not other.ndim: other = other.item() if isinstance(other, (Index, ABCSeries, np.ndarray)): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") other = com.values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)): # higher up to handle pass elif isinstance(other, (datetime, np.datetime64)): # higher up to handle pass else: if not (is_float(other) or is_integer(other)): raise TypeError("can only perform ops with scalar values") return other
def _validate_for_numeric_binop(self, other, op): """ Return valid other; evaluate or raise TypeError if we are not of the appropriate type. Notes ----- This is an internal method called by ops. """ opstr = '__{opname}__'.format(opname=op.__name__) # if we are an inheritor of numeric, # but not actually numeric (e.g. DatetimeIndex/PeriodIndex) if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op {opstr} " "for type: {typ}" .format(opstr=opstr, typ=type(self).__name__)) if isinstance(other, Index): if not other._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op " "{opstr} with type: {typ}" .format(opstr=opstr, typ=type(other))) elif isinstance(other, np.ndarray) and not other.ndim: other = other.item() if isinstance(other, (Index, ABCSeries, np.ndarray)): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") other = com.values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)): # higher up to handle pass elif isinstance(other, (datetime, np.datetime64)): # higher up to handle pass else: if not (is_float(other) or is_integer(other)): raise TypeError("can only perform ops with scalar values") return other
[ "Return", "valid", "other", ";", "evaluate", "or", "raise", "TypeError", "if", "we", "are", "not", "of", "the", "appropriate", "type", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5062-L5105
[ "def", "_validate_for_numeric_binop", "(", "self", ",", "other", ",", "op", ")", ":", "opstr", "=", "'__{opname}__'", ".", "format", "(", "opname", "=", "op", ".", "__name__", ")", "# if we are an inheritor of numeric,", "# but not actually numeric (e.g. DatetimeIndex/PeriodIndex)", "if", "not", "self", ".", "_is_numeric_dtype", ":", "raise", "TypeError", "(", "\"cannot evaluate a numeric op {opstr} \"", "\"for type: {typ}\"", ".", "format", "(", "opstr", "=", "opstr", ",", "typ", "=", "type", "(", "self", ")", ".", "__name__", ")", ")", "if", "isinstance", "(", "other", ",", "Index", ")", ":", "if", "not", "other", ".", "_is_numeric_dtype", ":", "raise", "TypeError", "(", "\"cannot evaluate a numeric op \"", "\"{opstr} with type: {typ}\"", ".", "format", "(", "opstr", "=", "opstr", ",", "typ", "=", "type", "(", "other", ")", ")", ")", "elif", "isinstance", "(", "other", ",", "np", ".", "ndarray", ")", "and", "not", "other", ".", "ndim", ":", "other", "=", "other", ".", "item", "(", ")", "if", "isinstance", "(", "other", ",", "(", "Index", ",", "ABCSeries", ",", "np", ".", "ndarray", ")", ")", ":", "if", "len", "(", "self", ")", "!=", "len", "(", "other", ")", ":", "raise", "ValueError", "(", "\"cannot evaluate a numeric op with \"", "\"unequal lengths\"", ")", "other", "=", "com", ".", "values_from_object", "(", "other", ")", "if", "other", ".", "dtype", ".", "kind", "not", "in", "[", "'f'", ",", "'i'", ",", "'u'", "]", ":", "raise", "TypeError", "(", "\"cannot evaluate a numeric op \"", "\"with a non-numeric dtype\"", ")", "elif", "isinstance", "(", "other", ",", "(", "ABCDateOffset", ",", "np", ".", "timedelta64", ",", "timedelta", ")", ")", ":", "# higher up to handle", "pass", "elif", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ")", ")", ":", "# higher up to handle", "pass", "else", ":", "if", "not", "(", "is_float", "(", "other", ")", "or", "is_integer", "(", "other", ")", ")", ":", "raise", "TypeError", "(", "\"can only perform ops with scalar values\"", ")", "return", "other" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
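Editor's sketch calling the private validator directly: a numeric scalar passes through, while an unequal-length array-like is rejected. Assumes pandas at the referenced commit.

>>> import operator
>>> left = pd.Index([1, 2, 3])
>>> left._validate_for_numeric_binop(2, operator.add)  # valid scalar other is returned
2
>>> left._validate_for_numeric_binop(pd.Index([1, 2]), operator.add)
Traceback (most recent call last):
    ...
ValueError: cannot evaluate a numeric op with unequal lengths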
train
Index._add_numeric_methods_binary
Add in numeric methods.
pandas/core/indexes/base.py
def _add_numeric_methods_binary(cls): """ Add in numeric methods. """ cls.__add__ = _make_arithmetic_op(operator.add, cls) cls.__radd__ = _make_arithmetic_op(ops.radd, cls) cls.__sub__ = _make_arithmetic_op(operator.sub, cls) cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls) cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls) cls.__pow__ = _make_arithmetic_op(operator.pow, cls) cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls) cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls) # TODO: rmod? rdivmod? cls.__mod__ = _make_arithmetic_op(operator.mod, cls) cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls) cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls) cls.__divmod__ = _make_arithmetic_op(divmod, cls) cls.__mul__ = _make_arithmetic_op(operator.mul, cls) cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls)
def _add_numeric_methods_binary(cls): """ Add in numeric methods. """ cls.__add__ = _make_arithmetic_op(operator.add, cls) cls.__radd__ = _make_arithmetic_op(ops.radd, cls) cls.__sub__ = _make_arithmetic_op(operator.sub, cls) cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls) cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls) cls.__pow__ = _make_arithmetic_op(operator.pow, cls) cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls) cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls) # TODO: rmod? rdivmod? cls.__mod__ = _make_arithmetic_op(operator.mod, cls) cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls) cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls) cls.__divmod__ = _make_arithmetic_op(divmod, cls) cls.__mul__ = _make_arithmetic_op(operator.mul, cls) cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls)
[ "Add", "in", "numeric", "methods", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5108-L5128
[ "def", "_add_numeric_methods_binary", "(", "cls", ")", ":", "cls", ".", "__add__", "=", "_make_arithmetic_op", "(", "operator", ".", "add", ",", "cls", ")", "cls", ".", "__radd__", "=", "_make_arithmetic_op", "(", "ops", ".", "radd", ",", "cls", ")", "cls", ".", "__sub__", "=", "_make_arithmetic_op", "(", "operator", ".", "sub", ",", "cls", ")", "cls", ".", "__rsub__", "=", "_make_arithmetic_op", "(", "ops", ".", "rsub", ",", "cls", ")", "cls", ".", "__rpow__", "=", "_make_arithmetic_op", "(", "ops", ".", "rpow", ",", "cls", ")", "cls", ".", "__pow__", "=", "_make_arithmetic_op", "(", "operator", ".", "pow", ",", "cls", ")", "cls", ".", "__truediv__", "=", "_make_arithmetic_op", "(", "operator", ".", "truediv", ",", "cls", ")", "cls", ".", "__rtruediv__", "=", "_make_arithmetic_op", "(", "ops", ".", "rtruediv", ",", "cls", ")", "# TODO: rmod? rdivmod?", "cls", ".", "__mod__", "=", "_make_arithmetic_op", "(", "operator", ".", "mod", ",", "cls", ")", "cls", ".", "__floordiv__", "=", "_make_arithmetic_op", "(", "operator", ".", "floordiv", ",", "cls", ")", "cls", ".", "__rfloordiv__", "=", "_make_arithmetic_op", "(", "ops", ".", "rfloordiv", ",", "cls", ")", "cls", ".", "__divmod__", "=", "_make_arithmetic_op", "(", "divmod", ",", "cls", ")", "cls", ".", "__mul__", "=", "_make_arithmetic_op", "(", "operator", ".", "mul", ",", "cls", ")", "cls", ".", "__rmul__", "=", "_make_arithmetic_op", "(", "ops", ".", "rmul", ",", "cls", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
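Editor's sketch of the wired-up binary methods on a numeric index, including a reflected variant; assumes pandas at the referenced commit:

>>> idx = pd.Index([1, 2, 3])
>>> idx + 1
Int64Index([2, 3, 4], dtype='int64')
>>> 10 - idx  # reflected op, routed through ops.rsub
Int64Index([9, 8, 7], dtype='int64')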
train
Index._add_numeric_methods_unary
Add in numeric unary methods.
pandas/core/indexes/base.py
def _add_numeric_methods_unary(cls): """ Add in numeric unary methods. """ def _make_evaluate_unary(op, opstr): def _evaluate_numeric_unary(self): self._validate_for_numeric_unaryop(op, opstr) attrs = self._get_attributes_dict() attrs = self._maybe_update_attributes(attrs) return Index(op(self.values), **attrs) _evaluate_numeric_unary.__name__ = opstr return _evaluate_numeric_unary cls.__neg__ = _make_evaluate_unary(operator.neg, '__neg__') cls.__pos__ = _make_evaluate_unary(operator.pos, '__pos__') cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__') cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
def _add_numeric_methods_unary(cls): """ Add in numeric unary methods. """ def _make_evaluate_unary(op, opstr): def _evaluate_numeric_unary(self): self._validate_for_numeric_unaryop(op, opstr) attrs = self._get_attributes_dict() attrs = self._maybe_update_attributes(attrs) return Index(op(self.values), **attrs) _evaluate_numeric_unary.__name__ = opstr return _evaluate_numeric_unary cls.__neg__ = _make_evaluate_unary(operator.neg, '__neg__') cls.__pos__ = _make_evaluate_unary(operator.pos, '__pos__') cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__') cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
[ "Add", "in", "numeric", "unary", "methods", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5131-L5150
[ "def", "_add_numeric_methods_unary", "(", "cls", ")", ":", "def", "_make_evaluate_unary", "(", "op", ",", "opstr", ")", ":", "def", "_evaluate_numeric_unary", "(", "self", ")", ":", "self", ".", "_validate_for_numeric_unaryop", "(", "op", ",", "opstr", ")", "attrs", "=", "self", ".", "_get_attributes_dict", "(", ")", "attrs", "=", "self", ".", "_maybe_update_attributes", "(", "attrs", ")", "return", "Index", "(", "op", "(", "self", ".", "values", ")", ",", "*", "*", "attrs", ")", "_evaluate_numeric_unary", ".", "__name__", "=", "opstr", "return", "_evaluate_numeric_unary", "cls", ".", "__neg__", "=", "_make_evaluate_unary", "(", "operator", ".", "neg", ",", "'__neg__'", ")", "cls", ".", "__pos__", "=", "_make_evaluate_unary", "(", "operator", ".", "pos", ",", "'__pos__'", ")", "cls", ".", "__abs__", "=", "_make_evaluate_unary", "(", "np", ".", "abs", ",", "'__abs__'", ")", "cls", ".", "__inv__", "=", "_make_evaluate_unary", "(", "lambda", "x", ":", "-", "x", ",", "'__inv__'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
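Editor's sketch of the generated unary methods; assumes pandas at the referenced commit:

>>> idx = pd.Index([1, -2, 3])
>>> -idx
Int64Index([-1, 2, -3], dtype='int64')
>>> abs(idx)  # __abs__ is built from np.abs
Int64Index([1, 2, 3], dtype='int64')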
train
Index._add_logical_methods
Add in logical methods.
pandas/core/indexes/base.py
def _add_logical_methods(cls): """ Add in logical methods. """ _doc = """ %(desc)s Parameters ---------- *args These parameters will be passed to numpy.%(outname)s. **kwargs These parameters will be passed to numpy.%(outname)s. Returns ------- %(outname)s : bool or array_like (if axis is specified) A single element array_like may be converted to bool.""" _index_shared_docs['index_all'] = dedent(""" See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- **all** True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False **any** True, because ``1`` is considered True. >>> pd.Index([0, 0, 1]).any() True False, because ``0`` is considered False. >>> pd.Index([0, 0, 0]).any() False """) _index_shared_docs['index_any'] = dedent(""" See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """) def _make_logical_function(name, desc, f): @Substitution(outname=name, desc=desc) @Appender(_index_shared_docs['index_' + name]) @Appender(_doc) def logical_func(self, *args, **kwargs): result = f(self.values) if (isinstance(result, (np.ndarray, ABCSeries, Index)) and result.ndim == 0): # return NumPy type return result.dtype.type(result.item()) else: # pragma: no cover return result logical_func.__name__ = name return logical_func cls.all = _make_logical_function('all', 'Return whether all elements ' 'are True.', np.all) cls.any = _make_logical_function('any', 'Return whether any element is True.', np.any)
def _add_logical_methods(cls): """ Add in logical methods. """ _doc = """ %(desc)s Parameters ---------- *args These parameters will be passed to numpy.%(outname)s. **kwargs These parameters will be passed to numpy.%(outname)s. Returns ------- %(outname)s : bool or array_like (if axis is specified) A single element array_like may be converted to bool.""" _index_shared_docs['index_all'] = dedent(""" See Also -------- Index.any : Return whether any element in an Index is True. Series.any : Return whether any element in a Series is True. Series.all : Return whether all elements in a Series are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- **all** True, because nonzero integers are considered True. >>> pd.Index([1, 2, 3]).all() True False, because ``0`` is considered False. >>> pd.Index([0, 1, 2]).all() False **any** True, because ``1`` is considered True. >>> pd.Index([0, 0, 1]).any() True False, because ``0`` is considered False. >>> pd.Index([0, 0, 0]).any() False """) _index_shared_docs['index_any'] = dedent(""" See Also -------- Index.all : Return whether all elements are True. Series.all : Return whether all elements are True. Notes ----- Not a Number (NaN), positive infinity and negative infinity evaluate to True because these are not equal to zero. Examples -------- >>> index = pd.Index([0, 1, 2]) >>> index.any() True >>> index = pd.Index([0, 0, 0]) >>> index.any() False """) def _make_logical_function(name, desc, f): @Substitution(outname=name, desc=desc) @Appender(_index_shared_docs['index_' + name]) @Appender(_doc) def logical_func(self, *args, **kwargs): result = f(self.values) if (isinstance(result, (np.ndarray, ABCSeries, Index)) and result.ndim == 0): # return NumPy type return result.dtype.type(result.item()) else: # pragma: no cover return result logical_func.__name__ = name return logical_func cls.all = _make_logical_function('all', 'Return whether all elements ' 'are True.', np.all) cls.any = _make_logical_function('any', 'Return whether any element is True.', np.any)
[ "Add", "in", "logical", "methods", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/base.py#L5158-L5261
[ "def", "_add_logical_methods", "(", "cls", ")", ":", "_doc", "=", "\"\"\"\n %(desc)s\n\n Parameters\n ----------\n *args\n These parameters will be passed to numpy.%(outname)s.\n **kwargs\n These parameters will be passed to numpy.%(outname)s.\n\n Returns\n -------\n %(outname)s : bool or array_like (if axis is specified)\n A single element array_like may be converted to bool.\"\"\"", "_index_shared_docs", "[", "'index_all'", "]", "=", "dedent", "(", "\"\"\"\n\n See Also\n --------\n Index.any : Return whether any element in an Index is True.\n Series.any : Return whether any element in a Series is True.\n Series.all : Return whether all elements in a Series are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n **all**\n\n True, because nonzero integers are considered True.\n\n >>> pd.Index([1, 2, 3]).all()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 1, 2]).all()\n False\n\n **any**\n\n True, because ``1`` is considered True.\n\n >>> pd.Index([0, 0, 1]).any()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 0, 0]).any()\n False\n \"\"\"", ")", "_index_shared_docs", "[", "'index_any'", "]", "=", "dedent", "(", "\"\"\"\n\n See Also\n --------\n Index.all : Return whether all elements are True.\n Series.all : Return whether all elements are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n >>> index = pd.Index([0, 1, 2])\n >>> index.any()\n True\n\n >>> index = pd.Index([0, 0, 0])\n >>> index.any()\n False\n \"\"\"", ")", "def", "_make_logical_function", "(", "name", ",", "desc", ",", "f", ")", ":", "@", "Substitution", "(", "outname", "=", "name", ",", "desc", "=", "desc", ")", "@", "Appender", "(", "_index_shared_docs", "[", "'index_'", "+", "name", "]", ")", "@", "Appender", "(", "_doc", ")", "def", "logical_func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "result", "=", "f", "(", "self", ".", "values", ")", "if", "(", "isinstance", "(", "result", ",", "(", "np", ".", "ndarray", ",", "ABCSeries", ",", "Index", ")", ")", "and", "result", ".", "ndim", "==", "0", ")", ":", "# return NumPy type", "return", "result", ".", "dtype", ".", "type", "(", "result", ".", "item", "(", ")", ")", "else", ":", "# pragma: no cover", "return", "result", "logical_func", ".", "__name__", "=", "name", "return", "logical_func", "cls", ".", "all", "=", "_make_logical_function", "(", "'all'", ",", "'Return whether all elements '", "'are True.'", ",", "np", ".", "all", ")", "cls", ".", "any", "=", "_make_logical_function", "(", "'any'", ",", "'Return whether any element is True.'", ",", "np", ".", "any", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
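Editor's sketch of the zero-dimensional unboxing branch in `logical_func` (the `result.dtype.type(result.item())` line); assumes pandas at the referenced commit:

>>> res = pd.Index([1, 2, 3]).all()
>>> res
True
>>> type(res)  # 0-d numpy result is unboxed to a NumPy scalar, not a Python bool
<class 'numpy.bool_'>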
train
_get_grouper
create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values If validate, then check for key/level overlaps
pandas/core/groupby/grouper.py
def _get_grouper(obj, key=None, axis=0, level=None, sort=True, observed=False, mutated=False, validate=True): """ create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values If validate, then check for key/level overlaps """ group_axis = obj._get_axis(axis) # validate that the passed single level is compatible with the passed # axis of the object if level is not None: # TODO: These if-block and else-block are almost same. # MultiIndex instance check is removable, but it seems that there are # some processes only for non-MultiIndex in else-block, # eg. `obj.index.name != level`. We have to consider carefully whether # these are applicable for MultiIndex. Even if these are applicable, # we need to check if it makes no side effect to subsequent processes # on the outside of this condition. # (GH 17621) if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): # Get the level values from group_axis key = group_axis.get_level_values(level) level = None else: # allow level to be a length-one list-like object # (e.g., level=[0]) # GH 13901 if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError('No group keys passed!') else: raise ValueError('multiple levels only valid with ' 'MultiIndex') if isinstance(level, str): if obj.index.name != level: raise ValueError('level name {} is not the name of the ' 'index'.format(level)) elif level > 0 or level < -1: raise ValueError( 'level > 0 or level < -1 only valid with MultiIndex') # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. level = None key = group_axis # a passed-in Grouper, directly convert if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, [], obj else: return grouper, {key.key}, obj # already have a BaseGrouper, just return it elif isinstance(key, BaseGrouper): return key, [], obj # In the future, a tuple key will always mean an actual key, # not an iterable of keys. In the meantime, we attempt to provide # a warning. We can assume that the user wanted a list of keys when # the key is not in the index. We just have to be careful with # unhashble elements of `key`. Any unhashable elements implies that # they wanted a list of keys. # https://github.com/pandas-dev/pandas/issues/18314 is_tuple = isinstance(key, tuple) all_hashable = is_tuple and is_hashable(key) if is_tuple: if ((all_hashable and key not in obj and set(key).issubset(obj)) or not all_hashable): # column names ('a', 'b') -> ['a', 'b'] # arrays like (a, b) -> [a, b] msg = ("Interpreting tuple 'by' as a list of keys, rather than " "a single key. Use 'by=[...]' instead of 'by=(...)'. 
In " "the future, a tuple will always mean a single key.") warnings.warn(msg, FutureWarning, stacklevel=5) key = list(key) if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) # what are we after, exactly? any_callable = any(callable(g) or isinstance(g, dict) for g in keys) any_groupers = any(isinstance(g, Grouper) for g in keys) any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys) # is this an index replacement? if (not any_callable and not any_arraylike and not any_groupers and match_axis_length and level is None): if isinstance(obj, DataFrame): all_in_columns_index = all(g in obj.columns or g in obj.index.names for g in keys) elif isinstance(obj, Series): all_in_columns_index = all(g in obj.index.names for g in keys) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings = [] exclusions = [] # if the actual grouper should be obj[key] def is_in_axis(key): if not _is_label_like(key): try: obj._data.items.get_loc(key) except Exception: return False return True # if the grouper is obj[name] def is_in_obj(gpr): try: return id(gpr) == id(obj[gpr.name]) except Exception: return False for i, (gpr, level) in enumerate(zip(keys, levels)): if is_in_obj(gpr): # df.groupby(df['name']) in_axis, name = True, gpr.name exclusions.append(name) elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.append(name) elif obj._is_level_reference(gpr): in_axis, name, level, gpr = False, None, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.append(gpr.key) in_axis, name = False, None else: in_axis, name = False, None if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: raise ValueError( ("Length of grouper ({len_gpr}) and axis ({len_axis})" " must be same length" .format(len_gpr=len(gpr), len_axis=obj.shape[axis]))) # create the Grouping # allow us to passing the actual Grouping as the gpr ping = (Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort, observed=observed, in_axis=in_axis) if not isinstance(gpr, Grouping) else gpr) groupings.append(ping) if len(groupings) == 0: raise ValueError('No group keys passed!') # create the internals grouper grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated) return grouper, exclusions, obj
def _get_grouper(obj, key=None, axis=0, level=None, sort=True, observed=False, mutated=False, validate=True): """ create and return a BaseGrouper, which is an internal mapping of how to create the grouper indexers. This may be composed of multiple Grouping objects, indicating multiple groupers Groupers are ultimately index mappings. They can originate as: index mappings, keys to columns, functions, or Groupers Groupers enable local references to axis,level,sort, while the passed in axis, level, and sort are 'global'. This routine tries to figure out what the passing in references are and then creates a Grouping for each one, combined into a BaseGrouper. If observed & we have a categorical grouper, only show the observed values If validate, then check for key/level overlaps """ group_axis = obj._get_axis(axis) # validate that the passed single level is compatible with the passed # axis of the object if level is not None: # TODO: These if-block and else-block are almost same. # MultiIndex instance check is removable, but it seems that there are # some processes only for non-MultiIndex in else-block, # eg. `obj.index.name != level`. We have to consider carefully whether # these are applicable for MultiIndex. Even if these are applicable, # we need to check if it makes no side effect to subsequent processes # on the outside of this condition. # (GH 17621) if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): # Get the level values from group_axis key = group_axis.get_level_values(level) level = None else: # allow level to be a length-one list-like object # (e.g., level=[0]) # GH 13901 if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError('No group keys passed!') else: raise ValueError('multiple levels only valid with ' 'MultiIndex') if isinstance(level, str): if obj.index.name != level: raise ValueError('level name {} is not the name of the ' 'index'.format(level)) elif level > 0 or level < -1: raise ValueError( 'level > 0 or level < -1 only valid with MultiIndex') # NOTE: `group_axis` and `group_axis.get_level_values(level)` # are same in this section. level = None key = group_axis # a passed-in Grouper, directly convert if isinstance(key, Grouper): binner, grouper, obj = key._get_grouper(obj, validate=False) if key.key is None: return grouper, [], obj else: return grouper, {key.key}, obj # already have a BaseGrouper, just return it elif isinstance(key, BaseGrouper): return key, [], obj # In the future, a tuple key will always mean an actual key, # not an iterable of keys. In the meantime, we attempt to provide # a warning. We can assume that the user wanted a list of keys when # the key is not in the index. We just have to be careful with # unhashble elements of `key`. Any unhashable elements implies that # they wanted a list of keys. # https://github.com/pandas-dev/pandas/issues/18314 is_tuple = isinstance(key, tuple) all_hashable = is_tuple and is_hashable(key) if is_tuple: if ((all_hashable and key not in obj and set(key).issubset(obj)) or not all_hashable): # column names ('a', 'b') -> ['a', 'b'] # arrays like (a, b) -> [a, b] msg = ("Interpreting tuple 'by' as a list of keys, rather than " "a single key. Use 'by=[...]' instead of 'by=(...)'. In " "the future, a tuple will always mean a single key.") warnings.warn(msg, FutureWarning, stacklevel=5) key = list(key) if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) # what are we after, exactly? any_callable = any(callable(g) or isinstance(g, dict) for g in keys) any_groupers = any(isinstance(g, Grouper) for g in keys) any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys) # is this an index replacement? if (not any_callable and not any_arraylike and not any_groupers and match_axis_length and level is None): if isinstance(obj, DataFrame): all_in_columns_index = all(g in obj.columns or g in obj.index.names for g in keys) elif isinstance(obj, Series): all_in_columns_index = all(g in obj.index.names for g in keys) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings = [] exclusions = [] # if the actual grouper should be obj[key] def is_in_axis(key): if not _is_label_like(key): try: obj._data.items.get_loc(key) except Exception: return False return True # if the grouper is obj[name] def is_in_obj(gpr): try: return id(gpr) == id(obj[gpr.name]) except Exception: return False for i, (gpr, level) in enumerate(zip(keys, levels)): if is_in_obj(gpr): # df.groupby(df['name']) in_axis, name = True, gpr.name exclusions.append(name) elif is_in_axis(gpr): # df.groupby('name') if gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr) in_axis, name, gpr = True, gpr, obj[gpr] exclusions.append(name) elif obj._is_level_reference(gpr): in_axis, name, level, gpr = False, None, gpr, None else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: # Add key to exclusions exclusions.append(gpr.key) in_axis, name = False, None else: in_axis, name = False, None if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: raise ValueError( ("Length of grouper ({len_gpr}) and axis ({len_axis})" " must be same length" .format(len_gpr=len(gpr), len_axis=obj.shape[axis]))) # create the Grouping # allow us to passing the actual Grouping as the gpr ping = (Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort, observed=observed, in_axis=in_axis) if not isinstance(gpr, Grouping) else gpr) groupings.append(ping) if len(groupings) == 0: raise ValueError('No group keys passed!') # create the internals grouper grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated) return grouper, exclusions, obj
[ "create", "and", "return", "a", "BaseGrouper", "which", "is", "an", "internal", "mapping", "of", "how", "to", "create", "the", "grouper", "indexers", ".", "This", "may", "be", "composed", "of", "multiple", "Grouping", "objects", "indicating", "multiple", "groupers" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/grouper.py#L406-L612
[ "def", "_get_grouper", "(", "obj", ",", "key", "=", "None", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "sort", "=", "True", ",", "observed", "=", "False", ",", "mutated", "=", "False", ",", "validate", "=", "True", ")", ":", "group_axis", "=", "obj", ".", "_get_axis", "(", "axis", ")", "# validate that the passed single level is compatible with the passed", "# axis of the object", "if", "level", "is", "not", "None", ":", "# TODO: These if-block and else-block are almost same.", "# MultiIndex instance check is removable, but it seems that there are", "# some processes only for non-MultiIndex in else-block,", "# eg. `obj.index.name != level`. We have to consider carefully whether", "# these are applicable for MultiIndex. Even if these are applicable,", "# we need to check if it makes no side effect to subsequent processes", "# on the outside of this condition.", "# (GH 17621)", "if", "isinstance", "(", "group_axis", ",", "MultiIndex", ")", ":", "if", "is_list_like", "(", "level", ")", "and", "len", "(", "level", ")", "==", "1", ":", "level", "=", "level", "[", "0", "]", "if", "key", "is", "None", "and", "is_scalar", "(", "level", ")", ":", "# Get the level values from group_axis", "key", "=", "group_axis", ".", "get_level_values", "(", "level", ")", "level", "=", "None", "else", ":", "# allow level to be a length-one list-like object", "# (e.g., level=[0])", "# GH 13901", "if", "is_list_like", "(", "level", ")", ":", "nlevels", "=", "len", "(", "level", ")", "if", "nlevels", "==", "1", ":", "level", "=", "level", "[", "0", "]", "elif", "nlevels", "==", "0", ":", "raise", "ValueError", "(", "'No group keys passed!'", ")", "else", ":", "raise", "ValueError", "(", "'multiple levels only valid with '", "'MultiIndex'", ")", "if", "isinstance", "(", "level", ",", "str", ")", ":", "if", "obj", ".", "index", ".", "name", "!=", "level", ":", "raise", "ValueError", "(", "'level name {} is not the name of the '", "'index'", ".", "format", "(", "level", ")", ")", "elif", "level", ">", "0", "or", "level", "<", "-", "1", ":", "raise", "ValueError", "(", "'level > 0 or level < -1 only valid with MultiIndex'", ")", "# NOTE: `group_axis` and `group_axis.get_level_values(level)`", "# are same in this section.", "level", "=", "None", "key", "=", "group_axis", "# a passed-in Grouper, directly convert", "if", "isinstance", "(", "key", ",", "Grouper", ")", ":", "binner", ",", "grouper", ",", "obj", "=", "key", ".", "_get_grouper", "(", "obj", ",", "validate", "=", "False", ")", "if", "key", ".", "key", "is", "None", ":", "return", "grouper", ",", "[", "]", ",", "obj", "else", ":", "return", "grouper", ",", "{", "key", ".", "key", "}", ",", "obj", "# already have a BaseGrouper, just return it", "elif", "isinstance", "(", "key", ",", "BaseGrouper", ")", ":", "return", "key", ",", "[", "]", ",", "obj", "# In the future, a tuple key will always mean an actual key,", "# not an iterable of keys. In the meantime, we attempt to provide", "# a warning. We can assume that the user wanted a list of keys when", "# the key is not in the index. We just have to be careful with", "# unhashble elements of `key`. 
Any unhashable elements implies that", "# they wanted a list of keys.", "# https://github.com/pandas-dev/pandas/issues/18314", "is_tuple", "=", "isinstance", "(", "key", ",", "tuple", ")", "all_hashable", "=", "is_tuple", "and", "is_hashable", "(", "key", ")", "if", "is_tuple", ":", "if", "(", "(", "all_hashable", "and", "key", "not", "in", "obj", "and", "set", "(", "key", ")", ".", "issubset", "(", "obj", ")", ")", "or", "not", "all_hashable", ")", ":", "# column names ('a', 'b') -> ['a', 'b']", "# arrays like (a, b) -> [a, b]", "msg", "=", "(", "\"Interpreting tuple 'by' as a list of keys, rather than \"", "\"a single key. Use 'by=[...]' instead of 'by=(...)'. In \"", "\"the future, a tuple will always mean a single key.\"", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "5", ")", "key", "=", "list", "(", "key", ")", "if", "not", "isinstance", "(", "key", ",", "list", ")", ":", "keys", "=", "[", "key", "]", "match_axis_length", "=", "False", "else", ":", "keys", "=", "key", "match_axis_length", "=", "len", "(", "keys", ")", "==", "len", "(", "group_axis", ")", "# what are we after, exactly?", "any_callable", "=", "any", "(", "callable", "(", "g", ")", "or", "isinstance", "(", "g", ",", "dict", ")", "for", "g", "in", "keys", ")", "any_groupers", "=", "any", "(", "isinstance", "(", "g", ",", "Grouper", ")", "for", "g", "in", "keys", ")", "any_arraylike", "=", "any", "(", "isinstance", "(", "g", ",", "(", "list", ",", "tuple", ",", "Series", ",", "Index", ",", "np", ".", "ndarray", ")", ")", "for", "g", "in", "keys", ")", "# is this an index replacement?", "if", "(", "not", "any_callable", "and", "not", "any_arraylike", "and", "not", "any_groupers", "and", "match_axis_length", "and", "level", "is", "None", ")", ":", "if", "isinstance", "(", "obj", ",", "DataFrame", ")", ":", "all_in_columns_index", "=", "all", "(", "g", "in", "obj", ".", "columns", "or", "g", "in", "obj", ".", "index", ".", "names", "for", "g", "in", "keys", ")", "elif", "isinstance", "(", "obj", ",", "Series", ")", ":", "all_in_columns_index", "=", "all", "(", "g", "in", "obj", ".", "index", ".", "names", "for", "g", "in", "keys", ")", "if", "not", "all_in_columns_index", ":", "keys", "=", "[", "com", ".", "asarray_tuplesafe", "(", "keys", ")", "]", "if", "isinstance", "(", "level", ",", "(", "tuple", ",", "list", ")", ")", ":", "if", "key", "is", "None", ":", "keys", "=", "[", "None", "]", "*", "len", "(", "level", ")", "levels", "=", "level", "else", ":", "levels", "=", "[", "level", "]", "*", "len", "(", "keys", ")", "groupings", "=", "[", "]", "exclusions", "=", "[", "]", "# if the actual grouper should be obj[key]", "def", "is_in_axis", "(", "key", ")", ":", "if", "not", "_is_label_like", "(", "key", ")", ":", "try", ":", "obj", ".", "_data", ".", "items", ".", "get_loc", "(", "key", ")", "except", "Exception", ":", "return", "False", "return", "True", "# if the grouper is obj[name]", "def", "is_in_obj", "(", "gpr", ")", ":", "try", ":", "return", "id", "(", "gpr", ")", "==", "id", "(", "obj", "[", "gpr", ".", "name", "]", ")", "except", "Exception", ":", "return", "False", "for", "i", ",", "(", "gpr", ",", "level", ")", "in", "enumerate", "(", "zip", "(", "keys", ",", "levels", ")", ")", ":", "if", "is_in_obj", "(", "gpr", ")", ":", "# df.groupby(df['name'])", "in_axis", ",", "name", "=", "True", ",", "gpr", ".", "name", "exclusions", ".", "append", "(", "name", ")", "elif", "is_in_axis", "(", "gpr", ")", ":", "# df.groupby('name')", "if", "gpr", "in", "obj", ":", "if", 
"validate", ":", "obj", ".", "_check_label_or_level_ambiguity", "(", "gpr", ")", "in_axis", ",", "name", ",", "gpr", "=", "True", ",", "gpr", ",", "obj", "[", "gpr", "]", "exclusions", ".", "append", "(", "name", ")", "elif", "obj", ".", "_is_level_reference", "(", "gpr", ")", ":", "in_axis", ",", "name", ",", "level", ",", "gpr", "=", "False", ",", "None", ",", "gpr", ",", "None", "else", ":", "raise", "KeyError", "(", "gpr", ")", "elif", "isinstance", "(", "gpr", ",", "Grouper", ")", "and", "gpr", ".", "key", "is", "not", "None", ":", "# Add key to exclusions", "exclusions", ".", "append", "(", "gpr", ".", "key", ")", "in_axis", ",", "name", "=", "False", ",", "None", "else", ":", "in_axis", ",", "name", "=", "False", ",", "None", "if", "is_categorical_dtype", "(", "gpr", ")", "and", "len", "(", "gpr", ")", "!=", "obj", ".", "shape", "[", "axis", "]", ":", "raise", "ValueError", "(", "(", "\"Length of grouper ({len_gpr}) and axis ({len_axis})\"", "\" must be same length\"", ".", "format", "(", "len_gpr", "=", "len", "(", "gpr", ")", ",", "len_axis", "=", "obj", ".", "shape", "[", "axis", "]", ")", ")", ")", "# create the Grouping", "# allow us to passing the actual Grouping as the gpr", "ping", "=", "(", "Grouping", "(", "group_axis", ",", "gpr", ",", "obj", "=", "obj", ",", "name", "=", "name", ",", "level", "=", "level", ",", "sort", "=", "sort", ",", "observed", "=", "observed", ",", "in_axis", "=", "in_axis", ")", "if", "not", "isinstance", "(", "gpr", ",", "Grouping", ")", "else", "gpr", ")", "groupings", ".", "append", "(", "ping", ")", "if", "len", "(", "groupings", ")", "==", "0", ":", "raise", "ValueError", "(", "'No group keys passed!'", ")", "# create the internals grouper", "grouper", "=", "BaseGrouper", "(", "group_axis", ",", "groupings", ",", "sort", "=", "sort", ",", "mutated", "=", "mutated", ")", "return", "grouper", ",", "exclusions", ",", "obj" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
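
A minimal usage sketch of the key-resolution behaviour documented above, driven through the public groupby entry point (the frame, column names, and values here are invented for illustration):

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': ['x', 'x', 'y'], 'b': [1, 1, 2], 'c': [10, 20, 30]})

# keys to columns: one Grouping is built per column label
print(df.groupby(['a', 'b'])['c'].sum())

# an array-like the same length as the axis is used as the grouper directly
print(df.groupby(np.array(['p', 'q', 'p']))['c'].sum())

# note: by=('a', 'b') currently warns and is treated as ['a', 'b'];
# prefer the list form, as the FutureWarning in the source advises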
Grouper._get_grouper
Parameters ---------- obj : the subject object validate : boolean, default True if True, validate the grouper Returns ------- a tuple of binner, grouper, obj (possibly sorted)
pandas/core/groupby/grouper.py
def _get_grouper(self, obj, validate=True): """ Parameters ---------- obj : the subject object validate : boolean, default True if True, validate the grouper Returns ------- a tuple of binner, grouper, obj (possibly sorted) """ self._set_grouper(obj) self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key], axis=self.axis, level=self.level, sort=self.sort, validate=validate) return self.binner, self.grouper, self.obj
def _get_grouper(self, obj, validate=True): """ Parameters ---------- obj : the subject object validate : boolean, default True if True, validate the grouper Returns ------- a tuple of binner, grouper, obj (possibly sorted) """ self._set_grouper(obj) self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key], axis=self.axis, level=self.level, sort=self.sort, validate=validate) return self.binner, self.grouper, self.obj
[ "Parameters", "----------", "obj", ":", "the", "subject", "object", "validate", ":", "boolean", "default", "True", "if", "True", "validate", "the", "grouper" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/grouper.py#L112-L131
[ "def", "_get_grouper", "(", "self", ",", "obj", ",", "validate", "=", "True", ")", ":", "self", ".", "_set_grouper", "(", "obj", ")", "self", ".", "grouper", ",", "exclusions", ",", "self", ".", "obj", "=", "_get_grouper", "(", "self", ".", "obj", ",", "[", "self", ".", "key", "]", ",", "axis", "=", "self", ".", "axis", ",", "level", "=", "self", ".", "level", ",", "sort", "=", "self", ".", "sort", ",", "validate", "=", "validate", ")", "return", "self", ".", "binner", ",", "self", ".", "grouper", ",", "self", ".", "obj" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
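
For context, a short sketch of how this entry point is exercised from user code: passing a pd.Grouper to groupby routes through Grouper._get_grouper rather than plain label lookup (sample data invented; freq-based grouping assumes a datetime key):

import pandas as pd

df = pd.DataFrame({'date': pd.date_range('2019-01-01', periods=6, freq='D'),
                   'value': range(6)})

# the Grouper carries its own key/level/axis/sort settings
print(df.groupby(pd.Grouper(key='date', freq='3D'))['value'].sum())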
Grouper._set_grouper
given an object and the specifications, setup the internal grouper for this particular specification Parameters ---------- obj : the subject object sort : bool, default False whether the resulting grouper should be sorted
pandas/core/groupby/grouper.py
def _set_grouper(self, obj, sort=False): """ given an object and the specifications, setup the internal grouper for this particular specification Parameters ---------- obj : the subject object sort : bool, default False whether the resulting grouper should be sorted """ if self.key is not None and self.level is not None: raise ValueError( "The Grouper cannot specify both a key and a level!") # Keep self.grouper value before overriding if self._grouper is None: self._grouper = self.grouper # the key must be a valid info item if self.key is not None: key = self.key # The 'on' is already defined if (getattr(self.grouper, 'name', None) == key and isinstance(obj, ABCSeries)): ax = self._grouper.take(obj.index) else: if key not in obj._info_axis: raise KeyError( "The grouper name {0} is not found".format(key)) ax = Index(obj[key], name=key) else: ax = obj._get_axis(self.axis) if self.level is not None: level = self.level # if a level is given it must be a mi level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) ax = Index(ax._get_level_values(level), name=ax.names[level]) else: if level not in (0, ax.name): raise ValueError( "The level {0} is not valid".format(level)) # possibly sort if (self.sort or sort) and not ax.is_monotonic: # use stable sort to support first, last, nth indexer = self.indexer = ax.argsort(kind='mergesort') ax = ax.take(indexer) obj = obj._take(indexer, axis=self.axis, is_copy=False) self.obj = obj self.grouper = ax return self.grouper
def _set_grouper(self, obj, sort=False): """ given an object and the specifications, setup the internal grouper for this particular specification Parameters ---------- obj : the subject object sort : bool, default False whether the resulting grouper should be sorted """ if self.key is not None and self.level is not None: raise ValueError( "The Grouper cannot specify both a key and a level!") # Keep self.grouper value before overriding if self._grouper is None: self._grouper = self.grouper # the key must be a valid info item if self.key is not None: key = self.key # The 'on' is already defined if (getattr(self.grouper, 'name', None) == key and isinstance(obj, ABCSeries)): ax = self._grouper.take(obj.index) else: if key not in obj._info_axis: raise KeyError( "The grouper name {0} is not found".format(key)) ax = Index(obj[key], name=key) else: ax = obj._get_axis(self.axis) if self.level is not None: level = self.level # if a level is given it must be a mi level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) ax = Index(ax._get_level_values(level), name=ax.names[level]) else: if level not in (0, ax.name): raise ValueError( "The level {0} is not valid".format(level)) # possibly sort if (self.sort or sort) and not ax.is_monotonic: # use stable sort to support first, last, nth indexer = self.indexer = ax.argsort(kind='mergesort') ax = ax.take(indexer) obj = obj._take(indexer, axis=self.axis, is_copy=False) self.obj = obj self.grouper = ax return self.grouper
[ "given", "an", "object", "and", "the", "specifications", "setup", "the", "internal", "grouper", "for", "this", "particular", "specification" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/grouper.py#L133-L192
[ "def", "_set_grouper", "(", "self", ",", "obj", ",", "sort", "=", "False", ")", ":", "if", "self", ".", "key", "is", "not", "None", "and", "self", ".", "level", "is", "not", "None", ":", "raise", "ValueError", "(", "\"The Grouper cannot specify both a key and a level!\"", ")", "# Keep self.grouper value before overriding", "if", "self", ".", "_grouper", "is", "None", ":", "self", ".", "_grouper", "=", "self", ".", "grouper", "# the key must be a valid info item", "if", "self", ".", "key", "is", "not", "None", ":", "key", "=", "self", ".", "key", "# The 'on' is already defined", "if", "(", "getattr", "(", "self", ".", "grouper", ",", "'name'", ",", "None", ")", "==", "key", "and", "isinstance", "(", "obj", ",", "ABCSeries", ")", ")", ":", "ax", "=", "self", ".", "_grouper", ".", "take", "(", "obj", ".", "index", ")", "else", ":", "if", "key", "not", "in", "obj", ".", "_info_axis", ":", "raise", "KeyError", "(", "\"The grouper name {0} is not found\"", ".", "format", "(", "key", ")", ")", "ax", "=", "Index", "(", "obj", "[", "key", "]", ",", "name", "=", "key", ")", "else", ":", "ax", "=", "obj", ".", "_get_axis", "(", "self", ".", "axis", ")", "if", "self", ".", "level", "is", "not", "None", ":", "level", "=", "self", ".", "level", "# if a level is given it must be a mi level or", "# equivalent to the axis name", "if", "isinstance", "(", "ax", ",", "MultiIndex", ")", ":", "level", "=", "ax", ".", "_get_level_number", "(", "level", ")", "ax", "=", "Index", "(", "ax", ".", "_get_level_values", "(", "level", ")", ",", "name", "=", "ax", ".", "names", "[", "level", "]", ")", "else", ":", "if", "level", "not", "in", "(", "0", ",", "ax", ".", "name", ")", ":", "raise", "ValueError", "(", "\"The level {0} is not valid\"", ".", "format", "(", "level", ")", ")", "# possibly sort", "if", "(", "self", ".", "sort", "or", "sort", ")", "and", "not", "ax", ".", "is_monotonic", ":", "# use stable sort to support first, last, nth", "indexer", "=", "self", ".", "indexer", "=", "ax", ".", "argsort", "(", "kind", "=", "'mergesort'", ")", "ax", "=", "ax", ".", "take", "(", "indexer", ")", "obj", "=", "obj", ".", "_take", "(", "indexer", ",", "axis", "=", "self", ".", "axis", ",", "is_copy", "=", "False", ")", "self", ".", "obj", "=", "obj", "self", ".", "grouper", "=", "ax", "return", "self", ".", "grouper" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
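
A sketch of the two validation paths above: a level reference resolved against the axis, and the key/level exclusivity check. The second part deliberately pokes the private method shown above, so it is tied to this pandas version; data is invented:

import pandas as pd

idx = pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], [0, 1, 0, 1]],
                                names=['grp', 'pos'])
s = pd.Series([1, 2, 3, 4], index=idx)

# a level name is resolved to the matching MultiIndex level
print(s.groupby(pd.Grouper(level='grp')).sum())

# key and level together are rejected, per the first check in _set_grouper
try:
    pd.Grouper(key='grp', level='grp')._set_grouper(s)
except ValueError as err:
    print(err)  # The Grouper cannot specify both a key and a level!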
to_pickle
Pickle (serialize) object to file. Parameters ---------- obj : any object Any python object. path : str File path where the pickled object will be stored. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl")
pandas/io/pickle.py
def to_pickle(obj, path, compression='infer', protocol=pickle.HIGHEST_PROTOCOL): """ Pickle (serialize) object to file. Parameters ---------- obj : any object Any python object. path : str File path where the pickled object will be stored. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl") """ path = _stringify_path(path) f, fh = _get_handle(path, 'wb', compression=compression, is_text=False) if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL try: f.write(pickle.dumps(obj, protocol=protocol)) finally: f.close() for _f in fh: _f.close()
def to_pickle(obj, path, compression='infer', protocol=pickle.HIGHEST_PROTOCOL): """ Pickle (serialize) object to file. Parameters ---------- obj : any object Any python object. path : str File path where the pickled object will be stored. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' A string representing the compression to use in the output file. By default, infers from the file extension in specified path. .. versionadded:: 0.20.0 protocol : int Int which indicates which protocol should be used by the pickler, default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible values for this parameter depend on the version of Python. For Python 2.x, possible values are 0, 1, 2. For Python>=3.0, 3 is a valid value. For Python >= 3.4, 4 is a valid value. A negative value for the protocol parameter is equivalent to setting its value to HIGHEST_PROTOCOL. .. [1] https://docs.python.org/3/library/pickle.html .. versionadded:: 0.21.0 See Also -------- read_pickle : Load pickled pandas object (or any object) from file. DataFrame.to_hdf : Write DataFrame to an HDF5 file. DataFrame.to_sql : Write DataFrame to a SQL database. DataFrame.to_parquet : Write a DataFrame to the binary parquet format. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl") """ path = _stringify_path(path) f, fh = _get_handle(path, 'wb', compression=compression, is_text=False) if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL try: f.write(pickle.dumps(obj, protocol=protocol)) finally: f.close() for _f in fh: _f.close()
[ "Pickle", "(", "serialize", ")", "object", "to", "file", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pickle.py#L13-L83
[ "def", "to_pickle", "(", "obj", ",", "path", ",", "compression", "=", "'infer'", ",", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", ")", ":", "path", "=", "_stringify_path", "(", "path", ")", "f", ",", "fh", "=", "_get_handle", "(", "path", ",", "'wb'", ",", "compression", "=", "compression", ",", "is_text", "=", "False", ")", "if", "protocol", "<", "0", ":", "protocol", "=", "pickle", ".", "HIGHEST_PROTOCOL", "try", ":", "f", ".", "write", "(", "pickle", ".", "dumps", "(", "obj", ",", "protocol", "=", "protocol", ")", ")", "finally", ":", "f", ".", "close", "(", ")", "for", "_f", "in", "fh", ":", "_f", ".", "close", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
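
A round-trip sketch of the compression inference described in the docstring: with compression='infer' (the default), the '.gz' suffix selects gzip on both the write and read sides (file name invented):

import os
import pandas as pd

df = pd.DataFrame({'foo': range(5), 'bar': range(5, 10)})

pd.to_pickle(df, 'dummy.pkl.gz')                  # gzip inferred from suffix
print(pd.read_pickle('dummy.pkl.gz').equals(df))  # True
os.remove('dummy.pkl.gz')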
read_pickle
Load pickled pandas object (or any object) from file. .. warning:: Loading pickled data received from untrusted sources can be unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__. Parameters ---------- path : str File path where the pickled object will be loaded. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, xz or zip if path ends in '.gz', '.bz2', '.xz', or '.zip' respectively, and no decompression otherwise. Set to None for no decompression. .. versionadded:: 0.20.0 Returns ------- unpickled : same type as object stored in file See Also -------- DataFrame.to_pickle : Pickle (serialize) DataFrame object to file. Series.to_pickle : Pickle (serialize) Series object to file. read_hdf : Read HDF5 file into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. read_parquet : Load a parquet object, returning a DataFrame. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl")
pandas/io/pickle.py
def read_pickle(path, compression='infer'): """ Load pickled pandas object (or any object) from file. .. warning:: Loading pickled data received from untrusted sources can be unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__. Parameters ---------- path : str File path where the pickled object will be loaded. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, xz or zip if path ends in '.gz', '.bz2', '.xz', or '.zip' respectively, and no decompression otherwise. Set to None for no decompression. .. versionadded:: 0.20.0 Returns ------- unpickled : same type as object stored in file See Also -------- DataFrame.to_pickle : Pickle (serialize) DataFrame object to file. Series.to_pickle : Pickle (serialize) Series object to file. read_hdf : Read HDF5 file into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. read_parquet : Load a parquet object, returning a DataFrame. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl") """ path = _stringify_path(path) f, fh = _get_handle(path, 'rb', compression=compression, is_text=False) # 1) try standard libary Pickle # 2) try pickle_compat (older pandas version) to handle subclass changes # 3) try pickle_compat with latin1 encoding try: with warnings.catch_warnings(record=True): # We want to silence any warnings about, e.g. moved modules. warnings.simplefilter("ignore", Warning) return pickle.load(f) except Exception: # noqa: E722 try: return pc.load(f, encoding=None) except Exception: # noqa: E722 return pc.load(f, encoding='latin1') finally: f.close() for _f in fh: _f.close()
def read_pickle(path, compression='infer'): """ Load pickled pandas object (or any object) from file. .. warning:: Loading pickled data received from untrusted sources can be unsafe. See `here <https://docs.python.org/3/library/pickle.html>`__. Parameters ---------- path : str File path where the pickled object will be loaded. compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, xz or zip if path ends in '.gz', '.bz2', '.xz', or '.zip' respectively, and no decompression otherwise. Set to None for no decompression. .. versionadded:: 0.20.0 Returns ------- unpickled : same type as object stored in file See Also -------- DataFrame.to_pickle : Pickle (serialize) DataFrame object to file. Series.to_pickle : Pickle (serialize) Series object to file. read_hdf : Read HDF5 file into a DataFrame. read_sql : Read SQL query or database table into a DataFrame. read_parquet : Load a parquet object, returning a DataFrame. Examples -------- >>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)}) >>> original_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> pd.to_pickle(original_df, "./dummy.pkl") >>> unpickled_df = pd.read_pickle("./dummy.pkl") >>> unpickled_df foo bar 0 0 5 1 1 6 2 2 7 3 3 8 4 4 9 >>> import os >>> os.remove("./dummy.pkl") """ path = _stringify_path(path) f, fh = _get_handle(path, 'rb', compression=compression, is_text=False) # 1) try standard libary Pickle # 2) try pickle_compat (older pandas version) to handle subclass changes # 3) try pickle_compat with latin1 encoding try: with warnings.catch_warnings(record=True): # We want to silence any warnings about, e.g. moved modules. warnings.simplefilter("ignore", Warning) return pickle.load(f) except Exception: # noqa: E722 try: return pc.load(f, encoding=None) except Exception: # noqa: E722 return pc.load(f, encoding='latin1') finally: f.close() for _f in fh: _f.close()
[ "Load", "pickled", "pandas", "object", "(", "or", "any", "object", ")", "from", "file", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pickle.py#L86-L163
[ "def", "read_pickle", "(", "path", ",", "compression", "=", "'infer'", ")", ":", "path", "=", "_stringify_path", "(", "path", ")", "f", ",", "fh", "=", "_get_handle", "(", "path", ",", "'rb'", ",", "compression", "=", "compression", ",", "is_text", "=", "False", ")", "# 1) try standard libary Pickle", "# 2) try pickle_compat (older pandas version) to handle subclass changes", "# 3) try pickle_compat with latin1 encoding", "try", ":", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", ":", "# We want to silence any warnings about, e.g. moved modules.", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "Warning", ")", "return", "pickle", ".", "load", "(", "f", ")", "except", "Exception", ":", "# noqa: E722", "try", ":", "return", "pc", ".", "load", "(", "f", ",", "encoding", "=", "None", ")", "except", "Exception", ":", "# noqa: E722", "return", "pc", ".", "load", "(", "f", ",", "encoding", "=", "'latin1'", ")", "finally", ":", "f", ".", "close", "(", ")", "for", "_f", "in", "fh", ":", "_f", ".", "close", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
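
One more sketch covering the protocol handling shown above: a negative protocol is normalized to pickle.HIGHEST_PROTOCOL before writing, and compression=None disables inference on both sides (file name invented):

import os
import pandas as pd

df = pd.DataFrame({'x': [1.0, 2.0]})

# protocol=-1 becomes HIGHEST_PROTOCOL inside to_pickle
pd.to_pickle(df, 'plain.pkl', compression=None, protocol=-1)
print(pd.read_pickle('plain.pkl', compression=None))
os.remove('plain.pkl')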
mask_missing
Return a masking array of same size/shape as arr with entries equaling any member of values_to_mask set to True
pandas/core/missing.py
def mask_missing(arr, values_to_mask): """ Return a masking array of same size/shape as arr with entries equaling any member of values_to_mask set to True """ dtype, values_to_mask = infer_dtype_from_array(values_to_mask) try: values_to_mask = np.array(values_to_mask, dtype=dtype) except Exception: values_to_mask = np.array(values_to_mask, dtype=object) na_mask = isna(values_to_mask) nonna = values_to_mask[~na_mask] mask = None for x in nonna: if mask is None: # numpy elementwise comparison warning if is_numeric_v_string_like(arr, x): mask = False else: mask = arr == x # if x is a string and arr is not, then we get False and we must # expand the mask to size arr.shape if is_scalar(mask): mask = np.zeros(arr.shape, dtype=bool) else: # numpy elementwise comparison warning if is_numeric_v_string_like(arr, x): mask |= False else: mask |= arr == x if na_mask.any(): if mask is None: mask = isna(arr) else: mask |= isna(arr) # GH 21977 if mask is None: mask = np.zeros(arr.shape, dtype=bool) return mask
def mask_missing(arr, values_to_mask): """ Return a masking array of same size/shape as arr with entries equaling any member of values_to_mask set to True """ dtype, values_to_mask = infer_dtype_from_array(values_to_mask) try: values_to_mask = np.array(values_to_mask, dtype=dtype) except Exception: values_to_mask = np.array(values_to_mask, dtype=object) na_mask = isna(values_to_mask) nonna = values_to_mask[~na_mask] mask = None for x in nonna: if mask is None: # numpy elementwise comparison warning if is_numeric_v_string_like(arr, x): mask = False else: mask = arr == x # if x is a string and arr is not, then we get False and we must # expand the mask to size arr.shape if is_scalar(mask): mask = np.zeros(arr.shape, dtype=bool) else: # numpy elementwise comparison warning if is_numeric_v_string_like(arr, x): mask |= False else: mask |= arr == x if na_mask.any(): if mask is None: mask = isna(arr) else: mask |= isna(arr) # GH 21977 if mask is None: mask = np.zeros(arr.shape, dtype=bool) return mask
[ "Return", "a", "masking", "array", "of", "same", "size", "/", "shape", "as", "arr", "with", "entries", "equaling", "any", "member", "of", "values_to_mask", "set", "to", "True" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L18-L66
[ "def", "mask_missing", "(", "arr", ",", "values_to_mask", ")", ":", "dtype", ",", "values_to_mask", "=", "infer_dtype_from_array", "(", "values_to_mask", ")", "try", ":", "values_to_mask", "=", "np", ".", "array", "(", "values_to_mask", ",", "dtype", "=", "dtype", ")", "except", "Exception", ":", "values_to_mask", "=", "np", ".", "array", "(", "values_to_mask", ",", "dtype", "=", "object", ")", "na_mask", "=", "isna", "(", "values_to_mask", ")", "nonna", "=", "values_to_mask", "[", "~", "na_mask", "]", "mask", "=", "None", "for", "x", "in", "nonna", ":", "if", "mask", "is", "None", ":", "# numpy elementwise comparison warning", "if", "is_numeric_v_string_like", "(", "arr", ",", "x", ")", ":", "mask", "=", "False", "else", ":", "mask", "=", "arr", "==", "x", "# if x is a string and arr is not, then we get False and we must", "# expand the mask to size arr.shape", "if", "is_scalar", "(", "mask", ")", ":", "mask", "=", "np", ".", "zeros", "(", "arr", ".", "shape", ",", "dtype", "=", "bool", ")", "else", ":", "# numpy elementwise comparison warning", "if", "is_numeric_v_string_like", "(", "arr", ",", "x", ")", ":", "mask", "|=", "False", "else", ":", "mask", "|=", "arr", "==", "x", "if", "na_mask", ".", "any", "(", ")", ":", "if", "mask", "is", "None", ":", "mask", "=", "isna", "(", "arr", ")", "else", ":", "mask", "|=", "isna", "(", "arr", ")", "# GH 21977", "if", "mask", "is", "None", ":", "mask", "=", "np", ".", "zeros", "(", "arr", ".", "shape", ",", "dtype", "=", "bool", ")", "return", "mask" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
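
The masking semantics above can be summarized in a self-contained sketch (a float-only simplification, not the pandas internal itself): NaN members never compare equal, so they are matched through an explicit isnan test, exactly as the original does via isna:

import numpy as np

def mask_missing_sketch(arr, values_to_mask):
    arr = np.asarray(arr, dtype=float)
    values = np.asarray(values_to_mask, dtype=float)
    na_vals = np.isnan(values)
    mask = np.zeros(arr.shape, dtype=bool)
    for x in values[~na_vals]:
        mask |= arr == x          # OR together one equality mask per value
    if na_vals.any():
        mask |= np.isnan(arr)     # NaN != NaN, so test it explicitly
    return mask

print(mask_missing_sketch([1.0, np.nan, 3.0, 4.0], [1.0, np.nan]))
# [ True  True False False]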
interpolate_1d
Logic for the 1-d interpolation. The result should be 1-d, inputs xvalues and yvalues will each be 1-d arrays of the same length. Bounds_error is currently hardcoded to False since non-scipy ones don't take it as an argument.
pandas/core/missing.py
def interpolate_1d(xvalues, yvalues, method='linear', limit=None, limit_direction='forward', limit_area=None, fill_value=None, bounds_error=False, order=None, **kwargs): """ Logic for the 1-d interpolation. The result should be 1-d, inputs xvalues and yvalues will each be 1-d arrays of the same length. Bounds_error is currently hardcoded to False since non-scipy ones don't take it as an argument. """ # Treat the original, non-scipy methods first. invalid = isna(yvalues) valid = ~invalid if not valid.any(): # have to call np.asarray(xvalues) since xvalues could be an Index # which can't be mutated result = np.empty_like(np.asarray(xvalues), dtype=np.float64) result.fill(np.nan) return result if valid.all(): return yvalues if method == 'time': if not getattr(xvalues, 'is_all_dates', None): # if not issubclass(xvalues.dtype.type, np.datetime64): raise ValueError('time-weighted interpolation only works ' 'on Series or DataFrames with a ' 'DatetimeIndex') method = 'values' valid_limit_directions = ['forward', 'backward', 'both'] limit_direction = limit_direction.lower() if limit_direction not in valid_limit_directions: msg = ('Invalid limit_direction: expecting one of {valid!r}, ' 'got {invalid!r}.') raise ValueError(msg.format(valid=valid_limit_directions, invalid=limit_direction)) if limit_area is not None: valid_limit_areas = ['inside', 'outside'] limit_area = limit_area.lower() if limit_area not in valid_limit_areas: raise ValueError('Invalid limit_area: expecting one of {}, got ' '{}.'.format(valid_limit_areas, limit_area)) # default limit is unlimited GH #16282 if limit is None: # limit = len(xvalues) pass elif not is_integer(limit): raise ValueError('Limit must be an integer') elif limit < 1: raise ValueError('Limit must be greater than 0') from pandas import Series ys = Series(yvalues) # These are sets of index pointers to invalid values... i.e. {0, 1, etc... all_nans = set(np.flatnonzero(invalid)) start_nans = set(range(ys.first_valid_index())) end_nans = set(range(1 + ys.last_valid_index(), len(valid))) mid_nans = all_nans - start_nans - end_nans # Like the sets above, preserve_nans contains indices of invalid values, # but in this case, it is the final set of indices that need to be # preserved as NaN after the interpolation. # For example if limit_direction='forward' then preserve_nans will # contain indices of NaNs at the beginning of the series, and NaNs that # are more than'limit' away from the prior non-NaN. # set preserve_nans based on direction using _interp_limit if limit_direction == 'forward': preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0)) elif limit_direction == 'backward': preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit)) else: # both directions... just use _interp_limit preserve_nans = set(_interp_limit(invalid, limit, limit)) # if limit_area is set, add either mid or outside indices # to preserve_nans GH #16284 if limit_area == 'inside': # preserve NaNs on the outside preserve_nans |= start_nans | end_nans elif limit_area == 'outside': # preserve NaNs on the inside preserve_nans |= mid_nans # sort preserve_nans and covert to list preserve_nans = sorted(preserve_nans) xvalues = getattr(xvalues, 'values', xvalues) yvalues = getattr(yvalues, 'values', yvalues) result = yvalues.copy() if method in ['linear', 'time', 'index', 'values']: if method in ('values', 'index'): inds = np.asarray(xvalues) # hack for DatetimeIndex, #1646 if needs_i8_conversion(inds.dtype.type): inds = inds.view(np.int64) if inds.dtype == np.object_: inds = lib.maybe_convert_objects(inds) else: inds = xvalues result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid]) result[preserve_nans] = np.nan return result sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', 'spline', 'polynomial', 'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima'] if method in sp_methods: inds = np.asarray(xvalues) # hack for DatetimeIndex, #1646 if issubclass(inds.dtype.type, np.datetime64): inds = inds.view(np.int64) result[invalid] = _interpolate_scipy_wrapper(inds[valid], yvalues[valid], inds[invalid], method=method, fill_value=fill_value, bounds_error=bounds_error, order=order, **kwargs) result[preserve_nans] = np.nan return result
def interpolate_1d(xvalues, yvalues, method='linear', limit=None, limit_direction='forward', limit_area=None, fill_value=None, bounds_error=False, order=None, **kwargs): """ Logic for the 1-d interpolation. The result should be 1-d, inputs xvalues and yvalues will each be 1-d arrays of the same length. Bounds_error is currently hardcoded to False since non-scipy ones don't take it as an argument. """ # Treat the original, non-scipy methods first. invalid = isna(yvalues) valid = ~invalid if not valid.any(): # have to call np.asarray(xvalues) since xvalues could be an Index # which can't be mutated result = np.empty_like(np.asarray(xvalues), dtype=np.float64) result.fill(np.nan) return result if valid.all(): return yvalues if method == 'time': if not getattr(xvalues, 'is_all_dates', None): # if not issubclass(xvalues.dtype.type, np.datetime64): raise ValueError('time-weighted interpolation only works ' 'on Series or DataFrames with a ' 'DatetimeIndex') method = 'values' valid_limit_directions = ['forward', 'backward', 'both'] limit_direction = limit_direction.lower() if limit_direction not in valid_limit_directions: msg = ('Invalid limit_direction: expecting one of {valid!r}, ' 'got {invalid!r}.') raise ValueError(msg.format(valid=valid_limit_directions, invalid=limit_direction)) if limit_area is not None: valid_limit_areas = ['inside', 'outside'] limit_area = limit_area.lower() if limit_area not in valid_limit_areas: raise ValueError('Invalid limit_area: expecting one of {}, got ' '{}.'.format(valid_limit_areas, limit_area)) # default limit is unlimited GH #16282 if limit is None: # limit = len(xvalues) pass elif not is_integer(limit): raise ValueError('Limit must be an integer') elif limit < 1: raise ValueError('Limit must be greater than 0') from pandas import Series ys = Series(yvalues) # These are sets of index pointers to invalid values... i.e. {0, 1, etc... all_nans = set(np.flatnonzero(invalid)) start_nans = set(range(ys.first_valid_index())) end_nans = set(range(1 + ys.last_valid_index(), len(valid))) mid_nans = all_nans - start_nans - end_nans # Like the sets above, preserve_nans contains indices of invalid values, # but in this case, it is the final set of indices that need to be # preserved as NaN after the interpolation. # For example if limit_direction='forward' then preserve_nans will # contain indices of NaNs at the beginning of the series, and NaNs that # are more than'limit' away from the prior non-NaN. # set preserve_nans based on direction using _interp_limit if limit_direction == 'forward': preserve_nans = start_nans | set(_interp_limit(invalid, limit, 0)) elif limit_direction == 'backward': preserve_nans = end_nans | set(_interp_limit(invalid, 0, limit)) else: # both directions... just use _interp_limit preserve_nans = set(_interp_limit(invalid, limit, limit)) # if limit_area is set, add either mid or outside indices # to preserve_nans GH #16284 if limit_area == 'inside': # preserve NaNs on the outside preserve_nans |= start_nans | end_nans elif limit_area == 'outside': # preserve NaNs on the inside preserve_nans |= mid_nans # sort preserve_nans and covert to list preserve_nans = sorted(preserve_nans) xvalues = getattr(xvalues, 'values', xvalues) yvalues = getattr(yvalues, 'values', yvalues) result = yvalues.copy() if method in ['linear', 'time', 'index', 'values']: if method in ('values', 'index'): inds = np.asarray(xvalues) # hack for DatetimeIndex, #1646 if needs_i8_conversion(inds.dtype.type): inds = inds.view(np.int64) if inds.dtype == np.object_: inds = lib.maybe_convert_objects(inds) else: inds = xvalues result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid]) result[preserve_nans] = np.nan return result sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', 'spline', 'polynomial', 'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima'] if method in sp_methods: inds = np.asarray(xvalues) # hack for DatetimeIndex, #1646 if issubclass(inds.dtype.type, np.datetime64): inds = inds.view(np.int64) result[invalid] = _interpolate_scipy_wrapper(inds[valid], yvalues[valid], inds[invalid], method=method, fill_value=fill_value, bounds_error=bounds_error, order=order, **kwargs) result[preserve_nans] = np.nan return result
[ "Logic", "for", "the", "1", "-", "d", "interpolation", ".", "The", "result", "should", "be", "1", "-", "d", "inputs", "xvalues", "and", "yvalues", "will", "each", "be", "1", "-", "d", "arrays", "of", "the", "same", "length", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L109-L239
[ "def", "interpolate_1d", "(", "xvalues", ",", "yvalues", ",", "method", "=", "'linear'", ",", "limit", "=", "None", ",", "limit_direction", "=", "'forward'", ",", "limit_area", "=", "None", ",", "fill_value", "=", "None", ",", "bounds_error", "=", "False", ",", "order", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Treat the original, non-scipy methods first.", "invalid", "=", "isna", "(", "yvalues", ")", "valid", "=", "~", "invalid", "if", "not", "valid", ".", "any", "(", ")", ":", "# have to call np.asarray(xvalues) since xvalues could be an Index", "# which can't be mutated", "result", "=", "np", ".", "empty_like", "(", "np", ".", "asarray", "(", "xvalues", ")", ",", "dtype", "=", "np", ".", "float64", ")", "result", ".", "fill", "(", "np", ".", "nan", ")", "return", "result", "if", "valid", ".", "all", "(", ")", ":", "return", "yvalues", "if", "method", "==", "'time'", ":", "if", "not", "getattr", "(", "xvalues", ",", "'is_all_dates'", ",", "None", ")", ":", "# if not issubclass(xvalues.dtype.type, np.datetime64):", "raise", "ValueError", "(", "'time-weighted interpolation only works '", "'on Series or DataFrames with a '", "'DatetimeIndex'", ")", "method", "=", "'values'", "valid_limit_directions", "=", "[", "'forward'", ",", "'backward'", ",", "'both'", "]", "limit_direction", "=", "limit_direction", ".", "lower", "(", ")", "if", "limit_direction", "not", "in", "valid_limit_directions", ":", "msg", "=", "(", "'Invalid limit_direction: expecting one of {valid!r}, '", "'got {invalid!r}.'", ")", "raise", "ValueError", "(", "msg", ".", "format", "(", "valid", "=", "valid_limit_directions", ",", "invalid", "=", "limit_direction", ")", ")", "if", "limit_area", "is", "not", "None", ":", "valid_limit_areas", "=", "[", "'inside'", ",", "'outside'", "]", "limit_area", "=", "limit_area", ".", "lower", "(", ")", "if", "limit_area", "not", "in", "valid_limit_areas", ":", "raise", "ValueError", "(", "'Invalid limit_area: expecting one of {}, got '", "'{}.'", ".", "format", "(", "valid_limit_areas", ",", "limit_area", ")", ")", "# default limit is unlimited GH #16282", "if", "limit", "is", "None", ":", "# limit = len(xvalues)", "pass", "elif", "not", "is_integer", "(", "limit", ")", ":", "raise", "ValueError", "(", "'Limit must be an integer'", ")", "elif", "limit", "<", "1", ":", "raise", "ValueError", "(", "'Limit must be greater than 0'", ")", "from", "pandas", "import", "Series", "ys", "=", "Series", "(", "yvalues", ")", "# These are sets of index pointers to invalid values... i.e. 
{0, 1, etc...", "all_nans", "=", "set", "(", "np", ".", "flatnonzero", "(", "invalid", ")", ")", "start_nans", "=", "set", "(", "range", "(", "ys", ".", "first_valid_index", "(", ")", ")", ")", "end_nans", "=", "set", "(", "range", "(", "1", "+", "ys", ".", "last_valid_index", "(", ")", ",", "len", "(", "valid", ")", ")", ")", "mid_nans", "=", "all_nans", "-", "start_nans", "-", "end_nans", "# Like the sets above, preserve_nans contains indices of invalid values,", "# but in this case, it is the final set of indices that need to be", "# preserved as NaN after the interpolation.", "# For example if limit_direction='forward' then preserve_nans will", "# contain indices of NaNs at the beginning of the series, and NaNs that", "# are more than'limit' away from the prior non-NaN.", "# set preserve_nans based on direction using _interp_limit", "if", "limit_direction", "==", "'forward'", ":", "preserve_nans", "=", "start_nans", "|", "set", "(", "_interp_limit", "(", "invalid", ",", "limit", ",", "0", ")", ")", "elif", "limit_direction", "==", "'backward'", ":", "preserve_nans", "=", "end_nans", "|", "set", "(", "_interp_limit", "(", "invalid", ",", "0", ",", "limit", ")", ")", "else", ":", "# both directions... just use _interp_limit", "preserve_nans", "=", "set", "(", "_interp_limit", "(", "invalid", ",", "limit", ",", "limit", ")", ")", "# if limit_area is set, add either mid or outside indices", "# to preserve_nans GH #16284", "if", "limit_area", "==", "'inside'", ":", "# preserve NaNs on the outside", "preserve_nans", "|=", "start_nans", "|", "end_nans", "elif", "limit_area", "==", "'outside'", ":", "# preserve NaNs on the inside", "preserve_nans", "|=", "mid_nans", "# sort preserve_nans and covert to list", "preserve_nans", "=", "sorted", "(", "preserve_nans", ")", "xvalues", "=", "getattr", "(", "xvalues", ",", "'values'", ",", "xvalues", ")", "yvalues", "=", "getattr", "(", "yvalues", ",", "'values'", ",", "yvalues", ")", "result", "=", "yvalues", ".", "copy", "(", ")", "if", "method", "in", "[", "'linear'", ",", "'time'", ",", "'index'", ",", "'values'", "]", ":", "if", "method", "in", "(", "'values'", ",", "'index'", ")", ":", "inds", "=", "np", ".", "asarray", "(", "xvalues", ")", "# hack for DatetimeIndex, #1646", "if", "needs_i8_conversion", "(", "inds", ".", "dtype", ".", "type", ")", ":", "inds", "=", "inds", ".", "view", "(", "np", ".", "int64", ")", "if", "inds", ".", "dtype", "==", "np", ".", "object_", ":", "inds", "=", "lib", ".", "maybe_convert_objects", "(", "inds", ")", "else", ":", "inds", "=", "xvalues", "result", "[", "invalid", "]", "=", "np", ".", "interp", "(", "inds", "[", "invalid", "]", ",", "inds", "[", "valid", "]", ",", "yvalues", "[", "valid", "]", ")", "result", "[", "preserve_nans", "]", "=", "np", ".", "nan", "return", "result", "sp_methods", "=", "[", "'nearest'", ",", "'zero'", ",", "'slinear'", ",", "'quadratic'", ",", "'cubic'", ",", "'barycentric'", ",", "'krogh'", ",", "'spline'", ",", "'polynomial'", ",", "'from_derivatives'", ",", "'piecewise_polynomial'", ",", "'pchip'", ",", "'akima'", "]", "if", "method", "in", "sp_methods", ":", "inds", "=", "np", ".", "asarray", "(", "xvalues", ")", "# hack for DatetimeIndex, #1646", "if", "issubclass", "(", "inds", ".", "dtype", ".", "type", ",", "np", ".", "datetime64", ")", ":", "inds", "=", "inds", ".", "view", "(", "np", ".", "int64", ")", "result", "[", "invalid", "]", "=", "_interpolate_scipy_wrapper", "(", "inds", "[", "valid", "]", ",", "yvalues", "[", "valid", "]", ",", "inds", "[", "invalid", "]", ",", 
"method", "=", "method", ",", "fill_value", "=", "fill_value", ",", "bounds_error", "=", "bounds_error", ",", "order", "=", "order", ",", "*", "*", "kwargs", ")", "result", "[", "preserve_nans", "]", "=", "np", ".", "nan", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
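
The limit bookkeeping above is easiest to see from the public API; a small sketch (the NaN layout is invented for the example):

import numpy as np
import pandas as pd

s = pd.Series([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan])

# forward with limit=1: leading NaNs stay, and only one NaN past each
# valid value is filled
print(s.interpolate(method='linear', limit=1, limit_direction='forward'))

# limit_area='inside' preserves the leading and trailing NaNs outright
print(s.interpolate(method='linear', limit_area='inside'))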
_interpolate_scipy_wrapper
Passed off to scipy.interpolate.interp1d. method is scipy's kind. Returns an array interpolated at new_x. Add any new methods to the list in _clean_interp_method.
pandas/core/missing.py
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs): """ Passed off to scipy.interpolate.interp1d. method is scipy's kind. Returns an array interpolated at new_x. Add any new methods to the list in _clean_interp_method. """ try: from scipy import interpolate # TODO: Why is DatetimeIndex being imported here? from pandas import DatetimeIndex # noqa except ImportError: raise ImportError('{method} interpolation requires SciPy' .format(method=method)) new_x = np.asarray(new_x) # ignores some kwargs that could be passed along. alt_methods = { 'barycentric': interpolate.barycentric_interpolate, 'krogh': interpolate.krogh_interpolate, 'from_derivatives': _from_derivatives, 'piecewise_polynomial': _from_derivatives, } if getattr(x, 'is_all_dates', False): # GH 5975, scipy.interp1d can't hande datetime64s x, new_x = x._values.astype('i8'), new_x.astype('i8') if method == 'pchip': try: alt_methods['pchip'] = interpolate.pchip_interpolate except AttributeError: raise ImportError("Your version of Scipy does not support " "PCHIP interpolation.") elif method == 'akima': try: from scipy.interpolate import Akima1DInterpolator # noqa alt_methods['akima'] = _akima_interpolate except ImportError: raise ImportError("Your version of Scipy does not support " "Akima interpolation.") interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial'] if method in interp1d_methods: if method == 'polynomial': method = order terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': # GH #10633, #24014 if isna(order) or (order <= 0): raise ValueError("order needs to be specified and greater than 0; " "got order: {}".format(order)) terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: # GH 7295: need to be able to write for some reason # in some circumstances: check all three if not x.flags.writeable: x = x.copy() if not y.flags.writeable: y = y.copy() if not new_x.flags.writeable: new_x = new_x.copy() method = alt_methods[method] new_y = method(x, y, new_x, **kwargs) return new_y
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None, bounds_error=False, order=None, **kwargs): """ Passed off to scipy.interpolate.interp1d. method is scipy's kind. Returns an array interpolated at new_x. Add any new methods to the list in _clean_interp_method. """ try: from scipy import interpolate # TODO: Why is DatetimeIndex being imported here? from pandas import DatetimeIndex # noqa except ImportError: raise ImportError('{method} interpolation requires SciPy' .format(method=method)) new_x = np.asarray(new_x) # ignores some kwargs that could be passed along. alt_methods = { 'barycentric': interpolate.barycentric_interpolate, 'krogh': interpolate.krogh_interpolate, 'from_derivatives': _from_derivatives, 'piecewise_polynomial': _from_derivatives, } if getattr(x, 'is_all_dates', False): # GH 5975, scipy.interp1d can't hande datetime64s x, new_x = x._values.astype('i8'), new_x.astype('i8') if method == 'pchip': try: alt_methods['pchip'] = interpolate.pchip_interpolate except AttributeError: raise ImportError("Your version of Scipy does not support " "PCHIP interpolation.") elif method == 'akima': try: from scipy.interpolate import Akima1DInterpolator # noqa alt_methods['akima'] = _akima_interpolate except ImportError: raise ImportError("Your version of Scipy does not support " "Akima interpolation.") interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial'] if method in interp1d_methods: if method == 'polynomial': method = order terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': # GH #10633, #24014 if isna(order) or (order <= 0): raise ValueError("order needs to be specified and greater than 0; " "got order: {}".format(order)) terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: # GH 7295: need to be able to write for some reason # in some circumstances: check all three if not x.flags.writeable: x = x.copy() if not y.flags.writeable: y = y.copy() if not new_x.flags.writeable: new_x = new_x.copy() method = alt_methods[method] new_y = method(x, y, new_x, **kwargs) return new_y
[ "Passed", "off", "to", "scipy", ".", "interpolate", ".", "interp1d", ".", "method", "is", "scipy", "s", "kind", ".", "Returns", "an", "array", "interpolated", "at", "new_x", ".", "Add", "any", "new", "methods", "to", "the", "list", "in", "_clean_interp_method", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L242-L311
[ "def", "_interpolate_scipy_wrapper", "(", "x", ",", "y", ",", "new_x", ",", "method", ",", "fill_value", "=", "None", ",", "bounds_error", "=", "False", ",", "order", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "from", "scipy", "import", "interpolate", "# TODO: Why is DatetimeIndex being imported here?", "from", "pandas", "import", "DatetimeIndex", "# noqa", "except", "ImportError", ":", "raise", "ImportError", "(", "'{method} interpolation requires SciPy'", ".", "format", "(", "method", "=", "method", ")", ")", "new_x", "=", "np", ".", "asarray", "(", "new_x", ")", "# ignores some kwargs that could be passed along.", "alt_methods", "=", "{", "'barycentric'", ":", "interpolate", ".", "barycentric_interpolate", ",", "'krogh'", ":", "interpolate", ".", "krogh_interpolate", ",", "'from_derivatives'", ":", "_from_derivatives", ",", "'piecewise_polynomial'", ":", "_from_derivatives", ",", "}", "if", "getattr", "(", "x", ",", "'is_all_dates'", ",", "False", ")", ":", "# GH 5975, scipy.interp1d can't hande datetime64s", "x", ",", "new_x", "=", "x", ".", "_values", ".", "astype", "(", "'i8'", ")", ",", "new_x", ".", "astype", "(", "'i8'", ")", "if", "method", "==", "'pchip'", ":", "try", ":", "alt_methods", "[", "'pchip'", "]", "=", "interpolate", ".", "pchip_interpolate", "except", "AttributeError", ":", "raise", "ImportError", "(", "\"Your version of Scipy does not support \"", "\"PCHIP interpolation.\"", ")", "elif", "method", "==", "'akima'", ":", "try", ":", "from", "scipy", ".", "interpolate", "import", "Akima1DInterpolator", "# noqa", "alt_methods", "[", "'akima'", "]", "=", "_akima_interpolate", "except", "ImportError", ":", "raise", "ImportError", "(", "\"Your version of Scipy does not support \"", "\"Akima interpolation.\"", ")", "interp1d_methods", "=", "[", "'nearest'", ",", "'zero'", ",", "'slinear'", ",", "'quadratic'", ",", "'cubic'", ",", "'polynomial'", "]", "if", "method", "in", "interp1d_methods", ":", "if", "method", "==", "'polynomial'", ":", "method", "=", "order", "terp", "=", "interpolate", ".", "interp1d", "(", "x", ",", "y", ",", "kind", "=", "method", ",", "fill_value", "=", "fill_value", ",", "bounds_error", "=", "bounds_error", ")", "new_y", "=", "terp", "(", "new_x", ")", "elif", "method", "==", "'spline'", ":", "# GH #10633, #24014", "if", "isna", "(", "order", ")", "or", "(", "order", "<=", "0", ")", ":", "raise", "ValueError", "(", "\"order needs to be specified and greater than 0; \"", "\"got order: {}\"", ".", "format", "(", "order", ")", ")", "terp", "=", "interpolate", ".", "UnivariateSpline", "(", "x", ",", "y", ",", "k", "=", "order", ",", "*", "*", "kwargs", ")", "new_y", "=", "terp", "(", "new_x", ")", "else", ":", "# GH 7295: need to be able to write for some reason", "# in some circumstances: check all three", "if", "not", "x", ".", "flags", ".", "writeable", ":", "x", "=", "x", ".", "copy", "(", ")", "if", "not", "y", ".", "flags", ".", "writeable", ":", "y", "=", "y", ".", "copy", "(", ")", "if", "not", "new_x", ".", "flags", ".", "writeable", ":", "new_x", "=", "new_x", ".", "copy", "(", ")", "method", "=", "alt_methods", "[", "method", "]", "new_y", "=", "method", "(", "x", ",", "y", ",", "new_x", ",", "*", "*", "kwargs", ")", "return", "new_y" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
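For orientation, here is a minimal sketch of reaching this wrapper through the public API (that `Series.interpolate` routes these methods here is an inference from the method lists above, not shown in this record; requires SciPy):

```python
import numpy as np
import pandas as pd

s = pd.Series([0.0, 1.0, np.nan, 9.0, 16.0])

# 'quadratic' is one of the interp1d_methods handled by the wrapper
print(s.interpolate(method='quadratic'))

# 'polynomial' needs an explicit order, which is forwarded as interp1d's kind
print(s.interpolate(method='polynomial', order=2))
```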
_from_derivatives
Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] order : None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Default: True. See Also -------- scipy.interpolate.BPoly.from_derivatives Returns ------- y : scalar or array_like The result, of length R or length M or M by R.
pandas/core/missing.py
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): """ Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] order: None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This numberincludes the function value as 0th derivative. extrapolate : bool, optional Whether to extrapolate to ouf-of-bounds points based on first and last intervals, or to return NaNs. Default: True. See Also -------- scipy.interpolate.BPoly.from_derivatives Returns ------- y : scalar or array_like The result, of length R or length M or M by R. """ from scipy import interpolate # return the method for compat with scipy version & backwards compat method = interpolate.BPoly.from_derivatives m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate) return m(x)
def _from_derivatives(xi, yi, x, order=None, der=0, extrapolate=False): """ Convenience function for interpolate.BPoly.from_derivatives. Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array-likes yi[i][j] is the j-th derivative known at xi[i] order: None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. der : int or list How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This numberincludes the function value as 0th derivative. extrapolate : bool, optional Whether to extrapolate to ouf-of-bounds points based on first and last intervals, or to return NaNs. Default: True. See Also -------- scipy.interpolate.BPoly.from_derivatives Returns ------- y : scalar or array_like The result, of length R or length M or M by R. """ from scipy import interpolate # return the method for compat with scipy version & backwards compat method = interpolate.BPoly.from_derivatives m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate) return m(x)
[ "Convenience", "function", "for", "interpolate", ".", "BPoly", ".", "from_derivatives", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L314-L355
[ "def", "_from_derivatives", "(", "xi", ",", "yi", ",", "x", ",", "order", "=", "None", ",", "der", "=", "0", ",", "extrapolate", "=", "False", ")", ":", "from", "scipy", "import", "interpolate", "# return the method for compat with scipy version & backwards compat", "method", "=", "interpolate", ".", "BPoly", ".", "from_derivatives", "m", "=", "method", "(", "xi", ",", "yi", ".", "reshape", "(", "-", "1", ",", "1", ")", ",", "orders", "=", "order", ",", "extrapolate", "=", "extrapolate", ")", "return", "m", "(", "x", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
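A hedged illustration of the underlying SciPy call in isolation; the `reshape(-1, 1)` mirrors the wrapper above, where each row carries only the function value (0th derivative):

```python
import numpy as np
from scipy import interpolate

xi = np.array([0.0, 1.0, 2.0])    # breakpoints
yi = np.array([0.0, 1.0, 4.0])    # known values at the breakpoints

# each row of the second argument lists the derivatives known at xi[i];
# a single column means only the 0th derivative (the value) is given
bp = interpolate.BPoly.from_derivatives(xi, yi.reshape(-1, 1),
                                        extrapolate=False)
print(bp(np.array([0.5, 1.5])))
```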
_akima_interpolate
Convenience function for Akima interpolation. xi and yi are arrays of values used to approximate some function f, with ``yi = f(xi)``. See `Akima1DInterpolator` for details. Parameters ---------- xi : array_like A sorted list of x-coordinates, of length N. yi : array_like A 1-D array of real values. `yi`'s length along the interpolation axis must be equal to the length of `xi`. If N-D array, use axis parameter to select correct axis. x : scalar or array_like Of length M. der : int or list, optional How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. axis : int, optional Axis in the yi array corresponding to the x-coordinate values. See Also -------- scipy.interpolate.Akima1DInterpolator Returns ------- y : scalar or array_like The result, of length R or length M or M by R.
pandas/core/missing.py
def _akima_interpolate(xi, yi, x, der=0, axis=0): """ Convenience function for akima interpolation. xi and yi are arrays of values used to approximate some function f, with ``yi = f(xi)``. See `Akima1DInterpolator` for details. Parameters ---------- xi : array_like A sorted list of x-coordinates, of length N. yi : array_like A 1-D array of real values. `yi`'s length along the interpolation axis must be equal to the length of `xi`. If N-D array, use axis parameter to select correct axis. x : scalar or array_like Of length M. der : int or list, optional How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. axis : int, optional Axis in the yi array corresponding to the x-coordinate values. See Also -------- scipy.interpolate.Akima1DInterpolator Returns ------- y : scalar or array_like The result, of length R or length M or M by R, """ from scipy import interpolate try: P = interpolate.Akima1DInterpolator(xi, yi, axis=axis) except TypeError: # Scipy earlier than 0.17.0 missing axis P = interpolate.Akima1DInterpolator(xi, yi) if der == 0: return P(x) elif interpolate._isscalar(der): return P(x, der=der) else: return [P(x, nu) for nu in der]
def _akima_interpolate(xi, yi, x, der=0, axis=0): """ Convenience function for akima interpolation. xi and yi are arrays of values used to approximate some function f, with ``yi = f(xi)``. See `Akima1DInterpolator` for details. Parameters ---------- xi : array_like A sorted list of x-coordinates, of length N. yi : array_like A 1-D array of real values. `yi`'s length along the interpolation axis must be equal to the length of `xi`. If N-D array, use axis parameter to select correct axis. x : scalar or array_like Of length M. der : int or list, optional How many derivatives to extract; None for all potentially nonzero derivatives (that is a number equal to the number of points), or a list of derivatives to extract. This number includes the function value as 0th derivative. axis : int, optional Axis in the yi array corresponding to the x-coordinate values. See Also -------- scipy.interpolate.Akima1DInterpolator Returns ------- y : scalar or array_like The result, of length R or length M or M by R, """ from scipy import interpolate try: P = interpolate.Akima1DInterpolator(xi, yi, axis=axis) except TypeError: # Scipy earlier than 0.17.0 missing axis P = interpolate.Akima1DInterpolator(xi, yi) if der == 0: return P(x) elif interpolate._isscalar(der): return P(x, der=der) else: return [P(x, nu) for nu in der]
[ "Convenience", "function", "for", "akima", "interpolation", ".", "xi", "and", "yi", "are", "arrays", "of", "values", "used", "to", "approximate", "some", "function", "f", "with", "yi", "=", "f", "(", "xi", ")", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L358-L405
[ "def", "_akima_interpolate", "(", "xi", ",", "yi", ",", "x", ",", "der", "=", "0", ",", "axis", "=", "0", ")", ":", "from", "scipy", "import", "interpolate", "try", ":", "P", "=", "interpolate", ".", "Akima1DInterpolator", "(", "xi", ",", "yi", ",", "axis", "=", "axis", ")", "except", "TypeError", ":", "# Scipy earlier than 0.17.0 missing axis", "P", "=", "interpolate", ".", "Akima1DInterpolator", "(", "xi", ",", "yi", ")", "if", "der", "==", "0", ":", "return", "P", "(", "x", ")", "elif", "interpolate", ".", "_isscalar", "(", "der", ")", ":", "return", "P", "(", "x", ",", "der", "=", "der", ")", "else", ":", "return", "[", "P", "(", "x", ",", "nu", ")", "for", "nu", "in", "der", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
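The wrapped SciPy interpolator on its own (a sketch; assumes SciPy >= 0.17 so the `axis` keyword exists):

```python
import numpy as np
from scipy.interpolate import Akima1DInterpolator

xi = np.arange(6, dtype=float)
yi = xi ** 3

P = Akima1DInterpolator(xi, yi, axis=0)
print(P(2.5))          # interpolated value (the der == 0 branch)
print(P(2.5, nu=1))    # first derivative of the fitted piecewise polynomial
```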
interpolate_2d
Perform an actual interpolation of values; values will be made 2-d if needed. Fills inplace and returns the result.
pandas/core/missing.py
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None): """ Perform an actual interpolation of values, values will be make 2-d if needed fills inplace, returns the result. """ transf = (lambda x: x) if axis == 0 else (lambda x: x.T) # reshape a 1 dim if needed ndim = values.ndim if values.ndim == 1: if axis != 0: # pragma: no cover raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0") values = values.reshape(tuple((1,) + values.shape)) if fill_value is None: mask = None else: # todo create faster fill func without masking mask = mask_missing(transf(values), fill_value) method = clean_fill_method(method) if method == 'pad': values = transf(pad_2d( transf(values), limit=limit, mask=mask, dtype=dtype)) else: values = transf(backfill_2d( transf(values), limit=limit, mask=mask, dtype=dtype)) # reshape back if ndim == 1: values = values[0] return values
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None): """ Perform an actual interpolation of values, values will be make 2-d if needed fills inplace, returns the result. """ transf = (lambda x: x) if axis == 0 else (lambda x: x.T) # reshape a 1 dim if needed ndim = values.ndim if values.ndim == 1: if axis != 0: # pragma: no cover raise AssertionError("cannot interpolate on a ndim == 1 with " "axis != 0") values = values.reshape(tuple((1,) + values.shape)) if fill_value is None: mask = None else: # todo create faster fill func without masking mask = mask_missing(transf(values), fill_value) method = clean_fill_method(method) if method == 'pad': values = transf(pad_2d( transf(values), limit=limit, mask=mask, dtype=dtype)) else: values = transf(backfill_2d( transf(values), limit=limit, mask=mask, dtype=dtype)) # reshape back if ndim == 1: values = values[0] return values
[ "Perform", "an", "actual", "interpolation", "of", "values", "values", "will", "be", "made", "2", "-", "d", "if", "needed", "fills", "inplace", "returns", "the", "result", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L408-L442
[ "def", "interpolate_2d", "(", "values", ",", "method", "=", "'pad'", ",", "axis", "=", "0", ",", "limit", "=", "None", ",", "fill_value", "=", "None", ",", "dtype", "=", "None", ")", ":", "transf", "=", "(", "lambda", "x", ":", "x", ")", "if", "axis", "==", "0", "else", "(", "lambda", "x", ":", "x", ".", "T", ")", "# reshape a 1 dim if needed", "ndim", "=", "values", ".", "ndim", "if", "values", ".", "ndim", "==", "1", ":", "if", "axis", "!=", "0", ":", "# pragma: no cover", "raise", "AssertionError", "(", "\"cannot interpolate on a ndim == 1 with \"", "\"axis != 0\"", ")", "values", "=", "values", ".", "reshape", "(", "tuple", "(", "(", "1", ",", ")", "+", "values", ".", "shape", ")", ")", "if", "fill_value", "is", "None", ":", "mask", "=", "None", "else", ":", "# todo create faster fill func without masking", "mask", "=", "mask_missing", "(", "transf", "(", "values", ")", ",", "fill_value", ")", "method", "=", "clean_fill_method", "(", "method", ")", "if", "method", "==", "'pad'", ":", "values", "=", "transf", "(", "pad_2d", "(", "transf", "(", "values", ")", ",", "limit", "=", "limit", ",", "mask", "=", "mask", ",", "dtype", "=", "dtype", ")", ")", "else", ":", "values", "=", "transf", "(", "backfill_2d", "(", "transf", "(", "values", ")", ",", "limit", "=", "limit", ",", "mask", "=", "mask", ",", "dtype", "=", "dtype", ")", ")", "# reshape back", "if", "ndim", "==", "1", ":", "values", "=", "values", "[", "0", "]", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
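A sketch of the public behavior this helper backs — 2-d pad/backfill with a limit; that `fillna` routes through `interpolate_2d` is an inference from the call sites, not shown in this record (on the pandas version documented here; newer versions spell this `df.ffill(limit=1)`):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, np.nan, 4.0],
                   'b': [np.nan, 2.0, np.nan, np.nan]})

# forward-fill down each column, filling at most one NaN per gap
print(df.fillna(method='pad', limit=1))
```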
_cast_values_for_fillna
Cast values to a dtype that algos.pad and algos.backfill can handle.
pandas/core/missing.py
def _cast_values_for_fillna(values, dtype): """ Cast values to a dtype that algos.pad and algos.backfill can handle. """ # TODO: for int-dtypes we make a copy, but for everything else this # alters the values in-place. Is this intentional? if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or is_timedelta64_dtype(dtype)): values = values.view(np.int64) elif is_integer_dtype(values): # NB: this check needs to come after the datetime64 check above values = ensure_float64(values) return values
def _cast_values_for_fillna(values, dtype): """ Cast values to a dtype that algos.pad and algos.backfill can handle. """ # TODO: for int-dtypes we make a copy, but for everything else this # alters the values in-place. Is this intentional? if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or is_timedelta64_dtype(dtype)): values = values.view(np.int64) elif is_integer_dtype(values): # NB: this check needs to come after the datetime64 check above values = ensure_float64(values) return values
[ "Cast", "values", "to", "a", "dtype", "that", "algos", ".", "pad", "and", "algos", ".", "backfill", "can", "handle", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L445-L460
[ "def", "_cast_values_for_fillna", "(", "values", ",", "dtype", ")", ":", "# TODO: for int-dtypes we make a copy, but for everything else this", "# alters the values in-place. Is this intentional?", "if", "(", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ")", ":", "values", "=", "values", ".", "view", "(", "np", ".", "int64", ")", "elif", "is_integer_dtype", "(", "values", ")", ":", "# NB: this check needs to come after the datetime64 check above", "values", "=", "ensure_float64", "(", "values", ")", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
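A small sketch of the two casts described above (illustrative only; `ensure_float64` is pandas-internal, so a plain `astype` stands in for it):

```python
import numpy as np

# datetime-likes are viewed as int64 so the pad/backfill kernels can run;
# NaT maps to the int64 sentinel value (iNaT)
dates = np.array(['2019-01-01', 'NaT'], dtype='datetime64[ns]')
print(dates.view(np.int64))

# integer arrays are upcast to float64 (a copy), since ints cannot hold NaN
ints = np.array([1, 2, 3])
print(ints.astype(np.float64))
```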
fill_zeros
If this is a reversed op, then flip x, y. If we have an integer value (or array) in y and we have 0's, fill them with the fill value and return the result. Mask the NaNs from x.
pandas/core/missing.py
def fill_zeros(result, x, y, name, fill): """ If this is a reversed op, then flip x,y If we have an integer value (or array in y) and we have 0's, fill them with the fill, return the result. Mask the nan's from x. """ if fill is None or is_float_dtype(result): return result if name.startswith(('r', '__r')): x, y = y, x is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type')) is_scalar_type = is_scalar(y) if not is_variable_type and not is_scalar_type: return result if is_scalar_type: y = np.array(y) if is_integer_dtype(y): if (y == 0).any(): # GH 7325, mask and nans must be broadcastable (also: PR 9308) # Raveling and then reshaping makes np.putmask faster mask = ((y == 0) & ~np.isnan(result)).ravel() shape = result.shape result = result.astype('float64', copy=False).ravel() np.putmask(result, mask, fill) # if we have a fill of inf, then sign it correctly # (GH 6178 and PR 9308) if np.isinf(fill): signs = y if name.startswith(('r', '__r')) else x signs = np.sign(signs.astype('float', copy=False)) negative_inf_mask = (signs.ravel() < 0) & mask np.putmask(result, negative_inf_mask, -fill) if "floordiv" in name: # (PR 9308) nan_mask = ((y == 0) & (x == 0)).ravel() np.putmask(result, nan_mask, np.nan) result = result.reshape(shape) return result
def fill_zeros(result, x, y, name, fill): """ If this is a reversed op, then flip x,y If we have an integer value (or array in y) and we have 0's, fill them with the fill, return the result. Mask the nan's from x. """ if fill is None or is_float_dtype(result): return result if name.startswith(('r', '__r')): x, y = y, x is_variable_type = (hasattr(y, 'dtype') or hasattr(y, 'type')) is_scalar_type = is_scalar(y) if not is_variable_type and not is_scalar_type: return result if is_scalar_type: y = np.array(y) if is_integer_dtype(y): if (y == 0).any(): # GH 7325, mask and nans must be broadcastable (also: PR 9308) # Raveling and then reshaping makes np.putmask faster mask = ((y == 0) & ~np.isnan(result)).ravel() shape = result.shape result = result.astype('float64', copy=False).ravel() np.putmask(result, mask, fill) # if we have a fill of inf, then sign it correctly # (GH 6178 and PR 9308) if np.isinf(fill): signs = y if name.startswith(('r', '__r')) else x signs = np.sign(signs.astype('float', copy=False)) negative_inf_mask = (signs.ravel() < 0) & mask np.putmask(result, negative_inf_mask, -fill) if "floordiv" in name: # (PR 9308) nan_mask = ((y == 0) & (x == 0)).ravel() np.putmask(result, nan_mask, np.nan) result = result.reshape(shape) return result
[ "If", "this", "is", "a", "reversed", "op", "then", "flip", "x", "y" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L524-L576
[ "def", "fill_zeros", "(", "result", ",", "x", ",", "y", ",", "name", ",", "fill", ")", ":", "if", "fill", "is", "None", "or", "is_float_dtype", "(", "result", ")", ":", "return", "result", "if", "name", ".", "startswith", "(", "(", "'r'", ",", "'__r'", ")", ")", ":", "x", ",", "y", "=", "y", ",", "x", "is_variable_type", "=", "(", "hasattr", "(", "y", ",", "'dtype'", ")", "or", "hasattr", "(", "y", ",", "'type'", ")", ")", "is_scalar_type", "=", "is_scalar", "(", "y", ")", "if", "not", "is_variable_type", "and", "not", "is_scalar_type", ":", "return", "result", "if", "is_scalar_type", ":", "y", "=", "np", ".", "array", "(", "y", ")", "if", "is_integer_dtype", "(", "y", ")", ":", "if", "(", "y", "==", "0", ")", ".", "any", "(", ")", ":", "# GH 7325, mask and nans must be broadcastable (also: PR 9308)", "# Raveling and then reshaping makes np.putmask faster", "mask", "=", "(", "(", "y", "==", "0", ")", "&", "~", "np", ".", "isnan", "(", "result", ")", ")", ".", "ravel", "(", ")", "shape", "=", "result", ".", "shape", "result", "=", "result", ".", "astype", "(", "'float64'", ",", "copy", "=", "False", ")", ".", "ravel", "(", ")", "np", ".", "putmask", "(", "result", ",", "mask", ",", "fill", ")", "# if we have a fill of inf, then sign it correctly", "# (GH 6178 and PR 9308)", "if", "np", ".", "isinf", "(", "fill", ")", ":", "signs", "=", "y", "if", "name", ".", "startswith", "(", "(", "'r'", ",", "'__r'", ")", ")", "else", "x", "signs", "=", "np", ".", "sign", "(", "signs", ".", "astype", "(", "'float'", ",", "copy", "=", "False", ")", ")", "negative_inf_mask", "=", "(", "signs", ".", "ravel", "(", ")", "<", "0", ")", "&", "mask", "np", ".", "putmask", "(", "result", ",", "negative_inf_mask", ",", "-", "fill", ")", "if", "\"floordiv\"", "in", "name", ":", "# (PR 9308)", "nan_mask", "=", "(", "(", "y", "==", "0", ")", "&", "(", "x", "==", "0", ")", ")", ".", "ravel", "(", ")", "np", ".", "putmask", "(", "result", ",", "nan_mask", ",", "np", ".", "nan", ")", "result", "=", "result", ".", "reshape", "(", "shape", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
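A hedged re-enactment of the masking for an integer floordiv, in pure NumPy (the `np.inf` fill is an assumption about how callers invoke `fill_zeros` for floordiv; only the masking logic itself is taken from the function above):

```python
import numpy as np

x = np.array([2, 0, -2], dtype=np.int64)
y = np.zeros(3, dtype=np.int64)

with np.errstate(divide='ignore'):
    raw = x // y                          # numpy yields 0 where y == 0

mask = (y == 0) & ~np.isnan(raw)          # positions to fill
filled = raw.astype('float64')
np.putmask(filled, mask, np.inf)          # fill with +inf first
np.putmask(filled, (np.sign(x) < 0) & mask, -np.inf)   # sign by numerator
np.putmask(filled, (y == 0) & (x == 0), np.nan)        # 0 // 0 -> NaN
print(filled)                             # [ inf  nan -inf]
```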
mask_zero_div_zero
Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes of the numerator or the denominator. Parameters ---------- x : ndarray y : ndarray result : ndarray copy : bool (default False) Whether to always create a new array or try to fill in the existing array if possible. Returns ------- filled_result : ndarray Examples -------- >>> x = np.array([1, 0, -1], dtype=np.int64) >>> y = 0 # int 0; numpy behavior is different with float >>> result = x / y >>> result # raw numpy result does not fill division by zero array([0, 0, 0]) >>> mask_zero_div_zero(x, y, result) array([ inf, nan, -inf])
pandas/core/missing.py
def mask_zero_div_zero(x, y, result, copy=False): """ Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes of the numerator or the denominator. Parameters ---------- x : ndarray y : ndarray result : ndarray copy : bool (default False) Whether to always create a new array or try to fill in the existing array if possible. Returns ------- filled_result : ndarray Examples -------- >>> x = np.array([1, 0, -1], dtype=np.int64) >>> y = 0 # int 0; numpy behavior is different with float >>> result = x / y >>> result # raw numpy result does not fill division by zero array([0, 0, 0]) >>> mask_zero_div_zero(x, y, result) array([ inf, nan, -inf]) """ if is_scalar(y): y = np.array(y) zmask = y == 0 if zmask.any(): shape = result.shape nan_mask = (zmask & (x == 0)).ravel() neginf_mask = (zmask & (x < 0)).ravel() posinf_mask = (zmask & (x > 0)).ravel() if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN result = result.astype('float64', copy=copy).ravel() np.putmask(result, nan_mask, np.nan) np.putmask(result, posinf_mask, np.inf) np.putmask(result, neginf_mask, -np.inf) result = result.reshape(shape) return result
def mask_zero_div_zero(x, y, result, copy=False): """ Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes of the numerator or the denominator. Parameters ---------- x : ndarray y : ndarray result : ndarray copy : bool (default False) Whether to always create a new array or try to fill in the existing array if possible. Returns ------- filled_result : ndarray Examples -------- >>> x = np.array([1, 0, -1], dtype=np.int64) >>> y = 0 # int 0; numpy behavior is different with float >>> result = x / y >>> result # raw numpy result does not fill division by zero array([0, 0, 0]) >>> mask_zero_div_zero(x, y, result) array([ inf, nan, -inf]) """ if is_scalar(y): y = np.array(y) zmask = y == 0 if zmask.any(): shape = result.shape nan_mask = (zmask & (x == 0)).ravel() neginf_mask = (zmask & (x < 0)).ravel() posinf_mask = (zmask & (x > 0)).ravel() if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN result = result.astype('float64', copy=copy).ravel() np.putmask(result, nan_mask, np.nan) np.putmask(result, posinf_mask, np.inf) np.putmask(result, neginf_mask, -np.inf) result = result.reshape(shape) return result
[ "Set", "results", "of", "0", "/", "0", "or", "0", "//", "0", "to", "np", ".", "nan", "regardless", "of", "the", "dtypes", "of", "the", "numerator", "or", "the", "denominator", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L579-L628
[ "def", "mask_zero_div_zero", "(", "x", ",", "y", ",", "result", ",", "copy", "=", "False", ")", ":", "if", "is_scalar", "(", "y", ")", ":", "y", "=", "np", ".", "array", "(", "y", ")", "zmask", "=", "y", "==", "0", "if", "zmask", ".", "any", "(", ")", ":", "shape", "=", "result", ".", "shape", "nan_mask", "=", "(", "zmask", "&", "(", "x", "==", "0", ")", ")", ".", "ravel", "(", ")", "neginf_mask", "=", "(", "zmask", "&", "(", "x", "<", "0", ")", ")", ".", "ravel", "(", ")", "posinf_mask", "=", "(", "zmask", "&", "(", "x", ">", "0", ")", ")", ".", "ravel", "(", ")", "if", "nan_mask", ".", "any", "(", ")", "or", "neginf_mask", ".", "any", "(", ")", "or", "posinf_mask", ".", "any", "(", ")", ":", "# Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN", "result", "=", "result", ".", "astype", "(", "'float64'", ",", "copy", "=", "copy", ")", ".", "ravel", "(", ")", "np", ".", "putmask", "(", "result", ",", "nan_mask", ",", "np", ".", "nan", ")", "np", ".", "putmask", "(", "result", ",", "posinf_mask", ",", "np", ".", "inf", ")", "np", ".", "putmask", "(", "result", ",", "neginf_mask", ",", "-", "np", ".", "inf", ")", "result", "=", "result", ".", "reshape", "(", "shape", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
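The same rule observed through ordinary division on an integer Series (routing into this helper is internal; the output matches the doctest above):

```python
import pandas as pd

s = pd.Series([1, 0, -1])
print(s / 0)    # 1/0 -> inf, 0/0 -> NaN, -1/0 -> -inf
```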
dispatch_missing
Fill nulls caused by division by zero, casting to a different dtype if necessary. Parameters ---------- op : function (operator.add, operator.div, ...) left : object (Index for non-reversed ops) right : object (Index for reversed ops) result : ndarray Returns ------- result : ndarray
pandas/core/missing.py
def dispatch_missing(op, left, right, result): """ Fill nulls caused by division by zero, casting to a different dtype if necessary. Parameters ---------- op : function (operator.add, operator.div, ...) left : object (Index for non-reversed ops) right : object (Index for reversed ops) result : ndarray Returns ------- result : ndarray """ opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__') if op in [operator.truediv, operator.floordiv, getattr(operator, 'div', None)]: result = mask_zero_div_zero(left, right, result) elif op is operator.mod: result = fill_zeros(result, left, right, opstr, np.nan) elif op is divmod: res0 = mask_zero_div_zero(left, right, result[0]) res1 = fill_zeros(result[1], left, right, opstr, np.nan) result = (res0, res1) return result
def dispatch_missing(op, left, right, result): """ Fill nulls caused by division by zero, casting to a different dtype if necessary. Parameters ---------- op : function (operator.add, operator.div, ...) left : object (Index for non-reversed ops) right : object (Index for reversed ops) result : ndarray Returns ------- result : ndarray """ opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__') if op in [operator.truediv, operator.floordiv, getattr(operator, 'div', None)]: result = mask_zero_div_zero(left, right, result) elif op is operator.mod: result = fill_zeros(result, left, right, opstr, np.nan) elif op is divmod: res0 = mask_zero_div_zero(left, right, result[0]) res1 = fill_zeros(result[1], left, right, opstr, np.nan) result = (res0, res1) return result
[ "Fill", "nulls", "caused", "by", "division", "by", "zero", "casting", "to", "a", "different", "dtype", "if", "necessary", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L631-L657
[ "def", "dispatch_missing", "(", "op", ",", "left", ",", "right", ",", "result", ")", ":", "opstr", "=", "'__{opname}__'", ".", "format", "(", "opname", "=", "op", ".", "__name__", ")", ".", "replace", "(", "'____'", ",", "'__'", ")", "if", "op", "in", "[", "operator", ".", "truediv", ",", "operator", ".", "floordiv", ",", "getattr", "(", "operator", ",", "'div'", ",", "None", ")", "]", ":", "result", "=", "mask_zero_div_zero", "(", "left", ",", "right", ",", "result", ")", "elif", "op", "is", "operator", ".", "mod", ":", "result", "=", "fill_zeros", "(", "result", ",", "left", ",", "right", ",", "opstr", ",", "np", ".", "nan", ")", "elif", "op", "is", "divmod", ":", "res0", "=", "mask_zero_div_zero", "(", "left", ",", "right", ",", "result", "[", "0", "]", ")", "res1", "=", "fill_zeros", "(", "result", "[", "1", "]", ",", "left", ",", "right", ",", "opstr", ",", "np", ".", "nan", ")", "result", "=", "(", "res0", ",", "res1", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
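A hedged direct call at this commit (private API under `pandas.core.missing`; likely to move or disappear in later versions), exercising the divmod branch that combines both fills:

```python
import numpy as np
from pandas.core.missing import dispatch_missing

left = np.array([1, 0, -1], dtype=np.int64)
right = np.zeros(3, dtype=np.int64)

with np.errstate(divide='ignore', invalid='ignore'):
    raw = divmod(left, right)

quot, rem = dispatch_missing(divmod, left, right, raw)
print(quot)   # quotient: signed inf where nonzero / 0, NaN for 0 / 0
print(rem)    # remainder: NaN wherever the divisor is 0
```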
_interp_limit
Get indexers of values that won't be filled because they exceed the limits. Parameters ---------- invalid : boolean ndarray fw_limit : int or None forward limit to index bw_limit : int or None backward limit to index Returns ------- set of indexers Notes ----- This is equivalent to the more readable, but slower .. code-block:: python def _interp_limit(invalid, fw_limit, bw_limit): for x in np.where(invalid)[0]: if invalid[max(0, x - fw_limit):x + bw_limit + 1].all(): yield x
pandas/core/missing.py
def _interp_limit(invalid, fw_limit, bw_limit): """ Get indexers of values that won't be filled because they exceed the limits. Parameters ---------- invalid : boolean ndarray fw_limit : int or None forward limit to index bw_limit : int or None backward limit to index Returns ------- set of indexers Notes ----- This is equivalent to the more readable, but slower .. code-block:: python def _interp_limit(invalid, fw_limit, bw_limit): for x in np.where(invalid)[0]: if invalid[max(0, x - fw_limit):x + bw_limit + 1].all(): yield x """ # handle forward first; the backward direction is the same except # 1. operate on the reversed array # 2. subtract the returned indices from N - 1 N = len(invalid) f_idx = set() b_idx = set() def inner(invalid, limit): limit = min(limit, N) windowed = _rolling_window(invalid, limit + 1).all(1) idx = (set(np.where(windowed)[0] + limit) | set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0])) return idx if fw_limit is not None: if fw_limit == 0: f_idx = set(np.where(invalid)[0]) else: f_idx = inner(invalid, fw_limit) if bw_limit is not None: if bw_limit == 0: # then we don't even need to care about backwards # just use forwards return f_idx else: b_idx = list(inner(invalid[::-1], bw_limit)) b_idx = set(N - 1 - np.asarray(b_idx)) if fw_limit == 0: return b_idx return f_idx & b_idx
def _interp_limit(invalid, fw_limit, bw_limit): """ Get indexers of values that won't be filled because they exceed the limits. Parameters ---------- invalid : boolean ndarray fw_limit : int or None forward limit to index bw_limit : int or None backward limit to index Returns ------- set of indexers Notes ----- This is equivalent to the more readable, but slower .. code-block:: python def _interp_limit(invalid, fw_limit, bw_limit): for x in np.where(invalid)[0]: if invalid[max(0, x - fw_limit):x + bw_limit + 1].all(): yield x """ # handle forward first; the backward direction is the same except # 1. operate on the reversed array # 2. subtract the returned indices from N - 1 N = len(invalid) f_idx = set() b_idx = set() def inner(invalid, limit): limit = min(limit, N) windowed = _rolling_window(invalid, limit + 1).all(1) idx = (set(np.where(windowed)[0] + limit) | set(np.where((~invalid[:limit + 1]).cumsum() == 0)[0])) return idx if fw_limit is not None: if fw_limit == 0: f_idx = set(np.where(invalid)[0]) else: f_idx = inner(invalid, fw_limit) if bw_limit is not None: if bw_limit == 0: # then we don't even need to care about backwards # just use forwards return f_idx else: b_idx = list(inner(invalid[::-1], bw_limit)) b_idx = set(N - 1 - np.asarray(b_idx)) if fw_limit == 0: return b_idx return f_idx & b_idx
[ "Get", "indexers", "of", "values", "that", "won", "t", "be", "filled", "because", "they", "exceed", "the", "limits", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L660-L721
[ "def", "_interp_limit", "(", "invalid", ",", "fw_limit", ",", "bw_limit", ")", ":", "# handle forward first; the backward direction is the same except", "# 1. operate on the reversed array", "# 2. subtract the returned indices from N - 1", "N", "=", "len", "(", "invalid", ")", "f_idx", "=", "set", "(", ")", "b_idx", "=", "set", "(", ")", "def", "inner", "(", "invalid", ",", "limit", ")", ":", "limit", "=", "min", "(", "limit", ",", "N", ")", "windowed", "=", "_rolling_window", "(", "invalid", ",", "limit", "+", "1", ")", ".", "all", "(", "1", ")", "idx", "=", "(", "set", "(", "np", ".", "where", "(", "windowed", ")", "[", "0", "]", "+", "limit", ")", "|", "set", "(", "np", ".", "where", "(", "(", "~", "invalid", "[", ":", "limit", "+", "1", "]", ")", ".", "cumsum", "(", ")", "==", "0", ")", "[", "0", "]", ")", ")", "return", "idx", "if", "fw_limit", "is", "not", "None", ":", "if", "fw_limit", "==", "0", ":", "f_idx", "=", "set", "(", "np", ".", "where", "(", "invalid", ")", "[", "0", "]", ")", "else", ":", "f_idx", "=", "inner", "(", "invalid", ",", "fw_limit", ")", "if", "bw_limit", "is", "not", "None", ":", "if", "bw_limit", "==", "0", ":", "# then we don't even need to care about backwards", "# just use forwards", "return", "f_idx", "else", ":", "b_idx", "=", "list", "(", "inner", "(", "invalid", "[", ":", ":", "-", "1", "]", ",", "bw_limit", ")", ")", "b_idx", "=", "set", "(", "N", "-", "1", "-", "np", ".", "asarray", "(", "b_idx", ")", ")", "if", "fw_limit", "==", "0", ":", "return", "b_idx", "return", "f_idx", "&", "b_idx" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
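The readable reference from the Notes section, runnable on a small mask (a sketch; the vectorized `inner` above exists purely for speed):

```python
import numpy as np

def interp_limit_slow(invalid, fw_limit, bw_limit):
    # quadratic-time equivalent quoted in the docstring
    for x in np.where(invalid)[0]:
        if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
            yield x

invalid = np.array([True, True, False, True, True, True])
# indices that exceed both limits and therefore stay NaN
print(sorted(interp_limit_slow(invalid, 1, 1)))   # [0, 4, 5]
```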
_rolling_window
[True, True, False, True, False], 2 -> [ [True, True], [True, False], [False, True], [True, False], ]
pandas/core/missing.py
def _rolling_window(a, window): """ [True, True, False, True, False], 2 -> [ [True, True], [True, False], [False, True], [True, False], ] """ # https://stackoverflow.com/a/6811241 shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def _rolling_window(a, window): """ [True, True, False, True, False], 2 -> [ [True, True], [True, False], [False, True], [True, False], ] """ # https://stackoverflow.com/a/6811241 shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],) return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
[ "[", "True", "True", "False", "True", "False", "]", "2", "-", ">" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/missing.py#L724-L738
[ "def", "_rolling_window", "(", "a", ",", "window", ")", ":", "# https://stackoverflow.com/a/6811241", "shape", "=", "a", ".", "shape", "[", ":", "-", "1", "]", "+", "(", "a", ".", "shape", "[", "-", "1", "]", "-", "window", "+", "1", ",", "window", ")", "strides", "=", "a", ".", "strides", "+", "(", "a", ".", "strides", "[", "-", "1", "]", ",", ")", "return", "np", ".", "lib", ".", "stride_tricks", ".", "as_strided", "(", "a", ",", "shape", "=", "shape", ",", "strides", "=", "strides", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
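The stride trick in isolation (a sketch; the result is a view sharing memory with `a`, so it should be treated as read-only):

```python
import numpy as np

a = np.array([True, True, False, True, False])
window = 2

shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
windows = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

print(windows)          # the 4 x 2 matrix from the docstring
print(windows.all(1))   # how _interp_limit consumes the windows
```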
get_console_size
Return console size as tuple = (width, height). Returns (None, None) in a non-interactive session.
pandas/io/formats/console.py
def get_console_size(): """Return console size as tuple = (width, height). Returns (None,None) in non-interactive session. """ from pandas import get_option display_width = get_option('display.width') # deprecated. display_height = get_option('display.max_rows') # Consider # interactive shell terminal, can detect term size # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term # size non-interactive script, should disregard term size # in addition # width,height have default values, but setting to 'None' signals # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. if in_interactive_session(): if in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas._config.config import get_default_val terminal_width = get_default_val('display.width') terminal_height = get_default_val('display.max_rows') else: # pure terminal terminal_width, terminal_height = get_terminal_size() else: terminal_width, terminal_height = None, None # Note if the User sets width/Height to None (auto-detection) # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height)
def get_console_size(): """Return console size as tuple = (width, height). Returns (None,None) in non-interactive session. """ from pandas import get_option display_width = get_option('display.width') # deprecated. display_height = get_option('display.max_rows') # Consider # interactive shell terminal, can detect term size # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term # size non-interactive script, should disregard term size # in addition # width,height have default values, but setting to 'None' signals # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. if in_interactive_session(): if in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas._config.config import get_default_val terminal_width = get_default_val('display.width') terminal_height = get_default_val('display.max_rows') else: # pure terminal terminal_width, terminal_height = get_terminal_size() else: terminal_width, terminal_height = None, None # Note if the User sets width/Height to None (auto-detection) # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height)
[ "Return", "console", "size", "as", "tuple", "=", "(", "width", "height", ")", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/console.py#L8-L45
[ "def", "get_console_size", "(", ")", ":", "from", "pandas", "import", "get_option", "display_width", "=", "get_option", "(", "'display.width'", ")", "# deprecated.", "display_height", "=", "get_option", "(", "'display.max_rows'", ")", "# Consider", "# interactive shell terminal, can detect term size", "# interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term", "# size non-interactive script, should disregard term size", "# in addition", "# width,height have default values, but setting to 'None' signals", "# should use Auto-Detection, But only in interactive shell-terminal.", "# Simple. yeah.", "if", "in_interactive_session", "(", ")", ":", "if", "in_ipython_frontend", "(", ")", ":", "# sane defaults for interactive non-shell terminal", "# match default for width,height in config_init", "from", "pandas", ".", "_config", ".", "config", "import", "get_default_val", "terminal_width", "=", "get_default_val", "(", "'display.width'", ")", "terminal_height", "=", "get_default_val", "(", "'display.max_rows'", ")", "else", ":", "# pure terminal", "terminal_width", ",", "terminal_height", "=", "get_terminal_size", "(", ")", "else", ":", "terminal_width", ",", "terminal_height", "=", "None", ",", "None", "# Note if the User sets width/Height to None (auto-detection)", "# and we're in a script (non-inter), this will return (None,None)", "# caller needs to deal.", "return", "(", "display_width", "or", "terminal_width", ",", "display_height", "or", "terminal_height", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
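A rough stand-in for the "pure terminal" branch (pandas uses its own `get_terminal_size` helper; `shutil` is shown here only as an approximation):

```python
import shutil

# (columns, lines), with a fallback when no terminal is attached
width, height = shutil.get_terminal_size(fallback=(80, 24))
print(width, height)
```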
in_interactive_session
Check if we're running in an interactive shell. Returns True if running under a python/ipython interactive shell.
pandas/io/formats/console.py
def in_interactive_session(): """ check if we're running in an interactive shell returns True if running under python/ipython interactive shell """ from pandas import get_option def check_main(): try: import __main__ as main except ModuleNotFoundError: return get_option('mode.sim_interactive') return (not hasattr(main, '__file__') or get_option('mode.sim_interactive')) try: return __IPYTHON__ or check_main() # noqa except NameError: return check_main()
def in_interactive_session(): """ check if we're running in an interactive shell returns True if running under python/ipython interactive shell """ from pandas import get_option def check_main(): try: import __main__ as main except ModuleNotFoundError: return get_option('mode.sim_interactive') return (not hasattr(main, '__file__') or get_option('mode.sim_interactive')) try: return __IPYTHON__ or check_main() # noqa except NameError: return check_main()
[ "check", "if", "we", "re", "running", "in", "an", "interactive", "shell" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/console.py#L51-L69
[ "def", "in_interactive_session", "(", ")", ":", "from", "pandas", "import", "get_option", "def", "check_main", "(", ")", ":", "try", ":", "import", "__main__", "as", "main", "except", "ModuleNotFoundError", ":", "return", "get_option", "(", "'mode.sim_interactive'", ")", "return", "(", "not", "hasattr", "(", "main", ",", "'__file__'", ")", "or", "get_option", "(", "'mode.sim_interactive'", ")", ")", "try", ":", "return", "__IPYTHON__", "or", "check_main", "(", ")", "# noqa", "except", "NameError", ":", "return", "check_main", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
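The heuristic inside `check_main`, in isolation (a sketch — scripts get a `__main__.__file__` attribute, interactive shells usually do not):

```python
import __main__

# True in a bare REPL, False when run as `python script.py`
print(not hasattr(__main__, '__file__'))
```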
recode_for_groupby
Code the categories to ensure we can groupby for categoricals. If observed=True, we return a new Categorical with the observed categories only. If sort=False, return a copy of self, coded with categories as returned by .unique(), followed by any categories not appearing in the data. If sort=True, return self. This method is needed solely to ensure the categorical index of the GroupBy result has categories in the order of appearance in the data (GH-8868). Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. observed : boolean Account only for the observed values Returns ------- New Categorical If sort=False, the new categories are set to the order of appearance in codes (unless ordered=True, in which case the original order is preserved), followed by any unrepresented categories in the original order. Categorical or None If we are observed, return the original categorical, otherwise None
pandas/core/groupby/categorical.py
def recode_for_groupby(c, sort, observed): """ Code the categories to ensure we can groupby for categoricals. If observed=True, we return a new Categorical with the observed categories only. If sort=False, return a copy of self, coded with categories as returned by .unique(), followed by any categories not appearing in the data. If sort=True, return self. This method is needed solely to ensure the categorical index of the GroupBy result has categories in the order of appearance in the data (GH-8868). Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. observed : boolean Account only for the observed values Returns ------- New Categorical If sort=False, the new categories are set to the order of appearance in codes (unless ordered=True, in which case the original order is preserved), followed by any unrepresented categories in the original order. Categorical or None If we are observed, return the original categorical, otherwise None """ # we only care about observed values if observed: unique_codes = unique1d(c.codes) take_codes = unique_codes[unique_codes != -1] if c.ordered: take_codes = np.sort(take_codes) # we recode according to the uniques categories = c.categories.take(take_codes) codes = _recode_for_categories(c.codes, c.categories, categories) # return a new categorical that maps our new codes # and categories dtype = CategoricalDtype(categories, ordered=c.ordered) return Categorical(codes, dtype=dtype, fastpath=True), c # Already sorted according to c.categories; all is fine if sort: return c, None # sort=False should order groups in as-encountered order (GH-8868) cat = c.unique() # But for groupby to work, all categories should be present, # including those missing from the data (GH-13179), which .unique() # above dropped cat = cat.add_categories( c.categories[~c.categories.isin(cat.categories)]) return c.reorder_categories(cat.categories), None
def recode_for_groupby(c, sort, observed): """ Code the categories to ensure we can groupby for categoricals. If observed=True, we return a new Categorical with the observed categories only. If sort=False, return a copy of self, coded with categories as returned by .unique(), followed by any categories not appearing in the data. If sort=True, return self. This method is needed solely to ensure the categorical index of the GroupBy result has categories in the order of appearance in the data (GH-8868). Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. observed : boolean Account only for the observed values Returns ------- New Categorical If sort=False, the new categories are set to the order of appearance in codes (unless ordered=True, in which case the original order is preserved), followed by any unrepresented categories in the original order. Categorical or None If we are observed, return the original categorical, otherwise None """ # we only care about observed values if observed: unique_codes = unique1d(c.codes) take_codes = unique_codes[unique_codes != -1] if c.ordered: take_codes = np.sort(take_codes) # we recode according to the uniques categories = c.categories.take(take_codes) codes = _recode_for_categories(c.codes, c.categories, categories) # return a new categorical that maps our new codes # and categories dtype = CategoricalDtype(categories, ordered=c.ordered) return Categorical(codes, dtype=dtype, fastpath=True), c # Already sorted according to c.categories; all is fine if sort: return c, None # sort=False should order groups in as-encountered order (GH-8868) cat = c.unique() # But for groupby to work, all categories should be present, # including those missing from the data (GH-13179), which .unique() # above dropped cat = cat.add_categories( c.categories[~c.categories.isin(cat.categories)]) return c.reorder_categories(cat.categories), None
[ "Code", "the", "categories", "to", "ensure", "we", "can", "groupby", "for", "categoricals", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/categorical.py#L8-L74
[ "def", "recode_for_groupby", "(", "c", ",", "sort", ",", "observed", ")", ":", "# we only care about observed values", "if", "observed", ":", "unique_codes", "=", "unique1d", "(", "c", ".", "codes", ")", "take_codes", "=", "unique_codes", "[", "unique_codes", "!=", "-", "1", "]", "if", "c", ".", "ordered", ":", "take_codes", "=", "np", ".", "sort", "(", "take_codes", ")", "# we recode according to the uniques", "categories", "=", "c", ".", "categories", ".", "take", "(", "take_codes", ")", "codes", "=", "_recode_for_categories", "(", "c", ".", "codes", ",", "c", ".", "categories", ",", "categories", ")", "# return a new categorical that maps our new codes", "# and categories", "dtype", "=", "CategoricalDtype", "(", "categories", ",", "ordered", "=", "c", ".", "ordered", ")", "return", "Categorical", "(", "codes", ",", "dtype", "=", "dtype", ",", "fastpath", "=", "True", ")", ",", "c", "# Already sorted according to c.categories; all is fine", "if", "sort", ":", "return", "c", ",", "None", "# sort=False should order groups in as-encountered order (GH-8868)", "cat", "=", "c", ".", "unique", "(", ")", "# But for groupby to work, all categories should be present,", "# including those missing from the data (GH-13179), which .unique()", "# above dropped", "cat", "=", "cat", ".", "add_categories", "(", "c", ".", "categories", "[", "~", "c", ".", "categories", ".", "isin", "(", "cat", ".", "categories", ")", "]", ")", "return", "c", ".", "reorder_categories", "(", "cat", ".", "categories", ")", ",", "None" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
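A sketch of the `sort=False` branch on a small Categorical: observed values kept in order of appearance, then the unobserved category re-appended (GH-8868 / GH-13179, as cited above):

```python
import pandas as pd

c = pd.Categorical(['b', 'a', 'b'], categories=['a', 'b', 'c'])

cat = c.unique()                      # appearance order: ['b', 'a']
cat = cat.add_categories(
    c.categories[~c.categories.isin(cat.categories)])    # re-add 'c'

print(c.reorder_categories(cat.categories))
# values unchanged; categories now ordered ['b', 'a', 'c']
```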
recode_from_groupby
Reverse the codes_to_groupby to account for sort / observed. Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. ci : CategoricalIndex The codes / categories to recode Returns ------- CategoricalIndex
pandas/core/groupby/categorical.py
def recode_from_groupby(c, sort, ci): """ Reverse the codes_to_groupby to account for sort / observed. Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. ci : CategoricalIndex The codes / categories to recode Returns ------- CategoricalIndex """ # we re-order to the original category orderings if sort: return ci.set_categories(c.categories) # we are not sorting, so add unobserved to the end return ci.add_categories( c.categories[~c.categories.isin(ci.categories)])
def recode_from_groupby(c, sort, ci): """ Reverse the codes_to_groupby to account for sort / observed. Parameters ---------- c : Categorical sort : boolean The value of the sort parameter groupby was called with. ci : CategoricalIndex The codes / categories to recode Returns ------- CategoricalIndex """ # we re-order to the original category orderings if sort: return ci.set_categories(c.categories) # we are not sorting, so add unobserved to the end return ci.add_categories( c.categories[~c.categories.isin(ci.categories)])
[ "Reverse", "the", "codes_to_groupby", "to", "account", "for", "sort", "/", "observed", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/categorical.py#L77-L100
[ "def", "recode_from_groupby", "(", "c", ",", "sort", ",", "ci", ")", ":", "# we re-order to the original category orderings", "if", "sort", ":", "return", "ci", ".", "set_categories", "(", "c", ".", "categories", ")", "# we are not sorting, so add unobserved to the end", "return", "ci", ".", "add_categories", "(", "c", ".", "categories", "[", "~", "c", ".", "categories", ".", "isin", "(", "ci", ".", "categories", ")", "]", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
get_engine
Return our implementation.
pandas/io/parquet.py
def get_engine(engine): """ return our implementation """ if engine == 'auto': engine = get_option('io.parquet.engine') if engine == 'auto': # try engines in this order try: return PyArrowImpl() except ImportError: pass try: return FastParquetImpl() except ImportError: pass raise ImportError("Unable to find a usable engine; " "tried using: 'pyarrow', 'fastparquet'.\n" "pyarrow or fastparquet is required for parquet " "support") if engine not in ['pyarrow', 'fastparquet']: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") if engine == 'pyarrow': return PyArrowImpl() elif engine == 'fastparquet': return FastParquetImpl()
def get_engine(engine): """ return our implementation """ if engine == 'auto': engine = get_option('io.parquet.engine') if engine == 'auto': # try engines in this order try: return PyArrowImpl() except ImportError: pass try: return FastParquetImpl() except ImportError: pass raise ImportError("Unable to find a usable engine; " "tried using: 'pyarrow', 'fastparquet'.\n" "pyarrow or fastparquet is required for parquet " "support") if engine not in ['pyarrow', 'fastparquet']: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") if engine == 'pyarrow': return PyArrowImpl() elif engine == 'fastparquet': return FastParquetImpl()
[ "return", "our", "implementation" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parquet.py#L13-L42
[ "def", "get_engine", "(", "engine", ")", ":", "if", "engine", "==", "'auto'", ":", "engine", "=", "get_option", "(", "'io.parquet.engine'", ")", "if", "engine", "==", "'auto'", ":", "# try engines in this order", "try", ":", "return", "PyArrowImpl", "(", ")", "except", "ImportError", ":", "pass", "try", ":", "return", "FastParquetImpl", "(", ")", "except", "ImportError", ":", "pass", "raise", "ImportError", "(", "\"Unable to find a usable engine; \"", "\"tried using: 'pyarrow', 'fastparquet'.\\n\"", "\"pyarrow or fastparquet is required for parquet \"", "\"support\"", ")", "if", "engine", "not", "in", "[", "'pyarrow'", ",", "'fastparquet'", "]", ":", "raise", "ValueError", "(", "\"engine must be one of 'pyarrow', 'fastparquet'\"", ")", "if", "engine", "==", "'pyarrow'", ":", "return", "PyArrowImpl", "(", ")", "elif", "engine", "==", "'fastparquet'", ":", "return", "FastParquetImpl", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
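The fallback chain in get_engine can be steered through the real io.parquet.engine option; the pick_engine helper below is a hypothetical re-sketch of the same try-in-order pattern, not pandas code:

import pandas as pd

# Force a specific engine instead of the 'auto' fallback chain.
pd.set_option('io.parquet.engine', 'pyarrow')

def pick_engine(candidates):
    # Try each engine factory in order, swallowing ImportError,
    # the same way get_engine does for PyArrowImpl / FastParquetImpl.
    for make_engine in candidates:
        try:
            return make_engine()
        except ImportError:
            continue
    raise ImportError("no usable engine found")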
train
to_parquet
Write a DataFrame to the parquet format. Parameters ---------- path : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the engine's default behavior will be used. .. versionadded 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 kwargs Additional keyword arguments passed to the engine
pandas/io/parquet.py
def to_parquet(df, path, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the parquet format. Parameters ---------- path : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the engine's default behavior will be used. .. versionadded 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 kwargs Additional keyword arguments passed to the engine """ impl = get_engine(engine) return impl.write(df, path, compression=compression, index=index, partition_cols=partition_cols, **kwargs)
def to_parquet(df, path, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the parquet format. Parameters ---------- path : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the engine's default behavior will be used. .. versionadded 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 kwargs Additional keyword arguments passed to the engine """ impl = get_engine(engine) return impl.write(df, path, compression=compression, index=index, partition_cols=partition_cols, **kwargs)
[ "Write", "a", "DataFrame", "to", "the", "parquet", "format", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parquet.py#L213-L251
[ "def", "to_parquet", "(", "df", ",", "path", ",", "engine", "=", "'auto'", ",", "compression", "=", "'snappy'", ",", "index", "=", "None", ",", "partition_cols", "=", "None", ",", "*", "*", "kwargs", ")", ":", "impl", "=", "get_engine", "(", "engine", ")", "return", "impl", ".", "write", "(", "df", ",", "path", ",", "compression", "=", "compression", ",", "index", "=", "index", ",", "partition_cols", "=", "partition_cols", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
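A usage sketch for the to_parquet wrapper above (file names are our own; requires pyarrow or fastparquet to be installed):

import pandas as pd

df = pd.DataFrame({'year': [2019, 2019, 2020], 'value': [1.0, 2.0, 3.0]})
# Single-file write; engine and compression resolved as documented above.
df.to_parquet('values.parquet', compression='snappy', index=False)
# Partitioned write: one subdirectory per distinct 'year' value.
df.to_parquet('values_by_year', partition_cols=['year'])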
train
read_parquet
Load a parquet object from the file path, returning a DataFrame. .. versionadded 0.21.0 Parameters ---------- path : string File path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. .. versionadded 0.21.1 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame
pandas/io/parquet.py
def read_parquet(path, engine='auto', columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. .. versionadded 0.21.0 Parameters ---------- path : string File path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. .. versionadded 0.21.1 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame """ impl = get_engine(engine) return impl.read(path, columns=columns, **kwargs)
def read_parquet(path, engine='auto', columns=None, **kwargs): """ Load a parquet object from the file path, returning a DataFrame. .. versionadded 0.21.0 Parameters ---------- path : string File path engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. columns : list, default=None If not None, only these columns will be read from the file. .. versionadded 0.21.1 **kwargs Any additional kwargs are passed to the engine. Returns ------- DataFrame """ impl = get_engine(engine) return impl.read(path, columns=columns, **kwargs)
[ "Load", "a", "parquet", "object", "from", "the", "file", "path", "returning", "a", "DataFrame", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parquet.py#L254-L282
[ "def", "read_parquet", "(", "path", ",", "engine", "=", "'auto'", ",", "columns", "=", "None", ",", "*", "*", "kwargs", ")", ":", "impl", "=", "get_engine", "(", "engine", ")", "return", "impl", ".", "read", "(", "path", ",", "columns", "=", "columns", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
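And the matching read side, continuing the hypothetical file from the previous sketch; columns= prunes at read time:

import pandas as pd

# Only the requested columns are loaded from the parquet file.
subset = pd.read_parquet('values.parquet', columns=['value'])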
train
generate_bins_generic
Generate bin edge offsets and bin labels for one array using another array which has bin edge values. Both arrays must be sorted. Parameters ---------- values : array of values binner : a comparable array of values representing bins into which to bin the first array. Note, 'values' end-points must fall within 'binner' end-points. closed : which end of bin is closed; left (default), right Returns ------- bins : array of offsets (into 'values' argument) of bins. Zero and last edge are excluded in result, so for instance the first bin is values[0:bin[0]] and the last is values[bin[-1]:]
pandas/core/groupby/ops.py
def generate_bins_generic(values, binner, closed): """ Generate bin edge offsets and bin labels for one array using another array which has bin edge values. Both arrays must be sorted. Parameters ---------- values : array of values binner : a comparable array of values representing bins into which to bin the first array. Note, 'values' end-points must fall within 'binner' end-points. closed : which end of bin is closed; left (default), right Returns ------- bins : array of offsets (into 'values' argument) of bins. Zero and last edge are excluded in result, so for instance the first bin is values[0:bin[0]] and the last is values[bin[-1]:] """ lenidx = len(values) lenbin = len(binner) if lenidx <= 0 or lenbin <= 0: raise ValueError("Invalid length for values or for binner") # check binner fits data if values[0] < binner[0]: raise ValueError("Values falls before first bin") if values[lenidx - 1] > binner[lenbin - 1]: raise ValueError("Values falls after last bin") bins = np.empty(lenbin - 1, dtype=np.int64) j = 0 # index into values bc = 0 # bin count # linear scan, presume nothing about values/binner except that it fits ok for i in range(0, lenbin - 1): r_bin = binner[i + 1] # count values in current bin, advance to next bin while j < lenidx and (values[j] < r_bin or (closed == 'right' and values[j] == r_bin)): j += 1 bins[bc] = j bc += 1 return bins
def generate_bins_generic(values, binner, closed): """ Generate bin edge offsets and bin labels for one array using another array which has bin edge values. Both arrays must be sorted. Parameters ---------- values : array of values binner : a comparable array of values representing bins into which to bin the first array. Note, 'values' end-points must fall within 'binner' end-points. closed : which end of bin is closed; left (default), right Returns ------- bins : array of offsets (into 'values' argument) of bins. Zero and last edge are excluded in result, so for instance the first bin is values[0:bin[0]] and the last is values[bin[-1]:] """ lenidx = len(values) lenbin = len(binner) if lenidx <= 0 or lenbin <= 0: raise ValueError("Invalid length for values or for binner") # check binner fits data if values[0] < binner[0]: raise ValueError("Values falls before first bin") if values[lenidx - 1] > binner[lenbin - 1]: raise ValueError("Values falls after last bin") bins = np.empty(lenbin - 1, dtype=np.int64) j = 0 # index into values bc = 0 # bin count # linear scan, presume nothing about values/binner except that it fits ok for i in range(0, lenbin - 1): r_bin = binner[i + 1] # count values in current bin, advance to next bin while j < lenidx and (values[j] < r_bin or (closed == 'right' and values[j] == r_bin)): j += 1 bins[bc] = j bc += 1 return bins
[ "Generate", "bin", "edge", "offsets", "and", "bin", "labels", "for", "one", "array", "using", "another", "array", "which", "has", "bin", "edge", "values", ".", "Both", "arrays", "must", "be", "sorted", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L40-L89
[ "def", "generate_bins_generic", "(", "values", ",", "binner", ",", "closed", ")", ":", "lenidx", "=", "len", "(", "values", ")", "lenbin", "=", "len", "(", "binner", ")", "if", "lenidx", "<=", "0", "or", "lenbin", "<=", "0", ":", "raise", "ValueError", "(", "\"Invalid length for values or for binner\"", ")", "# check binner fits data", "if", "values", "[", "0", "]", "<", "binner", "[", "0", "]", ":", "raise", "ValueError", "(", "\"Values falls before first bin\"", ")", "if", "values", "[", "lenidx", "-", "1", "]", ">", "binner", "[", "lenbin", "-", "1", "]", ":", "raise", "ValueError", "(", "\"Values falls after last bin\"", ")", "bins", "=", "np", ".", "empty", "(", "lenbin", "-", "1", ",", "dtype", "=", "np", ".", "int64", ")", "j", "=", "0", "# index into values", "bc", "=", "0", "# bin count", "# linear scan, presume nothing about values/binner except that it fits ok", "for", "i", "in", "range", "(", "0", ",", "lenbin", "-", "1", ")", ":", "r_bin", "=", "binner", "[", "i", "+", "1", "]", "# count values in current bin, advance to next bin", "while", "j", "<", "lenidx", "and", "(", "values", "[", "j", "]", "<", "r_bin", "or", "(", "closed", "==", "'right'", "and", "values", "[", "j", "]", "==", "r_bin", ")", ")", ":", "j", "+=", "1", "bins", "[", "bc", "]", "=", "j", "bc", "+=", "1", "return", "bins" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
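A worked example for generate_bins_generic; the import path is internal and matches this record's pinned revision, so treat it as illustrative:

import numpy as np
from pandas.core.groupby.ops import generate_bins_generic

values = np.array([1, 2, 3, 4, 5, 6])
binner = np.array([0, 3, 6])
# closed='right' puts a value equal to a bin edge into the left bin.
generate_bins_generic(values, binner, closed='right')
# -> array([3, 6]): values[0:3] fall in (0, 3], values[3:6] in (3, 6]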
train
BaseGrouper.get_iterator
Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group
pandas/core/groupby/ops.py
def get_iterator(self, data, axis=0): """ Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group """ splitter = self._get_splitter(data, axis=axis) keys = self._get_group_keys() for key, (i, group) in zip(keys, splitter): yield key, group
def get_iterator(self, data, axis=0): """ Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group """ splitter = self._get_splitter(data, axis=axis) keys = self._get_group_keys() for key, (i, group) in zip(keys, splitter): yield key, group
[ "Groupby", "iterator" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L136-L148
[ "def", "get_iterator", "(", "self", ",", "data", ",", "axis", "=", "0", ")", ":", "splitter", "=", "self", ".", "_get_splitter", "(", "data", ",", "axis", "=", "axis", ")", "keys", "=", "self", ".", "_get_group_keys", "(", ")", "for", "key", ",", "(", "i", ",", "group", ")", "in", "zip", "(", "keys", ",", "splitter", ")", ":", "yield", "key", ",", "group" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
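BaseGrouper.get_iterator is what backs the public iteration protocol on groupby objects, e.g.:

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
# Each step yields (group name, subsetted DataFrame), as documented above.
for name, group in df.groupby('key'):
    print(name, len(group))   # a 2 / b 1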
train
BaseGrouper.indices
dict {group name -> group indices}
pandas/core/groupby/ops.py
def indices(self): """ dict {group name -> group indices} """ if len(self.groupings) == 1: return self.groupings[0].indices else: label_list = [ping.labels for ping in self.groupings] keys = [com.values_from_object(ping.group_index) for ping in self.groupings] return get_indexer_dict(label_list, keys)
def indices(self): """ dict {group name -> group indices} """ if len(self.groupings) == 1: return self.groupings[0].indices else: label_list = [ping.labels for ping in self.groupings] keys = [com.values_from_object(ping.group_index) for ping in self.groupings] return get_indexer_dict(label_list, keys)
[ "dict", "{", "group", "name", "-", ">", "group", "indices", "}" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L219-L227
[ "def", "indices", "(", "self", ")", ":", "if", "len", "(", "self", ".", "groupings", ")", "==", "1", ":", "return", "self", ".", "groupings", "[", "0", "]", ".", "indices", "else", ":", "label_list", "=", "[", "ping", ".", "labels", "for", "ping", "in", "self", ".", "groupings", "]", "keys", "=", "[", "com", ".", "values_from_object", "(", "ping", ".", "group_index", ")", "for", "ping", "in", "self", ".", "groupings", "]", "return", "get_indexer_dict", "(", "label_list", ",", "keys", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
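The indices mapping is reachable publicly; with a single grouping the keys are scalars, and with multiple groupings they become tuples:

import pandas as pd

df = pd.DataFrame({'k1': ['a', 'a', 'b'], 'k2': ['x', 'y', 'x'], 'v': [1, 2, 3]})
df.groupby('k1').indices          # {'a': array([0, 1]), 'b': array([2])}
df.groupby(['k1', 'k2']).indices  # {('a', 'x'): array([0]), ('a', 'y'): array([1]), ...}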
train
BaseGrouper.size
Compute group sizes
pandas/core/groupby/ops.py
def size(self): """ Compute group sizes """ ids, _, ngroup = self.group_info ids = ensure_platform_int(ids) if ngroup: out = np.bincount(ids[ids != -1], minlength=ngroup) else: out = [] return Series(out, index=self.result_index, dtype='int64')
def size(self): """ Compute group sizes """ ids, _, ngroup = self.group_info ids = ensure_platform_int(ids) if ngroup: out = np.bincount(ids[ids != -1], minlength=ngroup) else: out = [] return Series(out, index=self.result_index, dtype='int64')
[ "Compute", "group", "sizes" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L241-L254
[ "def", "size", "(", "self", ")", ":", "ids", ",", "_", ",", "ngroup", "=", "self", ".", "group_info", "ids", "=", "ensure_platform_int", "(", "ids", ")", "if", "ngroup", ":", "out", "=", "np", ".", "bincount", "(", "ids", "[", "ids", "!=", "-", "1", "]", ",", "minlength", "=", "ngroup", ")", "else", ":", "out", "=", "[", "]", "return", "Series", "(", "out", ",", "index", "=", "self", ".", "result_index", ",", "dtype", "=", "'int64'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
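Publicly this surfaces as GroupBy.size, returning an int64 Series indexed by group:

import pandas as pd

s = pd.Series([1, 2, 3], index=['a', 'a', 'b'])
s.groupby(level=0).size()
# a    2
# b    1
# dtype: int64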
train
BaseGrouper.groups
dict {group name -> group labels}
pandas/core/groupby/ops.py
def groups(self): """ dict {group name -> group labels} """ if len(self.groupings) == 1: return self.groupings[0].groups else: to_groupby = lzip(*(ping.grouper for ping in self.groupings)) to_groupby = Index(to_groupby) return self.axis.groupby(to_groupby)
def groups(self): """ dict {group name -> group labels} """ if len(self.groupings) == 1: return self.groupings[0].groups else: to_groupby = lzip(*(ping.grouper for ping in self.groupings)) to_groupby = Index(to_groupby) return self.axis.groupby(to_groupby)
[ "dict", "{", "group", "name", "-", ">", "group", "labels", "}" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L257-L264
[ "def", "groups", "(", "self", ")", ":", "if", "len", "(", "self", ".", "groupings", ")", "==", "1", ":", "return", "self", ".", "groupings", "[", "0", "]", ".", "groups", "else", ":", "to_groupby", "=", "lzip", "(", "*", "(", "ping", ".", "grouper", "for", "ping", "in", "self", ".", "groupings", ")", ")", "to_groupby", "=", "Index", "(", "to_groupby", ")", "return", "self", ".", "axis", ".", "groupby", "(", "to_groupby", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
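Unlike indices (positional), groups maps each name to the row labels of its members:

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]},
                  index=['r0', 'r1', 'r2'])
df.groupby('key').groups   # {'a': Index(['r0', 'r1'], ...), 'b': Index(['r2'], ...)}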
train
BinGrouper.groups
dict {group name -> group labels}
pandas/core/groupby/ops.py
def groups(self): """ dict {group name -> group labels} """ # this is mainly for compat # GH 3881 result = {key: value for key, value in zip(self.binlabels, self.bins) if key is not NaT} return result
def groups(self): """ dict {group name -> group labels} """ # this is mainly for compat # GH 3881 result = {key: value for key, value in zip(self.binlabels, self.bins) if key is not NaT} return result
[ "dict", "{", "group", "name", "-", ">", "group", "labels", "}" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L698-L705
[ "def", "groups", "(", "self", ")", ":", "# this is mainly for compat", "# GH 3881", "result", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "zip", "(", "self", ".", "binlabels", ",", "self", ".", "bins", ")", "if", "key", "is", "not", "NaT", "}", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
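BinGrouper is the grouper behind resample; its groups property (with NaT bins filtered out, per GH 3881) is visible as, approximately:

import pandas as pd

s = pd.Series(range(4), index=pd.date_range('2019-01-01', periods=4, freq='D'))
s.resample('2D').groups
# {Timestamp('2019-01-01'): 2, Timestamp('2019-01-03'): 4}
# (bin label -> cumulative bin edge offset, as the code above builds it)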
train
BinGrouper.get_iterator
Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group
pandas/core/groupby/ops.py
def get_iterator(self, data, axis=0): """ Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group """ if isinstance(data, NDFrame): slicer = lambda start, edge: data._slice( slice(start, edge), axis=axis) length = len(data.axes[axis]) else: slicer = lambda start, edge: data[slice(start, edge)] length = len(data) start = 0 for edge, label in zip(self.bins, self.binlabels): if label is not NaT: yield label, slicer(start, edge) start = edge if start < length: yield self.binlabels[-1], slicer(start, None)
def get_iterator(self, data, axis=0): """ Groupby iterator Returns ------- Generator yielding sequence of (name, subsetted object) for each group """ if isinstance(data, NDFrame): slicer = lambda start, edge: data._slice( slice(start, edge), axis=axis) length = len(data.axes[axis]) else: slicer = lambda start, edge: data[slice(start, edge)] length = len(data) start = 0 for edge, label in zip(self.bins, self.binlabels): if label is not NaT: yield label, slicer(start, edge) start = edge if start < length: yield self.binlabels[-1], slicer(start, None)
[ "Groupby", "iterator" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/ops.py#L711-L735
[ "def", "get_iterator", "(", "self", ",", "data", ",", "axis", "=", "0", ")", ":", "if", "isinstance", "(", "data", ",", "NDFrame", ")", ":", "slicer", "=", "lambda", "start", ",", "edge", ":", "data", ".", "_slice", "(", "slice", "(", "start", ",", "edge", ")", ",", "axis", "=", "axis", ")", "length", "=", "len", "(", "data", ".", "axes", "[", "axis", "]", ")", "else", ":", "slicer", "=", "lambda", "start", ",", "edge", ":", "data", "[", "slice", "(", "start", ",", "edge", ")", "]", "length", "=", "len", "(", "data", ")", "start", "=", "0", "for", "edge", ",", "label", "in", "zip", "(", "self", ".", "bins", ",", "self", ".", "binlabels", ")", ":", "if", "label", "is", "not", "NaT", ":", "yield", "label", ",", "slicer", "(", "start", ",", "edge", ")", "start", "=", "edge", "if", "start", "<", "length", ":", "yield", "self", ".", "binlabels", "[", "-", "1", "]", ",", "slicer", "(", "start", ",", "None", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
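And its get_iterator drives iteration over a resampler, yielding (bin label, sliced chunk) pairs:

import pandas as pd

s = pd.Series(range(4), index=pd.date_range('2019-01-01', periods=4, freq='D'))
for label, chunk in s.resample('2D'):
    print(label, list(chunk))
# 2019-01-01 00:00:00 [0, 1]
# 2019-01-03 00:00:00 [2, 3]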
train
json_normalize
Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects record_path : string or list of strings, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records meta : list of paths (string or list of strings), default None Fields to use as metadata for each record in resulting table meta_prefix : string, default None record_prefix : string, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar'] errors : {'raise', 'ignore'}, default 'raise' * 'ignore' : will ignore KeyError if keys listed in meta are not always present * 'raise' : will raise KeyError if keys listed in meta are not always present .. versionadded:: 0.20.0 sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 Returns ------- frame : DataFrame Examples -------- >>> from pandas.io.json import json_normalize >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, ... {'name': {'given': 'Mose', 'family': 'Regner'}}, ... {'id': 2, 'name': 'Faye Raker'}] >>> json_normalize(data) id name name.family name.first name.given name.last 0 1.0 NaN NaN Coleen NaN Volk 1 NaN NaN Regner NaN Mose NaN 2 2.0 Faye Raker NaN NaN NaN NaN >>> data = [{'state': 'Florida', ... 'shortname': 'FL', ... 'info': { ... 'governor': 'Rick Scott' ... }, ... 'counties': [{'name': 'Dade', 'population': 12345}, ... {'name': 'Broward', 'population': 40000}, ... {'name': 'Palm Beach', 'population': 60000}]}, ... {'state': 'Ohio', ... 'shortname': 'OH', ... 'info': { ... 'governor': 'John Kasich' ... }, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] >>> result = json_normalize(data, 'counties', ['state', 'shortname', ... ['info', 'governor']]) >>> result name population info.governor state shortname 0 Dade 12345 Rick Scott Florida FL 1 Broward 40000 Rick Scott Florida FL 2 Palm Beach 60000 Rick Scott Florida FL 3 Summit 1234 John Kasich Ohio OH 4 Cuyahoga 1337 John Kasich Ohio OH >>> data = {'A': [1, 2]} >>> json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2
pandas/io/json/normalize.py
def json_normalize(data, record_path=None, meta=None, meta_prefix=None, record_prefix=None, errors='raise', sep='.'): """ Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects record_path : string or list of strings, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records meta : list of paths (string or list of strings), default None Fields to use as metadata for each record in resulting table meta_prefix : string, default None record_prefix : string, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar'] errors : {'raise', 'ignore'}, default 'raise' * 'ignore' : will ignore KeyError if keys listed in meta are not always present * 'raise' : will raise KeyError if keys listed in meta are not always present .. versionadded:: 0.20.0 sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 Returns ------- frame : DataFrame Examples -------- >>> from pandas.io.json import json_normalize >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, ... {'name': {'given': 'Mose', 'family': 'Regner'}}, ... {'id': 2, 'name': 'Faye Raker'}] >>> json_normalize(data) id name name.family name.first name.given name.last 0 1.0 NaN NaN Coleen NaN Volk 1 NaN NaN Regner NaN Mose NaN 2 2.0 Faye Raker NaN NaN NaN NaN >>> data = [{'state': 'Florida', ... 'shortname': 'FL', ... 'info': { ... 'governor': 'Rick Scott' ... }, ... 'counties': [{'name': 'Dade', 'population': 12345}, ... {'name': 'Broward', 'population': 40000}, ... {'name': 'Palm Beach', 'population': 60000}]}, ... {'state': 'Ohio', ... 'shortname': 'OH', ... 'info': { ... 'governor': 'John Kasich' ... }, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] >>> result = json_normalize(data, 'counties', ['state', 'shortname', ... 
['info', 'governor']]) >>> result name population info.governor state shortname 0 Dade 12345 Rick Scott Florida FL 1 Broward 40000 Rick Scott Florida FL 2 Palm Beach 60000 Rick Scott Florida FL 3 Summit 1234 John Kasich Ohio OH 4 Cuyahoga 1337 John Kasich Ohio OH >>> data = {'A': [1, 2]} >>> json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2 """ def _pull_field(js, spec): result = js if isinstance(spec, list): for field in spec: result = result[field] else: result = result[spec] return result if isinstance(data, list) and not data: return DataFrame() # A bit of a hackjob if isinstance(data, dict): data = [data] if record_path is None: if any([isinstance(x, dict) for x in y.values()] for y in data): # naive normalization, this is idempotent for flat records # and potentially will inflate the data considerably for # deeply nested structures: # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} # # TODO: handle record value which are lists, at least error # reasonably data = nested_to_record(data, sep=sep) return DataFrame(data) elif not isinstance(record_path, list): record_path = [record_path] if meta is None: meta = [] elif not isinstance(meta, list): meta = [meta] meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records = [] lengths = [] meta_vals = defaultdict(list) if not isinstance(sep, str): sep = str(sep) meta_keys = [sep.join(val) for val in meta] def _recursive_extract(data, path, seen_meta, level=0): if isinstance(data, dict): data = [data] if len(path) > 1: for obj in data: for val, key in zip(meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: for obj in data: recs = _pull_field(obj, path[0]) # For repeating the metadata later lengths.append(len(recs)) for val, key in zip(meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: try: meta_val = _pull_field(obj, val[level:]) except KeyError as e: if errors == 'ignore': meta_val = np.nan else: raise KeyError("Try running with " "errors='ignore' as key " "{err} is not always present" .format(err=e)) meta_vals[key].append(meta_val) records.extend(recs) _recursive_extract(data, record_path, {}, level=0) result = DataFrame(records) if record_prefix is not None: result = result.rename( columns=lambda x: "{p}{c}".format(p=record_prefix, c=x)) # Data types, a problem for k, v in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k if k in result: raise ValueError('Conflicting metadata name {name}, ' 'need distinguishing prefix '.format(name=k)) # forcing dtype to object to avoid the metadata being casted to string result[k] = np.array(v, dtype=object).repeat(lengths) return result
def json_normalize(data, record_path=None, meta=None, meta_prefix=None, record_prefix=None, errors='raise', sep='.'): """ Normalize semi-structured JSON data into a flat table. Parameters ---------- data : dict or list of dicts Unserialized JSON objects record_path : string or list of strings, default None Path in each object to list of records. If not passed, data will be assumed to be an array of records meta : list of paths (string or list of strings), default None Fields to use as metadata for each record in resulting table meta_prefix : string, default None record_prefix : string, default None If True, prefix records with dotted (?) path, e.g. foo.bar.field if path to records is ['foo', 'bar'] errors : {'raise', 'ignore'}, default 'raise' * 'ignore' : will ignore KeyError if keys listed in meta are not always present * 'raise' : will raise KeyError if keys listed in meta are not always present .. versionadded:: 0.20.0 sep : string, default '.' Nested records will generate names separated by sep, e.g., for sep='.', { 'foo' : { 'bar' : 0 } } -> foo.bar .. versionadded:: 0.20.0 Returns ------- frame : DataFrame Examples -------- >>> from pandas.io.json import json_normalize >>> data = [{'id': 1, 'name': {'first': 'Coleen', 'last': 'Volk'}}, ... {'name': {'given': 'Mose', 'family': 'Regner'}}, ... {'id': 2, 'name': 'Faye Raker'}] >>> json_normalize(data) id name name.family name.first name.given name.last 0 1.0 NaN NaN Coleen NaN Volk 1 NaN NaN Regner NaN Mose NaN 2 2.0 Faye Raker NaN NaN NaN NaN >>> data = [{'state': 'Florida', ... 'shortname': 'FL', ... 'info': { ... 'governor': 'Rick Scott' ... }, ... 'counties': [{'name': 'Dade', 'population': 12345}, ... {'name': 'Broward', 'population': 40000}, ... {'name': 'Palm Beach', 'population': 60000}]}, ... {'state': 'Ohio', ... 'shortname': 'OH', ... 'info': { ... 'governor': 'John Kasich' ... }, ... 'counties': [{'name': 'Summit', 'population': 1234}, ... {'name': 'Cuyahoga', 'population': 1337}]}] >>> result = json_normalize(data, 'counties', ['state', 'shortname', ... 
['info', 'governor']]) >>> result name population info.governor state shortname 0 Dade 12345 Rick Scott Florida FL 1 Broward 40000 Rick Scott Florida FL 2 Palm Beach 60000 Rick Scott Florida FL 3 Summit 1234 John Kasich Ohio OH 4 Cuyahoga 1337 John Kasich Ohio OH >>> data = {'A': [1, 2]} >>> json_normalize(data, 'A', record_prefix='Prefix.') Prefix.0 0 1 1 2 """ def _pull_field(js, spec): result = js if isinstance(spec, list): for field in spec: result = result[field] else: result = result[spec] return result if isinstance(data, list) and not data: return DataFrame() # A bit of a hackjob if isinstance(data, dict): data = [data] if record_path is None: if any([isinstance(x, dict) for x in y.values()] for y in data): # naive normalization, this is idempotent for flat records # and potentially will inflate the data considerably for # deeply nested structures: # {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@} # # TODO: handle record value which are lists, at least error # reasonably data = nested_to_record(data, sep=sep) return DataFrame(data) elif not isinstance(record_path, list): record_path = [record_path] if meta is None: meta = [] elif not isinstance(meta, list): meta = [meta] meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records = [] lengths = [] meta_vals = defaultdict(list) if not isinstance(sep, str): sep = str(sep) meta_keys = [sep.join(val) for val in meta] def _recursive_extract(data, path, seen_meta, level=0): if isinstance(data, dict): data = [data] if len(path) > 1: for obj in data: for val, key in zip(meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: for obj in data: recs = _pull_field(obj, path[0]) # For repeating the metadata later lengths.append(len(recs)) for val, key in zip(meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: try: meta_val = _pull_field(obj, val[level:]) except KeyError as e: if errors == 'ignore': meta_val = np.nan else: raise KeyError("Try running with " "errors='ignore' as key " "{err} is not always present" .format(err=e)) meta_vals[key].append(meta_val) records.extend(recs) _recursive_extract(data, record_path, {}, level=0) result = DataFrame(records) if record_prefix is not None: result = result.rename( columns=lambda x: "{p}{c}".format(p=record_prefix, c=x)) # Data types, a problem for k, v in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k if k in result: raise ValueError('Conflicting metadata name {name}, ' 'need distinguishing prefix '.format(name=k)) # forcing dtype to object to avoid the metadata being casted to string result[k] = np.array(v, dtype=object).repeat(lengths) return result
[ "Normalize", "semi", "-", "structured", "JSON", "data", "into", "a", "flat", "table", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/normalize.py#L99-L286
[ "def", "json_normalize", "(", "data", ",", "record_path", "=", "None", ",", "meta", "=", "None", ",", "meta_prefix", "=", "None", ",", "record_prefix", "=", "None", ",", "errors", "=", "'raise'", ",", "sep", "=", "'.'", ")", ":", "def", "_pull_field", "(", "js", ",", "spec", ")", ":", "result", "=", "js", "if", "isinstance", "(", "spec", ",", "list", ")", ":", "for", "field", "in", "spec", ":", "result", "=", "result", "[", "field", "]", "else", ":", "result", "=", "result", "[", "spec", "]", "return", "result", "if", "isinstance", "(", "data", ",", "list", ")", "and", "not", "data", ":", "return", "DataFrame", "(", ")", "# A bit of a hackjob", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "[", "data", "]", "if", "record_path", "is", "None", ":", "if", "any", "(", "[", "isinstance", "(", "x", ",", "dict", ")", "for", "x", "in", "y", ".", "values", "(", ")", "]", "for", "y", "in", "data", ")", ":", "# naive normalization, this is idempotent for flat records", "# and potentially will inflate the data considerably for", "# deeply nested structures:", "# {VeryLong: { b: 1,c:2}} -> {VeryLong.b:1 ,VeryLong.c:@}", "#", "# TODO: handle record value which are lists, at least error", "# reasonably", "data", "=", "nested_to_record", "(", "data", ",", "sep", "=", "sep", ")", "return", "DataFrame", "(", "data", ")", "elif", "not", "isinstance", "(", "record_path", ",", "list", ")", ":", "record_path", "=", "[", "record_path", "]", "if", "meta", "is", "None", ":", "meta", "=", "[", "]", "elif", "not", "isinstance", "(", "meta", ",", "list", ")", ":", "meta", "=", "[", "meta", "]", "meta", "=", "[", "m", "if", "isinstance", "(", "m", ",", "list", ")", "else", "[", "m", "]", "for", "m", "in", "meta", "]", "# Disastrously inefficient for now", "records", "=", "[", "]", "lengths", "=", "[", "]", "meta_vals", "=", "defaultdict", "(", "list", ")", "if", "not", "isinstance", "(", "sep", ",", "str", ")", ":", "sep", "=", "str", "(", "sep", ")", "meta_keys", "=", "[", "sep", ".", "join", "(", "val", ")", "for", "val", "in", "meta", "]", "def", "_recursive_extract", "(", "data", ",", "path", ",", "seen_meta", ",", "level", "=", "0", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "[", "data", "]", "if", "len", "(", "path", ")", ">", "1", ":", "for", "obj", "in", "data", ":", "for", "val", ",", "key", "in", "zip", "(", "meta", ",", "meta_keys", ")", ":", "if", "level", "+", "1", "==", "len", "(", "val", ")", ":", "seen_meta", "[", "key", "]", "=", "_pull_field", "(", "obj", ",", "val", "[", "-", "1", "]", ")", "_recursive_extract", "(", "obj", "[", "path", "[", "0", "]", "]", ",", "path", "[", "1", ":", "]", ",", "seen_meta", ",", "level", "=", "level", "+", "1", ")", "else", ":", "for", "obj", "in", "data", ":", "recs", "=", "_pull_field", "(", "obj", ",", "path", "[", "0", "]", ")", "# For repeating the metadata later", "lengths", ".", "append", "(", "len", "(", "recs", ")", ")", "for", "val", ",", "key", "in", "zip", "(", "meta", ",", "meta_keys", ")", ":", "if", "level", "+", "1", ">", "len", "(", "val", ")", ":", "meta_val", "=", "seen_meta", "[", "key", "]", "else", ":", "try", ":", "meta_val", "=", "_pull_field", "(", "obj", ",", "val", "[", "level", ":", "]", ")", "except", "KeyError", "as", "e", ":", "if", "errors", "==", "'ignore'", ":", "meta_val", "=", "np", ".", "nan", "else", ":", "raise", "KeyError", "(", "\"Try running with \"", "\"errors='ignore' as key \"", "\"{err} is not always present\"", ".", "format", "(", "err", "=", "e", ")", ")", 
"meta_vals", "[", "key", "]", ".", "append", "(", "meta_val", ")", "records", ".", "extend", "(", "recs", ")", "_recursive_extract", "(", "data", ",", "record_path", ",", "{", "}", ",", "level", "=", "0", ")", "result", "=", "DataFrame", "(", "records", ")", "if", "record_prefix", "is", "not", "None", ":", "result", "=", "result", ".", "rename", "(", "columns", "=", "lambda", "x", ":", "\"{p}{c}\"", ".", "format", "(", "p", "=", "record_prefix", ",", "c", "=", "x", ")", ")", "# Data types, a problem", "for", "k", ",", "v", "in", "meta_vals", ".", "items", "(", ")", ":", "if", "meta_prefix", "is", "not", "None", ":", "k", "=", "meta_prefix", "+", "k", "if", "k", "in", "result", ":", "raise", "ValueError", "(", "'Conflicting metadata name {name}, '", "'need distinguishing prefix '", ".", "format", "(", "name", "=", "k", ")", ")", "# forcing dtype to object to avoid the metadata being casted to string", "result", "[", "k", "]", "=", "np", ".", "array", "(", "v", ",", "dtype", "=", "object", ")", ".", "repeat", "(", "lengths", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
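One behavior the docstring describes but does not demonstrate is errors='ignore'; a small sketch with made-up data, traced against the code above:

from pandas.io.json import json_normalize

data = [{'id': 1, 'rows': [{'x': 1}]},
        {'rows': [{'x': 2}]}]   # second object is missing the 'id' meta key
# errors='ignore' fills the missing meta value with NaN instead of raising.
json_normalize(data, record_path='rows', meta=['id'], errors='ignore')
#    x   id
# 0  1    1
# 1  2  NaN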
train
lreshape
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot Parameters ---------- data : DataFrame groups : dict {new_name : list_of_columns} dropna : boolean, default True Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 Returns ------- reshaped : DataFrame
pandas/core/reshape/melt.py
def lreshape(data, groups, dropna=True, label=None): """ Reshape long-format data to wide. Generalized inverse of DataFrame.pivot Parameters ---------- data : DataFrame groups : dict {new_name : list_of_columns} dropna : boolean, default True Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 Returns ------- reshaped : DataFrame """ if isinstance(groups, dict): keys = list(groups.keys()) values = list(groups.values()) else: keys, values = zip(*groups) all_cols = list(set.union(*[set(x) for x in values])) id_cols = list(data.columns.difference(all_cols)) K = len(values[0]) for seq in values: if len(seq) != K: raise ValueError('All column lists must be same length') mdata = {} pivot_cols = [] for target, names in zip(keys, values): to_concat = [data[col].values for col in names] import pandas.core.dtypes.concat as _concat mdata[target] = _concat._concat_compat(to_concat) pivot_cols.append(target) for col in id_cols: mdata[col] = np.tile(data[col].values, K) if dropna: mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): mdata = {k: v[mask] for k, v in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols)
def lreshape(data, groups, dropna=True, label=None): """ Reshape long-format data to wide. Generalized inverse of DataFrame.pivot Parameters ---------- data : DataFrame groups : dict {new_name : list_of_columns} dropna : boolean, default True Examples -------- >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], ... 'team': ['Red Sox', 'Yankees'], ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team year hr 0 Red Sox 2007 514 1 Yankees 2007 573 2 Red Sox 2008 545 3 Yankees 2008 526 Returns ------- reshaped : DataFrame """ if isinstance(groups, dict): keys = list(groups.keys()) values = list(groups.values()) else: keys, values = zip(*groups) all_cols = list(set.union(*[set(x) for x in values])) id_cols = list(data.columns.difference(all_cols)) K = len(values[0]) for seq in values: if len(seq) != K: raise ValueError('All column lists must be same length') mdata = {} pivot_cols = [] for target, names in zip(keys, values): to_concat = [data[col].values for col in names] import pandas.core.dtypes.concat as _concat mdata[target] = _concat._concat_compat(to_concat) pivot_cols.append(target) for col in id_cols: mdata[col] = np.tile(data[col].values, K) if dropna: mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): mdata = {k: v[mask] for k, v in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols)
[ "Reshape", "long", "-", "format", "data", "to", "wide", ".", "Generalized", "inverse", "of", "DataFrame", ".", "pivot" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/melt.py#L108-L175
[ "def", "lreshape", "(", "data", ",", "groups", ",", "dropna", "=", "True", ",", "label", "=", "None", ")", ":", "if", "isinstance", "(", "groups", ",", "dict", ")", ":", "keys", "=", "list", "(", "groups", ".", "keys", "(", ")", ")", "values", "=", "list", "(", "groups", ".", "values", "(", ")", ")", "else", ":", "keys", ",", "values", "=", "zip", "(", "*", "groups", ")", "all_cols", "=", "list", "(", "set", ".", "union", "(", "*", "[", "set", "(", "x", ")", "for", "x", "in", "values", "]", ")", ")", "id_cols", "=", "list", "(", "data", ".", "columns", ".", "difference", "(", "all_cols", ")", ")", "K", "=", "len", "(", "values", "[", "0", "]", ")", "for", "seq", "in", "values", ":", "if", "len", "(", "seq", ")", "!=", "K", ":", "raise", "ValueError", "(", "'All column lists must be same length'", ")", "mdata", "=", "{", "}", "pivot_cols", "=", "[", "]", "for", "target", ",", "names", "in", "zip", "(", "keys", ",", "values", ")", ":", "to_concat", "=", "[", "data", "[", "col", "]", ".", "values", "for", "col", "in", "names", "]", "import", "pandas", ".", "core", ".", "dtypes", ".", "concat", "as", "_concat", "mdata", "[", "target", "]", "=", "_concat", ".", "_concat_compat", "(", "to_concat", ")", "pivot_cols", ".", "append", "(", "target", ")", "for", "col", "in", "id_cols", ":", "mdata", "[", "col", "]", "=", "np", ".", "tile", "(", "data", "[", "col", "]", ".", "values", ",", "K", ")", "if", "dropna", ":", "mask", "=", "np", ".", "ones", "(", "len", "(", "mdata", "[", "pivot_cols", "[", "0", "]", "]", ")", ",", "dtype", "=", "bool", ")", "for", "c", "in", "pivot_cols", ":", "mask", "&=", "notna", "(", "mdata", "[", "c", "]", ")", "if", "not", "mask", ".", "all", "(", ")", ":", "mdata", "=", "{", "k", ":", "v", "[", "mask", "]", "for", "k", ",", "v", "in", "mdata", ".", "items", "(", ")", "}", "return", "data", ".", "_constructor", "(", "mdata", ",", "columns", "=", "id_cols", "+", "pivot_cols", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
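A companion sketch for the dropna flag, which the docstring example above leaves at its default of True:

import numpy as np
import pandas as pd

data = pd.DataFrame({'hr1': [514, np.nan], 'hr2': [545, 526],
                     'team': ['Red Sox', 'Yankees']})
# dropna=False keeps rows whose pivoted value is missing.
pd.lreshape(data, {'hr': ['hr1', 'hr2']}, dropna=False)
#       team     hr
# 0  Red Sox  514.0
# 1  Yankees    NaN
# 2  Red Sox  545.0
# 3  Yankees  526.0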
train
wide_to_long
r""" Wide panel to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s) j : str The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you can strip the hyphen by specifying `sep='-'` .. versionadded:: 0.20.0 suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'` .. versionadded:: 0.20.0 .. versionchanged:: 0.23.0 When all suffixes are numeric, they are cast to int64/float64. Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j). Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typical case. Examples -------- >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... }) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multiple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht1 ht2 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.unstack() >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3), ... 'A(quarterly)-2011': np.random.rand(3), ... 'B(quarterly)-2010': np.random.rand(3), ... 'B(quarterly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ... 0 0.548814 0.544883 0.437587 ... 1 0.715189 0.423655 0.891773 ... 2 0.602763 0.645894 0.963663 ... X id 0 0 0 1 1 1 2 1 2 >>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(quarterly) B(quarterly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != [] ]) ... ) >>> list(stubnames) ['A(quarterly)', 'B(quarterly)'] All of the above examples have integers as suffixes. It is possible to have non-integers as suffixes. >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht_one ht_two 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', sep='_', suffix='\w') >>> l ... # doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 one 2.8 two 3.4 2 one 2.9 two 3.8 3 one 2.2 two 2.9 2 1 one 2.0 two 3.2 2 one 1.8 two 2.8 3 one 1.9 two 2.4 3 1 one 2.2 two 3.3 2 one 2.3 two 3.4 3 one 2.1 two 2.9
pandas/core/reshape/melt.py
def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): r""" Wide panel to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s) j : str The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you can strip the hyphen by specifying `sep='-'` .. versionadded:: 0.20.0 suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'` .. versionadded:: 0.20.0 .. versionchanged:: 0.23.0 When all suffixes are numeric, they are cast to int64/float64. Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j). Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typical case. Examples -------- >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... }) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multiple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht1 ht2 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.unstack() >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3), ... 'A(quarterly)-2011': np.random.rand(3), ... 'B(quarterly)-2010': np.random.rand(3), ... 'B(quarterly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ... 0 0.548814 0.544883 0.437587 ... 1 0.715189 0.423655 0.891773 ... 2 0.602763 0.645894 0.963663 ... X id 0 0 0 1 1 1 2 1 2 >>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(quarterly) B(quarterly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != [] ]) ... ) >>> list(stubnames) ['A(quarterly)', 'B(quarterly)'] All of the above examples have integers as suffixes. It is possible to have non-integers as suffixes. >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht_one ht_two 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', sep='_', suffix='\w') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 one 2.8 two 3.4 2 one 2.9 two 3.8 3 one 2.2 two 2.9 2 1 one 2.0 two 3.2 2 one 1.8 two 2.8 3 one 1.9 two 2.4 3 1 one 2.2 two 3.3 2 one 2.3 two 3.4 3 one 2.1 two 2.9 """ def get_var_names(df, stub, sep, suffix): regex = r'^{stub}{sep}{suffix}$'.format( stub=re.escape(stub), sep=re.escape(sep), suffix=suffix) pattern = re.compile(regex) return [col for col in df.columns if pattern.match(col)] def melt_stub(df, stub, i, j, value_vars, sep): newdf = melt(df, id_vars=i, value_vars=value_vars, value_name=stub.rstrip(sep), var_name=j) newdf[j] = Categorical(newdf[j]) newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "") # GH17627 Cast numerics suffixes to int/float newdf[j] = to_numeric(newdf[j], errors='ignore') return newdf.set_index(i + [j]) if not is_list_like(stubnames): stubnames = [stubnames] else: stubnames = list(stubnames) if any(col in stubnames for col in df.columns): raise ValueError("stubname can't be identical to a column name") if not is_list_like(i): i = [i] else: i = list(i) if df[i].duplicated().any(): raise ValueError("the id variables need to uniquely identify each row") value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames] value_vars_flattened = [e for sublist in value_vars for e in sublist] id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) melted = [melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)] melted = melted[0].join(melted[1:], how='outer') if len(i) == 1: new = df[id_vars].set_index(i).join(melted) return new new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) return new
def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): r""" Wide panel to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s) j : str The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you can strip the hyphen by specifying `sep='-'` .. versionadded:: 0.20.0 suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'` .. versionadded:: 0.20.0 .. versionchanged:: 0.23.0 When all suffixes are numeric, they are cast to int64/float64. Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j). Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typical case. Examples -------- >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... }) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multiple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht1 ht2 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.unstack() >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3), ... 'A(quarterly)-2011': np.random.rand(3), ... 'B(quarterly)-2010': np.random.rand(3), ... 'B(quarterly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ... 0 0.548814 0.544883 0.437587 ... 1 0.715189 0.423655 0.891773 ... 2 0.602763 0.645894 0.963663 ... X id 0 0 0 1 1 1 2 1 2 >>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(quarterly) B(quarterly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != [] ]) ... ) >>> list(stubnames) ['A(quarterly)', 'B(quarterly)'] All of the above examples have integers as suffixes. It is possible to have non-integers as suffixes. >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht_one ht_two 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', sep='_', suffix='\w') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 one 2.8 two 3.4 2 one 2.9 two 3.8 3 one 2.2 two 2.9 2 1 one 2.0 two 3.2 2 one 1.8 two 2.8 3 one 1.9 two 2.4 3 1 one 2.2 two 3.3 2 one 2.3 two 3.4 3 one 2.1 two 2.9 """ def get_var_names(df, stub, sep, suffix): regex = r'^{stub}{sep}{suffix}$'.format( stub=re.escape(stub), sep=re.escape(sep), suffix=suffix) pattern = re.compile(regex) return [col for col in df.columns if pattern.match(col)] def melt_stub(df, stub, i, j, value_vars, sep): newdf = melt(df, id_vars=i, value_vars=value_vars, value_name=stub.rstrip(sep), var_name=j) newdf[j] = Categorical(newdf[j]) newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "") # GH17627 Cast numerics suffixes to int/float newdf[j] = to_numeric(newdf[j], errors='ignore') return newdf.set_index(i + [j]) if not is_list_like(stubnames): stubnames = [stubnames] else: stubnames = list(stubnames) if any(col in stubnames for col in df.columns): raise ValueError("stubname can't be identical to a column name") if not is_list_like(i): i = [i] else: i = list(i) if df[i].duplicated().any(): raise ValueError("the id variables need to uniquely identify each row") value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames] value_vars_flattened = [e for sublist in value_vars for e in sublist] id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) melted = [melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)] melted = melted[0].join(melted[1:], how='outer') if len(i) == 1: new = df[id_vars].set_index(i).join(melted) return new new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) return new
[ "r", "Wide", "panel", "to", "long", "format", ".", "Less", "flexible", "but", "more", "user", "-", "friendly", "than", "melt", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/melt.py#L178-L458
[ "def", "wide_to_long", "(", "df", ",", "stubnames", ",", "i", ",", "j", ",", "sep", "=", "\"\"", ",", "suffix", "=", "r'\\d+'", ")", ":", "def", "get_var_names", "(", "df", ",", "stub", ",", "sep", ",", "suffix", ")", ":", "regex", "=", "r'^{stub}{sep}{suffix}$'", ".", "format", "(", "stub", "=", "re", ".", "escape", "(", "stub", ")", ",", "sep", "=", "re", ".", "escape", "(", "sep", ")", ",", "suffix", "=", "suffix", ")", "pattern", "=", "re", ".", "compile", "(", "regex", ")", "return", "[", "col", "for", "col", "in", "df", ".", "columns", "if", "pattern", ".", "match", "(", "col", ")", "]", "def", "melt_stub", "(", "df", ",", "stub", ",", "i", ",", "j", ",", "value_vars", ",", "sep", ")", ":", "newdf", "=", "melt", "(", "df", ",", "id_vars", "=", "i", ",", "value_vars", "=", "value_vars", ",", "value_name", "=", "stub", ".", "rstrip", "(", "sep", ")", ",", "var_name", "=", "j", ")", "newdf", "[", "j", "]", "=", "Categorical", "(", "newdf", "[", "j", "]", ")", "newdf", "[", "j", "]", "=", "newdf", "[", "j", "]", ".", "str", ".", "replace", "(", "re", ".", "escape", "(", "stub", "+", "sep", ")", ",", "\"\"", ")", "# GH17627 Cast numerics suffixes to int/float", "newdf", "[", "j", "]", "=", "to_numeric", "(", "newdf", "[", "j", "]", ",", "errors", "=", "'ignore'", ")", "return", "newdf", ".", "set_index", "(", "i", "+", "[", "j", "]", ")", "if", "not", "is_list_like", "(", "stubnames", ")", ":", "stubnames", "=", "[", "stubnames", "]", "else", ":", "stubnames", "=", "list", "(", "stubnames", ")", "if", "any", "(", "col", "in", "stubnames", "for", "col", "in", "df", ".", "columns", ")", ":", "raise", "ValueError", "(", "\"stubname can't be identical to a column name\"", ")", "if", "not", "is_list_like", "(", "i", ")", ":", "i", "=", "[", "i", "]", "else", ":", "i", "=", "list", "(", "i", ")", "if", "df", "[", "i", "]", ".", "duplicated", "(", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"the id variables need to uniquely identify each row\"", ")", "value_vars", "=", "[", "get_var_names", "(", "df", ",", "stub", ",", "sep", ",", "suffix", ")", "for", "stub", "in", "stubnames", "]", "value_vars_flattened", "=", "[", "e", "for", "sublist", "in", "value_vars", "for", "e", "in", "sublist", "]", "id_vars", "=", "list", "(", "set", "(", "df", ".", "columns", ".", "tolist", "(", ")", ")", ".", "difference", "(", "value_vars_flattened", ")", ")", "melted", "=", "[", "melt_stub", "(", "df", ",", "s", ",", "i", ",", "j", ",", "v", ",", "sep", ")", "for", "s", ",", "v", "in", "zip", "(", "stubnames", ",", "value_vars", ")", "]", "melted", "=", "melted", "[", "0", "]", ".", "join", "(", "melted", "[", "1", ":", "]", ",", "how", "=", "'outer'", ")", "if", "len", "(", "i", ")", "==", "1", ":", "new", "=", "df", "[", "id_vars", "]", ".", "set_index", "(", "i", ")", ".", "join", "(", "melted", ")", "return", "new", "new", "=", "df", "[", "id_vars", "]", ".", "merge", "(", "melted", ".", "reset_index", "(", ")", ",", "on", "=", "i", ")", ".", "set_index", "(", "i", "+", "[", "j", "]", ")", "return", "new" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
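A minimal usage sketch for the wide_to_long record above, smaller than the docstring examples; the frame and column names here are illustrative, not taken from the source.

import pandas as pd

# Wide frame: columns A1970 and A1980 share the stub 'A'; 'id' identifies rows.
df = pd.DataFrame({'A1970': ['a', 'b'],
                   'A1980': ['d', 'e'],
                   'id': [0, 1]})

# The numeric suffix is stripped from the stub columns and becomes the
# 'year' index level; the result is indexed on ('id', 'year').
long_df = pd.wide_to_long(df, stubnames='A', i='id', j='year')
assert long_df.loc[(0, 1970), 'A'] == 'a'
assert long_df.loc[(1, 1980), 'A'] == 'e'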
train
_GroupBy._get_indices
Safe get multiple indices, translate keys for datelike to underlying repr.
pandas/core/groupby/groupby.py
def _get_indices(self, names): """ Safe get multiple indices, translate keys for datelike to underlying repr. """ def get_converter(s): # possibly convert to the actual key types # in the indices, could be a Timestamp or a np.datetime64 if isinstance(s, (Timestamp, datetime.datetime)): return lambda key: Timestamp(key) elif isinstance(s, np.datetime64): return lambda key: Timestamp(key).asm8 else: return lambda key: key if len(names) == 0: return [] if len(self.indices) > 0: index_sample = next(iter(self.indices)) else: index_sample = None # Dummy sample name_sample = names[0] if isinstance(index_sample, tuple): if not isinstance(name_sample, tuple): msg = ("must supply a tuple to get_group with multiple" " grouping keys") raise ValueError(msg) if not len(name_sample) == len(index_sample): try: # If the original grouper was a tuple return [self.indices[name] for name in names] except KeyError: # turns out it wasn't a tuple msg = ("must supply a same-length tuple to get_group" " with multiple grouping keys") raise ValueError(msg) converters = [get_converter(s) for s in index_sample] names = (tuple(f(n) for f, n in zip(converters, name)) for name in names) else: converter = get_converter(index_sample) names = (converter(name) for name in names) return [self.indices.get(name, []) for name in names]
def _get_indices(self, names): """ Safe get multiple indices, translate keys for datelike to underlying repr. """ def get_converter(s): # possibly convert to the actual key types # in the indices, could be a Timestamp or a np.datetime64 if isinstance(s, (Timestamp, datetime.datetime)): return lambda key: Timestamp(key) elif isinstance(s, np.datetime64): return lambda key: Timestamp(key).asm8 else: return lambda key: key if len(names) == 0: return [] if len(self.indices) > 0: index_sample = next(iter(self.indices)) else: index_sample = None # Dummy sample name_sample = names[0] if isinstance(index_sample, tuple): if not isinstance(name_sample, tuple): msg = ("must supply a tuple to get_group with multiple" " grouping keys") raise ValueError(msg) if not len(name_sample) == len(index_sample): try: # If the original grouper was a tuple return [self.indices[name] for name in names] except KeyError: # turns out it wasn't a tuple msg = ("must supply a same-length tuple to get_group" " with multiple grouping keys") raise ValueError(msg) converters = [get_converter(s) for s in index_sample] names = (tuple(f(n) for f, n in zip(converters, name)) for name in names) else: converter = get_converter(index_sample) names = (converter(name) for name in names) return [self.indices.get(name, []) for name in names]
[ "Safe", "get", "multiple", "indices", "translate", "keys", "for", "datelike", "to", "underlying", "repr", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L409-L457
[ "def", "_get_indices", "(", "self", ",", "names", ")", ":", "def", "get_converter", "(", "s", ")", ":", "# possibly convert to the actual key types", "# in the indices, could be a Timestamp or a np.datetime64", "if", "isinstance", "(", "s", ",", "(", "Timestamp", ",", "datetime", ".", "datetime", ")", ")", ":", "return", "lambda", "key", ":", "Timestamp", "(", "key", ")", "elif", "isinstance", "(", "s", ",", "np", ".", "datetime64", ")", ":", "return", "lambda", "key", ":", "Timestamp", "(", "key", ")", ".", "asm8", "else", ":", "return", "lambda", "key", ":", "key", "if", "len", "(", "names", ")", "==", "0", ":", "return", "[", "]", "if", "len", "(", "self", ".", "indices", ")", ">", "0", ":", "index_sample", "=", "next", "(", "iter", "(", "self", ".", "indices", ")", ")", "else", ":", "index_sample", "=", "None", "# Dummy sample", "name_sample", "=", "names", "[", "0", "]", "if", "isinstance", "(", "index_sample", ",", "tuple", ")", ":", "if", "not", "isinstance", "(", "name_sample", ",", "tuple", ")", ":", "msg", "=", "(", "\"must supply a tuple to get_group with multiple\"", "\" grouping keys\"", ")", "raise", "ValueError", "(", "msg", ")", "if", "not", "len", "(", "name_sample", ")", "==", "len", "(", "index_sample", ")", ":", "try", ":", "# If the original grouper was a tuple", "return", "[", "self", ".", "indices", "[", "name", "]", "for", "name", "in", "names", "]", "except", "KeyError", ":", "# turns out it wasn't a tuple", "msg", "=", "(", "\"must supply a same-length tuple to get_group\"", "\" with multiple grouping keys\"", ")", "raise", "ValueError", "(", "msg", ")", "converters", "=", "[", "get_converter", "(", "s", ")", "for", "s", "in", "index_sample", "]", "names", "=", "(", "tuple", "(", "f", "(", "n", ")", "for", "f", ",", "n", "in", "zip", "(", "converters", ",", "name", ")", ")", "for", "name", "in", "names", ")", "else", ":", "converter", "=", "get_converter", "(", "index_sample", ")", "names", "=", "(", "converter", "(", "name", ")", "for", "name", "in", "names", ")", "return", "[", "self", ".", "indices", ".", "get", "(", "name", ",", "[", "]", ")", "for", "name", "in", "names", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
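The datelike-key conversion in _get_indices above is what lets get_group accept anything Timestamp-coercible for datetime group keys; a small sketch under that assumption, with illustrative data.

import pandas as pd

s = pd.Series([1, 2, 3],
              index=pd.to_datetime(['2000-01-01', '2000-01-01', '2000-01-02']))
g = s.groupby(level=0)

# The string key is converted to a Timestamp before the indices lookup,
# so both spellings address the same group.
assert g.get_group('2000-01-01').tolist() == [1, 2]
assert g.get_group(pd.Timestamp('2000-01-01')).tolist() == [1, 2]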
train
_GroupBy._set_group_selection
Create group based selection. Used when selection is not passed directly but instead via a grouper. NOTE: this should be paired with a call to _reset_group_selection
pandas/core/groupby/groupby.py
def _set_group_selection(self): """ Create group based selection. Used when selection is not passed directly but instead via a grouper. NOTE: this should be paired with a call to _reset_group_selection """ grp = self.grouper if not (self.as_index and getattr(grp, 'groupings', None) is not None and self.obj.ndim > 1 and self._group_selection is None): return ax = self.obj._info_axis groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis] if len(groupers): # GH12839 clear selected obj cache when group selection changes self._group_selection = ax.difference(Index(groupers), sort=False).tolist() self._reset_cache('_selected_obj')
def _set_group_selection(self): """ Create group based selection. Used when selection is not passed directly but instead via a grouper. NOTE: this should be paired with a call to _reset_group_selection """ grp = self.grouper if not (self.as_index and getattr(grp, 'groupings', None) is not None and self.obj.ndim > 1 and self._group_selection is None): return ax = self.obj._info_axis groupers = [g.name for g in grp.groupings if g.level is None and g.in_axis] if len(groupers): # GH12839 clear selected obj cache when group selection changes self._group_selection = ax.difference(Index(groupers), sort=False).tolist() self._reset_cache('_selected_obj')
[ "Create", "group", "based", "selection", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L487-L510
[ "def", "_set_group_selection", "(", "self", ")", ":", "grp", "=", "self", ".", "grouper", "if", "not", "(", "self", ".", "as_index", "and", "getattr", "(", "grp", ",", "'groupings'", ",", "None", ")", "is", "not", "None", "and", "self", ".", "obj", ".", "ndim", ">", "1", "and", "self", ".", "_group_selection", "is", "None", ")", ":", "return", "ax", "=", "self", ".", "obj", ".", "_info_axis", "groupers", "=", "[", "g", ".", "name", "for", "g", "in", "grp", ".", "groupings", "if", "g", ".", "level", "is", "None", "and", "g", ".", "in_axis", "]", "if", "len", "(", "groupers", ")", ":", "# GH12839 clear selected obj cache when group selection changes", "self", ".", "_group_selection", "=", "ax", ".", "difference", "(", "Index", "(", "groupers", ")", ",", "sort", "=", "False", ")", ".", "tolist", "(", ")", "self", ".", "_reset_cache", "(", "'_selected_obj'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_GroupBy.get_group
Construct NDFrame from group with provided name. Parameters ---------- name : object the name of the group to get as a DataFrame obj : NDFrame, default None the NDFrame to take the DataFrame out of. If it is None, the object groupby was called on will be used Returns ------- group : same type as obj
pandas/core/groupby/groupby.py
def get_group(self, name, obj=None): """ Construct NDFrame from group with provided name. Parameters ---------- name : object the name of the group to get as a DataFrame obj : NDFrame, default None the NDFrame to take the DataFrame out of. If it is None, the object groupby was called on will be used Returns ------- group : same type as obj """ if obj is None: obj = self._selected_obj inds = self._get_index(name) if not len(inds): raise KeyError(name) return obj._take(inds, axis=self.axis)
def get_group(self, name, obj=None): """ Construct NDFrame from group with provided name. Parameters ---------- name : object the name of the group to get as a DataFrame obj : NDFrame, default None the NDFrame to take the DataFrame out of. If it is None, the object groupby was called on will be used Returns ------- group : same type as obj """ if obj is None: obj = self._selected_obj inds = self._get_index(name) if not len(inds): raise KeyError(name) return obj._take(inds, axis=self.axis)
[ "Construct", "NDFrame", "from", "group", "with", "provided", "name", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L630-L654
[ "def", "get_group", "(", "self", ",", "name", ",", "obj", "=", "None", ")", ":", "if", "obj", "is", "None", ":", "obj", "=", "self", ".", "_selected_obj", "inds", "=", "self", ".", "_get_index", "(", "name", ")", "if", "not", "len", "(", "inds", ")", ":", "raise", "KeyError", "(", "name", ")", "return", "obj", ".", "_take", "(", "inds", ",", "axis", "=", "self", ".", "axis", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
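A short usage sketch for get_group above (the frame is illustrative):

import pandas as pd

df = pd.DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
g = df.groupby('A')

# Returns the sub-frame whose rows belong to group 'x'; an unknown name
# raises KeyError because its index list comes back empty.
assert g.get_group('x')['B'].tolist() == [1, 2]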
train
_GroupBy._cumcount_array
Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general
pandas/core/groupby/groupby.py
def _cumcount_array(self, ascending=True): """ Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general """ ids, _, ngroups = self.grouper.group_info sorter = get_group_index_sorter(ids, ngroups) ids, count = ids[sorter], len(ids) if count == 0: return np.empty(0, dtype=np.int64) run = np.r_[True, ids[:-1] != ids[1:]] rep = np.diff(np.r_[np.nonzero(run)[0], count]) out = (~run).cumsum() if ascending: out -= np.repeat(out[run], rep) else: out = np.repeat(out[np.r_[run[1:], True]], rep) - out rev = np.empty(count, dtype=np.intp) rev[sorter] = np.arange(count, dtype=np.intp) return out[rev].astype(np.int64, copy=False)
def _cumcount_array(self, ascending=True): """ Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. Notes ----- this is currently implementing sort=False (though the default is sort=True) for groupby in general """ ids, _, ngroups = self.grouper.group_info sorter = get_group_index_sorter(ids, ngroups) ids, count = ids[sorter], len(ids) if count == 0: return np.empty(0, dtype=np.int64) run = np.r_[True, ids[:-1] != ids[1:]] rep = np.diff(np.r_[np.nonzero(run)[0], count]) out = (~run).cumsum() if ascending: out -= np.repeat(out[run], rep) else: out = np.repeat(out[np.r_[run[1:], True]], rep) - out rev = np.empty(count, dtype=np.intp) rev[sorter] = np.arange(count, dtype=np.intp) return out[rev].astype(np.int64, copy=False)
[ "Parameters", "----------", "ascending", ":", "bool", "default", "True", "If", "False", "number", "in", "reverse", "from", "length", "of", "group", "-", "1", "to", "0", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L724-L754
[ "def", "_cumcount_array", "(", "self", ",", "ascending", "=", "True", ")", ":", "ids", ",", "_", ",", "ngroups", "=", "self", ".", "grouper", ".", "group_info", "sorter", "=", "get_group_index_sorter", "(", "ids", ",", "ngroups", ")", "ids", ",", "count", "=", "ids", "[", "sorter", "]", ",", "len", "(", "ids", ")", "if", "count", "==", "0", ":", "return", "np", ".", "empty", "(", "0", ",", "dtype", "=", "np", ".", "int64", ")", "run", "=", "np", ".", "r_", "[", "True", ",", "ids", "[", ":", "-", "1", "]", "!=", "ids", "[", "1", ":", "]", "]", "rep", "=", "np", ".", "diff", "(", "np", ".", "r_", "[", "np", ".", "nonzero", "(", "run", ")", "[", "0", "]", ",", "count", "]", ")", "out", "=", "(", "~", "run", ")", ".", "cumsum", "(", ")", "if", "ascending", ":", "out", "-=", "np", ".", "repeat", "(", "out", "[", "run", "]", ",", "rep", ")", "else", ":", "out", "=", "np", ".", "repeat", "(", "out", "[", "np", ".", "r_", "[", "run", "[", "1", ":", "]", ",", "True", "]", "]", ",", "rep", ")", "-", "out", "rev", "=", "np", ".", "empty", "(", "count", ",", "dtype", "=", "np", ".", "intp", ")", "rev", "[", "sorter", "]", "=", "np", ".", "arange", "(", "count", ",", "dtype", "=", "np", ".", "intp", ")", "return", "out", "[", "rev", "]", ".", "astype", "(", "np", ".", "int64", ",", "copy", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
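_cumcount_array above backs the public GroupBy.cumcount; a sketch of the ascending flag it implements (illustrative data):

import pandas as pd

df = pd.DataFrame({'A': ['a', 'a', 'b', 'a']})
g = df.groupby('A')

# Ascending numbers each row within its group from 0 upward;
# descending counts from len(group) - 1 down to 0.
assert g.cumcount().tolist() == [0, 1, 0, 2]
assert g.cumcount(ascending=False).tolist() == [2, 1, 0, 0]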
train
_GroupBy._try_cast
Try to cast the result to our obj original type, we may have roundtripped through object in the mean-time. If numeric_only is True, then only try to cast numerics and not datetimelikes.
pandas/core/groupby/groupby.py
def _try_cast(self, result, obj, numeric_only=False): """ Try to cast the result to our obj original type, we may have roundtripped through object in the mean-time. If numeric_only is True, then only try to cast numerics and not datetimelikes. """ if obj.ndim > 1: dtype = obj._values.dtype else: dtype = obj.dtype if not is_scalar(result): if is_datetime64tz_dtype(dtype): # GH 23683 # Prior results _may_ have been generated in UTC. # Ensure we localize to UTC first before converting # to the target timezone try: result = obj._values._from_sequence( result, dtype='datetime64[ns, UTC]' ) result = result.astype(dtype) except TypeError: # _try_cast was called at a point where the result # was already tz-aware pass elif is_extension_array_dtype(dtype): # The function can return something of any type, so check # if the type is compatible with the calling EA. try: result = obj._values._from_sequence(result, dtype=dtype) except Exception: # https://github.com/pandas-dev/pandas/issues/22850 # pandas has no control over what 3rd-party ExtensionArrays # do in _values_from_sequence. We still want ops to work # though, so we catch any regular Exception. pass elif numeric_only and is_numeric_dtype(dtype) or not numeric_only: result = maybe_downcast_to_dtype(result, dtype) return result
def _try_cast(self, result, obj, numeric_only=False): """ Try to cast the result to our obj original type, we may have roundtripped through object in the mean-time. If numeric_only is True, then only try to cast numerics and not datetimelikes. """ if obj.ndim > 1: dtype = obj._values.dtype else: dtype = obj.dtype if not is_scalar(result): if is_datetime64tz_dtype(dtype): # GH 23683 # Prior results _may_ have been generated in UTC. # Ensure we localize to UTC first before converting # to the target timezone try: result = obj._values._from_sequence( result, dtype='datetime64[ns, UTC]' ) result = result.astype(dtype) except TypeError: # _try_cast was called at a point where the result # was already tz-aware pass elif is_extension_array_dtype(dtype): # The function can return something of any type, so check # if the type is compatible with the calling EA. try: result = obj._values._from_sequence(result, dtype=dtype) except Exception: # https://github.com/pandas-dev/pandas/issues/22850 # pandas has no control over what 3rd-party ExtensionArrays # do in _values_from_sequence. We still want ops to work # though, so we catch any regular Exception. pass elif numeric_only and is_numeric_dtype(dtype) or not numeric_only: result = maybe_downcast_to_dtype(result, dtype) return result
[ "Try", "to", "cast", "the", "result", "to", "our", "obj", "original", "type", "we", "may", "have", "roundtripped", "through", "object", "in", "the", "mean", "-", "time", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L756-L799
[ "def", "_try_cast", "(", "self", ",", "result", ",", "obj", ",", "numeric_only", "=", "False", ")", ":", "if", "obj", ".", "ndim", ">", "1", ":", "dtype", "=", "obj", ".", "_values", ".", "dtype", "else", ":", "dtype", "=", "obj", ".", "dtype", "if", "not", "is_scalar", "(", "result", ")", ":", "if", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "# GH 23683", "# Prior results _may_ have been generated in UTC.", "# Ensure we localize to UTC first before converting", "# to the target timezone", "try", ":", "result", "=", "obj", ".", "_values", ".", "_from_sequence", "(", "result", ",", "dtype", "=", "'datetime64[ns, UTC]'", ")", "result", "=", "result", ".", "astype", "(", "dtype", ")", "except", "TypeError", ":", "# _try_cast was called at a point where the result", "# was already tz-aware", "pass", "elif", "is_extension_array_dtype", "(", "dtype", ")", ":", "# The function can return something of any type, so check", "# if the type is compatible with the calling EA.", "try", ":", "result", "=", "obj", ".", "_values", ".", "_from_sequence", "(", "result", ",", "dtype", "=", "dtype", ")", "except", "Exception", ":", "# https://github.com/pandas-dev/pandas/issues/22850", "# pandas has no control over what 3rd-party ExtensionArrays", "# do in _values_from_sequence. We still want ops to work", "# though, so we catch any regular Exception.", "pass", "elif", "numeric_only", "and", "is_numeric_dtype", "(", "dtype", ")", "or", "not", "numeric_only", ":", "result", "=", "maybe_downcast_to_dtype", "(", "result", ",", "dtype", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_GroupBy._transform_should_cast
Parameters: ----------- func_nm: str The name of the aggregation function being performed Returns: -------- bool Whether transform should attempt to cast the result of aggregation
pandas/core/groupby/groupby.py
def _transform_should_cast(self, func_nm): """ Parameters: ----------- func_nm: str The name of the aggregation function being performed Returns: -------- bool Whether transform should attempt to cast the result of aggregation """ return (self.size().fillna(0) > 0).any() and ( func_nm not in base.cython_cast_blacklist)
def _transform_should_cast(self, func_nm): """ Parameters: ----------- func_nm: str The name of the aggregation function being performed Returns: -------- bool Whether transform should attempt to cast the result of aggregation """ return (self.size().fillna(0) > 0).any() and ( func_nm not in base.cython_cast_blacklist)
[ "Parameters", ":", "-----------", "func_nm", ":", "str", "The", "name", "of", "the", "aggregation", "function", "being", "performed" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L801-L814
[ "def", "_transform_should_cast", "(", "self", ",", "func_nm", ")", ":", "return", "(", "self", ".", "size", "(", ")", ".", "fillna", "(", "0", ")", ">", "0", ")", ".", "any", "(", ")", "and", "(", "func_nm", "not", "in", "base", ".", "cython_cast_blacklist", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
GroupBy._bool_agg
Shared func to call any / all Cython GroupBy implementations.
pandas/core/groupby/groupby.py
def _bool_agg(self, val_test, skipna): """ Shared func to call any / all Cython GroupBy implementations. """ def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]: if is_object_dtype(vals): vals = np.array([bool(x) for x in vals]) else: vals = vals.astype(np.bool) return vals.view(np.uint8), np.bool def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray: return result.astype(inference, copy=False) return self._get_cythonized_result('group_any_all', self.grouper, aggregate=True, cython_dtype=np.uint8, needs_values=True, needs_mask=True, pre_processing=objs_to_bool, post_processing=result_to_bool, val_test=val_test, skipna=skipna)
def _bool_agg(self, val_test, skipna): """ Shared func to call any / all Cython GroupBy implementations. """ def objs_to_bool(vals: np.ndarray) -> Tuple[np.ndarray, Type]: if is_object_dtype(vals): vals = np.array([bool(x) for x in vals]) else: vals = vals.astype(np.bool) return vals.view(np.uint8), np.bool def result_to_bool(result: np.ndarray, inference: Type) -> np.ndarray: return result.astype(inference, copy=False) return self._get_cythonized_result('group_any_all', self.grouper, aggregate=True, cython_dtype=np.uint8, needs_values=True, needs_mask=True, pre_processing=objs_to_bool, post_processing=result_to_bool, val_test=val_test, skipna=skipna)
[ "Shared", "func", "to", "call", "any", "/", "all", "Cython", "GroupBy", "implementations", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1039-L1062
[ "def", "_bool_agg", "(", "self", ",", "val_test", ",", "skipna", ")", ":", "def", "objs_to_bool", "(", "vals", ":", "np", ".", "ndarray", ")", "->", "Tuple", "[", "np", ".", "ndarray", ",", "Type", "]", ":", "if", "is_object_dtype", "(", "vals", ")", ":", "vals", "=", "np", ".", "array", "(", "[", "bool", "(", "x", ")", "for", "x", "in", "vals", "]", ")", "else", ":", "vals", "=", "vals", ".", "astype", "(", "np", ".", "bool", ")", "return", "vals", ".", "view", "(", "np", ".", "uint8", ")", ",", "np", ".", "bool", "def", "result_to_bool", "(", "result", ":", "np", ".", "ndarray", ",", "inference", ":", "Type", ")", "->", "np", ".", "ndarray", ":", "return", "result", ".", "astype", "(", "inference", ",", "copy", "=", "False", ")", "return", "self", ".", "_get_cythonized_result", "(", "'group_any_all'", ",", "self", ".", "grouper", ",", "aggregate", "=", "True", ",", "cython_dtype", "=", "np", ".", "uint8", ",", "needs_values", "=", "True", ",", "needs_mask", "=", "True", ",", "pre_processing", "=", "objs_to_bool", ",", "post_processing", "=", "result_to_bool", ",", "val_test", "=", "val_test", ",", "skipna", "=", "skipna", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
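_bool_agg above is the shared engine behind the public GroupBy.any and GroupBy.all; a quick sketch of those entry points (illustrative data):

import pandas as pd

df = pd.DataFrame({'A': ['x', 'x', 'y'], 'B': [True, False, True]})
g = df.groupby('A')['B']

# Group 'x' holds one True and one False: any() is True, all() is False.
assert g.any().tolist() == [True, True]
assert g.all().tolist() == [False, True]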
train
GroupBy.mean
Compute mean of groups, excluding missing values. Returns ------- pandas.Series or pandas.DataFrame %(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5], ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C']) Groupby one column and return the mean of the remaining columns in each group. >>> df.groupby('A').mean() B C A 1 3.0 1.333333 2 4.0 1.500000 Groupby two columns and return the mean of the remaining column. >>> df.groupby(['A', 'B']).mean() C A B 1 2.0 2 4.0 1 2 3.0 1 5.0 2 Groupby one column and return the mean of only a particular column in the group. >>> df.groupby('A')['B'].mean() A 1 3.0 2 4.0 Name: B, dtype: float64
pandas/core/groupby/groupby.py
def mean(self, *args, **kwargs): """ Compute mean of groups, excluding missing values. Returns ------- pandas.Series or pandas.DataFrame %(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5], ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C']) Groupby one column and return the mean of the remaining columns in each group. >>> df.groupby('A').mean() B C A 1 3.0 1.333333 2 4.0 1.500000 Groupby two columns and return the mean of the remaining column. >>> df.groupby(['A', 'B']).mean() C A B 1 2.0 2 4.0 1 2 3.0 1 5.0 2 Groupby one column and return the mean of only a particular column in the group. >>> df.groupby('A')['B'].mean() A 1 3.0 2 4.0 Name: B, dtype: float64 """ nv.validate_groupby_func('mean', args, kwargs, ['numeric_only']) try: return self._cython_agg_general('mean', **kwargs) except GroupByError: raise except Exception: # pragma: no cover with _group_selection_context(self): f = lambda x: x.mean(axis=self.axis, **kwargs) return self._python_agg_general(f)
def mean(self, *args, **kwargs): """ Compute mean of groups, excluding missing values. Returns ------- pandas.Series or pandas.DataFrame %(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5], ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C']) Groupby one column and return the mean of the remaining columns in each group. >>> df.groupby('A').mean() B C A 1 3.0 1.333333 2 4.0 1.500000 Groupby two columns and return the mean of the remaining column. >>> df.groupby(['A', 'B']).mean() C A B 1 2.0 2 4.0 1 2 3.0 1 5.0 2 Groupby one column and return the mean of only a particular column in the group. >>> df.groupby('A')['B'].mean() A 1 3.0 2 4.0 Name: B, dtype: float64 """ nv.validate_groupby_func('mean', args, kwargs, ['numeric_only']) try: return self._cython_agg_general('mean', **kwargs) except GroupByError: raise except Exception: # pragma: no cover with _group_selection_context(self): f = lambda x: x.mean(axis=self.axis, **kwargs) return self._python_agg_general(f)
[ "Compute", "mean", "of", "groups", "excluding", "missing", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1102-L1155
[ "def", "mean", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_groupby_func", "(", "'mean'", ",", "args", ",", "kwargs", ",", "[", "'numeric_only'", "]", ")", "try", ":", "return", "self", ".", "_cython_agg_general", "(", "'mean'", ",", "*", "*", "kwargs", ")", "except", "GroupByError", ":", "raise", "except", "Exception", ":", "# pragma: no cover", "with", "_group_selection_context", "(", "self", ")", ":", "f", "=", "lambda", "x", ":", "x", ".", "mean", "(", "axis", "=", "self", ".", "axis", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_python_agg_general", "(", "f", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
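A quick check of the NaN handling documented above: missing values are dropped per group before averaging (illustrative data).

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': [1, 1, 2], 'B': [np.nan, 2.0, 3.0]})

# The NaN in group 1 is excluded, so its mean is 2.0 rather than NaN.
result = df.groupby('A')['B'].mean()
assert result[1] == 2.0 and result[2] == 3.0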
train
GroupBy.median
Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex.
pandas/core/groupby/groupby.py
def median(self, **kwargs): """ Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. """ try: return self._cython_agg_general('median', **kwargs) except GroupByError: raise except Exception: # pragma: no cover def f(x): if isinstance(x, np.ndarray): x = Series(x) return x.median(axis=self.axis, **kwargs) with _group_selection_context(self): return self._python_agg_general(f)
def median(self, **kwargs): """ Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. """ try: return self._cython_agg_general('median', **kwargs) except GroupByError: raise except Exception: # pragma: no cover def f(x): if isinstance(x, np.ndarray): x = Series(x) return x.median(axis=self.axis, **kwargs) with _group_selection_context(self): return self._python_agg_general(f)
[ "Compute", "median", "of", "groups", "excluding", "missing", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1159-L1176
[ "def", "median", "(", "self", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "self", ".", "_cython_agg_general", "(", "'median'", ",", "*", "*", "kwargs", ")", "except", "GroupByError", ":", "raise", "except", "Exception", ":", "# pragma: no cover", "def", "f", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "np", ".", "ndarray", ")", ":", "x", "=", "Series", "(", "x", ")", "return", "x", ".", "median", "(", "axis", "=", "self", ".", "axis", ",", "*", "*", "kwargs", ")", "with", "_group_selection_context", "(", "self", ")", ":", "return", "self", ".", "_python_agg_general", "(", "f", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
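The same skip-NaN behaviour holds for median above; a minimal sketch with illustrative data.

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': [1, 1, 1, 2], 'B': [10.0, 1.0, 2.0, np.nan]})
result = df.groupby('A')['B'].median()

# Group 1 -> median of [10, 1, 2] is 2.0; group 2 is all-NaN -> NaN.
assert result[1] == 2.0
assert np.isnan(result[2])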
train
GroupBy.std
Compute standard deviation of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom
pandas/core/groupby/groupby.py
def std(self, ddof=1, *args, **kwargs): """ Compute standard deviation of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """ # TODO: implement at Cython level? nv.validate_groupby_func('std', args, kwargs) return np.sqrt(self.var(ddof=ddof, **kwargs))
def std(self, ddof=1, *args, **kwargs): """ Compute standard deviation of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """ # TODO: implement at Cython level? nv.validate_groupby_func('std', args, kwargs) return np.sqrt(self.var(ddof=ddof, **kwargs))
[ "Compute", "standard", "deviation", "of", "groups", "excluding", "missing", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1180-L1194
[ "def", "std", "(", "self", ",", "ddof", "=", "1", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: implement at Cython level?", "nv", ".", "validate_groupby_func", "(", "'std'", ",", "args", ",", "kwargs", ")", "return", "np", ".", "sqrt", "(", "self", ".", "var", "(", "ddof", "=", "ddof", ",", "*", "*", "kwargs", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
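Since std above is just the square root of var, ddof passes straight through; a numeric check (illustrative data):

import numpy as np
import pandas as pd

s = pd.Series([1.0, 3.0], index=['a', 'a'])

# Sample variance (ddof=1) of [1, 3] is 2, so the std is sqrt(2).
assert np.isclose(s.groupby(level=0).std()['a'], np.sqrt(2.0))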
train
GroupBy.var
Compute variance of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom
pandas/core/groupby/groupby.py
def var(self, ddof=1, *args, **kwargs): """ Compute variance of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """ nv.validate_groupby_func('var', args, kwargs) if ddof == 1: try: return self._cython_agg_general('var', **kwargs) except Exception: f = lambda x: x.var(ddof=ddof, **kwargs) with _group_selection_context(self): return self._python_agg_general(f) else: f = lambda x: x.var(ddof=ddof, **kwargs) with _group_selection_context(self): return self._python_agg_general(f)
def var(self, ddof=1, *args, **kwargs): """ Compute variance of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """ nv.validate_groupby_func('var', args, kwargs) if ddof == 1: try: return self._cython_agg_general('var', **kwargs) except Exception: f = lambda x: x.var(ddof=ddof, **kwargs) with _group_selection_context(self): return self._python_agg_general(f) else: f = lambda x: x.var(ddof=ddof, **kwargs) with _group_selection_context(self): return self._python_agg_general(f)
[ "Compute", "variance", "of", "groups", "excluding", "missing", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1198-L1220
[ "def", "var", "(", "self", ",", "ddof", "=", "1", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_groupby_func", "(", "'var'", ",", "args", ",", "kwargs", ")", "if", "ddof", "==", "1", ":", "try", ":", "return", "self", ".", "_cython_agg_general", "(", "'var'", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "f", "=", "lambda", "x", ":", "x", ".", "var", "(", "ddof", "=", "ddof", ",", "*", "*", "kwargs", ")", "with", "_group_selection_context", "(", "self", ")", ":", "return", "self", ".", "_python_agg_general", "(", "f", ")", "else", ":", "f", "=", "lambda", "x", ":", "x", ".", "var", "(", "ddof", "=", "ddof", ",", "*", "*", "kwargs", ")", "with", "_group_selection_context", "(", "self", ")", ":", "return", "self", ".", "_python_agg_general", "(", "f", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
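A sketch contrasting the default sample variance with the population variance for var above (illustrative data):

import numpy as np
import pandas as pd

g = pd.Series([1.0, 2.0, 3.0], index=['a', 'a', 'a']).groupby(level=0)

# ddof=1 (the default) divides by n - 1; ddof=0 divides by n.
assert np.isclose(g.var()['a'], 1.0)
assert np.isclose(g.var(ddof=0)['a'], 2.0 / 3.0)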
train
GroupBy.sem
Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom
pandas/core/groupby/groupby.py
def sem(self, ddof=1): """ Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """ return self.std(ddof=ddof) / np.sqrt(self.count())
def sem(self, ddof=1): """ Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """ return self.std(ddof=ddof) / np.sqrt(self.count())
[ "Compute", "standard", "error", "of", "the", "mean", "of", "groups", "excluding", "missing", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1224-L1236
[ "def", "sem", "(", "self", ",", "ddof", "=", "1", ")", ":", "return", "self", ".", "std", "(", "ddof", "=", "ddof", ")", "/", "np", ".", "sqrt", "(", "self", ".", "count", "(", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
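sem above is literally std divided by the square root of the group count; a check of that identity (illustrative data):

import numpy as np
import pandas as pd

g = pd.Series([1.0, 3.0, 5.0], index=['a', 'a', 'a']).groupby(level=0)

# Both sides use the default ddof=1, so the identity holds exactly.
assert np.isclose(g.sem()['a'], g.std()['a'] / np.sqrt(3))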
train
GroupBy.size
Compute group sizes.
pandas/core/groupby/groupby.py
def size(self): """ Compute group sizes. """ result = self.grouper.size() if isinstance(self.obj, Series): result.name = getattr(self.obj, 'name', None) return result
def size(self): """ Compute group sizes. """ result = self.grouper.size() if isinstance(self.obj, Series): result.name = getattr(self.obj, 'name', None) return result
[ "Compute", "group", "sizes", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1240-L1248
[ "def", "size", "(", "self", ")", ":", "result", "=", "self", ".", "grouper", ".", "size", "(", ")", "if", "isinstance", "(", "self", ".", "obj", ",", "Series", ")", ":", "result", ".", "name", "=", "getattr", "(", "self", ".", "obj", ",", "'name'", ",", "None", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
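size above counts rows per group, NaN included, which distinguishes it from count; a sketch (illustrative data):

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': ['x', 'x', 'y'], 'B': [1.0, np.nan, 3.0]})
g = df.groupby('A')

# size counts every row; count drops the NaN in group 'x'.
assert g.size().tolist() == [2, 1]
assert g['B'].count().tolist() == [1, 1]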
train
GroupBy._add_numeric_operations
Add numeric operations to the GroupBy generically.
pandas/core/groupby/groupby.py
def _add_numeric_operations(cls): """ Add numeric operations to the GroupBy generically. """ def groupby_function(name, alias, npfunc, numeric_only=True, _convert=False, min_count=-1): _local_template = "Compute %(f)s of group values" @Substitution(name='groupby', f=name) @Appender(_common_see_also) @Appender(_local_template) def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only if 'min_count' not in kwargs: kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( alias, alt=npfunc, **kwargs) except AssertionError as e: raise SpecificationError(str(e)) except Exception: result = self.aggregate( lambda x: npfunc(x, axis=self.axis)) if _convert: result = result._convert(datetime=True) return result set_function_name(f, name, cls) return f def first_compat(x, axis=0): def first(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[0] if isinstance(x, DataFrame): return x.apply(first, axis=axis) else: return first(x) def last_compat(x, axis=0): def last(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[-1] if isinstance(x, DataFrame): return x.apply(last, axis=axis) else: return last(x) cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, numeric_only=False) cls.last = groupby_function('last', 'last', last_compat, numeric_only=False)
def _add_numeric_operations(cls): """ Add numeric operations to the GroupBy generically. """ def groupby_function(name, alias, npfunc, numeric_only=True, _convert=False, min_count=-1): _local_template = "Compute %(f)s of group values" @Substitution(name='groupby', f=name) @Appender(_common_see_also) @Appender(_local_template) def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only if 'min_count' not in kwargs: kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( alias, alt=npfunc, **kwargs) except AssertionError as e: raise SpecificationError(str(e)) except Exception: result = self.aggregate( lambda x: npfunc(x, axis=self.axis)) if _convert: result = result._convert(datetime=True) return result set_function_name(f, name, cls) return f def first_compat(x, axis=0): def first(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[0] if isinstance(x, DataFrame): return x.apply(first, axis=axis) else: return first(x) def last_compat(x, axis=0): def last(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[-1] if isinstance(x, DataFrame): return x.apply(last, axis=axis) else: return last(x) cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, numeric_only=False) cls.last = groupby_function('last', 'last', last_compat, numeric_only=False)
[ "Add", "numeric", "operations", "to", "the", "GroupBy", "generically", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1251-L1324
[ "def", "_add_numeric_operations", "(", "cls", ")", ":", "def", "groupby_function", "(", "name", ",", "alias", ",", "npfunc", ",", "numeric_only", "=", "True", ",", "_convert", "=", "False", ",", "min_count", "=", "-", "1", ")", ":", "_local_template", "=", "\"Compute %(f)s of group values\"", "@", "Substitution", "(", "name", "=", "'groupby'", ",", "f", "=", "name", ")", "@", "Appender", "(", "_common_see_also", ")", "@", "Appender", "(", "_local_template", ")", "def", "f", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "'numeric_only'", "not", "in", "kwargs", ":", "kwargs", "[", "'numeric_only'", "]", "=", "numeric_only", "if", "'min_count'", "not", "in", "kwargs", ":", "kwargs", "[", "'min_count'", "]", "=", "min_count", "self", ".", "_set_group_selection", "(", ")", "try", ":", "return", "self", ".", "_cython_agg_general", "(", "alias", ",", "alt", "=", "npfunc", ",", "*", "*", "kwargs", ")", "except", "AssertionError", "as", "e", ":", "raise", "SpecificationError", "(", "str", "(", "e", ")", ")", "except", "Exception", ":", "result", "=", "self", ".", "aggregate", "(", "lambda", "x", ":", "npfunc", "(", "x", ",", "axis", "=", "self", ".", "axis", ")", ")", "if", "_convert", ":", "result", "=", "result", ".", "_convert", "(", "datetime", "=", "True", ")", "return", "result", "set_function_name", "(", "f", ",", "name", ",", "cls", ")", "return", "f", "def", "first_compat", "(", "x", ",", "axis", "=", "0", ")", ":", "def", "first", "(", "x", ")", ":", "x", "=", "x", ".", "to_numpy", "(", ")", "x", "=", "x", "[", "notna", "(", "x", ")", "]", "if", "len", "(", "x", ")", "==", "0", ":", "return", "np", ".", "nan", "return", "x", "[", "0", "]", "if", "isinstance", "(", "x", ",", "DataFrame", ")", ":", "return", "x", ".", "apply", "(", "first", ",", "axis", "=", "axis", ")", "else", ":", "return", "first", "(", "x", ")", "def", "last_compat", "(", "x", ",", "axis", "=", "0", ")", ":", "def", "last", "(", "x", ")", ":", "x", "=", "x", ".", "to_numpy", "(", ")", "x", "=", "x", "[", "notna", "(", "x", ")", "]", "if", "len", "(", "x", ")", "==", "0", ":", "return", "np", ".", "nan", "return", "x", "[", "-", "1", "]", "if", "isinstance", "(", "x", ",", "DataFrame", ")", ":", "return", "x", ".", "apply", "(", "last", ",", "axis", "=", "axis", ")", "else", ":", "return", "last", "(", "x", ")", "cls", ".", "sum", "=", "groupby_function", "(", "'sum'", ",", "'add'", ",", "np", ".", "sum", ",", "min_count", "=", "0", ")", "cls", ".", "prod", "=", "groupby_function", "(", "'prod'", ",", "'prod'", ",", "np", ".", "prod", ",", "min_count", "=", "0", ")", "cls", ".", "min", "=", "groupby_function", "(", "'min'", ",", "'min'", ",", "np", ".", "min", ",", "numeric_only", "=", "False", ")", "cls", ".", "max", "=", "groupby_function", "(", "'max'", ",", "'max'", ",", "np", ".", "max", ",", "numeric_only", "=", "False", ")", "cls", ".", "first", "=", "groupby_function", "(", "'first'", ",", "'first'", ",", "first_compat", ",", "numeric_only", "=", "False", ")", "cls", ".", "last", "=", "groupby_function", "(", "'last'", ",", "'last'", ",", "last_compat", ",", "numeric_only", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
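first_compat/last_compat above drop missing values before picking an element, so first() returns the first non-NaN value per group rather than the positionally first row; a sketch (illustrative data):

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': ['x', 'x'], 'B': [np.nan, 2.0]})

# The positional first value of group 'x' is NaN, but first() skips it.
assert df.groupby('A')['B'].first()['x'] == 2.0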
train
GroupBy.resample
Provide resampling when using a TimeGrouper. Given a grouper, the function resamples it according to a string "string" -> "frequency". See the :ref:`frequency aliases <timeseries.offset_aliases>` documentation for more details. Parameters ---------- rule : str or DateOffset The offset string or object representing target grouper conversion. *args, **kwargs Possible arguments are `how`, `fill_method`, `limit`, `kind` and `on`, and other arguments of `TimeGrouper`. Returns ------- Grouper Return a new grouper with our resampler appended. See Also -------- Grouper : Specify a frequency to resample with when grouping by a key. DatetimeIndex.resample : Frequency conversion and resampling of time series. Examples -------- >>> idx = pd.date_range('1/1/2000', periods=4, freq='T') >>> df = pd.DataFrame(data=4 * [range(2)], ... index=idx, ... columns=['a', 'b']) >>> df.iloc[2, 0] = 5 >>> df a b 2000-01-01 00:00:00 0 1 2000-01-01 00:01:00 0 1 2000-01-01 00:02:00 5 1 2000-01-01 00:03:00 0 1 Downsample the DataFrame into 3 minute bins and sum the values of the timestamps falling into a bin. >>> df.groupby('a').resample('3T').sum() a b a 0 2000-01-01 00:00:00 0 2 2000-01-01 00:03:00 0 1 5 2000-01-01 00:00:00 5 1 Upsample the series into 30 second bins. >>> df.groupby('a').resample('30S').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:00:30 0 0 2000-01-01 00:01:00 0 1 2000-01-01 00:01:30 0 0 2000-01-01 00:02:00 0 0 2000-01-01 00:02:30 0 0 2000-01-01 00:03:00 0 1 5 2000-01-01 00:02:00 5 1 Resample by month. Values are assigned to the month of the period. >>> df.groupby('a').resample('M').sum() a b a 0 2000-01-31 0 3 5 2000-01-31 5 1 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> df.groupby('a').resample('3T', closed='right').sum() a b a 0 1999-12-31 23:57:00 0 1 2000-01-01 00:00:00 0 2 5 2000-01-01 00:00:00 5 1 Downsample the series into 3 minute bins and close the right side of the bin interval, but label each bin using the right edge instead of the left. >>> df.groupby('a').resample('3T', closed='right', label='right').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:03:00 0 2 5 2000-01-01 00:03:00 5 1 Add an offset of twenty seconds. >>> df.groupby('a').resample('3T', loffset='20s').sum() a b a 0 2000-01-01 00:00:20 0 2 2000-01-01 00:03:20 0 1 5 2000-01-01 00:00:20 5 1
pandas/core/groupby/groupby.py
def resample(self, rule, *args, **kwargs): """ Provide resampling when using a TimeGrouper. Given a grouper, the function resamples it according to a string "string" -> "frequency". See the :ref:`frequency aliases <timeseries.offset_aliases>` documentation for more details. Parameters ---------- rule : str or DateOffset The offset string or object representing target grouper conversion. *args, **kwargs Possible arguments are `how`, `fill_method`, `limit`, `kind` and `on`, and other arguments of `TimeGrouper`. Returns ------- Grouper Return a new grouper with our resampler appended. See Also -------- Grouper : Specify a frequency to resample with when grouping by a key. DatetimeIndex.resample : Frequency conversion and resampling of time series. Examples -------- >>> idx = pd.date_range('1/1/2000', periods=4, freq='T') >>> df = pd.DataFrame(data=4 * [range(2)], ... index=idx, ... columns=['a', 'b']) >>> df.iloc[2, 0] = 5 >>> df a b 2000-01-01 00:00:00 0 1 2000-01-01 00:01:00 0 1 2000-01-01 00:02:00 5 1 2000-01-01 00:03:00 0 1 Downsample the DataFrame into 3 minute bins and sum the values of the timestamps falling into a bin. >>> df.groupby('a').resample('3T').sum() a b a 0 2000-01-01 00:00:00 0 2 2000-01-01 00:03:00 0 1 5 2000-01-01 00:00:00 5 1 Upsample the series into 30 second bins. >>> df.groupby('a').resample('30S').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:00:30 0 0 2000-01-01 00:01:00 0 1 2000-01-01 00:01:30 0 0 2000-01-01 00:02:00 0 0 2000-01-01 00:02:30 0 0 2000-01-01 00:03:00 0 1 5 2000-01-01 00:02:00 5 1 Resample by month. Values are assigned to the month of the period. >>> df.groupby('a').resample('M').sum() a b a 0 2000-01-31 0 3 5 2000-01-31 5 1 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> df.groupby('a').resample('3T', closed='right').sum() a b a 0 1999-12-31 23:57:00 0 1 2000-01-01 00:00:00 0 2 5 2000-01-01 00:00:00 5 1 Downsample the series into 3 minute bins and close the right side of the bin interval, but label each bin using the right edge instead of the left. >>> df.groupby('a').resample('3T', closed='right', label='right').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:03:00 0 2 5 2000-01-01 00:03:00 5 1 Add an offset of twenty seconds. >>> df.groupby('a').resample('3T', loffset='20s').sum() a b a 0 2000-01-01 00:00:20 0 2 2000-01-01 00:03:20 0 1 5 2000-01-01 00:00:20 5 1 """ from pandas.core.resample import get_resampler_for_grouping return get_resampler_for_grouping(self, rule, *args, **kwargs)
def resample(self, rule, *args, **kwargs): """ Provide resampling when using a TimeGrouper. Given a grouper, the function resamples it according to a string "string" -> "frequency". See the :ref:`frequency aliases <timeseries.offset_aliases>` documentation for more details. Parameters ---------- rule : str or DateOffset The offset string or object representing target grouper conversion. *args, **kwargs Possible arguments are `how`, `fill_method`, `limit`, `kind` and `on`, and other arguments of `TimeGrouper`. Returns ------- Grouper Return a new grouper with our resampler appended. See Also -------- Grouper : Specify a frequency to resample with when grouping by a key. DatetimeIndex.resample : Frequency conversion and resampling of time series. Examples -------- >>> idx = pd.date_range('1/1/2000', periods=4, freq='T') >>> df = pd.DataFrame(data=4 * [range(2)], ... index=idx, ... columns=['a', 'b']) >>> df.iloc[2, 0] = 5 >>> df a b 2000-01-01 00:00:00 0 1 2000-01-01 00:01:00 0 1 2000-01-01 00:02:00 5 1 2000-01-01 00:03:00 0 1 Downsample the DataFrame into 3 minute bins and sum the values of the timestamps falling into a bin. >>> df.groupby('a').resample('3T').sum() a b a 0 2000-01-01 00:00:00 0 2 2000-01-01 00:03:00 0 1 5 2000-01-01 00:00:00 5 1 Upsample the series into 30 second bins. >>> df.groupby('a').resample('30S').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:00:30 0 0 2000-01-01 00:01:00 0 1 2000-01-01 00:01:30 0 0 2000-01-01 00:02:00 0 0 2000-01-01 00:02:30 0 0 2000-01-01 00:03:00 0 1 5 2000-01-01 00:02:00 5 1 Resample by month. Values are assigned to the month of the period. >>> df.groupby('a').resample('M').sum() a b a 0 2000-01-31 0 3 5 2000-01-31 5 1 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> df.groupby('a').resample('3T', closed='right').sum() a b a 0 1999-12-31 23:57:00 0 1 2000-01-01 00:00:00 0 2 5 2000-01-01 00:00:00 5 1 Downsample the series into 3 minute bins and close the right side of the bin interval, but label each bin using the right edge instead of the left. >>> df.groupby('a').resample('3T', closed='right', label='right').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:03:00 0 2 5 2000-01-01 00:03:00 5 1 Add an offset of twenty seconds. >>> df.groupby('a').resample('3T', loffset='20s').sum() a b a 0 2000-01-01 00:00:20 0 2 2000-01-01 00:03:20 0 1 5 2000-01-01 00:00:20 5 1 """ from pandas.core.resample import get_resampler_for_grouping return get_resampler_for_grouping(self, rule, *args, **kwargs)
[ "Provide", "resampling", "when", "using", "a", "TimeGrouper", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1346-L1453
[ "def", "resample", "(", "self", ",", "rule", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "resample", "import", "get_resampler_for_grouping", "return", "get_resampler_for_grouping", "(", "self", ",", "rule", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
GroupBy.rolling
Return a rolling grouper, providing rolling functionality per group.
pandas/core/groupby/groupby.py
def rolling(self, *args, **kwargs): """ Return a rolling grouper, providing rolling functionality per group. """ from pandas.core.window import RollingGroupby return RollingGroupby(self, *args, **kwargs)
def rolling(self, *args, **kwargs): """ Return a rolling grouper, providing rolling functionality per group. """ from pandas.core.window import RollingGroupby return RollingGroupby(self, *args, **kwargs)
[ "Return", "a", "rolling", "grouper", "providing", "rolling", "functionality", "per", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1457-L1462
[ "def", "rolling", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "window", "import", "RollingGroupby", "return", "RollingGroupby", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
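A minimal usage sketch for the rolling grouper above (column names and data are illustrative, assuming pandas is installed):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [1.0, 2.0, 3.0, 4.0, 5.0]})
# Rolling mean over a window of 2 observations, computed per group,
# so windows never cross the boundary between group 'a' and group 'b'.
print(df.groupby('key')['val'].rolling(2).mean())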
GroupBy.expanding
Return an expanding grouper, providing expanding functionality per group.
pandas/core/groupby/groupby.py
def expanding(self, *args, **kwargs): """ Return an expanding grouper, providing expanding functionality per group. """ from pandas.core.window import ExpandingGroupby return ExpandingGroupby(self, *args, **kwargs)
def expanding(self, *args, **kwargs): """ Return an expanding grouper, providing expanding functionality per group. """ from pandas.core.window import ExpandingGroupby return ExpandingGroupby(self, *args, **kwargs)
[ "Return", "an", "expanding", "grouper", "providing", "expanding", "functionality", "per", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1466-L1472
[ "def", "expanding", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "window", "import", "ExpandingGroupby", "return", "ExpandingGroupby", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
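Analogously, a short sketch of the expanding grouper (illustrative data):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'b'],
                   'val': [1.0, 2.0, 3.0, 4.0, 5.0]})
# Expanding (cumulative-window) sum restarts at the first row of each
# group: 'a' -> 1, 3 and 'b' -> 3, 7, 12.
print(df.groupby('key')['val'].expanding().sum())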
GroupBy._fill
Shared function for `pad` and `backfill` to call Cython method. Parameters ---------- direction : {'ffill', 'bfill'} Direction passed to underlying Cython function. `bfill` will cause values to be filled backwards. `ffill` and any other values will default to a forward fill limit : int, default None Maximum number of consecutive values to fill. If `None`, this method will convert to -1 prior to passing to Cython Returns ------- `Series` or `DataFrame` with filled values See Also -------- pad backfill
pandas/core/groupby/groupby.py
def _fill(self, direction, limit=None): """ Shared function for `pad` and `backfill` to call Cython method. Parameters ---------- direction : {'ffill', 'bfill'} Direction passed to underlying Cython function. `bfill` will cause values to be filled backwards. `ffill` and any other values will default to a forward fill limit : int, default None Maximum number of consecutive values to fill. If `None`, this method will convert to -1 prior to passing to Cython Returns ------- `Series` or `DataFrame` with filled values See Also -------- pad backfill """ # Need int value for Cython if limit is None: limit = -1 return self._get_cythonized_result('group_fillna_indexer', self.grouper, needs_mask=True, cython_dtype=np.int64, result_is_index=True, direction=direction, limit=limit)
def _fill(self, direction, limit=None): """ Shared function for `pad` and `backfill` to call Cython method. Parameters ---------- direction : {'ffill', 'bfill'} Direction passed to underlying Cython function. `bfill` will cause values to be filled backwards. `ffill` and any other values will default to a forward fill limit : int, default None Maximum number of consecutive values to fill. If `None`, this method will convert to -1 prior to passing to Cython Returns ------- `Series` or `DataFrame` with filled values See Also -------- pad backfill """ # Need int value for Cython if limit is None: limit = -1 return self._get_cythonized_result('group_fillna_indexer', self.grouper, needs_mask=True, cython_dtype=np.int64, result_is_index=True, direction=direction, limit=limit)
[ "Shared", "function", "for", "pad", "and", "backfill", "to", "call", "Cython", "method", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1474-L1505
[ "def", "_fill", "(", "self", ",", "direction", ",", "limit", "=", "None", ")", ":", "# Need int value for Cython", "if", "limit", "is", "None", ":", "limit", "=", "-", "1", "return", "self", ".", "_get_cythonized_result", "(", "'group_fillna_indexer'", ",", "self", ".", "grouper", ",", "needs_mask", "=", "True", ",", "cython_dtype", "=", "np", ".", "int64", ",", "result_is_index", "=", "True", ",", "direction", "=", "direction", ",", "limit", "=", "limit", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
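_fill itself is private; it is reached through the public pad/ffill and backfill/bfill wrappers. A hedged sketch of those entry points (illustrative data; `limit` caps consecutive fills within a group):

import numpy as np
import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [1.0, np.nan, np.nan, np.nan, 2.0]})
g = df.groupby('key')
# Forward-fill within each group, at most one consecutive fill per gap;
# note the leading NaN of group 'b' is never filled from group 'a'.
print(g.ffill(limit=1))
# Backward-fill within each group.
print(g.bfill())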
GroupBy.nth
Take the nth row from each group if n is an int, or a subset of rows if n is a list of ints. If dropna, will take the nth non-null row, dropna is either Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent to calling dropna(how=dropna) before the groupby. Parameters ---------- n : int or list of ints a single nth value for the row or a list of nth values dropna : None or str, optional apply the specified dropna operation before counting which row is the nth row. Needs to be None, 'any' or 'all' %(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B']) >>> g = df.groupby('A') >>> g.nth(0) B A 1 NaN 2 3.0 >>> g.nth(1) B A 1 2.0 2 5.0 >>> g.nth(-1) B A 1 4.0 2 5.0 >>> g.nth([0, 1]) B A 1 NaN 1 2.0 2 3.0 2 5.0 Specifying `dropna` allows count ignoring ``NaN`` >>> g.nth(0, dropna='any') B A 1 2.0 2 3.0 NaNs denote group exhausted when using dropna >>> g.nth(3, dropna='any') B A 1 NaN 2 NaN Specifying `as_index=False` in `groupby` keeps the original index. >>> df.groupby('A', as_index=False).nth(1) A B 1 1 2.0 4 2 5.0
pandas/core/groupby/groupby.py
def nth(self, n, dropna=None): """ Take the nth row from each group if n is an int, or a subset of rows if n is a list of ints. If dropna, will take the nth non-null row, dropna is either Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent to calling dropna(how=dropna) before the groupby. Parameters ---------- n : int or list of ints a single nth value for the row or a list of nth values dropna : None or str, optional apply the specified dropna operation before counting which row is the nth row. Needs to be None, 'any' or 'all' %(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B']) >>> g = df.groupby('A') >>> g.nth(0) B A 1 NaN 2 3.0 >>> g.nth(1) B A 1 2.0 2 5.0 >>> g.nth(-1) B A 1 4.0 2 5.0 >>> g.nth([0, 1]) B A 1 NaN 1 2.0 2 3.0 2 5.0 Specifying `dropna` allows count ignoring ``NaN`` >>> g.nth(0, dropna='any') B A 1 2.0 2 3.0 NaNs denote group exhausted when using dropna >>> g.nth(3, dropna='any') B A 1 NaN 2 NaN Specifying `as_index=False` in `groupby` keeps the original index. >>> df.groupby('A', as_index=False).nth(1) A B 1 1 2.0 4 2 5.0 """ if isinstance(n, int): nth_values = [n] elif isinstance(n, (set, list, tuple)): nth_values = list(set(n)) if dropna is not None: raise ValueError( "dropna option with a list of nth values is not supported") else: raise TypeError("n needs to be an int or a list/set/tuple of ints") nth_values = np.array(nth_values, dtype=np.intp) self._set_group_selection() if not dropna: mask_left = np.in1d(self._cumcount_array(), nth_values) mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values) mask = mask_left | mask_right out = self._selected_obj[mask] if not self.as_index: return out ids, _, _ = self.grouper.group_info out.index = self.grouper.result_index[ids[mask]] return out.sort_index() if self.sort else out if dropna not in ['any', 'all']: if isinstance(self._selected_obj, Series) and dropna is True: warnings.warn("the dropna={dropna} keyword is deprecated," "use dropna='all' instead. " "For a Series groupby, dropna must be " "either None, 'any' or 'all'.".format( dropna=dropna), FutureWarning, stacklevel=2) dropna = 'all' else: # Note: when agg-ing picker doesn't raise this, # just returns NaN raise ValueError("For a DataFrame groupby, dropna must be " "either None, 'any' or 'all', " "(was passed {dropna}).".format( dropna=dropna)) # old behaviour, but with all and any support for DataFrames. # modified in GH 7559 to have better perf max_len = n if n >= 0 else - 1 - n dropped = self.obj.dropna(how=dropna, axis=self.axis) # get a new grouper for our dropped obj if self.keys is None and self.level is None: # we don't have the grouper info available # (e.g. we have selected out # a column that is not in the current object) axis = self.grouper.axis grouper = axis[axis.isin(dropped.index)] else: # create a grouper with the original parameters, but on the dropped # object from pandas.core.groupby.grouper import _get_grouper grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis, level=self.level, sort=self.sort, mutated=self.mutated) grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort) sizes, result = grb.size(), grb.nth(n) mask = (sizes < max_len).values # set the results which don't meet the criteria if len(result) and mask.any(): result.loc[mask] = np.nan # reset/reindex to the original groups if (len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index)): result.index = self.grouper.result_index else: result = result.reindex(self.grouper.result_index) return result
def nth(self, n, dropna=None): """ Take the nth row from each group if n is an int, or a subset of rows if n is a list of ints. If dropna, will take the nth non-null row, dropna is either Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent to calling dropna(how=dropna) before the groupby. Parameters ---------- n : int or list of ints a single nth value for the row or a list of nth values dropna : None or str, optional apply the specified dropna operation before counting which row is the nth row. Needs to be None, 'any' or 'all' %(see_also)s Examples -------- >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B']) >>> g = df.groupby('A') >>> g.nth(0) B A 1 NaN 2 3.0 >>> g.nth(1) B A 1 2.0 2 5.0 >>> g.nth(-1) B A 1 4.0 2 5.0 >>> g.nth([0, 1]) B A 1 NaN 1 2.0 2 3.0 2 5.0 Specifying `dropna` allows count ignoring ``NaN`` >>> g.nth(0, dropna='any') B A 1 2.0 2 3.0 NaNs denote group exhausted when using dropna >>> g.nth(3, dropna='any') B A 1 NaN 2 NaN Specifying `as_index=False` in `groupby` keeps the original index. >>> df.groupby('A', as_index=False).nth(1) A B 1 1 2.0 4 2 5.0 """ if isinstance(n, int): nth_values = [n] elif isinstance(n, (set, list, tuple)): nth_values = list(set(n)) if dropna is not None: raise ValueError( "dropna option with a list of nth values is not supported") else: raise TypeError("n needs to be an int or a list/set/tuple of ints") nth_values = np.array(nth_values, dtype=np.intp) self._set_group_selection() if not dropna: mask_left = np.in1d(self._cumcount_array(), nth_values) mask_right = np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values) mask = mask_left | mask_right out = self._selected_obj[mask] if not self.as_index: return out ids, _, _ = self.grouper.group_info out.index = self.grouper.result_index[ids[mask]] return out.sort_index() if self.sort else out if dropna not in ['any', 'all']: if isinstance(self._selected_obj, Series) and dropna is True: warnings.warn("the dropna={dropna} keyword is deprecated," "use dropna='all' instead. " "For a Series groupby, dropna must be " "either None, 'any' or 'all'.".format( dropna=dropna), FutureWarning, stacklevel=2) dropna = 'all' else: # Note: when agg-ing picker doesn't raise this, # just returns NaN raise ValueError("For a DataFrame groupby, dropna must be " "either None, 'any' or 'all', " "(was passed {dropna}).".format( dropna=dropna)) # old behaviour, but with all and any support for DataFrames. # modified in GH 7559 to have better perf max_len = n if n >= 0 else - 1 - n dropped = self.obj.dropna(how=dropna, axis=self.axis) # get a new grouper for our dropped obj if self.keys is None and self.level is None: # we don't have the grouper info available # (e.g. we have selected out # a column that is not in the current object) axis = self.grouper.axis grouper = axis[axis.isin(dropped.index)] else: # create a grouper with the original parameters, but on the dropped # object from pandas.core.groupby.grouper import _get_grouper grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis, level=self.level, sort=self.sort, mutated=self.mutated) grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort) sizes, result = grb.size(), grb.nth(n) mask = (sizes < max_len).values # set the results which don't meet the criteria if len(result) and mask.any(): result.loc[mask] = np.nan # reset/reindex to the original groups if (len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index)): result.index = self.grouper.result_index else: result = result.reindex(self.grouper.result_index) return result
[ "Take", "the", "nth", "row", "from", "each", "group", "if", "n", "is", "an", "int", "or", "a", "subset", "of", "rows", "if", "n", "is", "a", "list", "of", "ints", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1549-L1705
[ "def", "nth", "(", "self", ",", "n", ",", "dropna", "=", "None", ")", ":", "if", "isinstance", "(", "n", ",", "int", ")", ":", "nth_values", "=", "[", "n", "]", "elif", "isinstance", "(", "n", ",", "(", "set", ",", "list", ",", "tuple", ")", ")", ":", "nth_values", "=", "list", "(", "set", "(", "n", ")", ")", "if", "dropna", "is", "not", "None", ":", "raise", "ValueError", "(", "\"dropna option with a list of nth values is not supported\"", ")", "else", ":", "raise", "TypeError", "(", "\"n needs to be an int or a list/set/tuple of ints\"", ")", "nth_values", "=", "np", ".", "array", "(", "nth_values", ",", "dtype", "=", "np", ".", "intp", ")", "self", ".", "_set_group_selection", "(", ")", "if", "not", "dropna", ":", "mask_left", "=", "np", ".", "in1d", "(", "self", ".", "_cumcount_array", "(", ")", ",", "nth_values", ")", "mask_right", "=", "np", ".", "in1d", "(", "self", ".", "_cumcount_array", "(", "ascending", "=", "False", ")", "+", "1", ",", "-", "nth_values", ")", "mask", "=", "mask_left", "|", "mask_right", "out", "=", "self", ".", "_selected_obj", "[", "mask", "]", "if", "not", "self", ".", "as_index", ":", "return", "out", "ids", ",", "_", ",", "_", "=", "self", ".", "grouper", ".", "group_info", "out", ".", "index", "=", "self", ".", "grouper", ".", "result_index", "[", "ids", "[", "mask", "]", "]", "return", "out", ".", "sort_index", "(", ")", "if", "self", ".", "sort", "else", "out", "if", "dropna", "not", "in", "[", "'any'", ",", "'all'", "]", ":", "if", "isinstance", "(", "self", ".", "_selected_obj", ",", "Series", ")", "and", "dropna", "is", "True", ":", "warnings", ".", "warn", "(", "\"the dropna={dropna} keyword is deprecated,\"", "\"use dropna='all' instead. \"", "\"For a Series groupby, dropna must be \"", "\"either None, 'any' or 'all'.\"", ".", "format", "(", "dropna", "=", "dropna", ")", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "dropna", "=", "'all'", "else", ":", "# Note: when agg-ing picker doesn't raise this,", "# just returns NaN", "raise", "ValueError", "(", "\"For a DataFrame groupby, dropna must be \"", "\"either None, 'any' or 'all', \"", "\"(was passed {dropna}).\"", ".", "format", "(", "dropna", "=", "dropna", ")", ")", "# old behaviour, but with all and any support for DataFrames.", "# modified in GH 7559 to have better perf", "max_len", "=", "n", "if", "n", ">=", "0", "else", "-", "1", "-", "n", "dropped", "=", "self", ".", "obj", ".", "dropna", "(", "how", "=", "dropna", ",", "axis", "=", "self", ".", "axis", ")", "# get a new grouper for our dropped obj", "if", "self", ".", "keys", "is", "None", "and", "self", ".", "level", "is", "None", ":", "# we don't have the grouper info available", "# (e.g. 
we have selected out", "# a column that is not in the current object)", "axis", "=", "self", ".", "grouper", ".", "axis", "grouper", "=", "axis", "[", "axis", ".", "isin", "(", "dropped", ".", "index", ")", "]", "else", ":", "# create a grouper with the original parameters, but on the dropped", "# object", "from", "pandas", ".", "core", ".", "groupby", ".", "grouper", "import", "_get_grouper", "grouper", ",", "_", ",", "_", "=", "_get_grouper", "(", "dropped", ",", "key", "=", "self", ".", "keys", ",", "axis", "=", "self", ".", "axis", ",", "level", "=", "self", ".", "level", ",", "sort", "=", "self", ".", "sort", ",", "mutated", "=", "self", ".", "mutated", ")", "grb", "=", "dropped", ".", "groupby", "(", "grouper", ",", "as_index", "=", "self", ".", "as_index", ",", "sort", "=", "self", ".", "sort", ")", "sizes", ",", "result", "=", "grb", ".", "size", "(", ")", ",", "grb", ".", "nth", "(", "n", ")", "mask", "=", "(", "sizes", "<", "max_len", ")", ".", "values", "# set the results which don't meet the criteria", "if", "len", "(", "result", ")", "and", "mask", ".", "any", "(", ")", ":", "result", ".", "loc", "[", "mask", "]", "=", "np", ".", "nan", "# reset/reindex to the original groups", "if", "(", "len", "(", "self", ".", "obj", ")", "==", "len", "(", "dropped", ")", "or", "len", "(", "result", ")", "==", "len", "(", "self", ".", "grouper", ".", "result_index", ")", ")", ":", "result", ".", "index", "=", "self", ".", "grouper", ".", "result_index", "else", ":", "result", "=", "result", ".", "reindex", "(", "self", ".", "grouper", ".", "result_index", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
GroupBy.quantile
Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0
pandas/core/groupby/groupby.py
def quantile(self, q=0.5, interpolation='linear'): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """ def pre_processor( vals: np.ndarray ) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against " "'object' dtypes!") inference = None if is_integer_dtype(vals): inference = np.int64 elif is_datetime64_dtype(vals): inference = 'datetime64[ns]' vals = vals.astype(np.float) return vals, inference def post_processor( vals: np.ndarray, inference: Optional[Type] ) -> np.ndarray: if inference: # Check for edge case if not (is_integer_dtype(inference) and interpolation in {'linear', 'midpoint'}): vals = vals.astype(inference) return vals return self._get_cythonized_result('group_quantile', self.grouper, aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.float64, pre_processing=pre_processor, post_processing=post_processor, q=q, interpolation=interpolation)
def quantile(self, q=0.5, interpolation='linear'): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """ def pre_processor( vals: np.ndarray ) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against " "'object' dtypes!") inference = None if is_integer_dtype(vals): inference = np.int64 elif is_datetime64_dtype(vals): inference = 'datetime64[ns]' vals = vals.astype(np.float) return vals, inference def post_processor( vals: np.ndarray, inference: Optional[Type] ) -> np.ndarray: if inference: # Check for edge case if not (is_integer_dtype(inference) and interpolation in {'linear', 'midpoint'}): vals = vals.astype(inference) return vals return self._get_cythonized_result('group_quantile', self.grouper, aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.float64, pre_processing=pre_processor, post_processing=post_processor, q=q, interpolation=interpolation)
[ "Return", "group", "values", "at", "the", "given", "quantile", "a", "la", "numpy", ".", "percentile", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1707-L1777
[ "def", "quantile", "(", "self", ",", "q", "=", "0.5", ",", "interpolation", "=", "'linear'", ")", ":", "def", "pre_processor", "(", "vals", ":", "np", ".", "ndarray", ")", "->", "Tuple", "[", "np", ".", "ndarray", ",", "Optional", "[", "Type", "]", "]", ":", "if", "is_object_dtype", "(", "vals", ")", ":", "raise", "TypeError", "(", "\"'quantile' cannot be performed against \"", "\"'object' dtypes!\"", ")", "inference", "=", "None", "if", "is_integer_dtype", "(", "vals", ")", ":", "inference", "=", "np", ".", "int64", "elif", "is_datetime64_dtype", "(", "vals", ")", ":", "inference", "=", "'datetime64[ns]'", "vals", "=", "vals", ".", "astype", "(", "np", ".", "float", ")", "return", "vals", ",", "inference", "def", "post_processor", "(", "vals", ":", "np", ".", "ndarray", ",", "inference", ":", "Optional", "[", "Type", "]", ")", "->", "np", ".", "ndarray", ":", "if", "inference", ":", "# Check for edge case", "if", "not", "(", "is_integer_dtype", "(", "inference", ")", "and", "interpolation", "in", "{", "'linear'", ",", "'midpoint'", "}", ")", ":", "vals", "=", "vals", ".", "astype", "(", "inference", ")", "return", "vals", "return", "self", ".", "_get_cythonized_result", "(", "'group_quantile'", ",", "self", ".", "grouper", ",", "aggregate", "=", "True", ",", "needs_values", "=", "True", ",", "needs_mask", "=", "True", ",", "cython_dtype", "=", "np", ".", "float64", ",", "pre_processing", "=", "pre_processor", ",", "post_processing", "=", "post_processor", ",", "q", "=", "q", ",", "interpolation", "=", "interpolation", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
GroupBy.ngroup
Number each group from 0 to the number of groups - 1. This is the enumerative complement of cumcount. Note that the numbers given to the groups match the order in which the groups would be seen when iterating over the groupby object, not the order they are first observed. .. versionadded:: 0.20.2 Parameters ---------- ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. See Also -------- .cumcount : Number the rows in each group. Examples -------- >>> df = pd.DataFrame({"A": list("aaabba")}) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').ngroup() 0 0 1 0 2 0 3 1 4 1 5 0 dtype: int64 >>> df.groupby('A').ngroup(ascending=False) 0 1 1 1 2 1 3 0 4 0 5 1 dtype: int64 >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup() 0 0 1 0 2 1 3 3 4 2 5 0 dtype: int64
pandas/core/groupby/groupby.py
def ngroup(self, ascending=True): """ Number each group from 0 to the number of groups - 1. This is the enumerative complement of cumcount. Note that the numbers given to the groups match the order in which the groups would be seen when iterating over the groupby object, not the order they are first observed. .. versionadded:: 0.20.2 Parameters ---------- ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. See Also -------- .cumcount : Number the rows in each group. Examples -------- >>> df = pd.DataFrame({"A": list("aaabba")}) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').ngroup() 0 0 1 0 2 0 3 1 4 1 5 0 dtype: int64 >>> df.groupby('A').ngroup(ascending=False) 0 1 1 1 2 1 3 0 4 0 5 1 dtype: int64 >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup() 0 0 1 0 2 1 3 3 4 2 5 0 dtype: int64 """ with _group_selection_context(self): index = self._selected_obj.index result = Series(self.grouper.group_info[0], index) if not ascending: result = self.ngroups - 1 - result return result
def ngroup(self, ascending=True): """ Number each group from 0 to the number of groups - 1. This is the enumerative complement of cumcount. Note that the numbers given to the groups match the order in which the groups would be seen when iterating over the groupby object, not the order they are first observed. .. versionadded:: 0.20.2 Parameters ---------- ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. See Also -------- .cumcount : Number the rows in each group. Examples -------- >>> df = pd.DataFrame({"A": list("aaabba")}) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').ngroup() 0 0 1 0 2 0 3 1 4 1 5 0 dtype: int64 >>> df.groupby('A').ngroup(ascending=False) 0 1 1 1 2 1 3 0 4 0 5 1 dtype: int64 >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup() 0 0 1 0 2 1 3 3 4 2 5 0 dtype: int64 """ with _group_selection_context(self): index = self._selected_obj.index result = Series(self.grouper.group_info[0], index) if not ascending: result = self.ngroups - 1 - result return result
[ "Number", "each", "group", "from", "0", "to", "the", "number", "of", "groups", "-", "1", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1780-L1843
[ "def", "ngroup", "(", "self", ",", "ascending", "=", "True", ")", ":", "with", "_group_selection_context", "(", "self", ")", ":", "index", "=", "self", ".", "_selected_obj", ".", "index", "result", "=", "Series", "(", "self", ".", "grouper", ".", "group_info", "[", "0", "]", ",", "index", ")", "if", "not", "ascending", ":", "result", "=", "self", ".", "ngroups", "-", "1", "-", "result", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
GroupBy.cumcount
Number each item in each group from 0 to the length of that group - 1. Essentially this is equivalent to >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. See Also -------- .ngroup : Number the groups themselves. Examples -------- >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], ... columns=['A']) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').cumcount() 0 0 1 1 2 2 3 0 4 1 5 3 dtype: int64 >>> df.groupby('A').cumcount(ascending=False) 0 3 1 2 2 1 3 1 4 0 5 0 dtype: int64
pandas/core/groupby/groupby.py
def cumcount(self, ascending=True): """ Number each item in each group from 0 to the length of that group - 1. Essentially this is equivalent to >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. See Also -------- .ngroup : Number the groups themselves. Examples -------- >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], ... columns=['A']) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').cumcount() 0 0 1 1 2 2 3 0 4 1 5 3 dtype: int64 >>> df.groupby('A').cumcount(ascending=False) 0 3 1 2 2 1 3 1 4 0 5 0 dtype: int64 """ with _group_selection_context(self): index = self._selected_obj.index cumcounts = self._cumcount_array(ascending=ascending) return Series(cumcounts, index)
def cumcount(self, ascending=True): """ Number each item in each group from 0 to the length of that group - 1. Essentially this is equivalent to >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. See Also -------- .ngroup : Number the groups themselves. Examples -------- >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], ... columns=['A']) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').cumcount() 0 0 1 1 2 2 3 0 4 1 5 3 dtype: int64 >>> df.groupby('A').cumcount(ascending=False) 0 3 1 2 2 1 3 1 4 0 5 0 dtype: int64 """ with _group_selection_context(self): index = self._selected_obj.index cumcounts = self._cumcount_array(ascending=ascending) return Series(cumcounts, index)
[ "Number", "each", "item", "in", "each", "group", "from", "0", "to", "the", "length", "of", "that", "group", "-", "1", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1846-L1897
[ "def", "cumcount", "(", "self", ",", "ascending", "=", "True", ")", ":", "with", "_group_selection_context", "(", "self", ")", ":", "index", "=", "self", ".", "_selected_obj", ".", "index", "cumcounts", "=", "self", ".", "_cumcount_array", "(", "ascending", "=", "ascending", ")", "return", "Series", "(", "cumcounts", ",", "index", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
GroupBy.rank
Provide the rank of values within each group. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending pct : boolean, default False Compute percentage rank of data within each group axis : int, default 0 The axis of the object over which to compute the rank. Returns ------- DataFrame with ranking of values within each group
pandas/core/groupby/groupby.py
def rank(self, method='average', ascending=True, na_option='keep', pct=False, axis=0): """ Provide the rank of values within each group. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending pct : boolean, default False Compute percentage rank of data within each group axis : int, default 0 The axis of the object over which to compute the rank. Returns ------- DataFrame with ranking of values within each group """ if na_option not in {'keep', 'top', 'bottom'}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) return self._cython_transform('rank', numeric_only=False, ties_method=method, ascending=ascending, na_option=na_option, pct=pct, axis=axis)
def rank(self, method='average', ascending=True, na_option='keep', pct=False, axis=0): """ Provide the rank of values within each group. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending pct : boolean, default False Compute percentage rank of data within each group axis : int, default 0 The axis of the object over which to compute the rank. Returns ------- DataFrame with ranking of values within each group """ if na_option not in {'keep', 'top', 'bottom'}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) return self._cython_transform('rank', numeric_only=False, ties_method=method, ascending=ascending, na_option=na_option, pct=pct, axis=axis)
[ "Provide", "the", "rank", "of", "values", "within", "each", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1901-L1934
[ "def", "rank", "(", "self", ",", "method", "=", "'average'", ",", "ascending", "=", "True", ",", "na_option", "=", "'keep'", ",", "pct", "=", "False", ",", "axis", "=", "0", ")", ":", "if", "na_option", "not", "in", "{", "'keep'", ",", "'top'", ",", "'bottom'", "}", ":", "msg", "=", "\"na_option must be one of 'keep', 'top', or 'bottom'\"", "raise", "ValueError", "(", "msg", ")", "return", "self", ".", "_cython_transform", "(", "'rank'", ",", "numeric_only", "=", "False", ",", "ties_method", "=", "method", ",", "ascending", "=", "ascending", ",", "na_option", "=", "na_option", ",", "pct", "=", "pct", ",", "axis", "=", "axis", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
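A short sketch contrasting two of the tie-breaking methods documented above (illustrative data):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [2, 2, 5, 1, 3]})
g = df.groupby('key')['val']
# 'min' assigns ties the lowest shared rank: 1, 1, 3 within group 'a'.
print(g.rank(method='min'))
# 'dense' closes the gap after ties: 1, 1, 2 within group 'a'.
print(g.rank(method='dense'))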
GroupBy.cumprod
Cumulative product for each group.
pandas/core/groupby/groupby.py
def cumprod(self, axis=0, *args, **kwargs): """ Cumulative product for each group. """ nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only', 'skipna']) if axis != 0: return self.apply(lambda x: x.cumprod(axis=axis, **kwargs)) return self._cython_transform('cumprod', **kwargs)
def cumprod(self, axis=0, *args, **kwargs): """ Cumulative product for each group. """ nv.validate_groupby_func('cumprod', args, kwargs, ['numeric_only', 'skipna']) if axis != 0: return self.apply(lambda x: x.cumprod(axis=axis, **kwargs)) return self._cython_transform('cumprod', **kwargs)
[ "Cumulative", "product", "for", "each", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1938-L1947
[ "def", "cumprod", "(", "self", ",", "axis", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_groupby_func", "(", "'cumprod'", ",", "args", ",", "kwargs", ",", "[", "'numeric_only'", ",", "'skipna'", "]", ")", "if", "axis", "!=", "0", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "x", ".", "cumprod", "(", "axis", "=", "axis", ",", "*", "*", "kwargs", ")", ")", "return", "self", ".", "_cython_transform", "(", "'cumprod'", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
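A one-liner sketch of the per-group running product (illustrative data):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [2.0, 3.0, 4.0, 10.0, 0.5]})
# The product restarts at each group's first row:
# 'a' -> 2, 6, 24 and 'b' -> 10, 5.
print(df.groupby('key')['val'].cumprod())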
GroupBy.cummin
Cumulative min for each group.
pandas/core/groupby/groupby.py
def cummin(self, axis=0, **kwargs): """ Cumulative min for each group. """ if axis != 0: return self.apply(lambda x: np.minimum.accumulate(x, axis)) return self._cython_transform('cummin', numeric_only=False)
def cummin(self, axis=0, **kwargs): """ Cumulative min for each group. """ if axis != 0: return self.apply(lambda x: np.minimum.accumulate(x, axis)) return self._cython_transform('cummin', numeric_only=False)
[ "Cumulative", "min", "for", "each", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1964-L1971
[ "def", "cummin", "(", "self", ",", "axis", "=", "0", ",", "*", "*", "kwargs", ")", ":", "if", "axis", "!=", "0", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "np", ".", "minimum", ".", "accumulate", "(", "x", ",", "axis", ")", ")", "return", "self", ".", "_cython_transform", "(", "'cummin'", ",", "numeric_only", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
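A corresponding sketch for the per-group running minimum (illustrative data):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [3, 1, 2, 5, 4]})
# Running minimum per group: 'a' -> 3, 1, 1 and 'b' -> 5, 4.
print(df.groupby('key')['val'].cummin())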
GroupBy.cummax
Cumulative max for each group.
pandas/core/groupby/groupby.py
def cummax(self, axis=0, **kwargs): """ Cumulative max for each group. """ if axis != 0: return self.apply(lambda x: np.maximum.accumulate(x, axis)) return self._cython_transform('cummax', numeric_only=False)
def cummax(self, axis=0, **kwargs): """ Cumulative max for each group. """ if axis != 0: return self.apply(lambda x: np.maximum.accumulate(x, axis)) return self._cython_transform('cummax', numeric_only=False)
[ "Cumulative", "max", "for", "each", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1975-L1982
[ "def", "cummax", "(", "self", ",", "axis", "=", "0", ",", "*", "*", "kwargs", ")", ":", "if", "axis", "!=", "0", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "np", ".", "maximum", ".", "accumulate", "(", "x", ",", "axis", ")", ")", "return", "self", ".", "_cython_transform", "(", "'cummax'", ",", "numeric_only", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
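And the mirror-image sketch for the per-group running maximum (illustrative data):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [1, 3, 2, 4, 5]})
# Running maximum per group: 'a' -> 1, 3, 3 and 'b' -> 4, 5.
print(df.groupby('key')['val'].cummax())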
GroupBy._get_cythonized_result
Get result for Cythonized functions. Parameters ---------- how : str, Cythonized function name to be called grouper : Grouper object containing pertinent group info aggregate : bool, default False Whether the result should be aggregated to match the number of groups cython_dtype : default None Type of the array that will be modified by the Cython call. If `None`, the type will be inferred from the values of each slice needs_values : bool, default False Whether the values should be a part of the Cython call signature needs_mask : bool, default False Whether boolean mask needs to be part of the Cython call signature needs_ngroups : bool, default False Whether number of groups is part of the Cython call signature result_is_index : bool, default False Whether the result of the Cython operation is an index of values to be retrieved, instead of the actual values themselves pre_processing : function, default None Function to be applied to `values` prior to passing to Cython. Function should return a tuple where the first element is the values to be passed to Cython and the second element is an optional type which the values should be converted to after being returned by the Cython operation. Raises if `needs_values` is False. post_processing : function, default None Function to be applied to result of Cython function. Should accept an array of values as the first argument and type inferences as its second argument, i.e. the signature should be (ndarray, Type). **kwargs : dict Extra arguments to be passed back to Cython funcs Returns ------- `Series` or `DataFrame` with filled values
pandas/core/groupby/groupby.py
def _get_cythonized_result(self, how, grouper, aggregate=False, cython_dtype=None, needs_values=False, needs_mask=False, needs_ngroups=False, result_is_index=False, pre_processing=None, post_processing=None, **kwargs): """ Get result for Cythonized functions. Parameters ---------- how : str, Cythonized function name to be called grouper : Grouper object containing pertinent group info aggregate : bool, default False Whether the result should be aggregated to match the number of groups cython_dtype : default None Type of the array that will be modified by the Cython call. If `None`, the type will be inferred from the values of each slice needs_values : bool, default False Whether the values should be a part of the Cython call signature needs_mask : bool, default False Whether boolean mask needs to be part of the Cython call signature needs_ngroups : bool, default False Whether number of groups is part of the Cython call signature result_is_index : bool, default False Whether the result of the Cython operation is an index of values to be retrieved, instead of the actual values themselves pre_processing : function, default None Function to be applied to `values` prior to passing to Cython. Function should return a tuple where the first element is the values to be passed to Cython and the second element is an optional type which the values should be converted to after being returned by the Cython operation. Raises if `needs_values` is False. post_processing : function, default None Function to be applied to result of Cython function. Should accept an array of values as the first argument and type inferences as its second argument, i.e. the signature should be (ndarray, Type). **kwargs : dict Extra arguments to be passed back to Cython funcs Returns ------- `Series` or `DataFrame` with filled values """ if result_is_index and aggregate: raise ValueError("'result_is_index' and 'aggregate' cannot both " "be True!") if post_processing: if not callable(pre_processing): raise ValueError("'post_processing' must be a callable!") if pre_processing: if not callable(pre_processing): raise ValueError("'pre_processing' must be a callable!") if not needs_values: raise ValueError("Cannot use 'pre_processing' without " "specifying 'needs_values'!") labels, _, ngroups = grouper.group_info output = collections.OrderedDict() base_func = getattr(libgroupby, how) for name, obj in self._iterate_slices(): if aggregate: result_sz = ngroups else: result_sz = len(obj.values) if not cython_dtype: cython_dtype = obj.values.dtype result = np.zeros(result_sz, dtype=cython_dtype) func = partial(base_func, result, labels) inferences = None if needs_values: vals = obj.values if pre_processing: vals, inferences = pre_processing(vals) func = partial(func, vals) if needs_mask: mask = isna(obj.values).view(np.uint8) func = partial(func, mask) if needs_ngroups: func = partial(func, ngroups) func(**kwargs) # Call func to modify indexer values in place if result_is_index: result = algorithms.take_nd(obj.values, result) if post_processing: result = post_processing(result, inferences) output[name] = result if aggregate: return self._wrap_aggregated_output(output) else: return self._wrap_transformed_output(output)
def _get_cythonized_result(self, how, grouper, aggregate=False, cython_dtype=None, needs_values=False, needs_mask=False, needs_ngroups=False, result_is_index=False, pre_processing=None, post_processing=None, **kwargs): """ Get result for Cythonized functions. Parameters ---------- how : str, Cythonized function name to be called grouper : Grouper object containing pertinent group info aggregate : bool, default False Whether the result should be aggregated to match the number of groups cython_dtype : default None Type of the array that will be modified by the Cython call. If `None`, the type will be inferred from the values of each slice needs_values : bool, default False Whether the values should be a part of the Cython call signature needs_mask : bool, default False Whether boolean mask needs to be part of the Cython call signature needs_ngroups : bool, default False Whether number of groups is part of the Cython call signature result_is_index : bool, default False Whether the result of the Cython operation is an index of values to be retrieved, instead of the actual values themselves pre_processing : function, default None Function to be applied to `values` prior to passing to Cython. Function should return a tuple where the first element is the values to be passed to Cython and the second element is an optional type which the values should be converted to after being returned by the Cython operation. Raises if `needs_values` is False. post_processing : function, default None Function to be applied to result of Cython function. Should accept an array of values as the first argument and type inferences as its second argument, i.e. the signature should be (ndarray, Type). **kwargs : dict Extra arguments to be passed back to Cython funcs Returns ------- `Series` or `DataFrame` with filled values """ if result_is_index and aggregate: raise ValueError("'result_is_index' and 'aggregate' cannot both " "be True!") if post_processing: if not callable(pre_processing): raise ValueError("'post_processing' must be a callable!") if pre_processing: if not callable(pre_processing): raise ValueError("'pre_processing' must be a callable!") if not needs_values: raise ValueError("Cannot use 'pre_processing' without " "specifying 'needs_values'!") labels, _, ngroups = grouper.group_info output = collections.OrderedDict() base_func = getattr(libgroupby, how) for name, obj in self._iterate_slices(): if aggregate: result_sz = ngroups else: result_sz = len(obj.values) if not cython_dtype: cython_dtype = obj.values.dtype result = np.zeros(result_sz, dtype=cython_dtype) func = partial(base_func, result, labels) inferences = None if needs_values: vals = obj.values if pre_processing: vals, inferences = pre_processing(vals) func = partial(func, vals) if needs_mask: mask = isna(obj.values).view(np.uint8) func = partial(func, mask) if needs_ngroups: func = partial(func, ngroups) func(**kwargs) # Call func to modify indexer values in place if result_is_index: result = algorithms.take_nd(obj.values, result) if post_processing: result = post_processing(result, inferences) output[name] = result if aggregate: return self._wrap_aggregated_output(output) else: return self._wrap_transformed_output(output)
[ "Get", "result", "for", "Cythonized", "functions", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L1984-L2088
[ "def", "_get_cythonized_result", "(", "self", ",", "how", ",", "grouper", ",", "aggregate", "=", "False", ",", "cython_dtype", "=", "None", ",", "needs_values", "=", "False", ",", "needs_mask", "=", "False", ",", "needs_ngroups", "=", "False", ",", "result_is_index", "=", "False", ",", "pre_processing", "=", "None", ",", "post_processing", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "result_is_index", "and", "aggregate", ":", "raise", "ValueError", "(", "\"'result_is_index' and 'aggregate' cannot both \"", "\"be True!\"", ")", "if", "post_processing", ":", "if", "not", "callable", "(", "pre_processing", ")", ":", "raise", "ValueError", "(", "\"'post_processing' must be a callable!\"", ")", "if", "pre_processing", ":", "if", "not", "callable", "(", "pre_processing", ")", ":", "raise", "ValueError", "(", "\"'pre_processing' must be a callable!\"", ")", "if", "not", "needs_values", ":", "raise", "ValueError", "(", "\"Cannot use 'pre_processing' without \"", "\"specifying 'needs_values'!\"", ")", "labels", ",", "_", ",", "ngroups", "=", "grouper", ".", "group_info", "output", "=", "collections", ".", "OrderedDict", "(", ")", "base_func", "=", "getattr", "(", "libgroupby", ",", "how", ")", "for", "name", ",", "obj", "in", "self", ".", "_iterate_slices", "(", ")", ":", "if", "aggregate", ":", "result_sz", "=", "ngroups", "else", ":", "result_sz", "=", "len", "(", "obj", ".", "values", ")", "if", "not", "cython_dtype", ":", "cython_dtype", "=", "obj", ".", "values", ".", "dtype", "result", "=", "np", ".", "zeros", "(", "result_sz", ",", "dtype", "=", "cython_dtype", ")", "func", "=", "partial", "(", "base_func", ",", "result", ",", "labels", ")", "inferences", "=", "None", "if", "needs_values", ":", "vals", "=", "obj", ".", "values", "if", "pre_processing", ":", "vals", ",", "inferences", "=", "pre_processing", "(", "vals", ")", "func", "=", "partial", "(", "func", ",", "vals", ")", "if", "needs_mask", ":", "mask", "=", "isna", "(", "obj", ".", "values", ")", ".", "view", "(", "np", ".", "uint8", ")", "func", "=", "partial", "(", "func", ",", "mask", ")", "if", "needs_ngroups", ":", "func", "=", "partial", "(", "func", ",", "ngroups", ")", "func", "(", "*", "*", "kwargs", ")", "# Call func to modify indexer values in place", "if", "result_is_index", ":", "result", "=", "algorithms", ".", "take_nd", "(", "obj", ".", "values", ",", "result", ")", "if", "post_processing", ":", "result", "=", "post_processing", "(", "result", ",", "inferences", ")", "output", "[", "name", "]", "=", "result", "if", "aggregate", ":", "return", "self", ".", "_wrap_aggregated_output", "(", "output", ")", "else", ":", "return", "self", ".", "_wrap_transformed_output", "(", "output", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
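The helper above composes its Cython call step by step with functools.partial, binding the output buffer and labels first and the optional values/mask/ngroups arguments only when requested, then invoking the fully bound kernel once to mutate the result in place. (Incidentally, the post_processing branch in this revision validates callable(pre_processing), which looks like a copy-paste slip.) Below is a standalone sketch of the same binding pattern with a plain Python stand-in for the Cython kernel; every name in it is illustrative:

from functools import partial

import numpy as np

def fake_kernel(out, labels, values, mask):
    # Stand-in for a libgroupby kernel: fills `out` in place; a real
    # kernel would use `labels` to aggregate or transform per group.
    for i in range(len(out)):
        out[i] = -1.0 if mask[i] else values[i]

labels = np.array([0, 0, 1], dtype=np.int64)
values = np.array([10.0, 20.0, 30.0])
mask = np.array([0, 1, 0], dtype=np.uint8)

out = np.zeros(len(values), dtype=np.float64)
func = partial(fake_kernel, out, labels)  # result and labels always bound
func = partial(func, values)              # bound only when needs_values
func = partial(func, mask)                # bound only when needs_mask
func()                                    # the call mutates `out` in place
print(out)                                # [10. -1. 30.]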
GroupBy.shift
Shift each group by periods observations. Parameters ---------- periods : integer, default 1 number of periods to shift freq : frequency string axis : axis to shift, default 0 fill_value : optional .. versionadded:: 0.24.0
pandas/core/groupby/groupby.py
def shift(self, periods=1, freq=None, axis=0, fill_value=None): """ Shift each group by periods observations. Parameters ---------- periods : integer, default 1 number of periods to shift freq : frequency string axis : axis to shift, default 0 fill_value : optional .. versionadded:: 0.24.0 """ if freq is not None or axis != 0 or not isna(fill_value): return self.apply(lambda x: x.shift(periods, freq, axis, fill_value)) return self._get_cythonized_result('group_shift_indexer', self.grouper, cython_dtype=np.int64, needs_ngroups=True, result_is_index=True, periods=periods)
def shift(self, periods=1, freq=None, axis=0, fill_value=None): """ Shift each group by periods observations. Parameters ---------- periods : integer, default 1 number of periods to shift freq : frequency string axis : axis to shift, default 0 fill_value : optional .. versionadded:: 0.24.0 """ if freq is not None or axis != 0 or not isna(fill_value): return self.apply(lambda x: x.shift(periods, freq, axis, fill_value)) return self._get_cythonized_result('group_shift_indexer', self.grouper, cython_dtype=np.int64, needs_ngroups=True, result_is_index=True, periods=periods)
[ "Shift", "each", "group", "by", "periods", "observations", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L2092-L2115
[ "def", "shift", "(", "self", ",", "periods", "=", "1", ",", "freq", "=", "None", ",", "axis", "=", "0", ",", "fill_value", "=", "None", ")", ":", "if", "freq", "is", "not", "None", "or", "axis", "!=", "0", "or", "not", "isna", "(", "fill_value", ")", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "x", ".", "shift", "(", "periods", ",", "freq", ",", "axis", ",", "fill_value", ")", ")", "return", "self", ".", "_get_cythonized_result", "(", "'group_shift_indexer'", ",", "self", ".", "grouper", ",", "cython_dtype", "=", "np", ".", "int64", ",", "needs_ngroups", "=", "True", ",", "result_is_index", "=", "True", ",", "periods", "=", "periods", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
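A brief sketch of the per-group lag (illustrative data):

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'a', 'b', 'b'],
                   'val': [1, 2, 3, 4, 5]})
# Lag each value by one observation within its group; the first row of
# each group has no predecessor and becomes NaN.
print(df.groupby('key')['val'].shift(1))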
GroupBy.head
Return first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) >>> df.groupby('A', as_index=False).head(1) A B 0 1 2 2 5 6 >>> df.groupby('A').head(1) A B 0 1 2 2 5 6
pandas/core/groupby/groupby.py
def head(self, n=5): """ Return first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) >>> df.groupby('A', as_index=False).head(1) A B 0 1 2 2 5 6 >>> df.groupby('A').head(1) A B 0 1 2 2 5 6 """ self._reset_group_selection() mask = self._cumcount_array() < n return self._selected_obj[mask]
def head(self, n=5): """ Return first n rows of each group. Essentially equivalent to ``.apply(lambda x: x.head(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], columns=['A', 'B']) >>> df.groupby('A', as_index=False).head(1) A B 0 1 2 2 5 6 >>> df.groupby('A').head(1) A B 0 1 2 2 5 6 """ self._reset_group_selection() mask = self._cumcount_array() < n return self._selected_obj[mask]
[ "Return", "first", "n", "rows", "of", "each", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L2137-L2160
[ "def", "head", "(", "self", ",", "n", "=", "5", ")", ":", "self", ".", "_reset_group_selection", "(", ")", "mask", "=", "self", ".", "_cumcount_array", "(", ")", "<", "n", "return", "self", ".", "_selected_obj", "[", "mask", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
GroupBy.tail
Return last n rows of each group. Essentially equivalent to ``.apply(lambda x: x.tail(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], columns=['A', 'B']) >>> df.groupby('A').tail(1) A B 1 a 2 3 b 2 >>> df.groupby('A').head(1) A B 0 a 1 2 b 1
pandas/core/groupby/groupby.py
def tail(self, n=5): """ Return last n rows of each group. Essentially equivalent to ``.apply(lambda x: x.tail(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], columns=['A', 'B']) >>> df.groupby('A').tail(1) A B 1 a 2 3 b 2 >>> df.groupby('A').head(1) A B 0 a 1 2 b 1 """ self._reset_group_selection() mask = self._cumcount_array(ascending=False) < n return self._selected_obj[mask]
def tail(self, n=5): """ Return last n rows of each group. Essentially equivalent to ``.apply(lambda x: x.tail(n))``, except ignores as_index flag. %(see_also)s Examples -------- >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], columns=['A', 'B']) >>> df.groupby('A').tail(1) A B 1 a 2 3 b 2 >>> df.groupby('A').head(1) A B 0 a 1 2 b 1 """ self._reset_group_selection() mask = self._cumcount_array(ascending=False) < n return self._selected_obj[mask]
[ "Return", "last", "n", "rows", "of", "each", "group", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/groupby.py#L2164-L2187
[ "def", "tail", "(", "self", ",", "n", "=", "5", ")", ":", "self", ".", "_reset_group_selection", "(", ")", "mask", "=", "self", ".", "_cumcount_array", "(", "ascending", "=", "False", ")", "<", "n", "return", "self", ".", "_selected_obj", "[", "mask", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
next_monday
If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead
pandas/tseries/holiday.py
def next_monday(dt): """ If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead """ if dt.weekday() == 5: return dt + timedelta(2) elif dt.weekday() == 6: return dt + timedelta(1) return dt
def next_monday(dt): """ If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead """ if dt.weekday() == 5: return dt + timedelta(2) elif dt.weekday() == 6: return dt + timedelta(1) return dt
[ "If", "holiday", "falls", "on", "Saturday", "use", "following", "Monday", "instead", ";", "if", "holiday", "falls", "on", "Sunday", "use", "Monday", "instead" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L15-L24
[ "def", "next_monday", "(", "dt", ")", ":", "if", "dt", ".", "weekday", "(", ")", "==", "5", ":", "return", "dt", "+", "timedelta", "(", "2", ")", "elif", "dt", ".", "weekday", "(", ")", "==", "6", ":", "return", "dt", "+", "timedelta", "(", "1", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
next_monday_or_tuesday
For second holiday of two adjacent ones! If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday or Monday, use following Tuesday instead (because Monday is already taken by adjacent holiday on the day before)
pandas/tseries/holiday.py
def next_monday_or_tuesday(dt): """ For second holiday of two adjacent ones! If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday or Monday, use following Tuesday instead (because Monday is already taken by adjacent holiday on the day before) """ dow = dt.weekday() if dow == 5 or dow == 6: return dt + timedelta(2) elif dow == 0: return dt + timedelta(1) return dt
def next_monday_or_tuesday(dt): """ For second holiday of two adjacent ones! If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday or Monday, use following Tuesday instead (because Monday is already taken by adjacent holiday on the day before) """ dow = dt.weekday() if dow == 5 or dow == 6: return dt + timedelta(2) elif dow == 0: return dt + timedelta(1) return dt
[ "For", "second", "holiday", "of", "two", "adjacent", "ones!", "If", "holiday", "falls", "on", "Saturday", "use", "following", "Monday", "instead", ";", "if", "holiday", "falls", "on", "Sunday", "or", "Monday", "use", "following", "Tuesday", "instead", "(", "because", "Monday", "is", "already", "taken", "by", "adjacent", "holiday", "on", "the", "day", "before", ")" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L27-L39
[ "def", "next_monday_or_tuesday", "(", "dt", ")", ":", "dow", "=", "dt", ".", "weekday", "(", ")", "if", "dow", "==", "5", "or", "dow", "==", "6", ":", "return", "dt", "+", "timedelta", "(", "2", ")", "elif", "dow", "==", "0", ":", "return", "dt", "+", "timedelta", "(", "1", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
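To make the two observance rules above concrete, a small sketch using the 2021 Christmas weekend, where Saturday 2021-12-25 and Sunday 2021-12-26 are two adjacent holidays:

    from datetime import datetime
    from pandas.tseries.holiday import next_monday, next_monday_or_tuesday

    # a Saturday holiday is observed on the following Monday
    print(next_monday(datetime(2021, 12, 25)))             # 2021-12-27

    # the adjacent Sunday holiday cannot take Monday as well, so the
    # second rule pushes it to Tuesday
    print(next_monday_or_tuesday(datetime(2021, 12, 26)))  # 2021-12-28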
previous_friday
If holiday falls on Saturday or Sunday, use previous Friday instead.
pandas/tseries/holiday.py
def previous_friday(dt): """ If holiday falls on Saturday or Sunday, use previous Friday instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt - timedelta(2) return dt
def previous_friday(dt): """ If holiday falls on Saturday or Sunday, use previous Friday instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt - timedelta(2) return dt
[ "If", "holiday", "falls", "on", "Saturday", "or", "Sunday", "use", "previous", "Friday", "instead", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L42-L50
[ "def", "previous_friday", "(", "dt", ")", ":", "if", "dt", ".", "weekday", "(", ")", "==", "5", ":", "return", "dt", "-", "timedelta", "(", "1", ")", "elif", "dt", ".", "weekday", "(", ")", "==", "6", ":", "return", "dt", "-", "timedelta", "(", "2", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
weekend_to_monday
If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. Needed for holidays such as Christmas observation in Europe
pandas/tseries/holiday.py
def weekend_to_monday(dt): """ If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. Needed for holidays such as Christmas observation in Europe """ if dt.weekday() == 6: return dt + timedelta(1) elif dt.weekday() == 5: return dt + timedelta(2) return dt
def weekend_to_monday(dt): """ If holiday falls on Sunday or Saturday, use day thereafter (Monday) instead. Needed for holidays such as Christmas observation in Europe """ if dt.weekday() == 6: return dt + timedelta(1) elif dt.weekday() == 5: return dt + timedelta(2) return dt
[ "If", "holiday", "falls", "on", "Sunday", "or", "Saturday", "use", "day", "thereafter", "(", "Monday", ")", "instead", ".", "Needed", "for", "holidays", "such", "as", "Christmas", "observation", "in", "Europe" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L62-L72
[ "def", "weekend_to_monday", "(", "dt", ")", ":", "if", "dt", ".", "weekday", "(", ")", "==", "6", ":", "return", "dt", "+", "timedelta", "(", "1", ")", "elif", "dt", ".", "weekday", "(", ")", "==", "5", ":", "return", "dt", "+", "timedelta", "(", "2", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
nearest_workday
If holiday falls on Saturday, use day before (Friday) instead; if holiday falls on Sunday, use day thereafter (Monday) instead.
pandas/tseries/holiday.py
def nearest_workday(dt): """ If holiday falls on Saturday, use day before (Friday) instead; if holiday falls on Sunday, use day thereafter (Monday) instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt + timedelta(1) return dt
def nearest_workday(dt): """ If holiday falls on Saturday, use day before (Friday) instead; if holiday falls on Sunday, use day thereafter (Monday) instead. """ if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt + timedelta(1) return dt
[ "If", "holiday", "falls", "on", "Saturday", "use", "day", "before", "(", "Friday", ")", "instead", ";", "if", "holiday", "falls", "on", "Sunday", "use", "day", "thereafter", "(", "Monday", ")", "instead", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L75-L84
[ "def", "nearest_workday", "(", "dt", ")", ":", "if", "dt", ".", "weekday", "(", ")", "==", "5", ":", "return", "dt", "-", "timedelta", "(", "1", ")", "elif", "dt", ".", "weekday", "(", ")", "==", "6", ":", "return", "dt", "+", "timedelta", "(", "1", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
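The three weekend-shift helpers above differ only in the direction of the shift; a quick sketch on a Sunday holiday:

    from datetime import datetime
    from pandas.tseries.holiday import (nearest_workday, previous_friday,
                                        weekend_to_monday)

    d = datetime(2022, 12, 25)    # a Sunday
    print(previous_friday(d))     # back to Friday 2022-12-23
    print(weekend_to_monday(d))   # forward to Monday 2022-12-26
    print(nearest_workday(d))     # Sundays round forward: 2022-12-26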
next_workday
returns next weekday used for observances
pandas/tseries/holiday.py
def next_workday(dt): """ returns next weekday used for observances """ dt += timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt += timedelta(days=1) return dt
def next_workday(dt): """ returns next weekday used for observances """ dt += timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt += timedelta(days=1) return dt
[ "returns", "next", "weekday", "used", "for", "observances" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L87-L95
[ "def", "next_workday", "(", "dt", ")", ":", "dt", "+=", "timedelta", "(", "days", "=", "1", ")", "while", "dt", ".", "weekday", "(", ")", ">", "4", ":", "# Mon-Fri are 0-4", "dt", "+=", "timedelta", "(", "days", "=", "1", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
previous_workday
returns previous weekday used for observances
pandas/tseries/holiday.py
def previous_workday(dt): """ returns previous weekday used for observances """ dt -= timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt -= timedelta(days=1) return dt
def previous_workday(dt): """ returns previous weekday used for observances """ dt -= timedelta(days=1) while dt.weekday() > 4: # Mon-Fri are 0-4 dt -= timedelta(days=1) return dt
[ "returns", "previous", "weekday", "used", "for", "observances" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L98-L106
[ "def", "previous_workday", "(", "dt", ")", ":", "dt", "-=", "timedelta", "(", "days", "=", "1", ")", "while", "dt", ".", "weekday", "(", ")", ">", "4", ":", "# Mon-Fri are 0-4", "dt", "-=", "timedelta", "(", "days", "=", "1", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
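Both walkers step one day at a time until they land outside the weekend; for example, starting from a Friday:

    from datetime import datetime
    from pandas.tseries.holiday import next_workday, previous_workday

    fri = datetime(2023, 6, 2)    # a Friday
    print(next_workday(fri))      # skips Sat/Sun -> Monday 2023-06-05
    print(previous_workday(fri))  # -> Thursday 2023-06-01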
Holiday.dates
Calculate holidays observed between start date and end date Parameters ---------- start_date : starting date, datetime-like, optional end_date : ending date, datetime-like, optional return_name : bool, optional, default=False If True, return a series that has dates and holiday names. False will only return dates.
pandas/tseries/holiday.py
def dates(self, start_date, end_date, return_name=False): """ Calculate holidays observed between start date and end date Parameters ---------- start_date : starting date, datetime-like, optional end_date : ending date, datetime-like, optional return_name : bool, optional, default=False If True, return a series that has dates and holiday names. False will only return dates. """ start_date = Timestamp(start_date) end_date = Timestamp(end_date) filter_start_date = start_date filter_end_date = end_date if self.year is not None: dt = Timestamp(datetime(self.year, self.month, self.day)) if return_name: return Series(self.name, index=[dt]) else: return [dt] dates = self._reference_dates(start_date, end_date) holiday_dates = self._apply_rule(dates) if self.days_of_week is not None: holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek, self.days_of_week)] if self.start_date is not None: filter_start_date = max(self.start_date.tz_localize( filter_start_date.tz), filter_start_date) if self.end_date is not None: filter_end_date = min(self.end_date.tz_localize( filter_end_date.tz), filter_end_date) holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)] if return_name: return Series(self.name, index=holiday_dates) return holiday_dates
def dates(self, start_date, end_date, return_name=False): """ Calculate holidays observed between start date and end date Parameters ---------- start_date : starting date, datetime-like, optional end_date : ending date, datetime-like, optional return_name : bool, optional, default=False If True, return a series that has dates and holiday names. False will only return dates. """ start_date = Timestamp(start_date) end_date = Timestamp(end_date) filter_start_date = start_date filter_end_date = end_date if self.year is not None: dt = Timestamp(datetime(self.year, self.month, self.day)) if return_name: return Series(self.name, index=[dt]) else: return [dt] dates = self._reference_dates(start_date, end_date) holiday_dates = self._apply_rule(dates) if self.days_of_week is not None: holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek, self.days_of_week)] if self.start_date is not None: filter_start_date = max(self.start_date.tz_localize( filter_start_date.tz), filter_start_date) if self.end_date is not None: filter_end_date = min(self.end_date.tz_localize( filter_end_date.tz), filter_end_date) holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)] if return_name: return Series(self.name, index=holiday_dates) return holiday_dates
[ "Calculate", "holidays", "observed", "between", "start", "date", "and", "end", "date" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L192-L233
[ "def", "dates", "(", "self", ",", "start_date", ",", "end_date", ",", "return_name", "=", "False", ")", ":", "start_date", "=", "Timestamp", "(", "start_date", ")", "end_date", "=", "Timestamp", "(", "end_date", ")", "filter_start_date", "=", "start_date", "filter_end_date", "=", "end_date", "if", "self", ".", "year", "is", "not", "None", ":", "dt", "=", "Timestamp", "(", "datetime", "(", "self", ".", "year", ",", "self", ".", "month", ",", "self", ".", "day", ")", ")", "if", "return_name", ":", "return", "Series", "(", "self", ".", "name", ",", "index", "=", "[", "dt", "]", ")", "else", ":", "return", "[", "dt", "]", "dates", "=", "self", ".", "_reference_dates", "(", "start_date", ",", "end_date", ")", "holiday_dates", "=", "self", ".", "_apply_rule", "(", "dates", ")", "if", "self", ".", "days_of_week", "is", "not", "None", ":", "holiday_dates", "=", "holiday_dates", "[", "np", ".", "in1d", "(", "holiday_dates", ".", "dayofweek", ",", "self", ".", "days_of_week", ")", "]", "if", "self", ".", "start_date", "is", "not", "None", ":", "filter_start_date", "=", "max", "(", "self", ".", "start_date", ".", "tz_localize", "(", "filter_start_date", ".", "tz", ")", ",", "filter_start_date", ")", "if", "self", ".", "end_date", "is", "not", "None", ":", "filter_end_date", "=", "min", "(", "self", ".", "end_date", ".", "tz_localize", "(", "filter_end_date", ".", "tz", ")", ",", "filter_end_date", ")", "holiday_dates", "=", "holiday_dates", "[", "(", "holiday_dates", ">=", "filter_start_date", ")", "&", "(", "holiday_dates", "<=", "filter_end_date", ")", "]", "if", "return_name", ":", "return", "Series", "(", "self", ".", "name", ",", "index", "=", "holiday_dates", ")", "return", "holiday_dates" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
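A usage sketch combining a fixed-date rule with one of the observance helpers above; the rule definition mirrors pandas' own Independence Day rule, and the commented output is approximate:

    from pandas.tseries.holiday import Holiday, nearest_workday

    july4 = Holiday('July 4th', month=7, day=4, observance=nearest_workday)
    print(july4.dates('2020-01-01', '2022-12-31', return_name=True))
    # 2020-07-03    July 4th   (the 4th was a Saturday)
    # 2021-07-05    July 4th   (the 4th was a Sunday)
    # 2022-07-04    July 4th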
Holiday._reference_dates
Get reference dates for the holiday. Return reference dates for the holiday also returning the year prior to the start_date and year following the end_date. This ensures that any offsets to be applied will yield the holidays within the passed in dates.
pandas/tseries/holiday.py
def _reference_dates(self, start_date, end_date): """ Get reference dates for the holiday. Return reference dates for the holiday also returning the year prior to the start_date and year following the end_date. This ensures that any offsets to be applied will yield the holidays within the passed in dates. """ if self.start_date is not None: start_date = self.start_date.tz_localize(start_date.tz) if self.end_date is not None: end_date = self.end_date.tz_localize(start_date.tz) year_offset = DateOffset(years=1) reference_start_date = Timestamp( datetime(start_date.year - 1, self.month, self.day)) reference_end_date = Timestamp( datetime(end_date.year + 1, self.month, self.day)) # Don't process unnecessary holidays dates = date_range(start=reference_start_date, end=reference_end_date, freq=year_offset, tz=start_date.tz) return dates
def _reference_dates(self, start_date, end_date): """ Get reference dates for the holiday. Return reference dates for the holiday also returning the year prior to the start_date and year following the end_date. This ensures that any offsets to be applied will yield the holidays within the passed in dates. """ if self.start_date is not None: start_date = self.start_date.tz_localize(start_date.tz) if self.end_date is not None: end_date = self.end_date.tz_localize(start_date.tz) year_offset = DateOffset(years=1) reference_start_date = Timestamp( datetime(start_date.year - 1, self.month, self.day)) reference_end_date = Timestamp( datetime(end_date.year + 1, self.month, self.day)) # Don't process unnecessary holidays dates = date_range(start=reference_start_date, end=reference_end_date, freq=year_offset, tz=start_date.tz) return dates
[ "Get", "reference", "dates", "for", "the", "holiday", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L235-L261
[ "def", "_reference_dates", "(", "self", ",", "start_date", ",", "end_date", ")", ":", "if", "self", ".", "start_date", "is", "not", "None", ":", "start_date", "=", "self", ".", "start_date", ".", "tz_localize", "(", "start_date", ".", "tz", ")", "if", "self", ".", "end_date", "is", "not", "None", ":", "end_date", "=", "self", ".", "end_date", ".", "tz_localize", "(", "start_date", ".", "tz", ")", "year_offset", "=", "DateOffset", "(", "years", "=", "1", ")", "reference_start_date", "=", "Timestamp", "(", "datetime", "(", "start_date", ".", "year", "-", "1", ",", "self", ".", "month", ",", "self", ".", "day", ")", ")", "reference_end_date", "=", "Timestamp", "(", "datetime", "(", "end_date", ".", "year", "+", "1", ",", "self", ".", "month", ",", "self", ".", "day", ")", ")", "# Don't process unnecessary holidays", "dates", "=", "date_range", "(", "start", "=", "reference_start_date", ",", "end", "=", "reference_end_date", ",", "freq", "=", "year_offset", ",", "tz", "=", "start_date", ".", "tz", ")", "return", "dates" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Holiday._apply_rule
Apply the given offset/observance to a DatetimeIndex of dates. Parameters ---------- dates : DatetimeIndex Dates to apply the given offset/observance rule Returns ------- Dates with rules applied
pandas/tseries/holiday.py
def _apply_rule(self, dates): """ Apply the given offset/observance to a DatetimeIndex of dates. Parameters ---------- dates : DatetimeIndex Dates to apply the given offset/observance rule Returns ------- Dates with rules applied """ if self.observance is not None: return dates.map(lambda d: self.observance(d)) if self.offset is not None: if not isinstance(self.offset, list): offsets = [self.offset] else: offsets = self.offset for offset in offsets: # if we are adding a non-vectorized value # ignore the PerformanceWarnings: with warnings.catch_warnings(): warnings.simplefilter("ignore", PerformanceWarning) dates += offset return dates
def _apply_rule(self, dates): """ Apply the given offset/observance to a DatetimeIndex of dates. Parameters ---------- dates : DatetimeIndex Dates to apply the given offset/observance rule Returns ------- Dates with rules applied """ if self.observance is not None: return dates.map(lambda d: self.observance(d)) if self.offset is not None: if not isinstance(self.offset, list): offsets = [self.offset] else: offsets = self.offset for offset in offsets: # if we are adding a non-vectorized value # ignore the PerformanceWarnings: with warnings.catch_warnings(): warnings.simplefilter("ignore", PerformanceWarning) dates += offset return dates
[ "Apply", "the", "given", "offset", "/", "observance", "to", "a", "DatetimeIndex", "of", "dates", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L263-L291
[ "def", "_apply_rule", "(", "self", ",", "dates", ")", ":", "if", "self", ".", "observance", "is", "not", "None", ":", "return", "dates", ".", "map", "(", "lambda", "d", ":", "self", ".", "observance", "(", "d", ")", ")", "if", "self", ".", "offset", "is", "not", "None", ":", "if", "not", "isinstance", "(", "self", ".", "offset", ",", "list", ")", ":", "offsets", "=", "[", "self", ".", "offset", "]", "else", ":", "offsets", "=", "self", ".", "offset", "for", "offset", "in", "offsets", ":", "# if we are adding a non-vectorized value", "# ignore the PerformanceWarnings:", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "PerformanceWarning", ")", "dates", "+=", "offset", "return", "dates" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
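The offset branch is the path taken by rules such as "first Monday of September"; a sketch using the same offset pandas' own Labor Day rule uses (offsets may also be given as a list, applied in turn):

    from dateutil.relativedelta import MO
    from pandas.tseries.holiday import Holiday
    from pandas.tseries.offsets import DateOffset

    labor_day = Holiday('Labor Day', month=9, day=1,
                        offset=DateOffset(weekday=MO(1)))
    print(labor_day.dates('2021-01-01', '2021-12-31'))
    # DatetimeIndex(['2021-09-06'], ...)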
AbstractHolidayCalendar.holidays
Returns a DatetimeIndex of holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays
pandas/tseries/holiday.py
def holidays(self, start=None, end=None, return_name=False): """ Returns a DatetimeIndex of holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception('Holiday Calendar {name} does not have any ' 'rules specified'.format(name=self.name)) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if (self._cache is None or start < self._cache[0] or end > self._cache[1]): for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays else: holidays = holidays.append(rule_holidays) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index
def holidays(self, start=None, end=None, return_name=False): """ Returns a DatetimeIndex of holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays """ if self.rules is None: raise Exception('Holiday Calendar {name} does not have any ' 'rules specified'.format(name=self.name)) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if (self._cache is None or start < self._cache[0] or end > self._cache[1]): for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays else: holidays = holidays.append(rule_holidays) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index
[ "Returns", "a", "curve", "with", "holidays", "between", "start_date", "and", "end_date" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L362-L412
[ "def", "holidays", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "return_name", "=", "False", ")", ":", "if", "self", ".", "rules", "is", "None", ":", "raise", "Exception", "(", "'Holiday Calendar {name} does not have any '", "'rules specified'", ".", "format", "(", "name", "=", "self", ".", "name", ")", ")", "if", "start", "is", "None", ":", "start", "=", "AbstractHolidayCalendar", ".", "start_date", "if", "end", "is", "None", ":", "end", "=", "AbstractHolidayCalendar", ".", "end_date", "start", "=", "Timestamp", "(", "start", ")", "end", "=", "Timestamp", "(", "end", ")", "holidays", "=", "None", "# If we don't have a cache or the dates are outside the prior cache, we", "# get them again", "if", "(", "self", ".", "_cache", "is", "None", "or", "start", "<", "self", ".", "_cache", "[", "0", "]", "or", "end", ">", "self", ".", "_cache", "[", "1", "]", ")", ":", "for", "rule", "in", "self", ".", "rules", ":", "rule_holidays", "=", "rule", ".", "dates", "(", "start", ",", "end", ",", "return_name", "=", "True", ")", "if", "holidays", "is", "None", ":", "holidays", "=", "rule_holidays", "else", ":", "holidays", "=", "holidays", ".", "append", "(", "rule_holidays", ")", "self", ".", "_cache", "=", "(", "start", ",", "end", ",", "holidays", ".", "sort_index", "(", ")", ")", "holidays", "=", "self", ".", "_cache", "[", "2", "]", "holidays", "=", "holidays", "[", "start", ":", "end", "]", "if", "return_name", ":", "return", "holidays", "else", ":", "return", "holidays", ".", "index" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
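A minimal two-rule calendar exercising this method (the calendar itself is hypothetical, built only for this sketch); note the computed holidays are cached, so repeated calls over the same range reuse the prior result:

    from pandas.tseries.holiday import (AbstractHolidayCalendar, Holiday,
                                        nearest_workday)

    class ExampleCalendar(AbstractHolidayCalendar):
        rules = [
            Holiday('New Year', month=1, day=1, observance=nearest_workday),
            Holiday('July 4th', month=7, day=4, observance=nearest_workday),
        ]

    cal = ExampleCalendar()
    print(cal.holidays(start='2021-01-01', end='2021-12-31'))
    # DatetimeIndex(['2021-01-01', '2021-07-05'], ...)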
AbstractHolidayCalendar.merge_class
Merge holiday calendars together. The base calendar will take precedence over other. The merge will be done based on each holiday's name. Parameters ---------- base : AbstractHolidayCalendar instance/subclass or array of Holiday objects other : AbstractHolidayCalendar instance/subclass or array of Holiday objects
pandas/tseries/holiday.py
def merge_class(base, other): """ Merge holiday calendars together. The base calendar will take precedence over other. The merge will be done based on each holiday's name. Parameters ---------- base : AbstractHolidayCalendar instance/subclass or array of Holiday objects other : AbstractHolidayCalendar instance/subclass or array of Holiday objects """ try: other = other.rules except AttributeError: pass if not isinstance(other, list): other = [other] other_holidays = {holiday.name: holiday for holiday in other} try: base = base.rules except AttributeError: pass if not isinstance(base, list): base = [base] base_holidays = {holiday.name: holiday for holiday in base} other_holidays.update(base_holidays) return list(other_holidays.values())
def merge_class(base, other): """ Merge holiday calendars together. The base calendar will take precedence over other. The merge will be done based on each holiday's name. Parameters ---------- base : AbstractHolidayCalendar instance/subclass or array of Holiday objects other : AbstractHolidayCalendar instance/subclass or array of Holiday objects """ try: other = other.rules except AttributeError: pass if not isinstance(other, list): other = [other] other_holidays = {holiday.name: holiday for holiday in other} try: base = base.rules except AttributeError: pass if not isinstance(base, list): base = [base] base_holidays = {holiday.name: holiday for holiday in base} other_holidays.update(base_holidays) return list(other_holidays.values())
[ "Merge", "holiday", "calendars", "together", ".", "The", "base", "calendar", "will", "take", "precedence", "to", "other", ".", "The", "merge", "will", "be", "done", "based", "on", "each", "holiday", "s", "name", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L415-L447
[ "def", "merge_class", "(", "base", ",", "other", ")", ":", "try", ":", "other", "=", "other", ".", "rules", "except", "AttributeError", ":", "pass", "if", "not", "isinstance", "(", "other", ",", "list", ")", ":", "other", "=", "[", "other", "]", "other_holidays", "=", "{", "holiday", ".", "name", ":", "holiday", "for", "holiday", "in", "other", "}", "try", ":", "base", "=", "base", ".", "rules", "except", "AttributeError", ":", "pass", "if", "not", "isinstance", "(", "base", ",", "list", ")", ":", "base", "=", "[", "base", "]", "base_holidays", "=", "{", "holiday", ".", "name", ":", "holiday", "for", "holiday", "in", "base", "}", "other_holidays", ".", "update", "(", "base_holidays", ")", "return", "list", "(", "other_holidays", ".", "values", "(", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
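Name-based precedence in a sketch with two hypothetical rule lists that share a holiday name; the base list's version of the shared rule survives:

    from pandas.tseries.holiday import AbstractHolidayCalendar, Holiday

    base = [Holiday('New Year', month=1, day=1)]
    other = [Holiday('New Year', month=1, day=2),   # same name: base wins
             Holiday('May Day', month=5, day=1)]

    merged = AbstractHolidayCalendar.merge_class(base, other)
    print(sorted((h.name, h.day) for h in merged))
    # [('May Day', 1), ('New Year', 1)]  -- base's day=1 rule survived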
AbstractHolidayCalendar.merge
Merge holiday calendars together. The caller's class rules take precedence. The merge will be done based on each holiday's name. Parameters ---------- other : holiday calendar inplace : bool (default=False) If True set rules to holidays, else return array of Holidays
pandas/tseries/holiday.py
def merge(self, other, inplace=False): """ Merge holiday calendars together. The caller's class rules take precedence. The merge will be done based on each holiday's name. Parameters ---------- other : holiday calendar inplace : bool (default=False) If True set rules to holidays, else return array of Holidays """ holidays = self.merge_class(self, other) if inplace: self.rules = holidays else: return holidays
def merge(self, other, inplace=False): """ Merge holiday calendars together. The caller's class rules take precedence. The merge will be done based on each holiday's name. Parameters ---------- other : holiday calendar inplace : bool (default=False) If True set rules to holidays, else return array of Holidays """ holidays = self.merge_class(self, other) if inplace: self.rules = holidays else: return holidays
[ "Merge", "holiday", "calendars", "together", ".", "The", "caller", "s", "class", "rules", "take", "precedence", ".", "The", "merge", "will", "be", "done", "based", "on", "each", "holiday", "s", "name", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/holiday.py#L449-L465
[ "def", "merge", "(", "self", ",", "other", ",", "inplace", "=", "False", ")", ":", "holidays", "=", "self", ".", "merge_class", "(", "self", ",", "other", ")", "if", "inplace", ":", "self", ".", "rules", "=", "holidays", "else", ":", "return", "holidays" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
register_option
Register an option in the package-wide pandas config object Parameters ---------- key - a fully-qualified key, e.g. "x.y.option - z". defval - the default value of the option doc - a string description of the option validator - a function of a single argument, should raise `ValueError` if called with a value which is not a legal value for the option. cb - a function of a single argument "key", which is called immediately after an option value is set/reset. key is the full name of the option. Returns ------- Nothing. Raises ------ ValueError if `validator` is specified and `defval` is not a valid value.
pandas/_config/config.py
def register_option(key, defval, doc='', validator=None, cb=None): """Register an option in the package-wide pandas config object Parameters ---------- key - a fully-qualified key, e.g. "x.y.option - z". defval - the default value of the option doc - a string description of the option validator - a function of a single argument, should raise `ValueError` if called with a value which is not a legal value for the option. cb - a function of a single argument "key", which is called immediately after an option value is set/reset. key is the full name of the option. Returns ------- Nothing. Raises ------ ValueError if `validator` is specified and `defval` is not a valid value. """ import tokenize import keyword key = key.lower() if key in _registered_options: msg = "Option '{key}' has already been registered" raise OptionError(msg.format(key=key)) if key in _reserved_keys: msg = "Option '{key}' is a reserved key" raise OptionError(msg.format(key=key)) # the default value should be legal if validator: validator(defval) # walk the nested dict, creating dicts as needed along the path path = key.split('.') for k in path: if not bool(re.match('^' + tokenize.Name + '$', k)): raise ValueError("{k} is not a valid identifier".format(k=k)) if keyword.iskeyword(k): raise ValueError("{k} is a python keyword".format(k=k)) cursor = _global_config msg = "Path prefix to option '{option}' is already an option" for i, p in enumerate(path[:-1]): if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:i]))) if p not in cursor: cursor[p] = {} cursor = cursor[p] if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:-1]))) cursor[path[-1]] = defval # initialize # save the option metadata _registered_options[key] = RegisteredOption(key=key, defval=defval, doc=doc, validator=validator, cb=cb)
def register_option(key, defval, doc='', validator=None, cb=None): """Register an option in the package-wide pandas config object Parameters ---------- key - a fully-qualified key, e.g. "x.y.option - z". defval - the default value of the option doc - a string description of the option validator - a function of a single argument, should raise `ValueError` if called with a value which is not a legal value for the option. cb - a function of a single argument "key", which is called immediately after an option value is set/reset. key is the full name of the option. Returns ------- Nothing. Raises ------ ValueError if `validator` is specified and `defval` is not a valid value. """ import tokenize import keyword key = key.lower() if key in _registered_options: msg = "Option '{key}' has already been registered" raise OptionError(msg.format(key=key)) if key in _reserved_keys: msg = "Option '{key}' is a reserved key" raise OptionError(msg.format(key=key)) # the default value should be legal if validator: validator(defval) # walk the nested dict, creating dicts as needed along the path path = key.split('.') for k in path: if not bool(re.match('^' + tokenize.Name + '$', k)): raise ValueError("{k} is not a valid identifier".format(k=k)) if keyword.iskeyword(k): raise ValueError("{k} is a python keyword".format(k=k)) cursor = _global_config msg = "Path prefix to option '{option}' is already an option" for i, p in enumerate(path[:-1]): if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:i]))) if p not in cursor: cursor[p] = {} cursor = cursor[p] if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:-1]))) cursor[path[-1]] = defval # initialize # save the option metadata _registered_options[key] = RegisteredOption(key=key, defval=defval, doc=doc, validator=validator, cb=cb)
[ "Register", "an", "option", "in", "the", "package", "-", "wide", "pandas", "config", "object" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L415-L479
[ "def", "register_option", "(", "key", ",", "defval", ",", "doc", "=", "''", ",", "validator", "=", "None", ",", "cb", "=", "None", ")", ":", "import", "tokenize", "import", "keyword", "key", "=", "key", ".", "lower", "(", ")", "if", "key", "in", "_registered_options", ":", "msg", "=", "\"Option '{key}' has already been registered\"", "raise", "OptionError", "(", "msg", ".", "format", "(", "key", "=", "key", ")", ")", "if", "key", "in", "_reserved_keys", ":", "msg", "=", "\"Option '{key}' is a reserved key\"", "raise", "OptionError", "(", "msg", ".", "format", "(", "key", "=", "key", ")", ")", "# the default value should be legal", "if", "validator", ":", "validator", "(", "defval", ")", "# walk the nested dict, creating dicts as needed along the path", "path", "=", "key", ".", "split", "(", "'.'", ")", "for", "k", "in", "path", ":", "if", "not", "bool", "(", "re", ".", "match", "(", "'^'", "+", "tokenize", ".", "Name", "+", "'$'", ",", "k", ")", ")", ":", "raise", "ValueError", "(", "\"{k} is not a valid identifier\"", ".", "format", "(", "k", "=", "k", ")", ")", "if", "keyword", ".", "iskeyword", "(", "k", ")", ":", "raise", "ValueError", "(", "\"{k} is a python keyword\"", ".", "format", "(", "k", "=", "k", ")", ")", "cursor", "=", "_global_config", "msg", "=", "\"Path prefix to option '{option}' is already an option\"", "for", "i", ",", "p", "in", "enumerate", "(", "path", "[", ":", "-", "1", "]", ")", ":", "if", "not", "isinstance", "(", "cursor", ",", "dict", ")", ":", "raise", "OptionError", "(", "msg", ".", "format", "(", "option", "=", "'.'", ".", "join", "(", "path", "[", ":", "i", "]", ")", ")", ")", "if", "p", "not", "in", "cursor", ":", "cursor", "[", "p", "]", "=", "{", "}", "cursor", "=", "cursor", "[", "p", "]", "if", "not", "isinstance", "(", "cursor", ",", "dict", ")", ":", "raise", "OptionError", "(", "msg", ".", "format", "(", "option", "=", "'.'", ".", "join", "(", "path", "[", ":", "-", "1", "]", ")", ")", ")", "cursor", "[", "path", "[", "-", "1", "]", "]", "=", "defval", "# initialize", "# save the option metadata", "_registered_options", "[", "key", "]", "=", "RegisteredOption", "(", "key", "=", "key", ",", "defval", "=", "defval", ",", "doc", "=", "doc", ",", "validator", "=", "validator", ",", "cb", "=", "cb", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
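A registration sketch; 'demo.answer' is a hypothetical key used only here (re-registering an existing key raises OptionError, so this is a one-shot example):

    import pandas._config.config as cf

    cf.register_option('demo.answer', 42, doc='an example option',
                       validator=cf.is_int)
    print(cf.get_option('demo.answer'))   # 42
    cf.set_option('demo.answer', 7)
    print(cf.get_option('demo.answer'))   # 7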
deprecate_option
Mark option `key` as deprecated, if code attempts to access this option, a warning will be produced, using `msg` if given, or a default message if not. if `rkey` is given, any access to the key will be re-routed to `rkey`. Neither the existence of `key` nor that of `rkey` is checked. If they do not exist, any subsequent access will fail as usual, after the deprecation warning is given. Parameters ---------- key - the name of the option to be deprecated. must be a fully-qualified option name (e.g "x.y.z.rkey"). msg - (Optional) a warning message to output when the key is referenced. if no message is given a default message will be emitted. rkey - (Optional) the name of an option to reroute access to. If specified, any referenced `key` will be re-routed to `rkey` including set/get/reset. rkey must be a fully-qualified option name (e.g "x.y.z.rkey"). used by the default message if no `msg` is specified. removal_ver - (Optional) specifies the version in which this option will be removed. used by the default message if no `msg` is specified. Returns ------- Nothing Raises ------ OptionError - if key has already been deprecated.
pandas/_config/config.py
def deprecate_option(key, msg=None, rkey=None, removal_ver=None): """ Mark option `key` as deprecated, if code attempts to access this option, a warning will be produced, using `msg` if given, or a default message if not. if `rkey` is given, any access to the key will be re-routed to `rkey`. Neither the existence of `key` nor that of `rkey` is checked. If they do not exist, any subsequent access will fail as usual, after the deprecation warning is given. Parameters ---------- key - the name of the option to be deprecated. must be a fully-qualified option name (e.g "x.y.z.rkey"). msg - (Optional) a warning message to output when the key is referenced. if no message is given a default message will be emitted. rkey - (Optional) the name of an option to reroute access to. If specified, any referenced `key` will be re-routed to `rkey` including set/get/reset. rkey must be a fully-qualified option name (e.g "x.y.z.rkey"). used by the default message if no `msg` is specified. removal_ver - (Optional) specifies the version in which this option will be removed. used by the default message if no `msg` is specified. Returns ------- Nothing Raises ------ OptionError - if key has already been deprecated. """ key = key.lower() if key in _deprecated_options: msg = "Option '{key}' has already been defined as deprecated." raise OptionError(msg.format(key=key)) _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
def deprecate_option(key, msg=None, rkey=None, removal_ver=None): """ Mark option `key` as deprecated, if code attempts to access this option, a warning will be produced, using `msg` if given, or a default message if not. if `rkey` is given, any access to the key will be re-routed to `rkey`. Neither the existence of `key` nor that of `rkey` is checked. If they do not exist, any subsequent access will fail as usual, after the deprecation warning is given. Parameters ---------- key - the name of the option to be deprecated. must be a fully-qualified option name (e.g "x.y.z.rkey"). msg - (Optional) a warning message to output when the key is referenced. if no message is given a default message will be emitted. rkey - (Optional) the name of an option to reroute access to. If specified, any referenced `key` will be re-routed to `rkey` including set/get/reset. rkey must be a fully-qualified option name (e.g "x.y.z.rkey"). used by the default message if no `msg` is specified. removal_ver - (Optional) specifies the version in which this option will be removed. used by the default message if no `msg` is specified. Returns ------- Nothing Raises ------ OptionError - if key has already been deprecated. """ key = key.lower() if key in _deprecated_options: msg = "Option '{key}' has already been defined as deprecated." raise OptionError(msg.format(key=key)) _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
[ "Mark", "option", "key", "as", "deprecated", "if", "code", "attempts", "to", "access", "this", "option", "a", "warning", "will", "be", "produced", "using", "msg", "if", "given", "or", "a", "default", "message", "if", "not", ".", "if", "rkey", "is", "given", "any", "access", "to", "the", "key", "will", "be", "re", "-", "routed", "to", "rkey", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L482-L527
[ "def", "deprecate_option", "(", "key", ",", "msg", "=", "None", ",", "rkey", "=", "None", ",", "removal_ver", "=", "None", ")", ":", "key", "=", "key", ".", "lower", "(", ")", "if", "key", "in", "_deprecated_options", ":", "msg", "=", "\"Option '{key}' has already been defined as deprecated.\"", "raise", "OptionError", "(", "msg", ".", "format", "(", "key", "=", "key", ")", ")", "_deprecated_options", "[", "key", "]", "=", "DeprecatedOption", "(", "key", ",", "msg", ",", "rkey", ",", "removal_ver", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
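A re-routing sketch with hypothetical keys; reading the deprecated name emits a FutureWarning and is redirected to the replacement key:

    import warnings
    import pandas._config.config as cf

    cf.register_option('demo.new_key', 'x')
    cf.register_option('demo.old_key', 'old value')
    cf.deprecate_option('demo.old_key', rkey='demo.new_key')

    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        print(cf.get_option('demo.old_key'))   # 'x', read from demo.new_key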
_select_options
returns a list of keys matching `pat` if pat=="all", returns all registered options
pandas/_config/config.py
def _select_options(pat): """returns a list of keys matching `pat` if pat=="all", returns all registered options """ # short-circuit for exact key if pat in _registered_options: return [pat] # else look through all of them keys = sorted(_registered_options.keys()) if pat == 'all': # reserved key return keys return [k for k in keys if re.search(pat, k, re.I)]
def _select_options(pat): """returns a list of keys matching `pat` if pat=="all", returns all registered options """ # short-circuit for exact key if pat in _registered_options: return [pat] # else look through all of them keys = sorted(_registered_options.keys()) if pat == 'all': # reserved key return keys return [k for k in keys if re.search(pat, k, re.I)]
[ "returns", "a", "list", "of", "keys", "matching", "pat" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L533-L548
[ "def", "_select_options", "(", "pat", ")", ":", "# short-circuit for exact key", "if", "pat", "in", "_registered_options", ":", "return", "[", "pat", "]", "# else look through all of them", "keys", "=", "sorted", "(", "_registered_options", ".", "keys", "(", ")", ")", "if", "pat", "==", "'all'", ":", "# reserved key", "return", "keys", "return", "[", "k", "for", "k", "in", "keys", "if", "re", ".", "search", "(", "pat", ",", "k", ",", "re", ".", "I", ")", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
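Matching is a plain case-insensitive regex search over the registered names, with an exact-key short circuit; for instance (assuming a stock pandas install where both display options exist):

    import pandas  # noqa: F401 -- importing pandas registers the display.* options
    import pandas._config.config as cf

    print(cf._select_options('display.max_col'))
    # ['display.max_columns', 'display.max_colwidth']
    print(cf._select_options('display.max_columns'))   # exact key: no regex scan
    # ['display.max_columns']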
_translate_key
if key is deprecated and a replacement key is defined, will return the replacement key, otherwise returns `key` as-is
pandas/_config/config.py
def _translate_key(key): """ if key is deprecated and a replacement key is defined, will return the replacement key, otherwise returns `key` as-is """ d = _get_deprecated_option(key) if d: return d.rkey or key else: return key
def _translate_key(key): """ if key is deprecated and a replacement key is defined, will return the replacement key, otherwise returns `key` as-is """ d = _get_deprecated_option(key) if d: return d.rkey or key else: return key
[ "if", "key", "id", "deprecated", "and", "a", "replacement", "key", "defined", "will", "return", "the", "replacement", "key", "otherwise", "returns", "key", "as", "-", "is" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L594-L604
[ "def", "_translate_key", "(", "key", ")", ":", "d", "=", "_get_deprecated_option", "(", "key", ")", "if", "d", ":", "return", "d", ".", "rkey", "or", "key", "else", ":", "return", "key" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_build_option_description
Builds a formatted description of a registered option and returns it
pandas/_config/config.py
def _build_option_description(k): """ Builds a formatted description of a registered option and returns it """ o = _get_registered_option(k) d = _get_deprecated_option(k) s = '{k} '.format(k=k) if o.doc: s += '\n'.join(o.doc.strip().split('\n')) else: s += 'No description available.' if o: s += ('\n [default: {default}] [currently: {current}]' .format(default=o.defval, current=_get_option(k, True))) if d: s += '\n (Deprecated' s += (', use `{rkey}` instead.' .format(rkey=d.rkey if d.rkey else '')) s += ')' return s
def _build_option_description(k): """ Builds a formatted description of a registered option and returns it """ o = _get_registered_option(k) d = _get_deprecated_option(k) s = '{k} '.format(k=k) if o.doc: s += '\n'.join(o.doc.strip().split('\n')) else: s += 'No description available.' if o: s += ('\n [default: {default}] [currently: {current}]' .format(default=o.defval, current=_get_option(k, True))) if d: s += '\n (Deprecated' s += (', use `{rkey}` instead.' .format(rkey=d.rkey if d.rkey else '')) s += ')' return s
[ "Builds", "a", "formatted", "description", "of", "a", "registered", "option", "and", "prints", "it" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L636-L659
[ "def", "_build_option_description", "(", "k", ")", ":", "o", "=", "_get_registered_option", "(", "k", ")", "d", "=", "_get_deprecated_option", "(", "k", ")", "s", "=", "'{k} '", ".", "format", "(", "k", "=", "k", ")", "if", "o", ".", "doc", ":", "s", "+=", "'\\n'", ".", "join", "(", "o", ".", "doc", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", ")", "else", ":", "s", "+=", "'No description available.'", "if", "o", ":", "s", "+=", "(", "'\\n [default: {default}] [currently: {current}]'", ".", "format", "(", "default", "=", "o", ".", "defval", ",", "current", "=", "_get_option", "(", "k", ",", "True", ")", ")", ")", "if", "d", ":", "s", "+=", "'\\n (Deprecated'", "s", "+=", "(", "', use `{rkey}` instead.'", ".", "format", "(", "rkey", "=", "d", ".", "rkey", "if", "d", ".", "rkey", "else", "''", ")", ")", "s", "+=", "')'", "return", "s" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
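A quick sketch of the string this helper assembles (the commented output is abridged; exact defaults depend on the pandas version):

    import pandas  # noqa: F401 -- registers the display.* options
    import pandas._config.config as cf

    print(cf._build_option_description('display.max_rows'))
    # display.max_rows : int
    #     ...
    #     [default: 60] [currently: 60]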
config_prefix
contextmanager for multiple invocations of API with a common prefix supported API functions: (register / get / set)_option Warning: This is not thread-safe, and won't work properly if you import the API functions into your module using the "from x import y" construct. Example: import pandas._config.config as cf with cf.config_prefix("display.font"): cf.register_option("color", "red") cf.register_option("size", " 5 pt") cf.set_option("size", " 6 pt") cf.get_option("size") ... etc. will register options "display.font.color", "display.font.size", set the value of "display.font.size"... and so on.
pandas/_config/config.py
def config_prefix(prefix): """contextmanager for multiple invocations of API with a common prefix supported API functions: (register / get / set)_option Warning: This is not thread-safe, and won't work properly if you import the API functions into your module using the "from x import y" construct. Example: import pandas._config.config as cf with cf.config_prefix("display.font"): cf.register_option("color", "red") cf.register_option("size", " 5 pt") cf.set_option("size", " 6 pt") cf.get_option("size") ... etc. will register options "display.font.color", "display.font.size", set the value of "display.font.size"... and so on. """ # Note: reset_option relies on set_option, and on key directly # it does not fit in to this monkey-patching scheme global register_option, get_option, set_option, reset_option def wrap(func): def inner(key, *args, **kwds): pkey = '{prefix}.{key}'.format(prefix=prefix, key=key) return func(pkey, *args, **kwds) return inner _register_option = register_option _get_option = get_option _set_option = set_option set_option = wrap(set_option) get_option = wrap(get_option) register_option = wrap(register_option) yield None set_option = _set_option get_option = _get_option register_option = _register_option
def config_prefix(prefix): """contextmanager for multiple invocations of API with a common prefix supported API functions: (register / get / set)_option Warning: This is not thread-safe, and won't work properly if you import the API functions into your module using the "from x import y" construct. Example: import pandas._config.config as cf with cf.config_prefix("display.font"): cf.register_option("color", "red") cf.register_option("size", " 5 pt") cf.set_option("size", " 6 pt") cf.get_option("size") ... etc. will register options "display.font.color", "display.font.size", set the value of "display.font.size"... and so on. """ # Note: reset_option relies on set_option, and on key directly # it does not fit in to this monkey-patching scheme global register_option, get_option, set_option, reset_option def wrap(func): def inner(key, *args, **kwds): pkey = '{prefix}.{key}'.format(prefix=prefix, key=key) return func(pkey, *args, **kwds) return inner _register_option = register_option _get_option = get_option _set_option = set_option set_option = wrap(set_option) get_option = wrap(get_option) register_option = wrap(register_option) yield None set_option = _set_option get_option = _get_option register_option = _register_option
[ "contextmanager", "for", "multiple", "invocations", "of", "API", "with", "a", "common", "prefix" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/config.py#L696-L741
[ "def", "config_prefix", "(", "prefix", ")", ":", "# Note: reset_option relies on set_option, and on key directly", "# it does not fit in to this monkey-patching scheme", "global", "register_option", ",", "get_option", ",", "set_option", ",", "reset_option", "def", "wrap", "(", "func", ")", ":", "def", "inner", "(", "key", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "pkey", "=", "'{prefix}.{key}'", ".", "format", "(", "prefix", "=", "prefix", ",", "key", "=", "key", ")", "return", "func", "(", "pkey", ",", "*", "args", ",", "*", "*", "kwds", ")", "return", "inner", "_register_option", "=", "register_option", "_get_option", "=", "get_option", "_set_option", "=", "set_option", "set_option", "=", "wrap", "(", "set_option", ")", "get_option", "=", "wrap", "(", "get_option", ")", "register_option", "=", "wrap", "(", "register_option", ")", "yield", "None", "set_option", "=", "_set_option", "get_option", "=", "_get_option", "register_option", "=", "_register_option" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
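A prefix sketch with a hypothetical 'demo2' namespace; inside the block every key passed to the wrapped functions is transparently prefixed:

    import pandas._config.config as cf

    with cf.config_prefix('demo2'):
        cf.register_option('color', 'red')   # actually registers 'demo2.color'
        cf.register_option('size', 5)        # actually registers 'demo2.size'
        print(cf.get_option('size'))         # 5

    print(cf.get_option('demo2.color'))      # 'red' -- full key needed outside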
CSSResolver.parse
Generates (prop, value) pairs from declarations In a future version may generate parsed tokens from tinycss/tinycss2
pandas/io/formats/css.py
def parse(self, declarations_str): """Generates (prop, value) pairs from declarations In a future version may generate parsed tokens from tinycss/tinycss2 """ for decl in declarations_str.split(';'): if not decl.strip(): continue prop, sep, val = decl.partition(':') prop = prop.strip().lower() # TODO: don't lowercase case sensitive parts of values (strings) val = val.strip().lower() if sep: yield prop, val else: warnings.warn('Ill-formatted attribute: expected a colon ' 'in {decl!r}'.format(decl=decl), CSSWarning)
def parse(self, declarations_str): """Generates (prop, value) pairs from declarations In a future version may generate parsed tokens from tinycss/tinycss2 """ for decl in declarations_str.split(';'): if not decl.strip(): continue prop, sep, val = decl.partition(':') prop = prop.strip().lower() # TODO: don't lowercase case sensitive parts of values (strings) val = val.strip().lower() if sep: yield prop, val else: warnings.warn('Ill-formatted attribute: expected a colon ' 'in {decl!r}'.format(decl=decl), CSSWarning)
[ "Generates", "(", "prop", "value", ")", "pairs", "from", "declarations" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/css.py#L231-L247
[ "def", "parse", "(", "self", ",", "declarations_str", ")", ":", "for", "decl", "in", "declarations_str", ".", "split", "(", "';'", ")", ":", "if", "not", "decl", ".", "strip", "(", ")", ":", "continue", "prop", ",", "sep", ",", "val", "=", "decl", ".", "partition", "(", "':'", ")", "prop", "=", "prop", ".", "strip", "(", ")", ".", "lower", "(", ")", "# TODO: don't lowercase case sensitive parts of values (strings)", "val", "=", "val", ".", "strip", "(", ")", ".", "lower", "(", ")", "if", "sep", ":", "yield", "prop", ",", "val", "else", ":", "warnings", ".", "warn", "(", "'Ill-formatted attribute: expected a colon '", "'in {decl!r}'", ".", "format", "(", "decl", "=", "decl", ")", ",", "CSSWarning", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
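A parsing sketch; note that values are lowercased wholesale (the TODO in the code above), which would also affect case-sensitive values such as quoted strings:

    from pandas.io.formats.css import CSSResolver

    resolver = CSSResolver()
    print(dict(resolver.parse('font-weight: bold; color: RED')))
    # {'font-weight': 'bold', 'color': 'red'}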