Columns (name, type, observed range)

id                 int32    0 – 252k
repo               string   7 – 55 chars
path               string   4 – 127 chars
func_name          string   1 – 88 chars
original_string    string   75 – 19.8k chars
language           string   1 class (python)
code               string   75 – 19.8k chars
code_tokens        list
docstring          string   3 – 17.3k chars
docstring_tokens   list
sha                string   40 chars (fixed)
url                string   87 – 242 chars
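A minimal sketch of how records with this schema might be loaded and inspected. The file name and the JSON Lines format are assumptions; the dump does not say how the records are stored on disk, only which columns each record carries.

```python
import json

# Hypothetical path: the dump does not name the underlying file.
RECORDS_PATH = "python_functions.jsonl"

def iter_records(path):
    """Yield one dict per line of a JSON Lines file with the columns above."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

for record in iter_records(RECORDS_PATH):
    # Each record carries the raw source ('code' / 'original_string'), its
    # docstring, pre-tokenized variants, and provenance fields (repo, path,
    # sha, url) pinning the snippet to an exact commit.
    print(record["id"], record["repo"], record["func_name"])
    print(len(record["code_tokens"]), "code tokens,",
          len(record["docstring_tokens"]), "docstring tokens")
    break
```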
20,200
pandas-dev/pandas
pandas/core/generic.py
NDFrame._needs_reindex_multi
def _needs_reindex_multi(self, axes, method, level): """Check if we do need a multi reindex.""" return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type)
python
def _needs_reindex_multi(self, axes, method, level): """Check if we do need a multi reindex.""" return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type)
[ "def", "_needs_reindex_multi", "(", "self", ",", "axes", ",", "method", ",", "level", ")", ":", "return", "(", "(", "com", ".", "count_not_none", "(", "*", "axes", ".", "values", "(", ")", ")", "==", "self", ".", "_AXIS_LEN", ")", "and", "method", "is", "None", "and", "level", "is", "None", "and", "not", "self", ".", "_is_mixed_type", ")" ]
Check if we do need a multi reindex.
[ "Check", "if", "we", "do", "need", "a", "multi", "reindex", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4413-L4416
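The `url` field appears to be derived from `repo`, `sha`, `path`, and the function's line span at that commit. A small sketch of rebuilding it is below; the line numbers must be passed in explicitly, since the dump stores them only inside the url string rather than as separate columns.

```python
def blob_url(repo, sha, path, start_line, end_line):
    """Rebuild the GitHub blob URL used in the 'url' column.

    start_line/end_line are supplied by the caller; they are not
    separate fields in the record.
    """
    return ("https://github.com/{repo}/blob/{sha}/{path}#L{start}-L{end}"
            .format(repo=repo, sha=sha, path=path,
                    start=start_line, end=end_line))

# Reproduces the url of the record above (NDFrame._needs_reindex_multi).
print(blob_url("pandas-dev/pandas",
               "9feb3ad92cc0397a04b665803a49299ee7aa1037",
               "pandas/core/generic.py", 4413, 4416))
```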
20,201
pandas-dev/pandas
pandas/core/generic.py
NDFrame._reindex_with_indexers
def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, allow_dups=False): """allow_dups indicates an internal call here """ # reindex doing multiple operations on different axes if indicated new_data = self._data for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_int64(indexer) # TODO: speed up on homogeneous DataFrame objects new_data = new_data.reindex_indexer(index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy) if copy and new_data is self._data: new_data = new_data.copy() return self._constructor(new_data).__finalize__(self)
python
def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, allow_dups=False): """allow_dups indicates an internal call here """ # reindex doing multiple operations on different axes if indicated new_data = self._data for axis in sorted(reindexers.keys()): index, indexer = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_int64(indexer) # TODO: speed up on homogeneous DataFrame objects new_data = new_data.reindex_indexer(index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups, copy=copy) if copy and new_data is self._data: new_data = new_data.copy() return self._constructor(new_data).__finalize__(self)
[ "def", "_reindex_with_indexers", "(", "self", ",", "reindexers", ",", "fill_value", "=", "None", ",", "copy", "=", "False", ",", "allow_dups", "=", "False", ")", ":", "# reindex doing multiple operations on different axes if indicated", "new_data", "=", "self", ".", "_data", "for", "axis", "in", "sorted", "(", "reindexers", ".", "keys", "(", ")", ")", ":", "index", ",", "indexer", "=", "reindexers", "[", "axis", "]", "baxis", "=", "self", ".", "_get_block_manager_axis", "(", "axis", ")", "if", "index", "is", "None", ":", "continue", "index", "=", "ensure_index", "(", "index", ")", "if", "indexer", "is", "not", "None", ":", "indexer", "=", "ensure_int64", "(", "indexer", ")", "# TODO: speed up on homogeneous DataFrame objects", "new_data", "=", "new_data", ".", "reindex_indexer", "(", "index", ",", "indexer", ",", "axis", "=", "baxis", ",", "fill_value", "=", "fill_value", ",", "allow_dups", "=", "allow_dups", ",", "copy", "=", "copy", ")", "if", "copy", "and", "new_data", "is", "self", ".", "_data", ":", "new_data", "=", "new_data", ".", "copy", "(", ")", "return", "self", ".", "_constructor", "(", "new_data", ")", ".", "__finalize__", "(", "self", ")" ]
allow_dups indicates an internal call here
[ "allow_dups", "indicates", "an", "internal", "call", "here" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4504-L4530
20,202
pandas-dev/pandas
pandas/core/generic.py
NDFrame.filter
def filter(self, items=None, like=None, regex=None, axis=None): """ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : string Keep labels from axis for which "like in label == True". regex : string (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ import re nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) return self.reindex( **{name: [r for r in items if r in labels]}) elif like: def f(x): return like in to_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x): return matcher.search(to_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or `regex`')
python
def filter(self, items=None, like=None, regex=None, axis=None): """ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : string Keep labels from axis for which "like in label == True". regex : string (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ import re nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) return self.reindex( **{name: [r for r in items if r in labels]}) elif like: def f(x): return like in to_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x): return matcher.search(to_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or `regex`')
[ "def", "filter", "(", "self", ",", "items", "=", "None", ",", "like", "=", "None", ",", "regex", "=", "None", ",", "axis", "=", "None", ")", ":", "import", "re", "nkw", "=", "com", ".", "count_not_none", "(", "items", ",", "like", ",", "regex", ")", "if", "nkw", ">", "1", ":", "raise", "TypeError", "(", "'Keyword arguments `items`, `like`, or `regex` '", "'are mutually exclusive'", ")", "if", "axis", "is", "None", ":", "axis", "=", "self", ".", "_info_axis_name", "labels", "=", "self", ".", "_get_axis", "(", "axis", ")", "if", "items", "is", "not", "None", ":", "name", "=", "self", ".", "_get_axis_name", "(", "axis", ")", "return", "self", ".", "reindex", "(", "*", "*", "{", "name", ":", "[", "r", "for", "r", "in", "items", "if", "r", "in", "labels", "]", "}", ")", "elif", "like", ":", "def", "f", "(", "x", ")", ":", "return", "like", "in", "to_str", "(", "x", ")", "values", "=", "labels", ".", "map", "(", "f", ")", "return", "self", ".", "loc", "(", "axis", "=", "axis", ")", "[", "values", "]", "elif", "regex", ":", "def", "f", "(", "x", ")", ":", "return", "matcher", ".", "search", "(", "to_str", "(", "x", ")", ")", "is", "not", "None", "matcher", "=", "re", ".", "compile", "(", "regex", ")", "values", "=", "labels", ".", "map", "(", "f", ")", "return", "self", ".", "loc", "(", "axis", "=", "axis", ")", "[", "values", "]", "else", ":", "raise", "TypeError", "(", "'Must pass either `items`, `like`, or `regex`'", ")" ]
Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : string Keep labels from axis for which "like in label == True". regex : string (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6
[ "Subset", "rows", "or", "columns", "of", "dataframe", "according", "to", "labels", "in", "the", "specified", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4532-L4618
20,203
pandas-dev/pandas
pandas/core/generic.py
NDFrame.sample
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None): """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Sample with or without replacement. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int or numpy.random.RandomState, optional Seed for the random number generator (if int), or numpy RandomState object. axis : int or string, optional Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels). Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- numpy.random.choice: Generates a random sample from a given 1-D numpy array. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. 
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) axis_length = self.shape[axis] # Process random_state argument rs = com.random_state(random_state) # Check weights for compliance if weights is not None: # If a series, align with frame if isinstance(weights, pd.Series): weights = weights.reindex(self.axes[axis]) # Strings acceptable if a dataframe and axis = 0 if isinstance(weights, str): if isinstance(self, pd.DataFrame): if axis == 0: try: weights = self[weights] except KeyError: raise KeyError("String passed to weights not a " "valid column") else: raise ValueError("Strings can only be passed to " "weights when sampling from rows on " "a DataFrame") else: raise ValueError("Strings cannot be passed as weights " "when sampling from a Series or Panel.") weights = pd.Series(weights, dtype='float64') if len(weights) != axis_length: raise ValueError("Weights and axis to be sampled must be of " "same length") if (weights == np.inf).any() or (weights == -np.inf).any(): raise ValueError("weight vector may not include `inf` values") if (weights < 0).any(): raise ValueError("weight vector many not include negative " "values") # If has nan, set to zero. weights = weights.fillna(0) # Renormalize if don't sum to 1 if weights.sum() != 1: if weights.sum() != 0: weights = weights / weights.sum() else: raise ValueError("Invalid weights: weights sum to zero") weights = weights.values # If no frac or n, default to n=1. if n is None and frac is None: n = 1 elif n is not None and frac is None and n % 1 != 0: raise ValueError("Only integers accepted as `n` values") elif n is None and frac is not None: n = int(round(frac * axis_length)) elif n is not None and frac is not None: raise ValueError('Please enter a value for `frac` OR `n`, not ' 'both') # Check for negative sizes if n < 0: raise ValueError("A negative number of rows requested. Please " "provide positive value.") locs = rs.choice(axis_length, size=n, replace=replace, p=weights) return self.take(locs, axis=axis, is_copy=False)
python
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None): """ Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Sample with or without replacement. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int or numpy.random.RandomState, optional Seed for the random number generator (if int), or numpy RandomState object. axis : int or string, optional Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels). Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- numpy.random.choice: Generates a random sample from a given 1-D numpy array. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. 
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) axis_length = self.shape[axis] # Process random_state argument rs = com.random_state(random_state) # Check weights for compliance if weights is not None: # If a series, align with frame if isinstance(weights, pd.Series): weights = weights.reindex(self.axes[axis]) # Strings acceptable if a dataframe and axis = 0 if isinstance(weights, str): if isinstance(self, pd.DataFrame): if axis == 0: try: weights = self[weights] except KeyError: raise KeyError("String passed to weights not a " "valid column") else: raise ValueError("Strings can only be passed to " "weights when sampling from rows on " "a DataFrame") else: raise ValueError("Strings cannot be passed as weights " "when sampling from a Series or Panel.") weights = pd.Series(weights, dtype='float64') if len(weights) != axis_length: raise ValueError("Weights and axis to be sampled must be of " "same length") if (weights == np.inf).any() or (weights == -np.inf).any(): raise ValueError("weight vector may not include `inf` values") if (weights < 0).any(): raise ValueError("weight vector many not include negative " "values") # If has nan, set to zero. weights = weights.fillna(0) # Renormalize if don't sum to 1 if weights.sum() != 1: if weights.sum() != 0: weights = weights / weights.sum() else: raise ValueError("Invalid weights: weights sum to zero") weights = weights.values # If no frac or n, default to n=1. if n is None and frac is None: n = 1 elif n is not None and frac is None and n % 1 != 0: raise ValueError("Only integers accepted as `n` values") elif n is None and frac is not None: n = int(round(frac * axis_length)) elif n is not None and frac is not None: raise ValueError('Please enter a value for `frac` OR `n`, not ' 'both') # Check for negative sizes if n < 0: raise ValueError("A negative number of rows requested. Please " "provide positive value.") locs = rs.choice(axis_length, size=n, replace=replace, p=weights) return self.take(locs, axis=axis, is_copy=False)
[ "def", "sample", "(", "self", ",", "n", "=", "None", ",", "frac", "=", "None", ",", "replace", "=", "False", ",", "weights", "=", "None", ",", "random_state", "=", "None", ",", "axis", "=", "None", ")", ":", "if", "axis", "is", "None", ":", "axis", "=", "self", ".", "_stat_axis_number", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "axis_length", "=", "self", ".", "shape", "[", "axis", "]", "# Process random_state argument", "rs", "=", "com", ".", "random_state", "(", "random_state", ")", "# Check weights for compliance", "if", "weights", "is", "not", "None", ":", "# If a series, align with frame", "if", "isinstance", "(", "weights", ",", "pd", ".", "Series", ")", ":", "weights", "=", "weights", ".", "reindex", "(", "self", ".", "axes", "[", "axis", "]", ")", "# Strings acceptable if a dataframe and axis = 0", "if", "isinstance", "(", "weights", ",", "str", ")", ":", "if", "isinstance", "(", "self", ",", "pd", ".", "DataFrame", ")", ":", "if", "axis", "==", "0", ":", "try", ":", "weights", "=", "self", "[", "weights", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "\"String passed to weights not a \"", "\"valid column\"", ")", "else", ":", "raise", "ValueError", "(", "\"Strings can only be passed to \"", "\"weights when sampling from rows on \"", "\"a DataFrame\"", ")", "else", ":", "raise", "ValueError", "(", "\"Strings cannot be passed as weights \"", "\"when sampling from a Series or Panel.\"", ")", "weights", "=", "pd", ".", "Series", "(", "weights", ",", "dtype", "=", "'float64'", ")", "if", "len", "(", "weights", ")", "!=", "axis_length", ":", "raise", "ValueError", "(", "\"Weights and axis to be sampled must be of \"", "\"same length\"", ")", "if", "(", "weights", "==", "np", ".", "inf", ")", ".", "any", "(", ")", "or", "(", "weights", "==", "-", "np", ".", "inf", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"weight vector may not include `inf` values\"", ")", "if", "(", "weights", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"weight vector many not include negative \"", "\"values\"", ")", "# If has nan, set to zero.", "weights", "=", "weights", ".", "fillna", "(", "0", ")", "# Renormalize if don't sum to 1", "if", "weights", ".", "sum", "(", ")", "!=", "1", ":", "if", "weights", ".", "sum", "(", ")", "!=", "0", ":", "weights", "=", "weights", "/", "weights", ".", "sum", "(", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid weights: weights sum to zero\"", ")", "weights", "=", "weights", ".", "values", "# If no frac or n, default to n=1.", "if", "n", "is", "None", "and", "frac", "is", "None", ":", "n", "=", "1", "elif", "n", "is", "not", "None", "and", "frac", "is", "None", "and", "n", "%", "1", "!=", "0", ":", "raise", "ValueError", "(", "\"Only integers accepted as `n` values\"", ")", "elif", "n", "is", "None", "and", "frac", "is", "not", "None", ":", "n", "=", "int", "(", "round", "(", "frac", "*", "axis_length", ")", ")", "elif", "n", "is", "not", "None", "and", "frac", "is", "not", "None", ":", "raise", "ValueError", "(", "'Please enter a value for `frac` OR `n`, not '", "'both'", ")", "# Check for negative sizes", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "\"A negative number of rows requested. Please \"", "\"provide positive value.\"", ")", "locs", "=", "rs", ".", "choice", "(", "axis_length", ",", "size", "=", "n", ",", "replace", "=", "replace", ",", "p", "=", "weights", ")", "return", "self", ".", "take", "(", "locs", ",", "axis", "=", "axis", ",", "is_copy", "=", "False", ")" ]
Return a random sample of items from an axis of object. You can use `random_state` for reproducibility. Parameters ---------- n : int, optional Number of items from axis to return. Cannot be used with `frac`. Default = 1 if `frac` = None. frac : float, optional Fraction of axis items to return. Cannot be used with `n`. replace : bool, default False Sample with or without replacement. weights : str or ndarray-like, optional Default 'None' results in equal probability weighting. If passed a Series, will align with target object on index. Index values in weights not found in sampled object will be ignored and index values in sampled object not in weights will be assigned weights of zero. If called on a DataFrame, will accept the name of a column when axis = 0. Unless weights are a Series, weights must be same length as axis being sampled. If weights do not sum to 1, they will be normalized to sum to 1. Missing values in the weights column will be treated as zero. Infinite values not allowed. random_state : int or numpy.random.RandomState, optional Seed for the random number generator (if int), or numpy RandomState object. axis : int or string, optional Axis to sample. Accepts axis number or name. Default is stat axis for given data type (0 for Series and DataFrames, 1 for Panels). Returns ------- Series or DataFrame A new object of same type as caller containing `n` items randomly sampled from the caller object. See Also -------- numpy.random.choice: Generates a random sample from a given 1-D numpy array. Examples -------- >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish']) >>> df num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 Extract 3 random elements from the ``Series`` ``df['num_legs']``: Note that we use `random_state` to ensure the reproducibility of the examples. >>> df['num_legs'].sample(n=3, random_state=1) fish 0 spider 8 falcon 2 Name: num_legs, dtype: int64 A random 50% sample of the ``DataFrame`` with replacement: >>> df.sample(frac=0.5, replace=True, random_state=1) num_legs num_wings num_specimen_seen dog 4 0 2 fish 0 0 8 Using a DataFrame column as weights. Rows with larger value in the `num_specimen_seen` column are more likely to be sampled. >>> df.sample(n=2, weights='num_specimen_seen', random_state=1) num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8
[ "Return", "a", "random", "sample", "of", "items", "from", "an", "axis", "of", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4740-L4901
20,204
pandas-dev/pandas
pandas/core/generic.py
NDFrame._dir_additions
def _dir_additions(self): """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, it's first level values are used. """ additions = {c for c in self._info_axis.unique(level=0)[:100] if isinstance(c, str) and c.isidentifier()} return super()._dir_additions().union(additions)
python
def _dir_additions(self): """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, it's first level values are used. """ additions = {c for c in self._info_axis.unique(level=0)[:100] if isinstance(c, str) and c.isidentifier()} return super()._dir_additions().union(additions)
[ "def", "_dir_additions", "(", "self", ")", ":", "additions", "=", "{", "c", "for", "c", "in", "self", ".", "_info_axis", ".", "unique", "(", "level", "=", "0", ")", "[", ":", "100", "]", "if", "isinstance", "(", "c", ",", "str", ")", "and", "c", ".", "isidentifier", "(", ")", "}", "return", "super", "(", ")", ".", "_dir_additions", "(", ")", ".", "union", "(", "additions", ")" ]
add the string-like attributes from the info_axis. If info_axis is a MultiIndex, it's first level values are used.
[ "add", "the", "string", "-", "like", "attributes", "from", "the", "info_axis", ".", "If", "info_axis", "is", "a", "MultiIndex", "it", "s", "first", "level", "values", "are", "used", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5141-L5147
20,205
pandas-dev/pandas
pandas/core/generic.py
NDFrame._consolidate_inplace
def _consolidate_inplace(self): """Consolidate data in place and return None""" def f(): self._data = self._data.consolidate() self._protect_consolidate(f)
python
def _consolidate_inplace(self): """Consolidate data in place and return None""" def f(): self._data = self._data.consolidate() self._protect_consolidate(f)
[ "def", "_consolidate_inplace", "(", "self", ")", ":", "def", "f", "(", ")", ":", "self", ".", "_data", "=", "self", ".", "_data", ".", "consolidate", "(", ")", "self", ".", "_protect_consolidate", "(", "f", ")" ]
Consolidate data in place and return None
[ "Consolidate", "data", "in", "place", "and", "return", "None" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5165-L5171
20,206
pandas-dev/pandas
pandas/core/generic.py
NDFrame._check_inplace_setting
def _check_inplace_setting(self, value): """ check whether we allow in-place setting with this type of value """ if self._is_mixed_type: if not self._is_numeric_mixed_type: # allow an actual np.nan thru try: if np.isnan(value): return True except Exception: pass raise TypeError('Cannot do inplace boolean setting on ' 'mixed-types with a non np.nan value') return True
python
def _check_inplace_setting(self, value): """ check whether we allow in-place setting with this type of value """ if self._is_mixed_type: if not self._is_numeric_mixed_type: # allow an actual np.nan thru try: if np.isnan(value): return True except Exception: pass raise TypeError('Cannot do inplace boolean setting on ' 'mixed-types with a non np.nan value') return True
[ "def", "_check_inplace_setting", "(", "self", ",", "value", ")", ":", "if", "self", ".", "_is_mixed_type", ":", "if", "not", "self", ".", "_is_numeric_mixed_type", ":", "# allow an actual np.nan thru", "try", ":", "if", "np", ".", "isnan", "(", "value", ")", ":", "return", "True", "except", "Exception", ":", "pass", "raise", "TypeError", "(", "'Cannot do inplace boolean setting on '", "'mixed-types with a non np.nan value'", ")", "return", "True" ]
check whether we allow in-place setting with this type of value
[ "check", "whether", "we", "allow", "in", "-", "place", "setting", "with", "this", "type", "of", "value" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5210-L5226
20,207
pandas-dev/pandas
pandas/core/generic.py
NDFrame.as_matrix
def as_matrix(self, columns=None): """ Convert the frame to its Numpy-array representation. .. deprecated:: 0.23.0 Use :meth:`DataFrame.values` instead. Parameters ---------- columns : list, optional, default:None If None, return all columns, otherwise, returns specified columns. Returns ------- values : ndarray If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.values Notes ----- Return is NOT a Numpy-matrix, rather, a Numpy-array. The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcase to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'. """ warnings.warn("Method .as_matrix will be removed in a future version. " "Use .values instead.", FutureWarning, stacklevel=2) self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED, items=columns)
python
def as_matrix(self, columns=None): """ Convert the frame to its Numpy-array representation. .. deprecated:: 0.23.0 Use :meth:`DataFrame.values` instead. Parameters ---------- columns : list, optional, default:None If None, return all columns, otherwise, returns specified columns. Returns ------- values : ndarray If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.values Notes ----- Return is NOT a Numpy-matrix, rather, a Numpy-array. The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcase to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'. """ warnings.warn("Method .as_matrix will be removed in a future version. " "Use .values instead.", FutureWarning, stacklevel=2) self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED, items=columns)
[ "def", "as_matrix", "(", "self", ",", "columns", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"Method .as_matrix will be removed in a future version. \"", "\"Use .values instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "self", ".", "_consolidate_inplace", "(", ")", "return", "self", ".", "_data", ".", "as_array", "(", "transpose", "=", "self", ".", "_AXIS_REVERSED", ",", "items", "=", "columns", ")" ]
Convert the frame to its Numpy-array representation. .. deprecated:: 0.23.0 Use :meth:`DataFrame.values` instead. Parameters ---------- columns : list, optional, default:None If None, return all columns, otherwise, returns specified columns. Returns ------- values : ndarray If the caller is heterogeneous and contains booleans or objects, the result will be of dtype=object. See Notes. See Also -------- DataFrame.values Notes ----- Return is NOT a Numpy-matrix, rather, a Numpy-array. The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcase to int32. By numpy.find_common_type convention, mixing int64 and uint64 will result in a float64 dtype. This method is provided for backwards compatibility. Generally, it is recommended to use '.values'.
[ "Convert", "the", "frame", "to", "its", "Numpy", "-", "array", "representation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5238-L5281
20,208
pandas-dev/pandas
pandas/core/generic.py
NDFrame.values
def values(self): """ Return a Numpy representation of the DataFrame. .. warning:: We recommend using :meth:`DataFrame.to_numpy` instead. Only the values in the DataFrame will be returned, the axes labels will be removed. Returns ------- numpy.ndarray The values of the DataFrame. See Also -------- DataFrame.to_numpy : Recommended alternative to this method. DataFrame.index : Retrieve the index labels. DataFrame.columns : Retrieving the column names. Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By :func:`numpy.find_common_type` convention, mixing int64 and uint64 will result in a float64 dtype. Examples -------- A DataFrame where all columns are the same type (e.g., int64) results in an array of the same type. >>> df = pd.DataFrame({'age': [ 3, 29], ... 'height': [94, 170], ... 'weight': [31, 115]}) >>> df age height weight 0 3 94 31 1 29 170 115 >>> df.dtypes age int64 height int64 weight int64 dtype: object >>> df.values array([[ 3, 94, 31], [ 29, 170, 115]], dtype=int64) A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray of the broadest type that accommodates these mixed types (e.g., object). >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), ... ('lion', 80.5, 1), ... ('monkey', np.nan, None)], ... columns=('name', 'max_speed', 'rank')) >>> df2.dtypes name object max_speed float64 rank object dtype: object >>> df2.values array([['parrot', 24.0, 'second'], ['lion', 80.5, 1], ['monkey', nan, None]], dtype=object) """ self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED)
python
def values(self): """ Return a Numpy representation of the DataFrame. .. warning:: We recommend using :meth:`DataFrame.to_numpy` instead. Only the values in the DataFrame will be returned, the axes labels will be removed. Returns ------- numpy.ndarray The values of the DataFrame. See Also -------- DataFrame.to_numpy : Recommended alternative to this method. DataFrame.index : Retrieve the index labels. DataFrame.columns : Retrieving the column names. Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By :func:`numpy.find_common_type` convention, mixing int64 and uint64 will result in a float64 dtype. Examples -------- A DataFrame where all columns are the same type (e.g., int64) results in an array of the same type. >>> df = pd.DataFrame({'age': [ 3, 29], ... 'height': [94, 170], ... 'weight': [31, 115]}) >>> df age height weight 0 3 94 31 1 29 170 115 >>> df.dtypes age int64 height int64 weight int64 dtype: object >>> df.values array([[ 3, 94, 31], [ 29, 170, 115]], dtype=int64) A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray of the broadest type that accommodates these mixed types (e.g., object). >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), ... ('lion', 80.5, 1), ... ('monkey', np.nan, None)], ... columns=('name', 'max_speed', 'rank')) >>> df2.dtypes name object max_speed float64 rank object dtype: object >>> df2.values array([['parrot', 24.0, 'second'], ['lion', 80.5, 1], ['monkey', nan, None]], dtype=object) """ self._consolidate_inplace() return self._data.as_array(transpose=self._AXIS_REVERSED)
[ "def", "values", "(", "self", ")", ":", "self", ".", "_consolidate_inplace", "(", ")", "return", "self", ".", "_data", ".", "as_array", "(", "transpose", "=", "self", ".", "_AXIS_REVERSED", ")" ]
Return a Numpy representation of the DataFrame. .. warning:: We recommend using :meth:`DataFrame.to_numpy` instead. Only the values in the DataFrame will be returned, the axes labels will be removed. Returns ------- numpy.ndarray The values of the DataFrame. See Also -------- DataFrame.to_numpy : Recommended alternative to this method. DataFrame.index : Retrieve the index labels. DataFrame.columns : Retrieving the column names. Notes ----- The dtype will be a lower-common-denominator dtype (implicit upcasting); that is to say if the dtypes (even of numeric types) are mixed, the one that accommodates all will be chosen. Use this with care if you are not dealing with the blocks. e.g. If the dtypes are float16 and float32, dtype will be upcast to float32. If dtypes are int32 and uint8, dtype will be upcast to int32. By :func:`numpy.find_common_type` convention, mixing int64 and uint64 will result in a float64 dtype. Examples -------- A DataFrame where all columns are the same type (e.g., int64) results in an array of the same type. >>> df = pd.DataFrame({'age': [ 3, 29], ... 'height': [94, 170], ... 'weight': [31, 115]}) >>> df age height weight 0 3 94 31 1 29 170 115 >>> df.dtypes age int64 height int64 weight int64 dtype: object >>> df.values array([[ 3, 94, 31], [ 29, 170, 115]], dtype=int64) A DataFrame with mixed type columns(e.g., str/object, int64, float32) results in an ndarray of the broadest type that accommodates these mixed types (e.g., object). >>> df2 = pd.DataFrame([('parrot', 24.0, 'second'), ... ('lion', 80.5, 1), ... ('monkey', np.nan, None)], ... columns=('name', 'max_speed', 'rank')) >>> df2.dtypes name object max_speed float64 rank object dtype: object >>> df2.values array([['parrot', 24.0, 'second'], ['lion', 80.5, 1], ['monkey', nan, None]], dtype=object)
[ "Return", "a", "Numpy", "representation", "of", "the", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5284-L5358
20,209
pandas-dev/pandas
pandas/core/generic.py
NDFrame.get_ftype_counts
def get_ftype_counts(self): """ Return counts of unique ftypes in this object. .. deprecated:: 0.23.0 This is useful for SparseDataFrame or for DataFrames containing sparse arrays. Returns ------- dtype : Series Series with the count of columns with each type and sparsity (dense/sparse). See Also -------- ftypes : Return ftypes (indication of sparse/dense and dtype) in this object. Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df str int float 0 a 1 1.0 1 b 2 2.0 2 c 3 3.0 >>> df.get_ftype_counts() # doctest: +SKIP float64:dense 1 int64:dense 1 object:dense 1 dtype: int64 """ warnings.warn("get_ftype_counts is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) from pandas import Series return Series(self._data.get_ftype_counts())
python
def get_ftype_counts(self): """ Return counts of unique ftypes in this object. .. deprecated:: 0.23.0 This is useful for SparseDataFrame or for DataFrames containing sparse arrays. Returns ------- dtype : Series Series with the count of columns with each type and sparsity (dense/sparse). See Also -------- ftypes : Return ftypes (indication of sparse/dense and dtype) in this object. Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df str int float 0 a 1 1.0 1 b 2 2.0 2 c 3 3.0 >>> df.get_ftype_counts() # doctest: +SKIP float64:dense 1 int64:dense 1 object:dense 1 dtype: int64 """ warnings.warn("get_ftype_counts is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) from pandas import Series return Series(self._data.get_ftype_counts())
[ "def", "get_ftype_counts", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"get_ftype_counts is deprecated and will \"", "\"be removed in a future version\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "from", "pandas", "import", "Series", "return", "Series", "(", "self", ".", "_data", ".", "get_ftype_counts", "(", ")", ")" ]
Return counts of unique ftypes in this object. .. deprecated:: 0.23.0 This is useful for SparseDataFrame or for DataFrames containing sparse arrays. Returns ------- dtype : Series Series with the count of columns with each type and sparsity (dense/sparse). See Also -------- ftypes : Return ftypes (indication of sparse/dense and dtype) in this object. Examples -------- >>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]] >>> df = pd.DataFrame(a, columns=['str', 'int', 'float']) >>> df str int float 0 a 1 1.0 1 b 2 2.0 2 c 3 3.0 >>> df.get_ftype_counts() # doctest: +SKIP float64:dense 1 int64:dense 1 object:dense 1 dtype: int64
[ "Return", "counts", "of", "unique", "ftypes", "in", "this", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5447-L5488
20,210
pandas-dev/pandas
pandas/core/generic.py
NDFrame.dtypes
def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
python
def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
[ "def", "dtypes", "(", "self", ")", ":", "from", "pandas", "import", "Series", "return", "Series", "(", "self", ".", "_data", ".", "get_dtypes", "(", ")", ",", "index", "=", "self", ".", "_info_axis", ",", "dtype", "=", "np", ".", "object_", ")" ]
Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object
[ "Return", "the", "dtypes", "in", "the", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5491-L5524
20,211
pandas-dev/pandas
pandas/core/generic.py
NDFrame.as_blocks
def as_blocks(self, copy=True): """ Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. .. deprecated:: 0.21.0 NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) Parameters ---------- copy : boolean, default True Returns ------- values : a dict of dtype -> Constructor Types """ warnings.warn("as_blocks is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) return self._to_dict_of_blocks(copy=copy)
python
def as_blocks(self, copy=True): """ Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. .. deprecated:: 0.21.0 NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) Parameters ---------- copy : boolean, default True Returns ------- values : a dict of dtype -> Constructor Types """ warnings.warn("as_blocks is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) return self._to_dict_of_blocks(copy=copy)
[ "def", "as_blocks", "(", "self", ",", "copy", "=", "True", ")", ":", "warnings", ".", "warn", "(", "\"as_blocks is deprecated and will \"", "\"be removed in a future version\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_to_dict_of_blocks", "(", "copy", "=", "copy", ")" ]
Convert the frame to a dict of dtype -> Constructor Types that each has a homogeneous dtype. .. deprecated:: 0.21.0 NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in as_matrix) Parameters ---------- copy : boolean, default True Returns ------- values : a dict of dtype -> Constructor Types
[ "Convert", "the", "frame", "to", "a", "dict", "of", "dtype", "-", ">", "Constructor", "Types", "that", "each", "has", "a", "homogeneous", "dtype", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5572-L5593
20,212
pandas-dev/pandas
pandas/core/generic.py
NDFrame._to_dict_of_blocks
def _to_dict_of_blocks(self, copy=True): """ Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. Internal ONLY """ return {k: self._constructor(v).__finalize__(self) for k, v, in self._data.to_dict(copy=copy).items()}
python
def _to_dict_of_blocks(self, copy=True): """ Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. Internal ONLY """ return {k: self._constructor(v).__finalize__(self) for k, v, in self._data.to_dict(copy=copy).items()}
[ "def", "_to_dict_of_blocks", "(", "self", ",", "copy", "=", "True", ")", ":", "return", "{", "k", ":", "self", ".", "_constructor", "(", "v", ")", ".", "__finalize__", "(", "self", ")", "for", "k", ",", "v", ",", "in", "self", ".", "_data", ".", "to_dict", "(", "copy", "=", "copy", ")", ".", "items", "(", ")", "}" ]
Return a dict of dtype -> Constructor Types that each is a homogeneous dtype. Internal ONLY
[ "Return", "a", "dict", "of", "dtype", "-", ">", "Constructor", "Types", "that", "each", "is", "a", "homogeneous", "dtype", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5604-L5612
20,213
pandas-dev/pandas
pandas/core/generic.py
NDFrame.astype
def astype(self, dtype, copy=True, errors='raise', **kwargs): """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object .. versionadded:: 0.20.0 kwargs : keyword arguments to pass on to the constructor Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Examples -------- >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int64): [1, 2] Convert to ordered categorical type with custom ordering: >>> cat_dtype = pd.api.types.CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Note that using ``copy=False`` and changing data on a new pandas object may propagate changes: >>> s1 = pd.Series([1,2]) >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10 1 2 dtype: int64 """ if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError('Only the Series name can be used for ' 'the key in Series dtype mappings.') new_type = dtype[self.name] return self.astype(new_type, copy, errors, **kwargs) elif self.ndim > 2: raise NotImplementedError( 'astype() only accepts a dtype arg of type dict when ' 'invoked on Series and DataFrames. A single dtype must be ' 'specified when invoked on a Panel.' ) for col_name in dtype.keys(): if col_name not in self: raise KeyError('Only a column name can be used for the ' 'key in a dtype mappings argument.') results = [] for col_name, col in self.iteritems(): if col_name in dtype: results.append(col.astype(dtype=dtype[col_name], copy=copy, errors=errors, **kwargs)) else: results.append(results.append(col.copy() if copy else col)) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names results = (self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns))) else: # else, only a single dtype is given new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, **kwargs) return self._constructor(new_data).__finalize__(self) # GH 19920: retain column metadata after concat result = pd.concat(results, axis=1, copy=False) result.columns = self.columns return result
python
def astype(self, dtype, copy=True, errors='raise', **kwargs): """ Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object .. versionadded:: 0.20.0 kwargs : keyword arguments to pass on to the constructor Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Examples -------- >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int64): [1, 2] Convert to ordered categorical type with custom ordering: >>> cat_dtype = pd.api.types.CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Note that using ``copy=False`` and changing data on a new pandas object may propagate changes: >>> s1 = pd.Series([1,2]) >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10 1 2 dtype: int64 """ if is_dict_like(dtype): if self.ndim == 1: # i.e. Series if len(dtype) > 1 or self.name not in dtype: raise KeyError('Only the Series name can be used for ' 'the key in Series dtype mappings.') new_type = dtype[self.name] return self.astype(new_type, copy, errors, **kwargs) elif self.ndim > 2: raise NotImplementedError( 'astype() only accepts a dtype arg of type dict when ' 'invoked on Series and DataFrames. A single dtype must be ' 'specified when invoked on a Panel.' ) for col_name in dtype.keys(): if col_name not in self: raise KeyError('Only a column name can be used for the ' 'key in a dtype mappings argument.') results = [] for col_name, col in self.iteritems(): if col_name in dtype: results.append(col.astype(dtype=dtype[col_name], copy=copy, errors=errors, **kwargs)) else: results.append(results.append(col.copy() if copy else col)) elif is_extension_array_dtype(dtype) and self.ndim > 1: # GH 18099/22869: columnwise conversion to extension dtype # GH 24704: use iloc to handle duplicate column names results = (self.iloc[:, i].astype(dtype, copy=copy) for i in range(len(self.columns))) else: # else, only a single dtype is given new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors, **kwargs) return self._constructor(new_data).__finalize__(self) # GH 19920: retain column metadata after concat result = pd.concat(results, axis=1, copy=False) result.columns = self.columns return result
[ "def", "astype", "(", "self", ",", "dtype", ",", "copy", "=", "True", ",", "errors", "=", "'raise'", ",", "*", "*", "kwargs", ")", ":", "if", "is_dict_like", "(", "dtype", ")", ":", "if", "self", ".", "ndim", "==", "1", ":", "# i.e. Series", "if", "len", "(", "dtype", ")", ">", "1", "or", "self", ".", "name", "not", "in", "dtype", ":", "raise", "KeyError", "(", "'Only the Series name can be used for '", "'the key in Series dtype mappings.'", ")", "new_type", "=", "dtype", "[", "self", ".", "name", "]", "return", "self", ".", "astype", "(", "new_type", ",", "copy", ",", "errors", ",", "*", "*", "kwargs", ")", "elif", "self", ".", "ndim", ">", "2", ":", "raise", "NotImplementedError", "(", "'astype() only accepts a dtype arg of type dict when '", "'invoked on Series and DataFrames. A single dtype must be '", "'specified when invoked on a Panel.'", ")", "for", "col_name", "in", "dtype", ".", "keys", "(", ")", ":", "if", "col_name", "not", "in", "self", ":", "raise", "KeyError", "(", "'Only a column name can be used for the '", "'key in a dtype mappings argument.'", ")", "results", "=", "[", "]", "for", "col_name", ",", "col", "in", "self", ".", "iteritems", "(", ")", ":", "if", "col_name", "in", "dtype", ":", "results", ".", "append", "(", "col", ".", "astype", "(", "dtype", "=", "dtype", "[", "col_name", "]", ",", "copy", "=", "copy", ",", "errors", "=", "errors", ",", "*", "*", "kwargs", ")", ")", "else", ":", "results", ".", "append", "(", "results", ".", "append", "(", "col", ".", "copy", "(", ")", "if", "copy", "else", "col", ")", ")", "elif", "is_extension_array_dtype", "(", "dtype", ")", "and", "self", ".", "ndim", ">", "1", ":", "# GH 18099/22869: columnwise conversion to extension dtype", "# GH 24704: use iloc to handle duplicate column names", "results", "=", "(", "self", ".", "iloc", "[", ":", ",", "i", "]", ".", "astype", "(", "dtype", ",", "copy", "=", "copy", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "columns", ")", ")", ")", "else", ":", "# else, only a single dtype is given", "new_data", "=", "self", ".", "_data", ".", "astype", "(", "dtype", "=", "dtype", ",", "copy", "=", "copy", ",", "errors", "=", "errors", ",", "*", "*", "kwargs", ")", "return", "self", ".", "_constructor", "(", "new_data", ")", ".", "__finalize__", "(", "self", ")", "# GH 19920: retain column metadata after concat", "result", "=", "pd", ".", "concat", "(", "results", ",", "axis", "=", "1", ",", "copy", "=", "False", ")", "result", ".", "columns", "=", "self", ".", "columns", "return", "result" ]
Cast a pandas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire pandas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. copy : bool, default True Return a copy when ``copy=True`` (be very careful setting ``copy=False`` as changes to values then may propagate to other pandas objects). errors : {'raise', 'ignore'}, default 'raise' Control raising of exceptions on invalid data for provided dtype. - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object .. versionadded:: 0.20.0 kwargs : keyword arguments to pass on to the constructor Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to a numeric type. numpy.ndarray.astype : Cast a numpy array to a specified type. Examples -------- >>> ser = pd.Series([1, 2], dtype='int32') >>> ser 0 1 1 2 dtype: int32 >>> ser.astype('int64') 0 1 1 2 dtype: int64 Convert to categorical type: >>> ser.astype('category') 0 1 1 2 dtype: category Categories (2, int64): [1, 2] Convert to ordered categorical type with custom ordering: >>> cat_dtype = pd.api.types.CategoricalDtype( ... categories=[2, 1], ordered=True) >>> ser.astype(cat_dtype) 0 1 1 2 dtype: category Categories (2, int64): [2 < 1] Note that using ``copy=False`` and changing data on a new pandas object may propagate changes: >>> s1 = pd.Series([1,2]) >>> s2 = s1.astype('int64', copy=False) >>> s2[0] = 10 >>> s1 # note that s1[0] has changed too 0 10 1 2 dtype: int64
[ "Cast", "a", "pandas", "object", "to", "a", "specified", "dtype", "dtype", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5614-L5731
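A minimal usage sketch of the dict-keyed astype path documented in this record, built on a small hypothetical frame and dtype mapping. Note that the source's nested call results.append(results.append(col.copy() if copy else col)) also appends the None returned by the inner list.append; the loop below keeps a single append per column, which appears to be the intent.

import pandas as pd

df = pd.DataFrame({"a": ["1", "2"], "b": [1.5, 2.5]})   # hypothetical data
dtype_map = {"a": "int64"}                              # hypothetical column -> dtype mapping

results = []
for col_name in df.columns:
    col = df[col_name]
    if col_name in dtype_map:
        # cast only the columns named in the mapping
        results.append(col.astype(dtype_map[col_name]))
    else:
        # unmapped columns are carried over unchanged (copied here)
        results.append(col.copy())

out = pd.concat(results, axis=1)
out.columns = df.columns
print(out.dtypes)   # a -> int64, b -> float64

In everyday use the same result comes from the public entry point, df.astype({"a": "int64"}); the loop is only spelled out to mirror the per-column branch shown above.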
20,214
pandas-dev/pandas
pandas/core/generic.py
NDFrame.copy
def copy(self, deep=True): """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series, DataFrame or Panel Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. >>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._data.copy(deep=deep) return self._constructor(data).__finalize__(self)
python
def copy(self, deep=True): """ Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series, DataFrame or Panel Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. >>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object """ data = self._data.copy(deep=deep) return self._constructor(data).__finalize__(self)
[ "def", "copy", "(", "self", ",", "deep", "=", "True", ")", ":", "data", "=", "self", ".", "_data", ".", "copy", "(", "deep", "=", "deep", ")", "return", "self", ".", "_constructor", "(", "data", ")", ".", "__finalize__", "(", "self", ")" ]
Make a copy of this object's indices and data. When ``deep=True`` (default), a new object will be created with a copy of the calling object's data and indices. Modifications to the data or indices of the copy will not be reflected in the original object (see notes below). When ``deep=False``, a new object will be created without copying the calling object's data or index (only references to the data and index are copied). Any changes to the data of the original will be reflected in the shallow copy (and vice versa). Parameters ---------- deep : bool, default True Make a deep copy, including a copy of the data and the indices. With ``deep=False`` neither the indices nor the data are copied. Returns ------- copy : Series, DataFrame or Panel Object type matches caller. Notes ----- When ``deep=True``, data is copied but actual Python objects will not be copied recursively, only the reference to the object. This is in contrast to `copy.deepcopy` in the Standard Library, which recursively copies object data (see examples below). While ``Index`` objects are copied when ``deep=True``, the underlying numpy array is not copied for performance reasons. Since ``Index`` is immutable, the underlying data can be safely shared and a copy is not needed. Examples -------- >>> s = pd.Series([1, 2], index=["a", "b"]) >>> s a 1 b 2 dtype: int64 >>> s_copy = s.copy() >>> s_copy a 1 b 2 dtype: int64 **Shallow copy versus default (deep) copy:** >>> s = pd.Series([1, 2], index=["a", "b"]) >>> deep = s.copy() >>> shallow = s.copy(deep=False) Shallow copy shares data and index with original. >>> s is shallow False >>> s.values is shallow.values and s.index is shallow.index True Deep copy has own copy of data and index. >>> s is deep False >>> s.values is deep.values or s.index is deep.index False Updates to the data shared by shallow copy and original is reflected in both; deep copy remains unchanged. >>> s[0] = 3 >>> shallow[1] = 4 >>> s a 3 b 4 dtype: int64 >>> shallow a 3 b 4 dtype: int64 >>> deep a 1 b 2 dtype: int64 Note that when copying an object containing Python objects, a deep copy will copy the data, but will not do so recursively. Updating a nested data object will be reflected in the deep copy. >>> s = pd.Series([[1, 2], [3, 4]]) >>> deep = s.copy() >>> s[0][0] = 10 >>> s 0 [10, 2] 1 [3, 4] dtype: object >>> deep 0 [10, 2] 1 [3, 4] dtype: object
[ "Make", "a", "copy", "of", "this", "object", "s", "indices", "and", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5733-L5839
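As a quick complement to the Series examples in this record, a hedged DataFrame-level sketch of the deep-copy guarantee (deep=True is the default): mutating the original afterwards does not touch the copy.

import pandas as pd

df = pd.DataFrame({"x": [1, 2]}, index=["a", "b"])   # hypothetical data
backup = df.copy()            # deep copy: data and indices are copied

df.loc["a", "x"] = 99         # later mutation of the original...
print(backup.loc["a", "x"])   # ...leaves the copy unchanged: prints 1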
20,215
pandas-dev/pandas
pandas/core/generic.py
NDFrame._convert
def _convert(self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True): """ Attempt to infer better dtype for object columns Parameters ---------- datetime : boolean, default False If True, convert to date where possible. numeric : boolean, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. timedelta : boolean, default False If True, convert to timedelta where possible. coerce : boolean, default False If True, force conversion with unconvertible values converted to nulls (NaN or NaT) copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object """ return self._constructor( self._data.convert(datetime=datetime, numeric=numeric, timedelta=timedelta, coerce=coerce, copy=copy)).__finalize__(self)
python
def _convert(self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True): """ Attempt to infer better dtype for object columns Parameters ---------- datetime : boolean, default False If True, convert to date where possible. numeric : boolean, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. timedelta : boolean, default False If True, convert to timedelta where possible. coerce : boolean, default False If True, force conversion with unconvertible values converted to nulls (NaN or NaT) copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object """ return self._constructor( self._data.convert(datetime=datetime, numeric=numeric, timedelta=timedelta, coerce=coerce, copy=copy)).__finalize__(self)
[ "def", "_convert", "(", "self", ",", "datetime", "=", "False", ",", "numeric", "=", "False", ",", "timedelta", "=", "False", ",", "coerce", "=", "False", ",", "copy", "=", "True", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "_data", ".", "convert", "(", "datetime", "=", "datetime", ",", "numeric", "=", "numeric", ",", "timedelta", "=", "timedelta", ",", "coerce", "=", "coerce", ",", "copy", "=", "copy", ")", ")", ".", "__finalize__", "(", "self", ")" ]
Attempt to infer better dtype for object columns Parameters ---------- datetime : boolean, default False If True, convert to date where possible. numeric : boolean, default False If True, attempt to convert to numbers (including strings), with unconvertible values becoming NaN. timedelta : boolean, default False If True, convert to timedelta where possible. coerce : boolean, default False If True, force conversion with unconvertible values converted to nulls (NaN or NaT) copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object
[ "Attempt", "to", "infer", "better", "dtype", "for", "object", "columns" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5855-L5884
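_convert is an internal helper; the coerce=True behaviour it describes (unconvertible values becoming NaT/NaN) is what the public converters expose. A small sketch with a hypothetical object Series:

import pandas as pd

s = pd.Series(["1 days", "00:30:00", "not a duration"], dtype="object")

# force timedelta conversion, with unparsable entries becoming NaT,
# mirroring the coerce semantics described above
print(pd.to_timedelta(s, errors="coerce"))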
20,216
pandas-dev/pandas
pandas/core/generic.py
NDFrame.convert_objects
def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): """ Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. """ msg = ("convert_objects is deprecated. To re-infer data dtypes for " "object columns, use {klass}.infer_objects()\nFor all " "other conversions use the data-type specific converters " "pd.to_datetime, pd.to_timedelta and pd.to_numeric." ).format(klass=self.__class__.__name__) warnings.warn(msg, FutureWarning, stacklevel=2) return self._constructor( self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, copy=copy)).__finalize__(self)
python
def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): """ Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. """ msg = ("convert_objects is deprecated. To re-infer data dtypes for " "object columns, use {klass}.infer_objects()\nFor all " "other conversions use the data-type specific converters " "pd.to_datetime, pd.to_timedelta and pd.to_numeric." ).format(klass=self.__class__.__name__) warnings.warn(msg, FutureWarning, stacklevel=2) return self._constructor( self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, copy=copy)).__finalize__(self)
[ "def", "convert_objects", "(", "self", ",", "convert_dates", "=", "True", ",", "convert_numeric", "=", "False", ",", "convert_timedeltas", "=", "True", ",", "copy", "=", "True", ")", ":", "msg", "=", "(", "\"convert_objects is deprecated. To re-infer data dtypes for \"", "\"object columns, use {klass}.infer_objects()\\nFor all \"", "\"other conversions use the data-type specific converters \"", "\"pd.to_datetime, pd.to_timedelta and pd.to_numeric.\"", ")", ".", "format", "(", "klass", "=", "self", ".", "__class__", ".", "__name__", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_constructor", "(", "self", ".", "_data", ".", "convert", "(", "convert_dates", "=", "convert_dates", ",", "convert_numeric", "=", "convert_numeric", ",", "convert_timedeltas", "=", "convert_timedeltas", ",", "copy", "=", "copy", ")", ")", ".", "__finalize__", "(", "self", ")" ]
Attempt to infer better dtype for object columns. .. deprecated:: 0.21.0 Parameters ---------- convert_dates : boolean, default True If True, convert to date where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. convert_numeric : boolean, default False If True, attempt to coerce to numbers (including strings), with unconvertible values becoming NaN. convert_timedeltas : boolean, default True If True, convert to timedelta where possible. If 'coerce', force conversion, with unconvertible values becoming NaT. copy : boolean, default True If True, return a copy even if no copy is necessary (e.g. no conversion was done). Note: This is meant for internal use, and should not be confused with inplace. Returns ------- converted : same as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type.
[ "Attempt", "to", "infer", "better", "dtype", "for", "object", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5886-L5930
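A short migration sketch following the deprecation message in this record, which points to the data-type specific converters; the column names and values are hypothetical.

import pandas as pd

df = pd.DataFrame({"when": ["2019-01-01", "not a date"],
                   "amount": ["10", "twenty"]})

# instead of the deprecated df.convert_objects(...), convert column by column
df["when"] = pd.to_datetime(df["when"], errors="coerce")     # bad rows become NaT
df["amount"] = pd.to_numeric(df["amount"], errors="coerce")  # bad rows become NaN
print(df.dtypes)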
20,217
pandas-dev/pandas
pandas/core/generic.py
NDFrame.infer_objects
def infer_objects(self): """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. .. versionadded:: 0.21.0 Returns ------- converted : same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ # numeric=False necessary to only soft convert; # python objects will still be converted to # native numpy numeric types return self._constructor( self._data.convert(datetime=True, numeric=False, timedelta=True, coerce=False, copy=True)).__finalize__(self)
python
def infer_objects(self): """ Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. .. versionadded:: 0.21.0 Returns ------- converted : same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object """ # numeric=False necessary to only soft convert; # python objects will still be converted to # native numpy numeric types return self._constructor( self._data.convert(datetime=True, numeric=False, timedelta=True, coerce=False, copy=True)).__finalize__(self)
[ "def", "infer_objects", "(", "self", ")", ":", "# numeric=False necessary to only soft convert;", "# python objects will still be converted to", "# native numpy numeric types", "return", "self", ".", "_constructor", "(", "self", ".", "_data", ".", "convert", "(", "datetime", "=", "True", ",", "numeric", "=", "False", ",", "timedelta", "=", "True", ",", "coerce", "=", "False", ",", "copy", "=", "True", ")", ")", ".", "__finalize__", "(", "self", ")" ]
Attempt to infer better dtypes for object columns. Attempts soft conversion of object-dtyped columns, leaving non-object and unconvertible columns unchanged. The inference rules are the same as during normal Series/DataFrame construction. .. versionadded:: 0.21.0 Returns ------- converted : same type as input object See Also -------- to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. to_numeric : Convert argument to numeric type. Examples -------- >>> df = pd.DataFrame({"A": ["a", 1, 2, 3]}) >>> df = df.iloc[1:] >>> df A 1 1 2 2 3 3 >>> df.dtypes A object dtype: object >>> df.infer_objects().dtypes A int64 dtype: object
[ "Attempt", "to", "infer", "better", "dtypes", "for", "object", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L5932-L5977
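A hedged sketch extending the record's example: infer_objects is a soft conversion, so an object column holding only Python ints is upgraded while a genuinely mixed column is left alone.

import pandas as pd

df = pd.DataFrame({"ints": [1, 2, 3],
                   "mixed": ["a", 1, 2]}, dtype="object")

converted = df.infer_objects()
print(converted.dtypes)   # ints -> int64, mixed stays object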
20,218
pandas-dev/pandas
pandas/core/generic.py
NDFrame.clip_upper
def clip_upper(self, threshold, axis=None, inplace=False): """ Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64 """ warnings.warn('clip_upper(threshold) is deprecated, ' 'use clip(upper=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.le, axis=axis, inplace=inplace)
python
def clip_upper(self, threshold, axis=None, inplace=False): """ Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64 """ warnings.warn('clip_upper(threshold) is deprecated, ' 'use clip(upper=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.le, axis=axis, inplace=inplace)
[ "def", "clip_upper", "(", "self", ",", "threshold", ",", "axis", "=", "None", ",", "inplace", "=", "False", ")", ":", "warnings", ".", "warn", "(", "'clip_upper(threshold) is deprecated, '", "'use clip(upper=threshold) instead'", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_clip_with_one_bound", "(", "threshold", ",", "method", "=", "self", ".", "le", ",", "axis", "=", "axis", ",", "inplace", "=", "inplace", ")" ]
Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64
[ "Trim", "values", "above", "a", "given", "threshold", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L7371-L7449
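Per the deprecation note in this record, the one-line replacement for clip_upper is clip(upper=...); a tiny sketch:

import pandas as pd

s = pd.Series([1, 2, 3, 4, 5])

# deprecated: s.clip_upper(3)
print(s.clip(upper=3))   # values above 3 are capped at 3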
20,219
pandas-dev/pandas
pandas/core/generic.py
NDFrame.clip_lower
def clip_lower(self, threshold, axis=None, inplace=False): """ Trim values below a given threshold. .. deprecated:: 0.24.0 Use clip(lower=threshold) instead. Elements below the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Minimum value allowed. All values below threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align `self` with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- Series single threshold clipping: >>> s = pd.Series([5, 6, 7, 8, 9]) >>> s.clip(lower=8) 0 8 1 8 2 8 3 8 4 9 dtype: int64 Series clipping element-wise using an array of thresholds. `threshold` should be the same length as the Series. >>> elemwise_thresholds = [4, 8, 7, 2, 5] >>> s.clip(lower=elemwise_thresholds) 0 5 1 8 2 7 3 8 4 9 dtype: int64 DataFrames can be compared to a scalar. >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]}) >>> df A B 0 1 2 1 3 4 2 5 6 >>> df.clip(lower=3) A B 0 3 3 1 3 4 2 5 6 Or to an array of values. By default, `threshold` should be the same shape as the DataFrame. >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]])) A B 0 3 4 1 3 4 2 6 6 Control how `threshold` is broadcast with `axis`. In this case `threshold` should be the same length as the axis specified by `axis`. >>> df.clip(lower=[3, 3, 5], axis='index') A B 0 3 3 1 3 4 2 5 6 >>> df.clip(lower=[4, 5], axis='columns') A B 0 4 5 1 4 5 2 5 6 """ warnings.warn('clip_lower(threshold) is deprecated, ' 'use clip(lower=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.ge, axis=axis, inplace=inplace)
python
def clip_lower(self, threshold, axis=None, inplace=False): """ Trim values below a given threshold. .. deprecated:: 0.24.0 Use clip(lower=threshold) instead. Elements below the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Minimum value allowed. All values below threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align `self` with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- Series single threshold clipping: >>> s = pd.Series([5, 6, 7, 8, 9]) >>> s.clip(lower=8) 0 8 1 8 2 8 3 8 4 9 dtype: int64 Series clipping element-wise using an array of thresholds. `threshold` should be the same length as the Series. >>> elemwise_thresholds = [4, 8, 7, 2, 5] >>> s.clip(lower=elemwise_thresholds) 0 5 1 8 2 7 3 8 4 9 dtype: int64 DataFrames can be compared to a scalar. >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]}) >>> df A B 0 1 2 1 3 4 2 5 6 >>> df.clip(lower=3) A B 0 3 3 1 3 4 2 5 6 Or to an array of values. By default, `threshold` should be the same shape as the DataFrame. >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]])) A B 0 3 4 1 3 4 2 6 6 Control how `threshold` is broadcast with `axis`. In this case `threshold` should be the same length as the axis specified by `axis`. >>> df.clip(lower=[3, 3, 5], axis='index') A B 0 3 3 1 3 4 2 5 6 >>> df.clip(lower=[4, 5], axis='columns') A B 0 4 5 1 4 5 2 5 6 """ warnings.warn('clip_lower(threshold) is deprecated, ' 'use clip(lower=threshold) instead', FutureWarning, stacklevel=2) return self._clip_with_one_bound(threshold, method=self.ge, axis=axis, inplace=inplace)
[ "def", "clip_lower", "(", "self", ",", "threshold", ",", "axis", "=", "None", ",", "inplace", "=", "False", ")", ":", "warnings", ".", "warn", "(", "'clip_lower(threshold) is deprecated, '", "'use clip(lower=threshold) instead'", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_clip_with_one_bound", "(", "threshold", ",", "method", "=", "self", ".", "ge", ",", "axis", "=", "axis", ",", "inplace", "=", "inplace", ")" ]
Trim values below a given threshold. .. deprecated:: 0.24.0 Use clip(lower=threshold) instead. Elements below the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Minimum value allowed. All values below threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align `self` with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- Series single threshold clipping: >>> s = pd.Series([5, 6, 7, 8, 9]) >>> s.clip(lower=8) 0 8 1 8 2 8 3 8 4 9 dtype: int64 Series clipping element-wise using an array of thresholds. `threshold` should be the same length as the Series. >>> elemwise_thresholds = [4, 8, 7, 2, 5] >>> s.clip(lower=elemwise_thresholds) 0 5 1 8 2 7 3 8 4 9 dtype: int64 DataFrames can be compared to a scalar. >>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]}) >>> df A B 0 1 2 1 3 4 2 5 6 >>> df.clip(lower=3) A B 0 3 3 1 3 4 2 5 6 Or to an array of values. By default, `threshold` should be the same shape as the DataFrame. >>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]])) A B 0 3 4 1 3 4 2 6 6 Control how `threshold` is broadcast with `axis`. In this case `threshold` should be the same length as the axis specified by `axis`. >>> df.clip(lower=[3, 3, 5], axis='index') A B 0 3 3 1 3 4 2 5 6 >>> df.clip(lower=[4, 5], axis='columns') A B 0 4 5 1 4 5 2 5 6
[ "Trim", "values", "below", "a", "given", "threshold", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L7451-L7565
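Likewise, clip_lower migrates to clip(lower=...); clip also accepts both bounds at once and, as in the DataFrame examples above, an axis for broadcasting array-like thresholds. A hedged sketch:

import pandas as pd

df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})

# deprecated: df.clip_lower(2); the replacement also takes an upper bound
print(df.clip(lower=2, upper=5))

# per-column lower bounds, broadcast along the columns axis
print(df.clip(lower=[4, 5], axis="columns"))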
20,220
pandas-dev/pandas
pandas/core/generic.py
NDFrame.groupby
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False, observed=False, **kwargs): """ Group DataFrame or Series using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : mapping, function, label, or list of labels Used to determine the groups for the groupby. If ``by`` is a function, it's called on each value of the object's index. If a dict or Series is passed, the Series or dict VALUES will be used to determine the groups (the Series' values are first aligned; see ``.align()`` method). If an ndarray is passed, the values are used as-is determine the groups. A label or list of labels may be passed to group by the columns in ``self``. Notice that a tuple is interpreted a (single) key. axis : {0 or 'index', 1 or 'columns'}, default 0 Split along rows (0) or columns (1). level : int, level name, or sequence of such, default None If the axis is a MultiIndex (hierarchical), group by a particular level or levels. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. sort : bool, default True Sort group keys. Get better performance by turning this off. Note this does not influence the order of observations within each group. Groupby preserves the order of rows within each group. group_keys : bool, default True When calling apply, add group keys to index to identify pieces. squeeze : bool, default False Reduce the dimensionality of the return type if possible, otherwise return a consistent type. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionadded:: 0.23.0 **kwargs Optional, only accepts keyword argument 'mutated' and is passed to groupby. Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns groupby object that contains information about the groups. See Also -------- resample : Convenience method for frequency conversion and resampling of time series. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. Examples -------- >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, ... 
index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level=1).mean() Max Speed Type Captive 210.0 Wild 185.0 """ from pandas.core.groupby.groupby import groupby if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) return groupby(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed, **kwargs)
python
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=False, observed=False, **kwargs): """ Group DataFrame or Series using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : mapping, function, label, or list of labels Used to determine the groups for the groupby. If ``by`` is a function, it's called on each value of the object's index. If a dict or Series is passed, the Series or dict VALUES will be used to determine the groups (the Series' values are first aligned; see ``.align()`` method). If an ndarray is passed, the values are used as-is determine the groups. A label or list of labels may be passed to group by the columns in ``self``. Notice that a tuple is interpreted a (single) key. axis : {0 or 'index', 1 or 'columns'}, default 0 Split along rows (0) or columns (1). level : int, level name, or sequence of such, default None If the axis is a MultiIndex (hierarchical), group by a particular level or levels. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. sort : bool, default True Sort group keys. Get better performance by turning this off. Note this does not influence the order of observations within each group. Groupby preserves the order of rows within each group. group_keys : bool, default True When calling apply, add group keys to index to identify pieces. squeeze : bool, default False Reduce the dimensionality of the return type if possible, otherwise return a consistent type. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionadded:: 0.23.0 **kwargs Optional, only accepts keyword argument 'mutated' and is passed to groupby. Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns groupby object that contains information about the groups. See Also -------- resample : Convenience method for frequency conversion and resampling of time series. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. Examples -------- >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, ... 
index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level=1).mean() Max Speed Type Captive 210.0 Wild 185.0 """ from pandas.core.groupby.groupby import groupby if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") axis = self._get_axis_number(axis) return groupby(self, by=by, axis=axis, level=level, as_index=as_index, sort=sort, group_keys=group_keys, squeeze=squeeze, observed=observed, **kwargs)
[ "def", "groupby", "(", "self", ",", "by", "=", "None", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "as_index", "=", "True", ",", "sort", "=", "True", ",", "group_keys", "=", "True", ",", "squeeze", "=", "False", ",", "observed", "=", "False", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "groupby", ".", "groupby", "import", "groupby", "if", "level", "is", "None", "and", "by", "is", "None", ":", "raise", "TypeError", "(", "\"You have to supply one of 'by' and 'level'\"", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "groupby", "(", "self", ",", "by", "=", "by", ",", "axis", "=", "axis", ",", "level", "=", "level", ",", "as_index", "=", "as_index", ",", "sort", "=", "sort", ",", "group_keys", "=", "group_keys", ",", "squeeze", "=", "squeeze", ",", "observed", "=", "observed", ",", "*", "*", "kwargs", ")" ]
Group DataFrame or Series using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : mapping, function, label, or list of labels Used to determine the groups for the groupby. If ``by`` is a function, it's called on each value of the object's index. If a dict or Series is passed, the Series or dict VALUES will be used to determine the groups (the Series' values are first aligned; see ``.align()`` method). If an ndarray is passed, the values are used as-is determine the groups. A label or list of labels may be passed to group by the columns in ``self``. Notice that a tuple is interpreted a (single) key. axis : {0 or 'index', 1 or 'columns'}, default 0 Split along rows (0) or columns (1). level : int, level name, or sequence of such, default None If the axis is a MultiIndex (hierarchical), group by a particular level or levels. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. sort : bool, default True Sort group keys. Get better performance by turning this off. Note this does not influence the order of observations within each group. Groupby preserves the order of rows within each group. group_keys : bool, default True When calling apply, add group keys to index to identify pieces. squeeze : bool, default False Reduce the dimensionality of the return type if possible, otherwise return a consistent type. observed : bool, default False This only applies if any of the groupers are Categoricals. If True: only show observed values for categorical groupers. If False: show all values for categorical groupers. .. versionadded:: 0.23.0 **kwargs Optional, only accepts keyword argument 'mutated' and is passed to groupby. Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns groupby object that contains information about the groups. See Also -------- resample : Convenience method for frequency conversion and resampling of time series. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more. Examples -------- >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean() Max Speed Animal Falcon 375.0 Parrot 25.0 **Hierarchical Indexes** We can groupby different levels of a hierarchical index using the `level` parameter: >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], ... ['Captive', 'Wild', 'Captive', 'Wild']] >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]}, ... index=index) >>> df Max Speed Animal Type Falcon Captive 390.0 Wild 350.0 Parrot Captive 30.0 Wild 20.0 >>> df.groupby(level=0).mean() Max Speed Animal Falcon 370.0 Parrot 25.0 >>> df.groupby(level=1).mean() Max Speed Type Captive 210.0 Wild 185.0
[ "Group", "DataFrame", "or", "Series", "using", "a", "mapper", "or", "by", "a", "Series", "of", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L7567-L7685
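The observed keyword in this record is described but not demonstrated; a small sketch with a hypothetical Categorical grouper shows the difference (the unused 'Parrot' category only appears when observed=False).

import pandas as pd

df = pd.DataFrame({
    "Animal": pd.Categorical(["Falcon", "Falcon"],
                             categories=["Falcon", "Parrot"]),
    "Max Speed": [380.0, 370.0],
})

# observed=False (the default) keeps empty categories as groups (NaN result);
# observed=True restricts the output to categories that actually occur
print(df.groupby("Animal", observed=False)["Max Speed"].mean())
print(df.groupby("Animal", observed=True)["Max Speed"].mean())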
20,221
pandas-dev/pandas
pandas/core/generic.py
NDFrame.asfreq
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): """ Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default end For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value)
python
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): """ Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default end For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value)
[ "def", "asfreq", "(", "self", ",", "freq", ",", "method", "=", "None", ",", "how", "=", "None", ",", "normalize", "=", "False", ",", "fill_value", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "resample", "import", "asfreq", "return", "asfreq", "(", "self", ",", "freq", ",", "method", "=", "method", ",", "how", "=", "how", ",", "normalize", "=", "normalize", ",", "fill_value", "=", "fill_value", ")" ]
Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default end For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. >>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0
[ "Convert", "TimeSeries", "to", "specified", "frequency", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L7687-L7784
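For quick orientation, a minimal usage sketch of asfreq as documented in the record above; the series and variable names here are illustrative, not taken from the pandas sources.

import pandas as pd

idx = pd.date_range('2000-01-01', periods=4, freq='T')
s = pd.Series([0.0, None, 2.0, 3.0], index=idx)

# Upsample from one-minute to 30-second spacing, padding the newly created
# slots; NaNs that were already present in s are left untouched, as the
# docstring above notes.
upsampled = s.asfreq('30S', method='ffill')
print(upsampled)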
20,222
pandas-dev/pandas
pandas/core/generic.py
NDFrame.resample
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, label=None, convention='start', kind=None, loffset=None, limit=None, base=0, on=None, level=None): """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. Object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values to the `on` or `level` keyword. Parameters ---------- rule : str The offset string or object representing target conversion. how : str Method for down/re-sampling, default to 'mean' for downsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).mean()``, or ``.resample(...).apply(<func>)`` axis : {0 or 'index', 1 or 'columns'}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. fill_method : str, default None Filling method for upsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).<func>()``, e.g. ``.resample(...).pad()`` closed : {'right', 'left'}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {'right', 'left'}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {'start', 'end', 's', 'e'}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {'timestamp', 'period'}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. limit : int, default None Maximum size gap when reindexing with `fill_method`. .. deprecated:: 0.18.0 base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. .. versionadded:: 0.19.0 level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. .. versionadded:: 0.19.0 Returns ------- Resampler object See Also -------- groupby : Group by mapping, function, label, or list of labels. Series.resample : Resample a Series. DataFrame.resample: Resample a DataFrame. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ for more. To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. 
>>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(array_like): ... return np.sum(array_like) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... 
freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df2 = pd.DataFrame(d2, ... index=pd.MultiIndex.from_product([days, ... ['morning', ... 'afternoon']] ... )) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 """ from pandas.core.resample import (resample, _maybe_process_deprecations) axis = self._get_axis_number(axis) r = resample(self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
python
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, label=None, convention='start', kind=None, loffset=None, limit=None, base=0, on=None, level=None): """ Resample time-series data. Convenience method for frequency conversion and resampling of time series. Object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values to the `on` or `level` keyword. Parameters ---------- rule : str The offset string or object representing target conversion. how : str Method for down/re-sampling, default to 'mean' for downsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).mean()``, or ``.resample(...).apply(<func>)`` axis : {0 or 'index', 1 or 'columns'}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. fill_method : str, default None Filling method for upsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).<func>()``, e.g. ``.resample(...).pad()`` closed : {'right', 'left'}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {'right', 'left'}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {'start', 'end', 's', 'e'}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {'timestamp', 'period'}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. limit : int, default None Maximum size gap when reindexing with `fill_method`. .. deprecated:: 0.18.0 base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. .. versionadded:: 0.19.0 level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. .. versionadded:: 0.19.0 Returns ------- Resampler object See Also -------- groupby : Group by mapping, function, label, or list of labels. Series.resample : Resample a Series. DataFrame.resample: Resample a DataFrame. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ for more. To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. 
>>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(array_like): ... return np.sum(array_like) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... 
freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df2 = pd.DataFrame(d2, ... index=pd.MultiIndex.from_product([days, ... ['morning', ... 'afternoon']] ... )) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90 """ from pandas.core.resample import (resample, _maybe_process_deprecations) axis = self._get_axis_number(axis) r = resample(self, freq=rule, label=label, closed=closed, axis=axis, kind=kind, loffset=loffset, convention=convention, base=base, key=on, level=level) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
[ "def", "resample", "(", "self", ",", "rule", ",", "how", "=", "None", ",", "axis", "=", "0", ",", "fill_method", "=", "None", ",", "closed", "=", "None", ",", "label", "=", "None", ",", "convention", "=", "'start'", ",", "kind", "=", "None", ",", "loffset", "=", "None", ",", "limit", "=", "None", ",", "base", "=", "0", ",", "on", "=", "None", ",", "level", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "resample", "import", "(", "resample", ",", "_maybe_process_deprecations", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "r", "=", "resample", "(", "self", ",", "freq", "=", "rule", ",", "label", "=", "label", ",", "closed", "=", "closed", ",", "axis", "=", "axis", ",", "kind", "=", "kind", ",", "loffset", "=", "loffset", ",", "convention", "=", "convention", ",", "base", "=", "base", ",", "key", "=", "on", ",", "level", "=", "level", ")", "return", "_maybe_process_deprecations", "(", "r", ",", "how", "=", "how", ",", "fill_method", "=", "fill_method", ",", "limit", "=", "limit", ")" ]
Resample time-series data. Convenience method for frequency conversion and resampling of time series. Object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values to the `on` or `level` keyword. Parameters ---------- rule : str The offset string or object representing target conversion. how : str Method for down/re-sampling, default to 'mean' for downsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).mean()``, or ``.resample(...).apply(<func>)`` axis : {0 or 'index', 1 or 'columns'}, default 0 Which axis to use for up- or down-sampling. For `Series` this will default to 0, i.e. along the rows. Must be `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`. fill_method : str, default None Filling method for upsampling. .. deprecated:: 0.18.0 The new syntax is ``.resample(...).<func>()``, e.g. ``.resample(...).pad()`` closed : {'right', 'left'}, default None Which side of bin interval is closed. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. label : {'right', 'left'}, default None Which bin edge label to label bucket with. The default is 'left' for all frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' which all have a default of 'right'. convention : {'start', 'end', 's', 'e'}, default 'start' For `PeriodIndex` only, controls whether to use the start or end of `rule`. kind : {'timestamp', 'period'}, optional, default None Pass 'timestamp' to convert the resulting index to a `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`. By default the input representation is retained. loffset : timedelta, default None Adjust the resampled time labels. limit : int, default None Maximum size gap when reindexing with `fill_method`. .. deprecated:: 0.18.0 base : int, default 0 For frequencies that evenly subdivide 1 day, the "origin" of the aggregated intervals. For example, for '5min' frequency, base could range from 0 through 4. Defaults to 0. on : str, optional For a DataFrame, column to use instead of index for resampling. Column must be datetime-like. .. versionadded:: 0.19.0 level : str or int, optional For a MultiIndex, level (name or number) to use for resampling. `level` must be datetime-like. .. versionadded:: 0.19.0 Returns ------- Resampler object See Also -------- groupby : Group by mapping, function, label, or list of labels. Series.resample : Resample a Series. DataFrame.resample: Resample a DataFrame. Notes ----- See the `user guide <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling>`_ for more. To learn more about the offset strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 9 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=9, freq='T') >>> series = pd.Series(range(9), index=index) >>> series 2000-01-01 00:00:00 0 2000-01-01 00:01:00 1 2000-01-01 00:02:00 2 2000-01-01 00:03:00 3 2000-01-01 00:04:00 4 2000-01-01 00:05:00 5 2000-01-01 00:06:00 6 2000-01-01 00:07:00 7 2000-01-01 00:08:00 8 Freq: T, dtype: int64 Downsample the series into 3 minute bins and sum the values of the timestamps falling into a bin. >>> series.resample('3T').sum() 2000-01-01 00:00:00 3 2000-01-01 00:03:00 12 2000-01-01 00:06:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but label each bin using the right edge instead of the left. 
Please note that the value in the bucket used as the label is not included in the bucket, which it labels. For example, in the original series the bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed value in the resampled bucket with the label ``2000-01-01 00:03:00`` does not include 3 (if it did, the summed value would be 6, not 3). To include this value close the right side of the bin interval as illustrated in the example below this one. >>> series.resample('3T', label='right').sum() 2000-01-01 00:03:00 3 2000-01-01 00:06:00 12 2000-01-01 00:09:00 21 Freq: 3T, dtype: int64 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> series.resample('3T', label='right', closed='right').sum() 2000-01-01 00:00:00 0 2000-01-01 00:03:00 6 2000-01-01 00:06:00 15 2000-01-01 00:09:00 15 Freq: 3T, dtype: int64 Upsample the series into 30 second bins. >>> series.resample('30S').asfreq()[0:5] # Select first 5 rows 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 1.0 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 Freq: 30S, dtype: float64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``pad`` method. >>> series.resample('30S').pad()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 0 2000-01-01 00:01:00 1 2000-01-01 00:01:30 1 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Upsample the series into 30 second bins and fill the ``NaN`` values using the ``bfill`` method. >>> series.resample('30S').bfill()[0:5] 2000-01-01 00:00:00 0 2000-01-01 00:00:30 1 2000-01-01 00:01:00 1 2000-01-01 00:01:30 2 2000-01-01 00:02:00 2 Freq: 30S, dtype: int64 Pass a custom function via ``apply`` >>> def custom_resampler(array_like): ... return np.sum(array_like) + 5 ... >>> series.resample('3T').apply(custom_resampler) 2000-01-01 00:00:00 8 2000-01-01 00:03:00 17 2000-01-01 00:06:00 26 Freq: 3T, dtype: int64 For a Series with a PeriodIndex, the keyword `convention` can be used to control whether to use the start or end of `rule`. Resample a year by quarter using 'start' `convention`. Values are assigned to the first quarter of the period. >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01', ... freq='A', ... periods=2)) >>> s 2012 1 2013 2 Freq: A-DEC, dtype: int64 >>> s.resample('Q', convention='start').asfreq() 2012Q1 1.0 2012Q2 NaN 2012Q3 NaN 2012Q4 NaN 2013Q1 2.0 2013Q2 NaN 2013Q3 NaN 2013Q4 NaN Freq: Q-DEC, dtype: float64 Resample quarters by month using 'end' `convention`. Values are assigned to the last month of the period. >>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01', ... freq='Q', ... periods=4)) >>> q 2018Q1 1 2018Q2 2 2018Q3 3 2018Q4 4 Freq: Q-DEC, dtype: int64 >>> q.resample('M', convention='end').asfreq() 2018-03 1.0 2018-04 NaN 2018-05 NaN 2018-06 2.0 2018-07 NaN 2018-08 NaN 2018-09 3.0 2018-10 NaN 2018-11 NaN 2018-12 4.0 Freq: M, dtype: float64 For DataFrame objects, the keyword `on` can be used to specify the column instead of the index for resampling. >>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df = pd.DataFrame(d) >>> df['week_starting'] = pd.date_range('01/01/2018', ... periods=8, ... 
freq='W') >>> df price volume week_starting 0 10 50 2018-01-07 1 11 60 2018-01-14 2 9 40 2018-01-21 3 13 100 2018-01-28 4 14 50 2018-02-04 5 18 100 2018-02-11 6 17 40 2018-02-18 7 19 50 2018-02-25 >>> df.resample('M', on='week_starting').mean() price volume week_starting 2018-01-31 10.75 62.5 2018-02-28 17.00 60.0 For a DataFrame with MultiIndex, the keyword `level` can be used to specify on which level the resampling needs to take place. >>> days = pd.date_range('1/1/2000', periods=4, freq='D') >>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19], ... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]}) >>> df2 = pd.DataFrame(d2, ... index=pd.MultiIndex.from_product([days, ... ['morning', ... 'afternoon']] ... )) >>> df2 price volume 2000-01-01 morning 10 50 afternoon 11 60 2000-01-02 morning 9 40 afternoon 13 100 2000-01-03 morning 14 50 afternoon 18 100 2000-01-04 morning 17 40 afternoon 19 50 >>> df2.resample('D', level=0).sum() price volume 2000-01-01 21 110 2000-01-02 22 140 2000-01-03 32 150 2000-01-04 36 90
[ "Resample", "time", "-", "series", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L7915-L8212
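A small sketch of the closed/label behaviour described in the resample record above, using an illustrative series of my own:

import pandas as pd

idx = pd.date_range('2000-01-01', periods=9, freq='T')
s = pd.Series(range(9), index=idx)

# With minute-based offsets the bins are closed and labelled on the left by
# default; closing/labelling on the right moves each boundary timestamp into
# the earlier bin and labels buckets by their right edge.
left = s.resample('3T').sum()
right = s.resample('3T', closed='right', label='right').sum()
print(left)
print(right)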
20,223
pandas-dev/pandas
pandas/core/generic.py
NDFrame.first
def first(self, offset): """ Convenience method for subsetting initial periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calender days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if not offset.isAnchored() and hasattr(offset, '_inc'): if end_date in self.index: end = self.index.searchsorted(end_date, side='left') return self.iloc[:end] return self.loc[:end]
python
def first(self, offset): """ Convenience method for subsetting initial periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calender days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'first' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) end_date = end = self.index[0] + offset # Tick-like, e.g. 3 weeks if not offset.isAnchored() and hasattr(offset, '_inc'): if end_date in self.index: end = self.index.searchsorted(end_date, side='left') return self.iloc[:end] return self.loc[:end]
[ "def", "first", "(", "self", ",", "offset", ")", ":", "if", "not", "isinstance", "(", "self", ".", "index", ",", "DatetimeIndex", ")", ":", "raise", "TypeError", "(", "\"'first' only supports a DatetimeIndex index\"", ")", "if", "len", "(", "self", ".", "index", ")", "==", "0", ":", "return", "self", "offset", "=", "to_offset", "(", "offset", ")", "end_date", "=", "end", "=", "self", ".", "index", "[", "0", "]", "+", "offset", "# Tick-like, e.g. 3 weeks", "if", "not", "offset", ".", "isAnchored", "(", ")", "and", "hasattr", "(", "offset", ",", "'_inc'", ")", ":", "if", "end_date", "in", "self", ".", "index", ":", "end", "=", "self", ".", "index", ".", "searchsorted", "(", "end_date", ",", "side", "=", "'left'", ")", "return", "self", ".", "iloc", "[", ":", "end", "]", "return", "self", ".", "loc", "[", ":", "end", "]" ]
Convenience method for subsetting initial periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- last : Select final periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the first 3 days: >>> ts.first('3D') A 2018-04-09 1 2018-04-11 2 Notice the data for 3 first calender days were returned, not the first 3 days observed in the dataset, and therefore data for 2018-04-13 was not returned.
[ "Convenience", "method", "for", "subsetting", "initial", "periods", "of", "time", "series", "data", "based", "on", "a", "date", "offset", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L8214-L8275
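To make the calendar-based semantics of first concrete, a short illustrative comparison with head (the frame below is my own, not part of the record):

import pandas as pd

idx = pd.date_range('2018-04-09', periods=4, freq='2D')
ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=idx)

print(ts.first('3D'))  # everything within 3 calendar days of the first label
print(ts.head(3))      # the first 3 observed rows, for contrast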
20,224
pandas-dev/pandas
pandas/core/generic.py
NDFrame.last
def last(self, offset): """ Convenience method for subsetting final periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calender days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side='right') return self.iloc[start:]
python
def last(self, offset): """ Convenience method for subsetting final periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calender days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned. """ if not isinstance(self.index, DatetimeIndex): raise TypeError("'last' only supports a DatetimeIndex index") if len(self.index) == 0: return self offset = to_offset(offset) start_date = self.index[-1] - offset start = self.index.searchsorted(start_date, side='right') return self.iloc[start:]
[ "def", "last", "(", "self", ",", "offset", ")", ":", "if", "not", "isinstance", "(", "self", ".", "index", ",", "DatetimeIndex", ")", ":", "raise", "TypeError", "(", "\"'last' only supports a DatetimeIndex index\"", ")", "if", "len", "(", "self", ".", "index", ")", "==", "0", ":", "return", "self", "offset", "=", "to_offset", "(", "offset", ")", "start_date", "=", "self", ".", "index", "[", "-", "1", "]", "-", "offset", "start", "=", "self", ".", "index", ".", "searchsorted", "(", "start_date", ",", "side", "=", "'right'", ")", "return", "self", ".", "iloc", "[", "start", ":", "]" ]
Convenience method for subsetting final periods of time series data based on a date offset. Parameters ---------- offset : string, DateOffset, dateutil.relativedelta Returns ------- subset : same type as caller Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- first : Select initial periods of time series based on a date offset. at_time : Select values at a particular time of the day. between_time : Select values between particular times of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='2D') >>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i) >>> ts A 2018-04-09 1 2018-04-11 2 2018-04-13 3 2018-04-15 4 Get the rows for the last 3 days: >>> ts.last('3D') A 2018-04-13 3 2018-04-15 4 Notice the data for 3 last calender days were returned, not the last 3 observed days in the dataset, and therefore data for 2018-04-11 was not returned.
[ "Convenience", "method", "for", "subsetting", "final", "periods", "of", "time", "series", "data", "based", "on", "a", "date", "offset", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L8277-L8333
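The mirror-image sketch for last, again purely illustrative:

import pandas as pd

idx = pd.date_range('2018-04-09', periods=4, freq='2D')
ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=idx)

print(ts.last('3D'))  # rows within 3 calendar days of the final label
print(ts.tail(3))     # the last 3 observed rows, for contrast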
20,225
pandas-dev/pandas
pandas/core/generic.py
NDFrame.slice_shift
def slice_shift(self, periods=1, axis=0): """ Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. Parameters ---------- periods : int Number of periods to move, can be positive or negative Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment. """ if periods == 0: return self if periods > 0: vslicer = slice(None, -periods) islicer = slice(periods, None) else: vslicer = slice(-periods, None) islicer = slice(None, periods) new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) return new_obj.__finalize__(self)
python
def slice_shift(self, periods=1, axis=0): """ Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. Parameters ---------- periods : int Number of periods to move, can be positive or negative Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment. """ if periods == 0: return self if periods > 0: vslicer = slice(None, -periods) islicer = slice(periods, None) else: vslicer = slice(-periods, None) islicer = slice(None, periods) new_obj = self._slice(vslicer, axis=axis) shifted_axis = self._get_axis(axis)[islicer] new_obj.set_axis(shifted_axis, axis=axis, inplace=True) return new_obj.__finalize__(self)
[ "def", "slice_shift", "(", "self", ",", "periods", "=", "1", ",", "axis", "=", "0", ")", ":", "if", "periods", "==", "0", ":", "return", "self", "if", "periods", ">", "0", ":", "vslicer", "=", "slice", "(", "None", ",", "-", "periods", ")", "islicer", "=", "slice", "(", "periods", ",", "None", ")", "else", ":", "vslicer", "=", "slice", "(", "-", "periods", ",", "None", ")", "islicer", "=", "slice", "(", "None", ",", "periods", ")", "new_obj", "=", "self", ".", "_slice", "(", "vslicer", ",", "axis", "=", "axis", ")", "shifted_axis", "=", "self", ".", "_get_axis", "(", "axis", ")", "[", "islicer", "]", "new_obj", ".", "set_axis", "(", "shifted_axis", ",", "axis", "=", "axis", ",", "inplace", "=", "True", ")", "return", "new_obj", ".", "__finalize__", "(", "self", ")" ]
Equivalent to `shift` without copying data. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. Parameters ---------- periods : int Number of periods to move, can be positive or negative Returns ------- shifted : same type as caller Notes ----- While the `slice_shift` is faster than `shift`, you may pay for it later during alignment.
[ "Equivalent", "to", "shift", "without", "copying", "data", ".", "The", "shifted", "data", "will", "not", "include", "the", "dropped", "periods", "and", "the", "shifted", "axis", "will", "be", "smaller", "than", "the", "original", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L9010-L9044
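A brief sketch contrasting slice_shift with shift, assuming the API as it stands at the commit recorded above (slice_shift was deprecated in later pandas releases); the series is illustrative:

import pandas as pd

s = pd.Series([10, 20, 30, 40],
              index=pd.date_range('2020-01-01', periods=4, freq='D'))

shifted = s.shift(1)        # same length, a leading NaN is introduced
sliced = s.slice_shift(1)   # one row shorter, no NaN introduced

print(len(shifted), len(sliced))  # 4 3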
20,226
pandas-dev/pandas
pandas/core/generic.py
NDFrame.tshift
def tshift(self, periods=1, freq=None, axis=0): """ Shift the time index, using the index's frequency if available. Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, default None Increment to use from the tseries module or time rule (e.g. 'EOM') axis : int or basestring Corresponds to the axis that contains the Index Returns ------- shifted : NDFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown """ index = self._get_axis(axis) if freq is None: freq = getattr(index, 'freq', None) if freq is None: freq = getattr(index, 'inferred_freq', None) if freq is None: msg = 'Freq was not given and was not set in the index' raise ValueError(msg) if periods == 0: return self if isinstance(freq, str): freq = to_offset(freq) block_axis = self._get_block_manager_axis(axis) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq == orig_freq: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods) else: msg = ('Given freq %s does not match PeriodIndex freq %s' % (freq.rule_code, orig_freq.rule_code)) raise ValueError(msg) else: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods, freq) return self._constructor(new_data).__finalize__(self)
python
def tshift(self, periods=1, freq=None, axis=0): """ Shift the time index, using the index's frequency if available. Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, default None Increment to use from the tseries module or time rule (e.g. 'EOM') axis : int or basestring Corresponds to the axis that contains the Index Returns ------- shifted : NDFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown """ index = self._get_axis(axis) if freq is None: freq = getattr(index, 'freq', None) if freq is None: freq = getattr(index, 'inferred_freq', None) if freq is None: msg = 'Freq was not given and was not set in the index' raise ValueError(msg) if periods == 0: return self if isinstance(freq, str): freq = to_offset(freq) block_axis = self._get_block_manager_axis(axis) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq == orig_freq: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods) else: msg = ('Given freq %s does not match PeriodIndex freq %s' % (freq.rule_code, orig_freq.rule_code)) raise ValueError(msg) else: new_data = self._data.copy() new_data.axes[block_axis] = index.shift(periods, freq) return self._constructor(new_data).__finalize__(self)
[ "def", "tshift", "(", "self", ",", "periods", "=", "1", ",", "freq", "=", "None", ",", "axis", "=", "0", ")", ":", "index", "=", "self", ".", "_get_axis", "(", "axis", ")", "if", "freq", "is", "None", ":", "freq", "=", "getattr", "(", "index", ",", "'freq'", ",", "None", ")", "if", "freq", "is", "None", ":", "freq", "=", "getattr", "(", "index", ",", "'inferred_freq'", ",", "None", ")", "if", "freq", "is", "None", ":", "msg", "=", "'Freq was not given and was not set in the index'", "raise", "ValueError", "(", "msg", ")", "if", "periods", "==", "0", ":", "return", "self", "if", "isinstance", "(", "freq", ",", "str", ")", ":", "freq", "=", "to_offset", "(", "freq", ")", "block_axis", "=", "self", ".", "_get_block_manager_axis", "(", "axis", ")", "if", "isinstance", "(", "index", ",", "PeriodIndex", ")", ":", "orig_freq", "=", "to_offset", "(", "index", ".", "freq", ")", "if", "freq", "==", "orig_freq", ":", "new_data", "=", "self", ".", "_data", ".", "copy", "(", ")", "new_data", ".", "axes", "[", "block_axis", "]", "=", "index", ".", "shift", "(", "periods", ")", "else", ":", "msg", "=", "(", "'Given freq %s does not match PeriodIndex freq %s'", "%", "(", "freq", ".", "rule_code", ",", "orig_freq", ".", "rule_code", ")", ")", "raise", "ValueError", "(", "msg", ")", "else", ":", "new_data", "=", "self", ".", "_data", ".", "copy", "(", ")", "new_data", ".", "axes", "[", "block_axis", "]", "=", "index", ".", "shift", "(", "periods", ",", "freq", ")", "return", "self", ".", "_constructor", "(", "new_data", ")", ".", "__finalize__", "(", "self", ")" ]
Shift the time index, using the index's frequency if available. Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, default None Increment to use from the tseries module or time rule (e.g. 'EOM') axis : int or basestring Corresponds to the axis that contains the Index Returns ------- shifted : NDFrame Notes ----- If freq is not specified then tries to use the freq or inferred_freq attributes of the index. If neither of those attributes exist, a ValueError is thrown
[ "Shift", "the", "time", "index", "using", "the", "index", "s", "frequency", "if", "available", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L9046-L9101
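Likewise for tshift, using the API at the recorded commit (tshift was also deprecated later); the series below is illustrative:

import pandas as pd

s = pd.Series([1, 2, 3],
              index=pd.date_range('2020-01-01', periods=3, freq='D'))

# shift moves the values and keeps the index; tshift keeps the values and
# moves the index by one step of its frequency (here, one day). Without an
# explicit freq, tshift falls back to the index's freq/inferred_freq and
# raises ValueError if neither is set, per the code above.
print(s.shift(1))
print(s.tshift(1))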
20,227
pandas-dev/pandas
pandas/core/generic.py
NDFrame.truncate
def truncate(self, before=None, after=None, axis=None, copy=True): """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, string, int Truncate all rows before this index value. after : date, string, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : boolean, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax.is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None: if before > after: raise ValueError('Truncate: %s must be after %s' % (after, before)) slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) if copy: result = result.copy() return result
python
def truncate(self, before=None, after=None, axis=None, copy=True): """ Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, string, int Truncate all rows before this index value. after : date, string, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : boolean, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1 """ if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) ax = self._get_axis(axis) # GH 17935 # Check that index is sorted if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing: raise ValueError("truncate requires a sorted index") # if we have a date index, convert to dates, otherwise # treat like a slice if ax.is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None: if before > after: raise ValueError('Truncate: %s must be after %s' % (after, before)) slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) if copy: result = result.copy() return result
[ "def", "truncate", "(", "self", ",", "before", "=", "None", ",", "after", "=", "None", ",", "axis", "=", "None", ",", "copy", "=", "True", ")", ":", "if", "axis", "is", "None", ":", "axis", "=", "self", ".", "_stat_axis_number", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "ax", "=", "self", ".", "_get_axis", "(", "axis", ")", "# GH 17935", "# Check that index is sorted", "if", "not", "ax", ".", "is_monotonic_increasing", "and", "not", "ax", ".", "is_monotonic_decreasing", ":", "raise", "ValueError", "(", "\"truncate requires a sorted index\"", ")", "# if we have a date index, convert to dates, otherwise", "# treat like a slice", "if", "ax", ".", "is_all_dates", ":", "from", "pandas", ".", "core", ".", "tools", ".", "datetimes", "import", "to_datetime", "before", "=", "to_datetime", "(", "before", ")", "after", "=", "to_datetime", "(", "after", ")", "if", "before", "is", "not", "None", "and", "after", "is", "not", "None", ":", "if", "before", ">", "after", ":", "raise", "ValueError", "(", "'Truncate: %s must be after %s'", "%", "(", "after", ",", "before", ")", ")", "slicer", "=", "[", "slice", "(", "None", ",", "None", ")", "]", "*", "self", ".", "_AXIS_LEN", "slicer", "[", "axis", "]", "=", "slice", "(", "before", ",", "after", ")", "result", "=", "self", ".", "loc", "[", "tuple", "(", "slicer", ")", "]", "if", "isinstance", "(", "ax", ",", "MultiIndex", ")", ":", "setattr", "(", "result", ",", "self", ".", "_get_axis_name", "(", "axis", ")", ",", "ax", ".", "truncate", "(", "before", ",", "after", ")", ")", "if", "copy", ":", "result", "=", "result", ".", "copy", "(", ")", "return", "result" ]
Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. Parameters ---------- before : date, string, int Truncate all rows before this index value. after : date, string, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : boolean, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Notes ----- If the index being truncated contains only datetime values, `before` and `after` may be specified as strings instead of Timestamps. Examples -------- >>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object The index values in ``truncate`` can be datetimes or string dates. >>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s') >>> df = pd.DataFrame(index=dates, data={'A': 1}) >>> df.tail() A 2016-01-31 23:59:56 1 2016-01-31 23:59:57 1 2016-01-31 23:59:58 1 2016-01-31 23:59:59 1 2016-02-01 00:00:00 1 >>> df.truncate(before=pd.Timestamp('2016-01-05'), ... after=pd.Timestamp('2016-01-10')).tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Because the index is a DatetimeIndex containing only dates, we can specify `before` and `after` as strings. They will be coerced to Timestamps before truncation. >>> df.truncate('2016-01-05', '2016-01-10').tail() A 2016-01-09 23:59:56 1 2016-01-09 23:59:57 1 2016-01-09 23:59:58 1 2016-01-09 23:59:59 1 2016-01-10 00:00:00 1 Note that ``truncate`` assumes a 0 value for any unspecified time component (midnight). This differs from partial string slicing, which returns any partially matching dates. >>> df.loc['2016-01-05':'2016-01-10', :].tail() A 2016-01-10 23:59:55 1 2016-01-10 23:59:56 1 2016-01-10 23:59:57 1 2016-01-10 23:59:58 1 2016-01-10 23:59:59 1
[ "Truncate", "a", "Series", "or", "DataFrame", "before", "and", "after", "some", "index", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L9103-L9255
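An illustrative sketch of truncate over rows and columns, mirroring the docstring above with a small frame of my own:

import pandas as pd

df = pd.DataFrame({'A': list('abcde'), 'B': list('fghij')},
                  index=[1, 2, 3, 4, 5])

print(df.truncate(before=2, after=4))                      # keep labels 2..4
print(df.truncate(before='A', after='A', axis='columns'))  # keep column 'A'
# truncate raises ValueError if the axis being truncated is not sorted.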
20,228
pandas-dev/pandas
pandas/core/generic.py
NDFrame.tz_convert
def tz_convert(self, tz, axis=0, level=None, copy=True): """ Convert tz-aware axis to target time zone. Parameters ---------- tz : string or pytz.timezone object axis : the axis to convert level : int, str, default None If axis ia a MultiIndex, convert a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data Returns ------- Raises ------ TypeError If the axis is tz-naive. """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, 'tz_convert'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError('%s is not a valid DatetimeIndex or ' 'PeriodIndex' % ax_name) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError("The level {0} is not valid".format(level)) ax = _tz_convert(ax, tz) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
python
def tz_convert(self, tz, axis=0, level=None, copy=True): """ Convert tz-aware axis to target time zone. Parameters ---------- tz : string or pytz.timezone object axis : the axis to convert level : int, str, default None If axis ia a MultiIndex, convert a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data Returns ------- Raises ------ TypeError If the axis is tz-naive. """ axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, 'tz_convert'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError('%s is not a valid DatetimeIndex or ' 'PeriodIndex' % ax_name) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError("The level {0} is not valid".format(level)) ax = _tz_convert(ax, tz) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
[ "def", "tz_convert", "(", "self", ",", "tz", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "copy", "=", "True", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "ax", "=", "self", ".", "_get_axis", "(", "axis", ")", "def", "_tz_convert", "(", "ax", ",", "tz", ")", ":", "if", "not", "hasattr", "(", "ax", ",", "'tz_convert'", ")", ":", "if", "len", "(", "ax", ")", ">", "0", ":", "ax_name", "=", "self", ".", "_get_axis_name", "(", "axis", ")", "raise", "TypeError", "(", "'%s is not a valid DatetimeIndex or '", "'PeriodIndex'", "%", "ax_name", ")", "else", ":", "ax", "=", "DatetimeIndex", "(", "[", "]", ",", "tz", "=", "tz", ")", "else", ":", "ax", "=", "ax", ".", "tz_convert", "(", "tz", ")", "return", "ax", "# if a level is given it must be a MultiIndex level or", "# equivalent to the axis name", "if", "isinstance", "(", "ax", ",", "MultiIndex", ")", ":", "level", "=", "ax", ".", "_get_level_number", "(", "level", ")", "new_level", "=", "_tz_convert", "(", "ax", ".", "levels", "[", "level", "]", ",", "tz", ")", "ax", "=", "ax", ".", "set_levels", "(", "new_level", ",", "level", "=", "level", ")", "else", ":", "if", "level", "not", "in", "(", "None", ",", "0", ",", "ax", ".", "name", ")", ":", "raise", "ValueError", "(", "\"The level {0} is not valid\"", ".", "format", "(", "level", ")", ")", "ax", "=", "_tz_convert", "(", "ax", ",", "tz", ")", "result", "=", "self", ".", "_constructor", "(", "self", ".", "_data", ",", "copy", "=", "copy", ")", "result", "=", "result", ".", "set_axis", "(", "ax", ",", "axis", "=", "axis", ",", "inplace", "=", "False", ")", "return", "result", ".", "__finalize__", "(", "self", ")" ]
Convert tz-aware axis to target time zone. Parameters ---------- tz : string or pytz.timezone object axis : the axis to convert level : int, str, default None If axis is a MultiIndex, convert a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data Returns ------- Raises ------ TypeError If the axis is tz-naive.
[ "Convert", "tz", "-", "aware", "axis", "to", "target", "time", "zone", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L9257-L9307
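The tz_convert docstring above ships without a usage example; a minimal doctest-style sketch (toy data, assuming ``import pandas as pd``) shows a tz-aware index converted to another zone, and notes that a tz-naive axis raises TypeError as documented:

>>> s = pd.Series([1],
...               index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
>>> s.tz_convert('Asia/Shanghai')
2018-09-15 07:30:00+08:00    1
dtype: int64
>>> pd.Series([1]).tz_convert('Asia/Shanghai')  # tz-naive axis: raises TypeError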
20,229
pandas-dev/pandas
pandas/core/generic.py
NDFrame.tz_localize
def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise', nonexistent='raise'): """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : string or pytz.timezone object axis : the axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), index=pd.DatetimeIndex([ ... '2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), index=pd.DatetimeIndex([ ... '2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.Series(range(2), index=pd.DatetimeIndex([ ... '2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta): raise ValueError("The nonexistent argument must be one of 'raise'," " 'NaT', 'shift_forward', 'shift_backward' or" " a timedelta object") axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, 'tz_localize'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError('%s is not a valid DatetimeIndex or ' 'PeriodIndex' % ax_name) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize( tz, ambiguous=ambiguous, nonexistent=nonexistent ) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize( ax.levels[level], tz, ambiguous, nonexistent ) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError("The level {0} is not valid".format(level)) ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
python
def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise', nonexistent='raise'): """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : string or pytz.timezone object axis : the axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), index=pd.DatetimeIndex([ ... '2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), index=pd.DatetimeIndex([ ... '2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.Series(range(2), index=pd.DatetimeIndex([ ... '2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta): raise ValueError("The nonexistent argument must be one of 'raise'," " 'NaT', 'shift_forward', 'shift_backward' or" " a timedelta object") axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, 'tz_localize'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError('%s is not a valid DatetimeIndex or ' 'PeriodIndex' % ax_name) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize( tz, ambiguous=ambiguous, nonexistent=nonexistent ) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize( ax.levels[level], tz, ambiguous, nonexistent ) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError("The level {0} is not valid".format(level)) ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
[ "def", "tz_localize", "(", "self", ",", "tz", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "copy", "=", "True", ",", "ambiguous", "=", "'raise'", ",", "nonexistent", "=", "'raise'", ")", ":", "nonexistent_options", "=", "(", "'raise'", ",", "'NaT'", ",", "'shift_forward'", ",", "'shift_backward'", ")", "if", "nonexistent", "not", "in", "nonexistent_options", "and", "not", "isinstance", "(", "nonexistent", ",", "timedelta", ")", ":", "raise", "ValueError", "(", "\"The nonexistent argument must be one of 'raise',\"", "\" 'NaT', 'shift_forward', 'shift_backward' or\"", "\" a timedelta object\"", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "ax", "=", "self", ".", "_get_axis", "(", "axis", ")", "def", "_tz_localize", "(", "ax", ",", "tz", ",", "ambiguous", ",", "nonexistent", ")", ":", "if", "not", "hasattr", "(", "ax", ",", "'tz_localize'", ")", ":", "if", "len", "(", "ax", ")", ">", "0", ":", "ax_name", "=", "self", ".", "_get_axis_name", "(", "axis", ")", "raise", "TypeError", "(", "'%s is not a valid DatetimeIndex or '", "'PeriodIndex'", "%", "ax_name", ")", "else", ":", "ax", "=", "DatetimeIndex", "(", "[", "]", ",", "tz", "=", "tz", ")", "else", ":", "ax", "=", "ax", ".", "tz_localize", "(", "tz", ",", "ambiguous", "=", "ambiguous", ",", "nonexistent", "=", "nonexistent", ")", "return", "ax", "# if a level is given it must be a MultiIndex level or", "# equivalent to the axis name", "if", "isinstance", "(", "ax", ",", "MultiIndex", ")", ":", "level", "=", "ax", ".", "_get_level_number", "(", "level", ")", "new_level", "=", "_tz_localize", "(", "ax", ".", "levels", "[", "level", "]", ",", "tz", ",", "ambiguous", ",", "nonexistent", ")", "ax", "=", "ax", ".", "set_levels", "(", "new_level", ",", "level", "=", "level", ")", "else", ":", "if", "level", "not", "in", "(", "None", ",", "0", ",", "ax", ".", "name", ")", ":", "raise", "ValueError", "(", "\"The level {0} is not valid\"", ".", "format", "(", "level", ")", ")", "ax", "=", "_tz_localize", "(", "ax", ",", "tz", ",", "ambiguous", ",", "nonexistent", ")", "result", "=", "self", ".", "_constructor", "(", "self", ".", "_data", ",", "copy", "=", "copy", ")", "result", "=", "result", ".", "set_axis", "(", "ax", ",", "axis", "=", "axis", ",", "inplace", "=", "False", ")", "return", "result", ".", "__finalize__", "(", "self", ")" ]
Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : string or pytz.timezone object axis : the axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), index=pd.DatetimeIndex([ ... '2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), index=pd.DatetimeIndex([ ... '2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.Series(range(2), index=pd.DatetimeIndex([ ... '2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64
[ "Localize", "tz", "-", "naive", "index", "of", "a", "Series", "or", "DataFrame", "to", "target", "time", "zone", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L9309-L9471
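One option the nonexistent-time examples above do not show is ``'NaT'``; a sketch reusing the same toy data (output spacing approximate):

>>> s = pd.Series(range(2), index=pd.DatetimeIndex([
...     '2015-03-29 02:30:00',
...     '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='NaT')
NaT                          0
2015-03-29 03:30:00+02:00    1
dtype: int64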
20,230
pandas-dev/pandas
pandas/core/generic.py
NDFrame._add_series_or_dataframe_operations
def _add_series_or_dataframe_operations(cls): """ Add the series or dataframe only operations to the cls; evaluate the doc strings again. """ from pandas.core import window as rwindow @Appender(rwindow.rolling.__doc__) def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None): axis = self._get_axis_number(axis) return rwindow.rolling(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed) cls.rolling = rolling @Appender(rwindow.expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) return rwindow.expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding @Appender(rwindow.ewm.__doc__) def ewm(self, com=None, span=None, halflife=None, alpha=None, min_periods=0, adjust=True, ignore_na=False, axis=0): axis = self._get_axis_number(axis) return rwindow.ewm(self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis) cls.ewm = ewm
python
def _add_series_or_dataframe_operations(cls): """ Add the series or dataframe only operations to the cls; evaluate the doc strings again. """ from pandas.core import window as rwindow @Appender(rwindow.rolling.__doc__) def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None): axis = self._get_axis_number(axis) return rwindow.rolling(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed) cls.rolling = rolling @Appender(rwindow.expanding.__doc__) def expanding(self, min_periods=1, center=False, axis=0): axis = self._get_axis_number(axis) return rwindow.expanding(self, min_periods=min_periods, center=center, axis=axis) cls.expanding = expanding @Appender(rwindow.ewm.__doc__) def ewm(self, com=None, span=None, halflife=None, alpha=None, min_periods=0, adjust=True, ignore_na=False, axis=0): axis = self._get_axis_number(axis) return rwindow.ewm(self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis) cls.ewm = ewm
[ "def", "_add_series_or_dataframe_operations", "(", "cls", ")", ":", "from", "pandas", ".", "core", "import", "window", "as", "rwindow", "@", "Appender", "(", "rwindow", ".", "rolling", ".", "__doc__", ")", "def", "rolling", "(", "self", ",", "window", ",", "min_periods", "=", "None", ",", "center", "=", "False", ",", "win_type", "=", "None", ",", "on", "=", "None", ",", "axis", "=", "0", ",", "closed", "=", "None", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "rwindow", ".", "rolling", "(", "self", ",", "window", "=", "window", ",", "min_periods", "=", "min_periods", ",", "center", "=", "center", ",", "win_type", "=", "win_type", ",", "on", "=", "on", ",", "axis", "=", "axis", ",", "closed", "=", "closed", ")", "cls", ".", "rolling", "=", "rolling", "@", "Appender", "(", "rwindow", ".", "expanding", ".", "__doc__", ")", "def", "expanding", "(", "self", ",", "min_periods", "=", "1", ",", "center", "=", "False", ",", "axis", "=", "0", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "rwindow", ".", "expanding", "(", "self", ",", "min_periods", "=", "min_periods", ",", "center", "=", "center", ",", "axis", "=", "axis", ")", "cls", ".", "expanding", "=", "expanding", "@", "Appender", "(", "rwindow", ".", "ewm", ".", "__doc__", ")", "def", "ewm", "(", "self", ",", "com", "=", "None", ",", "span", "=", "None", ",", "halflife", "=", "None", ",", "alpha", "=", "None", ",", "min_periods", "=", "0", ",", "adjust", "=", "True", ",", "ignore_na", "=", "False", ",", "axis", "=", "0", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "return", "rwindow", ".", "ewm", "(", "self", ",", "com", "=", "com", ",", "span", "=", "span", ",", "halflife", "=", "halflife", ",", "alpha", "=", "alpha", ",", "min_periods", "=", "min_periods", ",", "adjust", "=", "adjust", ",", "ignore_na", "=", "ignore_na", ",", "axis", "=", "axis", ")", "cls", ".", "ewm", "=", "ewm" ]
Add the series or dataframe only operations to the cls; evaluate the doc strings again.
[ "Add", "the", "series", "or", "dataframe", "only", "operations", "to", "the", "cls", ";", "evaluate", "the", "doc", "strings", "again", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L10190-L10226
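This wiring is what makes ``.rolling``, ``.expanding`` and ``.ewm`` available on Series and DataFrame; a short sketch of the resulting public calls (toy data, assuming ``import pandas as pd``; ``df.ewm(span=3).mean()`` works the same way):

>>> df = pd.DataFrame({'A': [1, 2, 3, 4]})
>>> df.rolling(2).sum()       # window of 2; the first row has no complete window
     A
0  NaN
1  3.0
2  5.0
3  7.0
>>> df.expanding().mean()     # cumulative mean over an ever-growing window
     A
0  1.0
1  1.5
2  2.0
3  2.5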
20,231
pandas-dev/pandas
pandas/core/generic.py
NDFrame._find_valid_index
def _find_valid_index(self, how): """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ assert how in ['first', 'last'] if len(self) == 0: # early stop return None is_valid = ~self.isna() if self.ndim == 2: is_valid = is_valid.any(1) # reduce axis 1 if how == 'first': idxpos = is_valid.values[::].argmax() if how == 'last': idxpos = len(self) - 1 - is_valid.values[::-1].argmax() chk_notna = is_valid.iat[idxpos] idx = self.index[idxpos] if not chk_notna: return None return idx
python
def _find_valid_index(self, how): """ Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index """ assert how in ['first', 'last'] if len(self) == 0: # early stop return None is_valid = ~self.isna() if self.ndim == 2: is_valid = is_valid.any(1) # reduce axis 1 if how == 'first': idxpos = is_valid.values[::].argmax() if how == 'last': idxpos = len(self) - 1 - is_valid.values[::-1].argmax() chk_notna = is_valid.iat[idxpos] idx = self.index[idxpos] if not chk_notna: return None return idx
[ "def", "_find_valid_index", "(", "self", ",", "how", ")", ":", "assert", "how", "in", "[", "'first'", ",", "'last'", "]", "if", "len", "(", "self", ")", "==", "0", ":", "# early stop", "return", "None", "is_valid", "=", "~", "self", ".", "isna", "(", ")", "if", "self", ".", "ndim", "==", "2", ":", "is_valid", "=", "is_valid", ".", "any", "(", "1", ")", "# reduce axis 1", "if", "how", "==", "'first'", ":", "idxpos", "=", "is_valid", ".", "values", "[", ":", ":", "]", ".", "argmax", "(", ")", "if", "how", "==", "'last'", ":", "idxpos", "=", "len", "(", "self", ")", "-", "1", "-", "is_valid", ".", "values", "[", ":", ":", "-", "1", "]", ".", "argmax", "(", ")", "chk_notna", "=", "is_valid", ".", "iat", "[", "idxpos", "]", "idx", "=", "self", ".", "index", "[", "idxpos", "]", "if", "not", "chk_notna", ":", "return", "None", "return", "idx" ]
Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index
[ "Retrieves", "the", "index", "of", "the", "first", "valid", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L10253-L10286
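_find_valid_index is the helper behind the public ``first_valid_index`` and ``last_valid_index`` methods; a small sketch of those entry points (assuming ``import pandas as pd`` and ``import numpy as np``):

>>> s = pd.Series([np.nan, 1.0, np.nan, 3.0, np.nan])
>>> s.first_valid_index()
1
>>> s.last_valid_index()
3
>>> pd.Series([np.nan, np.nan]).first_valid_index()  # all-NA: the helper returns None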
20,232
pandas-dev/pandas
pandas/core/base.py
PandasObject._reset_cache
def _reset_cache(self, key=None): """ Reset cached properties. If ``key`` is passed, only clears that key. """ if getattr(self, '_cache', None) is None: return if key is None: self._cache.clear() else: self._cache.pop(key, None)
python
def _reset_cache(self, key=None): """ Reset cached properties. If ``key`` is passed, only clears that key. """ if getattr(self, '_cache', None) is None: return if key is None: self._cache.clear() else: self._cache.pop(key, None)
[ "def", "_reset_cache", "(", "self", ",", "key", "=", "None", ")", ":", "if", "getattr", "(", "self", ",", "'_cache'", ",", "None", ")", "is", "None", ":", "return", "if", "key", "is", "None", ":", "self", ".", "_cache", ".", "clear", "(", ")", "else", ":", "self", ".", "_cache", ".", "pop", "(", "key", ",", "None", ")" ]
Reset cached properties. If ``key`` is passed, only clears that key.
[ "Reset", "cached", "properties", ".", "If", "key", "is", "passed", "only", "clears", "that", "key", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L86-L95
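The helper assumes computed attributes memoise their results into a ``_cache`` dict (as pandas' cache_readonly descriptor does); a toy stand-in, not pandas code, showing the one-key versus whole-cache reset:

class Frame:
    """Toy stand-in for PandasObject: properties memoise into ``_cache``."""
    def __init__(self, values):
        self.values = values
        self._cache = {}

    @property
    def total(self):
        # memoised; goes stale when ``values`` changes, until the cache is reset
        return self._cache.setdefault('total', sum(self.values))

    def _reset_cache(self, key=None):  # same logic as the pandas helper above
        if getattr(self, '_cache', None) is None:
            return
        if key is None:
            self._cache.clear()
        else:
            self._cache.pop(key, None)

f = Frame([1, 2, 3])
assert f.total == 6
f.values.append(4)
f._reset_cache('total')   # drop only the stale key; _reset_cache() would clear everything
assert f.total == 10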
20,233
pandas-dev/pandas
pandas/core/base.py
SelectionMixin._shallow_copy
def _shallow_copy(self, obj=None, obj_type=None, **kwargs): """ return a new object with the replacement attributes """ if obj is None: obj = self._selected_obj.copy() if obj_type is None: obj_type = self._constructor if isinstance(obj, obj_type): obj = obj.obj for attr in self._attributes: if attr not in kwargs: kwargs[attr] = getattr(self, attr) return obj_type(obj, **kwargs)
python
def _shallow_copy(self, obj=None, obj_type=None, **kwargs): """ return a new object with the replacement attributes """ if obj is None: obj = self._selected_obj.copy() if obj_type is None: obj_type = self._constructor if isinstance(obj, obj_type): obj = obj.obj for attr in self._attributes: if attr not in kwargs: kwargs[attr] = getattr(self, attr) return obj_type(obj, **kwargs)
[ "def", "_shallow_copy", "(", "self", ",", "obj", "=", "None", ",", "obj_type", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "obj", "is", "None", ":", "obj", "=", "self", ".", "_selected_obj", ".", "copy", "(", ")", "if", "obj_type", "is", "None", ":", "obj_type", "=", "self", ".", "_constructor", "if", "isinstance", "(", "obj", ",", "obj_type", ")", ":", "obj", "=", "obj", ".", "obj", "for", "attr", "in", "self", ".", "_attributes", ":", "if", "attr", "not", "in", "kwargs", ":", "kwargs", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "return", "obj_type", "(", "obj", ",", "*", "*", "kwargs", ")" ]
return a new object with the replacement attributes
[ "return", "a", "new", "object", "with", "the", "replacement", "attributes" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L619-L632
20,234
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.itemsize
def itemsize(self): """ Return the size of the dtype of the item of the underlying data. .. deprecated:: 0.23.0 """ warnings.warn("{obj}.itemsize is deprecated and will be removed " "in a future version".format(obj=type(self).__name__), FutureWarning, stacklevel=2) return self._ndarray_values.itemsize
python
def itemsize(self): """ Return the size of the dtype of the item of the underlying data. .. deprecated:: 0.23.0 """ warnings.warn("{obj}.itemsize is deprecated and will be removed " "in a future version".format(obj=type(self).__name__), FutureWarning, stacklevel=2) return self._ndarray_values.itemsize
[ "def", "itemsize", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"{obj}.itemsize is deprecated and will be removed \"", "\"in a future version\"", ".", "format", "(", "obj", "=", "type", "(", "self", ")", ".", "__name__", ")", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_ndarray_values", ".", "itemsize" ]
Return the size of the dtype of the item of the underlying data. .. deprecated:: 0.23.0
[ "Return", "the", "size", "of", "the", "dtype", "of", "the", "item", "of", "the", "underlying", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L715-L724
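Because the accessor is deprecated, a short sketch of the warning-free equivalent (int64 data assumed, hence eight bytes per element):

>>> idx = pd.Index([1, 2, 3])
>>> idx.itemsize          # deprecated accessor, emits a FutureWarning
8
>>> idx.dtype.itemsize    # same information straight from the dtype
8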
20,235
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.base
def base(self): """ Return the base object if the memory of the underlying data is shared. .. deprecated:: 0.23.0 """ warnings.warn("{obj}.base is deprecated and will be removed " "in a future version".format(obj=type(self).__name__), FutureWarning, stacklevel=2) return self.values.base
python
def base(self): """ Return the base object if the memory of the underlying data is shared. .. deprecated:: 0.23.0 """ warnings.warn("{obj}.base is deprecated and will be removed " "in a future version".format(obj=type(self).__name__), FutureWarning, stacklevel=2) return self.values.base
[ "def", "base", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"{obj}.base is deprecated and will be removed \"", "\"in a future version\"", ".", "format", "(", "obj", "=", "type", "(", "self", ")", ".", "__name__", ")", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "values", ".", "base" ]
Return the base object if the memory of the underlying data is shared. .. deprecated:: 0.23.0
[ "Return", "the", "base", "object", "if", "the", "memory", "of", "the", "underlying", "data", "is", "shared", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L765-L774
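As with ``itemsize``, the deprecated ``base`` attribute only forwards to the underlying ndarray; a sketch of the replacement spelling (no outputs shown, since ``base`` is often simply None):

>>> s = pd.Series([1, 2, 3])
>>> s.base           # deprecated; emits a FutureWarning and returns s.values.base
>>> s.values.base    # the non-deprecated way to ask the same question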
20,236
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.array
def array(self) -> ExtensionArray: """ The ExtensionArray of the data backing this Series or Index. .. versionadded:: 0.24.0 Returns ------- ExtensionArray An ExtensionArray of the values stored within. For extension types, this is the actual array. For NumPy native types, this is a thin (no copy) wrapper around :class:`numpy.ndarray`. ``.array`` differs ``.values`` which may require converting the data to a different form. See Also -------- Index.to_numpy : Similar method that always returns a NumPy array. Series.to_numpy : Similar method that always returns a NumPy array. Notes ----- This table lays out the different array types for each extension dtype within pandas. ================== ============================= dtype array type ================== ============================= category Categorical period PeriodArray interval IntervalArray IntegerNA IntegerArray datetime64[ns, tz] DatetimeArray ================== ============================= For any 3rd-party extension types, the array type will be an ExtensionArray. For all remaining dtypes ``.array`` will be a :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray stored within. If you absolutely need a NumPy array (possibly with copying / coercing data), then use :meth:`Series.to_numpy` instead. Examples -------- For regular NumPy types like int, and float, a PandasArray is returned. >>> pd.Series([1, 2, 3]).array <PandasArray> [1, 2, 3] Length: 3, dtype: int64 For extension types, like Categorical, the actual ExtensionArray is returned >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) >>> ser.array [a, b, a] Categories (2, object): [a, b] """ result = self._values if is_datetime64_ns_dtype(result.dtype): from pandas.arrays import DatetimeArray result = DatetimeArray(result) elif is_timedelta64_ns_dtype(result.dtype): from pandas.arrays import TimedeltaArray result = TimedeltaArray(result) elif not is_extension_array_dtype(result.dtype): from pandas.core.arrays.numpy_ import PandasArray result = PandasArray(result) return result
python
def array(self) -> ExtensionArray: """ The ExtensionArray of the data backing this Series or Index. .. versionadded:: 0.24.0 Returns ------- ExtensionArray An ExtensionArray of the values stored within. For extension types, this is the actual array. For NumPy native types, this is a thin (no copy) wrapper around :class:`numpy.ndarray`. ``.array`` differs ``.values`` which may require converting the data to a different form. See Also -------- Index.to_numpy : Similar method that always returns a NumPy array. Series.to_numpy : Similar method that always returns a NumPy array. Notes ----- This table lays out the different array types for each extension dtype within pandas. ================== ============================= dtype array type ================== ============================= category Categorical period PeriodArray interval IntervalArray IntegerNA IntegerArray datetime64[ns, tz] DatetimeArray ================== ============================= For any 3rd-party extension types, the array type will be an ExtensionArray. For all remaining dtypes ``.array`` will be a :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray stored within. If you absolutely need a NumPy array (possibly with copying / coercing data), then use :meth:`Series.to_numpy` instead. Examples -------- For regular NumPy types like int, and float, a PandasArray is returned. >>> pd.Series([1, 2, 3]).array <PandasArray> [1, 2, 3] Length: 3, dtype: int64 For extension types, like Categorical, the actual ExtensionArray is returned >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) >>> ser.array [a, b, a] Categories (2, object): [a, b] """ result = self._values if is_datetime64_ns_dtype(result.dtype): from pandas.arrays import DatetimeArray result = DatetimeArray(result) elif is_timedelta64_ns_dtype(result.dtype): from pandas.arrays import TimedeltaArray result = TimedeltaArray(result) elif not is_extension_array_dtype(result.dtype): from pandas.core.arrays.numpy_ import PandasArray result = PandasArray(result) return result
[ "def", "array", "(", "self", ")", "->", "ExtensionArray", ":", "result", "=", "self", ".", "_values", "if", "is_datetime64_ns_dtype", "(", "result", ".", "dtype", ")", ":", "from", "pandas", ".", "arrays", "import", "DatetimeArray", "result", "=", "DatetimeArray", "(", "result", ")", "elif", "is_timedelta64_ns_dtype", "(", "result", ".", "dtype", ")", ":", "from", "pandas", ".", "arrays", "import", "TimedeltaArray", "result", "=", "TimedeltaArray", "(", "result", ")", "elif", "not", "is_extension_array_dtype", "(", "result", ".", "dtype", ")", ":", "from", "pandas", ".", "core", ".", "arrays", ".", "numpy_", "import", "PandasArray", "result", "=", "PandasArray", "(", "result", ")", "return", "result" ]
The ExtensionArray of the data backing this Series or Index. .. versionadded:: 0.24.0 Returns ------- ExtensionArray An ExtensionArray of the values stored within. For extension types, this is the actual array. For NumPy native types, this is a thin (no copy) wrapper around :class:`numpy.ndarray`. ``.array`` differs ``.values`` which may require converting the data to a different form. See Also -------- Index.to_numpy : Similar method that always returns a NumPy array. Series.to_numpy : Similar method that always returns a NumPy array. Notes ----- This table lays out the different array types for each extension dtype within pandas. ================== ============================= dtype array type ================== ============================= category Categorical period PeriodArray interval IntervalArray IntegerNA IntegerArray datetime64[ns, tz] DatetimeArray ================== ============================= For any 3rd-party extension types, the array type will be an ExtensionArray. For all remaining dtypes ``.array`` will be a :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray stored within. If you absolutely need a NumPy array (possibly with copying / coercing data), then use :meth:`Series.to_numpy` instead. Examples -------- For regular NumPy types like int, and float, a PandasArray is returned. >>> pd.Series([1, 2, 3]).array <PandasArray> [1, 2, 3] Length: 3, dtype: int64 For extension types, like Categorical, the actual ExtensionArray is returned >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) >>> ser.array [a, b, a] Categories (2, object): [a, b]
[ "The", "ExtensionArray", "of", "the", "data", "backing", "this", "Series", "or", "Index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L777-L853
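One more case worth spelling out from the table above is tz-aware datetime data, where ``.array`` hands back the extension array while ``.to_numpy()`` coerces; a sketch assuming ``import pandas as pd``:

>>> ser = pd.Series(pd.date_range('2000', periods=2, tz='CET'))
>>> type(ser.array)          # the actual extension array, no copy or coercion
<class 'pandas.core.arrays.datetimes.DatetimeArray'>
>>> type(ser.to_numpy())     # always an ndarray, here holding Timestamp objects
<class 'numpy.ndarray'>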
20,237
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.to_numpy
def to_numpy(self, dtype=None, copy=False): """ A NumPy ndarray representing the values in this Series or Index. .. versionadded:: 0.24.0 Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.array : Get the actual data stored within. Index.array : Get the actual data stored within. DataFrame.to_numpy : Similar method for DataFrame. Notes ----- The returned array will be the same up to equality (values equal in `self` will be equal in the returned array; likewise for values that are not equal). When `self` contains an ExtensionArray, the dtype may be different. For example, for a category-dtype Series, ``to_numpy()`` will return a NumPy array and the categorical dtype will be lost. For NumPy dtypes, this will be a reference to the actual data stored in this Series or Index (assuming ``copy=False``). Modifying the result in place will modify the data stored in the Series or Index (not that we recommend doing that). For extension types, ``to_numpy()`` *may* require copying data and coercing the result to a NumPy type (possibly object), which may be expensive. When you need a no-copy reference to the underlying data, :attr:`Series.array` should be used instead. This table lays out the different dtypes and default return types of ``to_numpy()`` for various dtypes within pandas. ================== ================================ dtype array type ================== ================================ category[T] ndarray[T] (same dtype as input) period ndarray[object] (Periods) interval ndarray[object] (Intervals) IntegerNA ndarray[object] datetime64[ns] datetime64[ns] datetime64[ns, tz] ndarray[object] (Timestamps) ================== ================================ Examples -------- >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) >>> ser.to_numpy() array(['a', 'b', 'a'], dtype=object) Specify the `dtype` to control how datetime-aware data is represented. Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp` objects, each with the correct ``tz``. >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> ser.to_numpy(dtype=object) array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'), Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')], dtype=object) Or ``dtype='datetime64[ns]'`` to return an ndarray of native datetime64 values. The values are converted to UTC and the timezone info is dropped. >>> ser.to_numpy(dtype="datetime64[ns]") ... # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], dtype='datetime64[ns]') """ if is_datetime64tz_dtype(self.dtype) and dtype is None: # note: this is going to change very soon. # I have a WIP PR making this unnecessary, but it's # a bit out of scope for the DatetimeArray PR. dtype = "object" result = np.asarray(self._values, dtype=dtype) # TODO(GH-24345): Avoid potential double copy if copy: result = result.copy() return result
python
def to_numpy(self, dtype=None, copy=False): """ A NumPy ndarray representing the values in this Series or Index. .. versionadded:: 0.24.0 Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.array : Get the actual data stored within. Index.array : Get the actual data stored within. DataFrame.to_numpy : Similar method for DataFrame. Notes ----- The returned array will be the same up to equality (values equal in `self` will be equal in the returned array; likewise for values that are not equal). When `self` contains an ExtensionArray, the dtype may be different. For example, for a category-dtype Series, ``to_numpy()`` will return a NumPy array and the categorical dtype will be lost. For NumPy dtypes, this will be a reference to the actual data stored in this Series or Index (assuming ``copy=False``). Modifying the result in place will modify the data stored in the Series or Index (not that we recommend doing that). For extension types, ``to_numpy()`` *may* require copying data and coercing the result to a NumPy type (possibly object), which may be expensive. When you need a no-copy reference to the underlying data, :attr:`Series.array` should be used instead. This table lays out the different dtypes and default return types of ``to_numpy()`` for various dtypes within pandas. ================== ================================ dtype array type ================== ================================ category[T] ndarray[T] (same dtype as input) period ndarray[object] (Periods) interval ndarray[object] (Intervals) IntegerNA ndarray[object] datetime64[ns] datetime64[ns] datetime64[ns, tz] ndarray[object] (Timestamps) ================== ================================ Examples -------- >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) >>> ser.to_numpy() array(['a', 'b', 'a'], dtype=object) Specify the `dtype` to control how datetime-aware data is represented. Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp` objects, each with the correct ``tz``. >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> ser.to_numpy(dtype=object) array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'), Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')], dtype=object) Or ``dtype='datetime64[ns]'`` to return an ndarray of native datetime64 values. The values are converted to UTC and the timezone info is dropped. >>> ser.to_numpy(dtype="datetime64[ns]") ... # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], dtype='datetime64[ns]') """ if is_datetime64tz_dtype(self.dtype) and dtype is None: # note: this is going to change very soon. # I have a WIP PR making this unnecessary, but it's # a bit out of scope for the DatetimeArray PR. dtype = "object" result = np.asarray(self._values, dtype=dtype) # TODO(GH-24345): Avoid potential double copy if copy: result = result.copy() return result
[ "def", "to_numpy", "(", "self", ",", "dtype", "=", "None", ",", "copy", "=", "False", ")", ":", "if", "is_datetime64tz_dtype", "(", "self", ".", "dtype", ")", "and", "dtype", "is", "None", ":", "# note: this is going to change very soon.", "# I have a WIP PR making this unnecessary, but it's", "# a bit out of scope for the DatetimeArray PR.", "dtype", "=", "\"object\"", "result", "=", "np", ".", "asarray", "(", "self", ".", "_values", ",", "dtype", "=", "dtype", ")", "# TODO(GH-24345): Avoid potential double copy", "if", "copy", ":", "result", "=", "result", ".", "copy", "(", ")", "return", "result" ]
A NumPy ndarray representing the values in this Series or Index. .. versionadded:: 0.24.0 Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is a not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.array : Get the actual data stored within. Index.array : Get the actual data stored within. DataFrame.to_numpy : Similar method for DataFrame. Notes ----- The returned array will be the same up to equality (values equal in `self` will be equal in the returned array; likewise for values that are not equal). When `self` contains an ExtensionArray, the dtype may be different. For example, for a category-dtype Series, ``to_numpy()`` will return a NumPy array and the categorical dtype will be lost. For NumPy dtypes, this will be a reference to the actual data stored in this Series or Index (assuming ``copy=False``). Modifying the result in place will modify the data stored in the Series or Index (not that we recommend doing that). For extension types, ``to_numpy()`` *may* require copying data and coercing the result to a NumPy type (possibly object), which may be expensive. When you need a no-copy reference to the underlying data, :attr:`Series.array` should be used instead. This table lays out the different dtypes and default return types of ``to_numpy()`` for various dtypes within pandas. ================== ================================ dtype array type ================== ================================ category[T] ndarray[T] (same dtype as input) period ndarray[object] (Periods) interval ndarray[object] (Intervals) IntegerNA ndarray[object] datetime64[ns] datetime64[ns] datetime64[ns, tz] ndarray[object] (Timestamps) ================== ================================ Examples -------- >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a'])) >>> ser.to_numpy() array(['a', 'b', 'a'], dtype=object) Specify the `dtype` to control how datetime-aware data is represented. Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp` objects, each with the correct ``tz``. >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET")) >>> ser.to_numpy(dtype=object) array([Timestamp('2000-01-01 00:00:00+0100', tz='CET', freq='D'), Timestamp('2000-01-02 00:00:00+0100', tz='CET', freq='D')], dtype=object) Or ``dtype='datetime64[ns]'`` to return an ndarray of native datetime64 values. The values are converted to UTC and the timezone info is dropped. >>> ser.to_numpy(dtype="datetime64[ns]") ... # doctest: +ELLIPSIS array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], dtype='datetime64[ns]')
[ "A", "NumPy", "ndarray", "representing", "the", "values", "in", "this", "Series", "or", "Index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L855-L949
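The Notes above warn that with the default ``copy=False`` a plain NumPy dtype can come back as a view onto the Series data; a small sketch of the difference:

>>> s = pd.Series([1, 2, 3])
>>> view = s.to_numpy()           # no copy for a plain int64 Series
>>> view[0] = 99                  # ...so writes show up in the Series
>>> s.iloc[0]
99
>>> safe = s.to_numpy(copy=True)  # force a copy when that is not wanted
>>> safe[1] = -1
>>> s.iloc[1]
2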
20,238
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin._ndarray_values
def _ndarray_values(self) -> np.ndarray: """ The data as an ndarray, possibly losing information. The expectation is that this is cheap to compute, and is primarily used for interacting with our indexers. - categorical -> codes """ if is_extension_array_dtype(self): return self.array._ndarray_values return self.values
python
def _ndarray_values(self) -> np.ndarray: """ The data as an ndarray, possibly losing information. The expectation is that this is cheap to compute, and is primarily used for interacting with our indexers. - categorical -> codes """ if is_extension_array_dtype(self): return self.array._ndarray_values return self.values
[ "def", "_ndarray_values", "(", "self", ")", "->", "np", ".", "ndarray", ":", "if", "is_extension_array_dtype", "(", "self", ")", ":", "return", "self", ".", "array", ".", "_ndarray_values", "return", "self", ".", "values" ]
The data as an ndarray, possibly losing information. The expectation is that this is cheap to compute, and is primarily used for interacting with our indexers. - categorical -> codes
[ "The", "data", "as", "an", "ndarray", "possibly", "losing", "information", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L952-L963
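A sketch of the "categorical -> codes" case mentioned above, poking at the private attribute purely for illustration:

>>> idx = pd.CategoricalIndex(['a', 'b', 'a'])
>>> idx._ndarray_values      # cheap, indexer-friendly integer codes
array([0, 1, 0], dtype=int8)
>>> idx.values               # contrast: the full Categorical, nothing lost
[a, b, a]
Categories (2, object): [a, b]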
20,239
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.max
def max(self, axis=None, skipna=True): """ Return the maximum value of the Index. Parameters ---------- axis : int, optional For compatibility with NumPy. Only 0 or None are allowed. skipna : bool, default True Returns ------- scalar Maximum value. See Also -------- Index.min : Return the minimum value in an Index. Series.max : Return the maximum value in a Series. DataFrame.max : Return the maximum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.max() 3 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.max() 'c' For a MultiIndex, the maximum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.max() ('b', 2) """ nv.validate_minmax_axis(axis) return nanops.nanmax(self._values, skipna=skipna)
python
def max(self, axis=None, skipna=True): """ Return the maximum value of the Index. Parameters ---------- axis : int, optional For compatibility with NumPy. Only 0 or None are allowed. skipna : bool, default True Returns ------- scalar Maximum value. See Also -------- Index.min : Return the minimum value in an Index. Series.max : Return the maximum value in a Series. DataFrame.max : Return the maximum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.max() 3 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.max() 'c' For a MultiIndex, the maximum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.max() ('b', 2) """ nv.validate_minmax_axis(axis) return nanops.nanmax(self._values, skipna=skipna)
[ "def", "max", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ")", ":", "nv", ".", "validate_minmax_axis", "(", "axis", ")", "return", "nanops", ".", "nanmax", "(", "self", ".", "_values", ",", "skipna", "=", "skipna", ")" ]
Return the maximum value of the Index. Parameters ---------- axis : int, optional For compatibility with NumPy. Only 0 or None are allowed. skipna : bool, default True Returns ------- scalar Maximum value. See Also -------- Index.min : Return the minimum value in an Index. Series.max : Return the maximum value in a Series. DataFrame.max : Return the maximum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.max() 3 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.max() 'c' For a MultiIndex, the maximum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.max() ('b', 2)
[ "Return", "the", "maximum", "value", "of", "the", "Index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L969-L1007
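One nuance the examples above leave out is ``skipna``; a short sketch (assuming ``import numpy as np`` alongside pandas):

>>> idx = pd.Index([1.0, 3.0, np.nan])
>>> idx.max()                 # missing values are skipped by default
3.0
>>> idx.max(skipna=False)     # propagate them instead
nan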
20,240
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.argmax
def argmax(self, axis=None, skipna=True): """ Return an ndarray of the maximum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True See Also -------- numpy.ndarray.argmax """ nv.validate_minmax_axis(axis) return nanops.nanargmax(self._values, skipna=skipna)
python
def argmax(self, axis=None, skipna=True): """ Return an ndarray of the maximum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True See Also -------- numpy.ndarray.argmax """ nv.validate_minmax_axis(axis) return nanops.nanargmax(self._values, skipna=skipna)
[ "def", "argmax", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ")", ":", "nv", ".", "validate_minmax_axis", "(", "axis", ")", "return", "nanops", ".", "nanargmax", "(", "self", ".", "_values", ",", "skipna", "=", "skipna", ")" ]
Return an ndarray of the maximum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True See Also -------- numpy.ndarray.argmax
[ "Return", "an", "ndarray", "of", "the", "maximum", "argument", "indexer", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L1009-L1024
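The argmax docstring carries no example; a minimal sketch of the positional result:

>>> idx = pd.Index([3, 7, 5])
>>> idx.argmax()     # position of the maximum value, not the value itself
1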
20,241
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.min
def min(self, axis=None, skipna=True): """ Return the minimum value of the Index. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True Returns ------- scalar Minimum value. See Also -------- Index.max : Return the maximum value of the object. Series.min : Return the minimum value in a Series. DataFrame.min : Return the minimum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.min() 1 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.min() 'a' For a MultiIndex, the minimum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.min() ('a', 1) """ nv.validate_minmax_axis(axis) return nanops.nanmin(self._values, skipna=skipna)
python
def min(self, axis=None, skipna=True): """ Return the minimum value of the Index. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True Returns ------- scalar Minimum value. See Also -------- Index.max : Return the maximum value of the object. Series.min : Return the minimum value in a Series. DataFrame.min : Return the minimum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.min() 1 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.min() 'a' For a MultiIndex, the minimum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.min() ('a', 1) """ nv.validate_minmax_axis(axis) return nanops.nanmin(self._values, skipna=skipna)
[ "def", "min", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ")", ":", "nv", ".", "validate_minmax_axis", "(", "axis", ")", "return", "nanops", ".", "nanmin", "(", "self", ".", "_values", ",", "skipna", "=", "skipna", ")" ]
Return the minimum value of the Index. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True Returns ------- scalar Minimum value. See Also -------- Index.max : Return the maximum value of the object. Series.min : Return the minimum value in a Series. DataFrame.min : Return the minimum values in a DataFrame. Examples -------- >>> idx = pd.Index([3, 2, 1]) >>> idx.min() 1 >>> idx = pd.Index(['c', 'b', 'a']) >>> idx.min() 'a' For a MultiIndex, the minimum is determined lexicographically. >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)]) >>> idx.min() ('a', 1)
[ "Return", "the", "minimum", "value", "of", "the", "Index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L1026-L1064
20,242
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.argmin
def argmin(self, axis=None, skipna=True): """ Return a ndarray of the minimum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True Returns ------- numpy.ndarray See Also -------- numpy.ndarray.argmin """ nv.validate_minmax_axis(axis) return nanops.nanargmin(self._values, skipna=skipna)
python
def argmin(self, axis=None, skipna=True): """ Return a ndarray of the minimum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True Returns ------- numpy.ndarray See Also -------- numpy.ndarray.argmin """ nv.validate_minmax_axis(axis) return nanops.nanargmin(self._values, skipna=skipna)
[ "def", "argmin", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ")", ":", "nv", ".", "validate_minmax_axis", "(", "axis", ")", "return", "nanops", ".", "nanargmin", "(", "self", ".", "_values", ",", "skipna", "=", "skipna", ")" ]
Return an ndarray of the minimum argument indexer.

Parameters
----------
axis : {None}
    Dummy argument for consistency with Series
skipna : bool, default True

Returns
-------
numpy.ndarray

See Also
--------
numpy.ndarray.argmin
[ "Return", "a", "ndarray", "of", "the", "minimum", "argument", "indexer", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L1066-L1085
20,243
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.tolist
def tolist(self): """ Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list See Also -------- numpy.ndarray.tolist """ if is_datetimelike(self._values): return [com.maybe_box_datetimelike(x) for x in self._values] elif is_extension_array_dtype(self._values): return list(self._values) else: return self._values.tolist()
python
def tolist(self): """ Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list See Also -------- numpy.ndarray.tolist """ if is_datetimelike(self._values): return [com.maybe_box_datetimelike(x) for x in self._values] elif is_extension_array_dtype(self._values): return list(self._values) else: return self._values.tolist()
[ "def", "tolist", "(", "self", ")", ":", "if", "is_datetimelike", "(", "self", ".", "_values", ")", ":", "return", "[", "com", ".", "maybe_box_datetimelike", "(", "x", ")", "for", "x", "in", "self", ".", "_values", "]", "elif", "is_extension_array_dtype", "(", "self", ".", "_values", ")", ":", "return", "list", "(", "self", ".", "_values", ")", "else", ":", "return", "self", ".", "_values", ".", "tolist", "(", ")" ]
Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list See Also -------- numpy.ndarray.tolist
[ "Return", "a", "list", "of", "the", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L1087-L1108
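A short sketch of the behaviour the tolist record describes: plain Python scalars come back for numeric data, while datetime-like data comes back as pandas scalars (Timestamp). The data is made up:

import pandas as pd

s = pd.Series([1, 2, 3])
print(s.tolist())                     # [1, 2, 3] -> plain Python ints

ts = pd.Series(pd.to_datetime(["2019-01-01", "2019-01-02"]))
print(ts.tolist())                    # list of Timestamp objects
print(type(ts.tolist()[0]).__name__)  # Timestamp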
20,244
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin._reduce
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): """ perform the reduction type operation if we can """ func = getattr(self, name, None) if func is None: raise TypeError("{klass} cannot perform the operation {op}".format( klass=self.__class__.__name__, op=name)) return func(skipna=skipna, **kwds)
python
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): """ perform the reduction type operation if we can """ func = getattr(self, name, None) if func is None: raise TypeError("{klass} cannot perform the operation {op}".format( klass=self.__class__.__name__, op=name)) return func(skipna=skipna, **kwds)
[ "def", "_reduce", "(", "self", ",", "op", ",", "name", ",", "axis", "=", "0", ",", "skipna", "=", "True", ",", "numeric_only", "=", "None", ",", "filter_type", "=", "None", ",", "*", "*", "kwds", ")", ":", "func", "=", "getattr", "(", "self", ",", "name", ",", "None", ")", "if", "func", "is", "None", ":", "raise", "TypeError", "(", "\"{klass} cannot perform the operation {op}\"", ".", "format", "(", "klass", "=", "self", ".", "__class__", ".", "__name__", ",", "op", "=", "name", ")", ")", "return", "func", "(", "skipna", "=", "skipna", ",", "*", "*", "kwds", ")" ]
perform the reduction type operation if we can
[ "perform", "the", "reduction", "type", "operation", "if", "we", "can" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L1135-L1142
20,245
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.nunique
def nunique(self, dropna=True): """ Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- dropna : bool, default True Don't include NaN in the count. Returns ------- int See Also -------- DataFrame.nunique: Method nunique for DataFrame. Series.count: Count non-NA/null observations in the Series. Examples -------- >>> s = pd.Series([1, 3, 5, 7, 7]) >>> s 0 1 1 3 2 5 3 7 4 7 dtype: int64 >>> s.nunique() 4 """ uniqs = self.unique() n = len(uniqs) if dropna and isna(uniqs).any(): n -= 1 return n
python
def nunique(self, dropna=True): """ Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- dropna : bool, default True Don't include NaN in the count. Returns ------- int See Also -------- DataFrame.nunique: Method nunique for DataFrame. Series.count: Count non-NA/null observations in the Series. Examples -------- >>> s = pd.Series([1, 3, 5, 7, 7]) >>> s 0 1 1 3 2 5 3 7 4 7 dtype: int64 >>> s.nunique() 4 """ uniqs = self.unique() n = len(uniqs) if dropna and isna(uniqs).any(): n -= 1 return n
[ "def", "nunique", "(", "self", ",", "dropna", "=", "True", ")", ":", "uniqs", "=", "self", ".", "unique", "(", ")", "n", "=", "len", "(", "uniqs", ")", "if", "dropna", "and", "isna", "(", "uniqs", ")", ".", "any", "(", ")", ":", "n", "-=", "1", "return", "n" ]
Return number of unique elements in the object.

Excludes NA values by default.

Parameters
----------
dropna : bool, default True
    Don't include NaN in the count.

Returns
-------
int

See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.

Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0    1
1    3
2    5
3    7
4    7
dtype: int64

>>> s.nunique()
4
[ "Return", "number", "of", "unique", "elements", "in", "the", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L1313-L1351
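The docstring example only shows the default; the sketch below (made-up data) also exercises dropna=False, where NaN counts as its own value:

import numpy as np
import pandas as pd

s = pd.Series([1, 3, 5, 7, 7, np.nan])
print(s.nunique())              # 4  (NaN excluded by default)
print(s.nunique(dropna=False))  # 5  (NaN counted as a distinct value)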
20,246
pandas-dev/pandas
pandas/core/base.py
IndexOpsMixin.memory_usage
def memory_usage(self, deep=False): """ Memory usage of the values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used See Also -------- numpy.ndarray.nbytes Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False or if used on PyPy """ if hasattr(self.array, 'memory_usage'): return self.array.memory_usage(deep=deep) v = self.array.nbytes if deep and is_object_dtype(self) and not PYPY: v += lib.memory_usage_of_objects(self.array) return v
python
def memory_usage(self, deep=False): """ Memory usage of the values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used See Also -------- numpy.ndarray.nbytes Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False or if used on PyPy """ if hasattr(self.array, 'memory_usage'): return self.array.memory_usage(deep=deep) v = self.array.nbytes if deep and is_object_dtype(self) and not PYPY: v += lib.memory_usage_of_objects(self.array) return v
[ "def", "memory_usage", "(", "self", ",", "deep", "=", "False", ")", ":", "if", "hasattr", "(", "self", ".", "array", ",", "'memory_usage'", ")", ":", "return", "self", ".", "array", ".", "memory_usage", "(", "deep", "=", "deep", ")", "v", "=", "self", ".", "array", ".", "nbytes", "if", "deep", "and", "is_object_dtype", "(", "self", ")", "and", "not", "PYPY", ":", "v", "+=", "lib", ".", "memory_usage_of_objects", "(", "self", ".", "array", ")", "return", "v" ]
Memory usage of the values

Parameters
----------
deep : bool
    Introspect the data deeply, interrogate
    `object` dtypes for system-level memory consumption

Returns
-------
bytes used

See Also
--------
numpy.ndarray.nbytes

Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
[ "Memory", "usage", "of", "the", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L1396-L1425
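A small sketch contrasting shallow and deep accounting on an object-dtype Series; the exact byte counts are platform- and version-dependent, and the data is illustrative:

import pandas as pd

s = pd.Series(["alpha", "beta", "gamma"])

# Shallow: only the array of object pointers is counted.
print(s.memory_usage(index=False))

# Deep: the Python string objects themselves are also counted
# (skipped on PyPy, as the Notes section above explains).
print(s.memory_usage(index=False, deep=True))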
20,247
pandas-dev/pandas
pandas/io/common.py
_stringify_path
def _stringify_path(filepath_or_buffer): """Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. For backwards compatibility with older pythons, pathlib.Path and py.path objects are specially coerced. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ try: import pathlib _PATHLIB_INSTALLED = True except ImportError: _PATHLIB_INSTALLED = False try: from py.path import local as LocalPath _PY_PATH_INSTALLED = True except ImportError: _PY_PATH_INSTALLED = False if hasattr(filepath_or_buffer, '__fspath__'): return filepath_or_buffer.__fspath__() if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path): return str(filepath_or_buffer) if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath): return filepath_or_buffer.strpath return _expand_user(filepath_or_buffer)
python
def _stringify_path(filepath_or_buffer): """Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. For backwards compatibility with older pythons, pathlib.Path and py.path objects are specially coerced. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like. """ try: import pathlib _PATHLIB_INSTALLED = True except ImportError: _PATHLIB_INSTALLED = False try: from py.path import local as LocalPath _PY_PATH_INSTALLED = True except ImportError: _PY_PATH_INSTALLED = False if hasattr(filepath_or_buffer, '__fspath__'): return filepath_or_buffer.__fspath__() if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path): return str(filepath_or_buffer) if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath): return filepath_or_buffer.strpath return _expand_user(filepath_or_buffer)
[ "def", "_stringify_path", "(", "filepath_or_buffer", ")", ":", "try", ":", "import", "pathlib", "_PATHLIB_INSTALLED", "=", "True", "except", "ImportError", ":", "_PATHLIB_INSTALLED", "=", "False", "try", ":", "from", "py", ".", "path", "import", "local", "as", "LocalPath", "_PY_PATH_INSTALLED", "=", "True", "except", "ImportError", ":", "_PY_PATH_INSTALLED", "=", "False", "if", "hasattr", "(", "filepath_or_buffer", ",", "'__fspath__'", ")", ":", "return", "filepath_or_buffer", ".", "__fspath__", "(", ")", "if", "_PATHLIB_INSTALLED", "and", "isinstance", "(", "filepath_or_buffer", ",", "pathlib", ".", "Path", ")", ":", "return", "str", "(", "filepath_or_buffer", ")", "if", "_PY_PATH_INSTALLED", "and", "isinstance", "(", "filepath_or_buffer", ",", "LocalPath", ")", ":", "return", "filepath_or_buffer", ".", "strpath", "return", "_expand_user", "(", "filepath_or_buffer", ")" ]
Attempt to convert a path-like object to a string. Parameters ---------- filepath_or_buffer : object to be converted Returns ------- str_filepath_or_buffer : maybe a string version of the object Notes ----- Objects supporting the fspath protocol (python 3.6+) are coerced according to its __fspath__ method. For backwards compatibility with older pythons, pathlib.Path and py.path objects are specially coerced. Any other object is passed through unchanged, which includes bytes, strings, buffers, or anything else that's not even path-like.
[ "Attempt", "to", "convert", "a", "path", "-", "like", "object", "to", "a", "string", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/common.py#L96-L136
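Because _stringify_path is a private helper, the sketch below paraphrases the same coercion order (fspath protocol, then pathlib.Path, then user expansion for strings) as standalone code rather than importing it; it is an illustration under those assumptions, not the pandas implementation:

import os
import pathlib


def stringify_path(path_or_buffer):
    # Prefer the fspath protocol, as the helper above does.
    if hasattr(path_or_buffer, "__fspath__"):
        return path_or_buffer.__fspath__()
    if isinstance(path_or_buffer, pathlib.Path):
        return str(path_or_buffer)
    if isinstance(path_or_buffer, str):
        return os.path.expanduser(path_or_buffer)
    return path_or_buffer  # buffers and other objects pass through unchanged


print(stringify_path(pathlib.Path("data") / "prices.csv"))  # 'data/prices.csv' (separator is platform-dependent)
print(stringify_path("~/prices.csv"))                       # '~' expanded to the home directory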
20,248
pandas-dev/pandas
pandas/io/common.py
get_filepath_or_buffer
def get_filepath_or_buffer(filepath_or_buffer, encoding=None, compression=None, mode=None): """ If the filepath_or_buffer is a url, translate and return the buffer. Otherwise passthrough. Parameters ---------- filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path), or buffer compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional encoding : the encoding to use to decode bytes, default is 'utf-8' mode : str, optional Returns ------- tuple of ({a filepath_ or buffer or S3File instance}, encoding, str, compression, str, should_close, bool) """ filepath_or_buffer = _stringify_path(filepath_or_buffer) if _is_url(filepath_or_buffer): req = urlopen(filepath_or_buffer) content_encoding = req.headers.get('Content-Encoding', None) if content_encoding == 'gzip': # Override compression based on Content-Encoding header compression = 'gzip' reader = BytesIO(req.read()) req.close() return reader, encoding, compression, True if is_s3_url(filepath_or_buffer): from pandas.io import s3 return s3.get_filepath_or_buffer(filepath_or_buffer, encoding=encoding, compression=compression, mode=mode) if is_gcs_url(filepath_or_buffer): from pandas.io import gcs return gcs.get_filepath_or_buffer(filepath_or_buffer, encoding=encoding, compression=compression, mode=mode) if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): return _expand_user(filepath_or_buffer), None, compression, False if not is_file_like(filepath_or_buffer): msg = "Invalid file path or buffer object type: {_type}" raise ValueError(msg.format(_type=type(filepath_or_buffer))) return filepath_or_buffer, None, compression, False
python
def get_filepath_or_buffer(filepath_or_buffer, encoding=None, compression=None, mode=None): """ If the filepath_or_buffer is a url, translate and return the buffer. Otherwise passthrough. Parameters ---------- filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path), or buffer compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional encoding : the encoding to use to decode bytes, default is 'utf-8' mode : str, optional Returns ------- tuple of ({a filepath_ or buffer or S3File instance}, encoding, str, compression, str, should_close, bool) """ filepath_or_buffer = _stringify_path(filepath_or_buffer) if _is_url(filepath_or_buffer): req = urlopen(filepath_or_buffer) content_encoding = req.headers.get('Content-Encoding', None) if content_encoding == 'gzip': # Override compression based on Content-Encoding header compression = 'gzip' reader = BytesIO(req.read()) req.close() return reader, encoding, compression, True if is_s3_url(filepath_or_buffer): from pandas.io import s3 return s3.get_filepath_or_buffer(filepath_or_buffer, encoding=encoding, compression=compression, mode=mode) if is_gcs_url(filepath_or_buffer): from pandas.io import gcs return gcs.get_filepath_or_buffer(filepath_or_buffer, encoding=encoding, compression=compression, mode=mode) if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): return _expand_user(filepath_or_buffer), None, compression, False if not is_file_like(filepath_or_buffer): msg = "Invalid file path or buffer object type: {_type}" raise ValueError(msg.format(_type=type(filepath_or_buffer))) return filepath_or_buffer, None, compression, False
[ "def", "get_filepath_or_buffer", "(", "filepath_or_buffer", ",", "encoding", "=", "None", ",", "compression", "=", "None", ",", "mode", "=", "None", ")", ":", "filepath_or_buffer", "=", "_stringify_path", "(", "filepath_or_buffer", ")", "if", "_is_url", "(", "filepath_or_buffer", ")", ":", "req", "=", "urlopen", "(", "filepath_or_buffer", ")", "content_encoding", "=", "req", ".", "headers", ".", "get", "(", "'Content-Encoding'", ",", "None", ")", "if", "content_encoding", "==", "'gzip'", ":", "# Override compression based on Content-Encoding header", "compression", "=", "'gzip'", "reader", "=", "BytesIO", "(", "req", ".", "read", "(", ")", ")", "req", ".", "close", "(", ")", "return", "reader", ",", "encoding", ",", "compression", ",", "True", "if", "is_s3_url", "(", "filepath_or_buffer", ")", ":", "from", "pandas", ".", "io", "import", "s3", "return", "s3", ".", "get_filepath_or_buffer", "(", "filepath_or_buffer", ",", "encoding", "=", "encoding", ",", "compression", "=", "compression", ",", "mode", "=", "mode", ")", "if", "is_gcs_url", "(", "filepath_or_buffer", ")", ":", "from", "pandas", ".", "io", "import", "gcs", "return", "gcs", ".", "get_filepath_or_buffer", "(", "filepath_or_buffer", ",", "encoding", "=", "encoding", ",", "compression", "=", "compression", ",", "mode", "=", "mode", ")", "if", "isinstance", "(", "filepath_or_buffer", ",", "(", "str", ",", "bytes", ",", "mmap", ".", "mmap", ")", ")", ":", "return", "_expand_user", "(", "filepath_or_buffer", ")", ",", "None", ",", "compression", ",", "False", "if", "not", "is_file_like", "(", "filepath_or_buffer", ")", ":", "msg", "=", "\"Invalid file path or buffer object type: {_type}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "_type", "=", "type", "(", "filepath_or_buffer", ")", ")", ")", "return", "filepath_or_buffer", ",", "None", ",", "compression", ",", "False" ]
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.

Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
                     or buffer
compression : {{'gzip', 'bz2', 'zip', 'xz', None}}, optional
encoding : the encoding to use to decode bytes, default is 'utf-8'
mode : str, optional

Returns
-------
tuple of (a filepath or buffer or S3File instance,
          encoding : str,
          compression : str,
          should_close : bool)
[ "If", "the", "filepath_or_buffer", "is", "a", "url", "translate", "and", "return", "the", "buffer", ".", "Otherwise", "passthrough", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/common.py#L155-L209
20,249
pandas-dev/pandas
pandas/io/common.py
_infer_compression
def _infer_compression(filepath_or_buffer, compression): """ Get the compression method for filepath_or_buffer. If compression='infer', the inferred compression method is returned. Otherwise, the input compression method is returned unchanged, unless it's invalid, in which case an error is raised. Parameters ---------- filepath_or_buffer : a path (str) or buffer compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None} If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). Returns ------- string or None : compression method Raises ------ ValueError on invalid compression specified """ # No compression has been explicitly specified if compression is None: return None # Infer compression if compression == 'infer': # Convert all path types (e.g. pathlib.Path) to strings filepath_or_buffer = _stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): # Cannot infer compression of a buffer, assume no compression return None # Infer compression from the filename/URL extension for compression, extension in _compression_to_extension.items(): if filepath_or_buffer.endswith(extension): return compression return None # Compression has been specified. Check that it's valid if compression in _compression_to_extension: return compression msg = 'Unrecognized compression type: {}'.format(compression) valid = ['infer', None] + sorted(_compression_to_extension) msg += '\nValid compression types are {}'.format(valid) raise ValueError(msg)
python
def _infer_compression(filepath_or_buffer, compression): """ Get the compression method for filepath_or_buffer. If compression='infer', the inferred compression method is returned. Otherwise, the input compression method is returned unchanged, unless it's invalid, in which case an error is raised. Parameters ---------- filepath_or_buffer : a path (str) or buffer compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None} If 'infer' and `filepath_or_buffer` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no compression). Returns ------- string or None : compression method Raises ------ ValueError on invalid compression specified """ # No compression has been explicitly specified if compression is None: return None # Infer compression if compression == 'infer': # Convert all path types (e.g. pathlib.Path) to strings filepath_or_buffer = _stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): # Cannot infer compression of a buffer, assume no compression return None # Infer compression from the filename/URL extension for compression, extension in _compression_to_extension.items(): if filepath_or_buffer.endswith(extension): return compression return None # Compression has been specified. Check that it's valid if compression in _compression_to_extension: return compression msg = 'Unrecognized compression type: {}'.format(compression) valid = ['infer', None] + sorted(_compression_to_extension) msg += '\nValid compression types are {}'.format(valid) raise ValueError(msg)
[ "def", "_infer_compression", "(", "filepath_or_buffer", ",", "compression", ")", ":", "# No compression has been explicitly specified", "if", "compression", "is", "None", ":", "return", "None", "# Infer compression", "if", "compression", "==", "'infer'", ":", "# Convert all path types (e.g. pathlib.Path) to strings", "filepath_or_buffer", "=", "_stringify_path", "(", "filepath_or_buffer", ")", "if", "not", "isinstance", "(", "filepath_or_buffer", ",", "str", ")", ":", "# Cannot infer compression of a buffer, assume no compression", "return", "None", "# Infer compression from the filename/URL extension", "for", "compression", ",", "extension", "in", "_compression_to_extension", ".", "items", "(", ")", ":", "if", "filepath_or_buffer", ".", "endswith", "(", "extension", ")", ":", "return", "compression", "return", "None", "# Compression has been specified. Check that it's valid", "if", "compression", "in", "_compression_to_extension", ":", "return", "compression", "msg", "=", "'Unrecognized compression type: {}'", ".", "format", "(", "compression", ")", "valid", "=", "[", "'infer'", ",", "None", "]", "+", "sorted", "(", "_compression_to_extension", ")", "msg", "+=", "'\\nValid compression types are {}'", ".", "format", "(", "valid", ")", "raise", "ValueError", "(", "msg", ")" ]
Get the compression method for filepath_or_buffer. If compression='infer',
the inferred compression method is returned. Otherwise, the input
compression method is returned unchanged, unless it's invalid, in which
case an error is raised.

Parameters
----------
filepath_or_buffer : a path (str) or buffer
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
    If 'infer' and `filepath_or_buffer` is path-like, then detect
    compression from the following extensions: '.gz', '.bz2', '.zip',
    or '.xz' (otherwise no compression).

Returns
-------
string or None : compression method

Raises
------
ValueError on invalid compression specified
[ "Get", "the", "compression", "method", "for", "filepath_or_buffer", ".", "If", "compression", "=", "infer", "the", "inferred", "compression", "method", "is", "returned", ".", "Otherwise", "the", "input", "compression", "method", "is", "returned", "unchanged", "unless", "it", "s", "invalid", "in", "which", "case", "an", "error", "is", "raised", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/common.py#L235-L286
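A standalone paraphrase of the extension-based inference, reusing the same extension map the record above iterates over; this is an illustration, not the pandas-internal helper:

# Same mapping the pandas helper consults, reproduced here for the sketch.
compression_to_extension = {"gzip": ".gz", "bz2": ".bz2", "zip": ".zip", "xz": ".xz"}


def infer_compression(path, compression="infer"):
    if compression is None:
        return None
    if compression == "infer":
        if not isinstance(path, str):
            return None  # cannot infer from a buffer
        for method, extension in compression_to_extension.items():
            if path.endswith(extension):
                return method
        return None
    if compression in compression_to_extension:
        return compression
    raise ValueError("Unrecognized compression type: {}".format(compression))


print(infer_compression("trades.csv.gz"))  # 'gzip'
print(infer_compression("trades.csv"))     # None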
20,250
pandas-dev/pandas
pandas/core/arrays/timedeltas.py
_td_array_cmp
def _td_array_cmp(cls, op): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented if _is_convertible_to_td(other) or other is NaT: try: other = Timedelta(other) except ValueError: # failed to parse as timedelta return ops.invalid_comparison(self, other, op) result = op(self.view('i8'), other.value) if isna(other): result.fill(nat_result) elif not is_list_like(other): return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: try: other = type(self)._from_sequence(other)._data except (ValueError, TypeError): return ops.invalid_comparison(self, other, op) result = op(self.view('i8'), other.view('i8')) result = com.values_from_object(result) o_mask = np.array(isna(other)) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
python
def _td_array_cmp(cls, op): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented if _is_convertible_to_td(other) or other is NaT: try: other = Timedelta(other) except ValueError: # failed to parse as timedelta return ops.invalid_comparison(self, other, op) result = op(self.view('i8'), other.value) if isna(other): result.fill(nat_result) elif not is_list_like(other): return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: try: other = type(self)._from_sequence(other)._data except (ValueError, TypeError): return ops.invalid_comparison(self, other, op) result = op(self.view('i8'), other.view('i8')) result = com.values_from_object(result) o_mask = np.array(isna(other)) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
[ "def", "_td_array_cmp", "(", "cls", ",", "op", ")", ":", "opname", "=", "'__{name}__'", ".", "format", "(", "name", "=", "op", ".", "__name__", ")", "nat_result", "=", "opname", "==", "'__ne__'", "def", "wrapper", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "(", "ABCDataFrame", ",", "ABCSeries", ",", "ABCIndexClass", ")", ")", ":", "return", "NotImplemented", "if", "_is_convertible_to_td", "(", "other", ")", "or", "other", "is", "NaT", ":", "try", ":", "other", "=", "Timedelta", "(", "other", ")", "except", "ValueError", ":", "# failed to parse as timedelta", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "result", "=", "op", "(", "self", ".", "view", "(", "'i8'", ")", ",", "other", ".", "value", ")", "if", "isna", "(", "other", ")", ":", "result", ".", "fill", "(", "nat_result", ")", "elif", "not", "is_list_like", "(", "other", ")", ":", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "elif", "len", "(", "other", ")", "!=", "len", "(", "self", ")", ":", "raise", "ValueError", "(", "\"Lengths must match\"", ")", "else", ":", "try", ":", "other", "=", "type", "(", "self", ")", ".", "_from_sequence", "(", "other", ")", ".", "_data", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "result", "=", "op", "(", "self", ".", "view", "(", "'i8'", ")", ",", "other", ".", "view", "(", "'i8'", ")", ")", "result", "=", "com", ".", "values_from_object", "(", "result", ")", "o_mask", "=", "np", ".", "array", "(", "isna", "(", "other", ")", ")", "if", "o_mask", ".", "any", "(", ")", ":", "result", "[", "o_mask", "]", "=", "nat_result", "if", "self", ".", "_hasnans", ":", "result", "[", "self", ".", "_isnan", "]", "=", "nat_result", "return", "result", "return", "compat", ".", "set_function_name", "(", "wrapper", ",", "opname", ",", "cls", ")" ]
Wrap comparison operations to convert timedelta-like to timedelta64
[ "Wrap", "comparison", "operations", "to", "convert", "timedelta", "-", "like", "to", "timedelta64" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/timedeltas.py#L56-L102
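The wrapper above is what gives timedelta comparisons their NaT semantics: missing entries compare False under every operator except !=, where they compare True (the nat_result flag). A user-level sketch with made-up values:

import pandas as pd

tdi = pd.to_timedelta(["1 day", "2 days", pd.NaT])

print(tdi > pd.Timedelta("1 day"))   # [False  True False] -> NaT compares False
print(tdi != pd.Timedelta("1 day"))  # [False  True  True] -> except under !=, where NaT compares True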
20,251
pandas-dev/pandas
pandas/io/excel/_util.py
register_writer
def register_writer(klass): """ Add engine to the excel writer registry.io.excel. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter """ if not callable(klass): raise ValueError("Can only register callables as engines") engine_name = klass.engine _writers[engine_name] = klass
python
def register_writer(klass): """ Add engine to the excel writer registry.io.excel. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter """ if not callable(klass): raise ValueError("Can only register callables as engines") engine_name = klass.engine _writers[engine_name] = klass
[ "def", "register_writer", "(", "klass", ")", ":", "if", "not", "callable", "(", "klass", ")", ":", "raise", "ValueError", "(", "\"Can only register callables as engines\"", ")", "engine_name", "=", "klass", ".", "engine", "_writers", "[", "engine_name", "]", "=", "klass" ]
Add engine to the excel writer registry.

You must use this method to integrate with ``to_excel``.

Parameters
----------
klass : ExcelWriter
[ "Add", "engine", "to", "the", "excel", "writer", "registry", ".", "io", ".", "excel", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L10-L23
20,252
pandas-dev/pandas
pandas/io/excel/_util.py
_excel2num
def _excel2num(x): """ Convert Excel column name like 'AB' to 0-based column index. Parameters ---------- x : str The Excel column name to convert to a 0-based column index. Returns ------- num : int The column index corresponding to the name. Raises ------ ValueError Part of the Excel column name was invalid. """ index = 0 for c in x.upper().strip(): cp = ord(c) if cp < ord("A") or cp > ord("Z"): raise ValueError("Invalid column name: {x}".format(x=x)) index = index * 26 + cp - ord("A") + 1 return index - 1
python
def _excel2num(x): """ Convert Excel column name like 'AB' to 0-based column index. Parameters ---------- x : str The Excel column name to convert to a 0-based column index. Returns ------- num : int The column index corresponding to the name. Raises ------ ValueError Part of the Excel column name was invalid. """ index = 0 for c in x.upper().strip(): cp = ord(c) if cp < ord("A") or cp > ord("Z"): raise ValueError("Invalid column name: {x}".format(x=x)) index = index * 26 + cp - ord("A") + 1 return index - 1
[ "def", "_excel2num", "(", "x", ")", ":", "index", "=", "0", "for", "c", "in", "x", ".", "upper", "(", ")", ".", "strip", "(", ")", ":", "cp", "=", "ord", "(", "c", ")", "if", "cp", "<", "ord", "(", "\"A\"", ")", "or", "cp", ">", "ord", "(", "\"Z\"", ")", ":", "raise", "ValueError", "(", "\"Invalid column name: {x}\"", ".", "format", "(", "x", "=", "x", ")", ")", "index", "=", "index", "*", "26", "+", "cp", "-", "ord", "(", "\"A\"", ")", "+", "1", "return", "index", "-", "1" ]
Convert Excel column name like 'AB' to 0-based column index. Parameters ---------- x : str The Excel column name to convert to a 0-based column index. Returns ------- num : int The column index corresponding to the name. Raises ------ ValueError Part of the Excel column name was invalid.
[ "Convert", "Excel", "column", "name", "like", "AB", "to", "0", "-", "based", "column", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L57-L86
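A standalone paraphrase of the base-26 conversion, handy for sanity-checking that 'AB' really maps to column index 27; illustrative only, not the pandas helper itself:

def excel_col_to_index(name):
    # 'A' -> 0, 'Z' -> 25, 'AA' -> 26, 'AB' -> 27, mirroring the logic above.
    index = 0
    for ch in name.upper().strip():
        if not "A" <= ch <= "Z":
            raise ValueError("Invalid column name: {}".format(name))
        index = index * 26 + (ord(ch) - ord("A") + 1)
    return index - 1


print(excel_col_to_index("A"))   # 0
print(excel_col_to_index("Z"))   # 25
print(excel_col_to_index("AB"))  # 27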
20,253
pandas-dev/pandas
pandas/io/excel/_util.py
_range2cols
def _range2cols(areas): """ Convert comma separated list of column names and ranges to indices. Parameters ---------- areas : str A string containing a sequence of column ranges (or areas). Returns ------- cols : list A list of 0-based column indices. Examples -------- >>> _range2cols('A:E') [0, 1, 2, 3, 4] >>> _range2cols('A,C,Z:AB') [0, 2, 25, 26, 27] """ cols = [] for rng in areas.split(","): if ":" in rng: rng = rng.split(":") cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) else: cols.append(_excel2num(rng)) return cols
python
def _range2cols(areas): """ Convert comma separated list of column names and ranges to indices. Parameters ---------- areas : str A string containing a sequence of column ranges (or areas). Returns ------- cols : list A list of 0-based column indices. Examples -------- >>> _range2cols('A:E') [0, 1, 2, 3, 4] >>> _range2cols('A,C,Z:AB') [0, 2, 25, 26, 27] """ cols = [] for rng in areas.split(","): if ":" in rng: rng = rng.split(":") cols.extend(lrange(_excel2num(rng[0]), _excel2num(rng[1]) + 1)) else: cols.append(_excel2num(rng)) return cols
[ "def", "_range2cols", "(", "areas", ")", ":", "cols", "=", "[", "]", "for", "rng", "in", "areas", ".", "split", "(", "\",\"", ")", ":", "if", "\":\"", "in", "rng", ":", "rng", "=", "rng", ".", "split", "(", "\":\"", ")", "cols", ".", "extend", "(", "lrange", "(", "_excel2num", "(", "rng", "[", "0", "]", ")", ",", "_excel2num", "(", "rng", "[", "1", "]", ")", "+", "1", ")", ")", "else", ":", "cols", ".", "append", "(", "_excel2num", "(", "rng", ")", ")", "return", "cols" ]
Convert comma separated list of column names and ranges to indices.

Parameters
----------
areas : str
    A string containing a sequence of column ranges (or areas).

Returns
-------
cols : list
    A list of 0-based column indices.

Examples
--------
>>> _range2cols('A:E')
[0, 1, 2, 3, 4]
>>> _range2cols('A,C,Z:AB')
[0, 2, 25, 26, 27]
[ "Convert", "comma", "separated", "list", "of", "column", "names", "and", "ranges", "to", "indices", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L89-L119
20,254
pandas-dev/pandas
pandas/io/excel/_util.py
_maybe_convert_usecols
def _maybe_convert_usecols(usecols): """ Convert `usecols` into a compatible format for parsing in `parsers.py`. Parameters ---------- usecols : object The use-columns object to potentially convert. Returns ------- converted : object The compatible format of `usecols`. """ if usecols is None: return usecols if is_integer(usecols): warnings.warn(("Passing in an integer for `usecols` has been " "deprecated. Please pass in a list of int from " "0 to `usecols` inclusive instead."), FutureWarning, stacklevel=2) return lrange(usecols + 1) if isinstance(usecols, str): return _range2cols(usecols) return usecols
python
def _maybe_convert_usecols(usecols): """ Convert `usecols` into a compatible format for parsing in `parsers.py`. Parameters ---------- usecols : object The use-columns object to potentially convert. Returns ------- converted : object The compatible format of `usecols`. """ if usecols is None: return usecols if is_integer(usecols): warnings.warn(("Passing in an integer for `usecols` has been " "deprecated. Please pass in a list of int from " "0 to `usecols` inclusive instead."), FutureWarning, stacklevel=2) return lrange(usecols + 1) if isinstance(usecols, str): return _range2cols(usecols) return usecols
[ "def", "_maybe_convert_usecols", "(", "usecols", ")", ":", "if", "usecols", "is", "None", ":", "return", "usecols", "if", "is_integer", "(", "usecols", ")", ":", "warnings", ".", "warn", "(", "(", "\"Passing in an integer for `usecols` has been \"", "\"deprecated. Please pass in a list of int from \"", "\"0 to `usecols` inclusive instead.\"", ")", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "lrange", "(", "usecols", "+", "1", ")", "if", "isinstance", "(", "usecols", ",", "str", ")", ":", "return", "_range2cols", "(", "usecols", ")", "return", "usecols" ]
Convert `usecols` into a compatible format for parsing in `parsers.py`. Parameters ---------- usecols : object The use-columns object to potentially convert. Returns ------- converted : object The compatible format of `usecols`.
[ "Convert", "usecols", "into", "a", "compatible", "format", "for", "parsing", "in", "parsers", ".", "py", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L122-L149
20,255
pandas-dev/pandas
pandas/io/excel/_util.py
_fill_mi_header
def _fill_mi_header(row, control_row): """Forward fill blank entries in row but only inside the same parent index. Used for creating headers in Multiindex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine if particular column is in same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ---------- Returns changed row and control_row """ last = row[0] for i in range(1, len(row)): if not control_row[i]: last = row[i] if row[i] == '' or row[i] is None: row[i] = last else: control_row[i] = False last = row[i] return row, control_row
python
def _fill_mi_header(row, control_row): """Forward fill blank entries in row but only inside the same parent index. Used for creating headers in Multiindex. Parameters ---------- row : list List of items in a single row. control_row : list of bool Helps to determine if particular column is in same parent index as the previous value. Used to stop propagation of empty cells between different indexes. Returns ---------- Returns changed row and control_row """ last = row[0] for i in range(1, len(row)): if not control_row[i]: last = row[i] if row[i] == '' or row[i] is None: row[i] = last else: control_row[i] = False last = row[i] return row, control_row
[ "def", "_fill_mi_header", "(", "row", ",", "control_row", ")", ":", "last", "=", "row", "[", "0", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "row", ")", ")", ":", "if", "not", "control_row", "[", "i", "]", ":", "last", "=", "row", "[", "i", "]", "if", "row", "[", "i", "]", "==", "''", "or", "row", "[", "i", "]", "is", "None", ":", "row", "[", "i", "]", "=", "last", "else", ":", "control_row", "[", "i", "]", "=", "False", "last", "=", "row", "[", "i", "]", "return", "row", ",", "control_row" ]
Forward fill blank entries in row but only inside the same parent index.

Used for creating headers in MultiIndex.

Parameters
----------
row : list
    List of items in a single row.
control_row : list of bool
    Helps to determine if particular column is in same parent index as
    the previous value. Used to stop propagation of empty cells between
    different indexes.

Returns
-------
Returns changed row and control_row
[ "Forward", "fill", "blank", "entries", "in", "row", "but", "only", "inside", "the", "same", "parent", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L176-L204
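A standalone paraphrase of the forward-fill logic with a made-up header row, showing how blanks inherit the previous label only until a new parent label appears; illustrative, not the pandas helper itself:

def fill_mi_header(row, control_row):
    # Forward fill blanks within the same parent index, as above.
    last = row[0]
    for i in range(1, len(row)):
        if not control_row[i]:
            last = row[i]
        if row[i] == "" or row[i] is None:
            row[i] = last
        else:
            control_row[i] = False
            last = row[i]
    return row, control_row


row, control = fill_mi_header(["group1", "", "", "group2", ""], [True] * 5)
print(row)  # ['group1', 'group1', 'group1', 'group2', 'group2']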
20,256
pandas-dev/pandas
pandas/io/excel/_util.py
_pop_header_name
def _pop_header_name(row, index_col): """ Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. trimmed_row : list The original data row with the header name removed. """ # Pop out header name and fill w/blank. i = index_col if not is_list_like(index_col) else max(index_col) header_name = row[i] header_name = None if header_name == "" else header_name return header_name, row[:i] + [''] + row[i + 1:]
python
def _pop_header_name(row, index_col): """ Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. trimmed_row : list The original data row with the header name removed. """ # Pop out header name and fill w/blank. i = index_col if not is_list_like(index_col) else max(index_col) header_name = row[i] header_name = None if header_name == "" else header_name return header_name, row[:i] + [''] + row[i + 1:]
[ "def", "_pop_header_name", "(", "row", ",", "index_col", ")", ":", "# Pop out header name and fill w/blank.", "i", "=", "index_col", "if", "not", "is_list_like", "(", "index_col", ")", "else", "max", "(", "index_col", ")", "header_name", "=", "row", "[", "i", "]", "header_name", "=", "None", "if", "header_name", "==", "\"\"", "else", "header_name", "return", "header_name", ",", "row", "[", ":", "i", "]", "+", "[", "''", "]", "+", "row", "[", "i", "+", "1", ":", "]" ]
Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. trimmed_row : list The original data row with the header name removed.
[ "Pop", "the", "header", "name", "for", "MultiIndex", "parsing", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_util.py#L207-L231
20,257
pandas-dev/pandas
pandas/core/computation/scope.py
_ensure_scope
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(), target=None, **kwargs): """Ensure that we are grabbing the correct scope.""" return Scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target)
python
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(), target=None, **kwargs): """Ensure that we are grabbing the correct scope.""" return Scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target)
[ "def", "_ensure_scope", "(", "level", ",", "global_dict", "=", "None", ",", "local_dict", "=", "None", ",", "resolvers", "=", "(", ")", ",", "target", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "Scope", "(", "level", "+", "1", ",", "global_dict", "=", "global_dict", ",", "local_dict", "=", "local_dict", ",", "resolvers", "=", "resolvers", ",", "target", "=", "target", ")" ]
Ensure that we are grabbing the correct scope.
[ "Ensure", "that", "we", "are", "grabbing", "the", "correct", "scope", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L22-L26
20,258
pandas-dev/pandas
pandas/core/computation/scope.py
_replacer
def _replacer(x): """Replace a number with its hexadecimal representation. Used to tag temporary variables with their calling scope's id. """ # get the hex repr of the binary char and remove 0x and pad by pad_size # zeros try: hexin = ord(x) except TypeError: # bytes literals masquerade as ints when iterating in py3 hexin = x return hex(hexin)
python
def _replacer(x): """Replace a number with its hexadecimal representation. Used to tag temporary variables with their calling scope's id. """ # get the hex repr of the binary char and remove 0x and pad by pad_size # zeros try: hexin = ord(x) except TypeError: # bytes literals masquerade as ints when iterating in py3 hexin = x return hex(hexin)
[ "def", "_replacer", "(", "x", ")", ":", "# get the hex repr of the binary char and remove 0x and pad by pad_size", "# zeros", "try", ":", "hexin", "=", "ord", "(", "x", ")", "except", "TypeError", ":", "# bytes literals masquerade as ints when iterating in py3", "hexin", "=", "x", "return", "hex", "(", "hexin", ")" ]
Replace a number with its hexadecimal representation. Used to tag temporary variables with their calling scope's id.
[ "Replace", "a", "number", "with", "its", "hexadecimal", "representation", ".", "Used", "to", "tag", "temporary", "variables", "with", "their", "calling", "scope", "s", "id", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L29-L41
20,259
pandas-dev/pandas
pandas/core/computation/scope.py
_raw_hex_id
def _raw_hex_id(obj):
    """Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id returns
    packed = struct.pack('@P', id(obj))
    return ''.join(map(_replacer, packed))
python
def _raw_hex_id(obj):
    """Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id returns
    packed = struct.pack('@P', id(obj))
    return ''.join(map(_replacer, packed))
[ "def", "_raw_hex_id", "(", "obj", ")", ":", "# interpret as a pointer since that's what really what id returns", "packed", "=", "struct", ".", "pack", "(", "'@P'", ",", "id", "(", "obj", ")", ")", "return", "''", ".", "join", "(", "map", "(", "_replacer", ",", "packed", ")", ")" ]
Return the padded hexadecimal id of ``obj``.
[ "Return", "the", "padded", "hexadecimal", "id", "of", "obj", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L44-L48
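A standalone sketch combining the two scope helpers above: pack the object's id as a native pointer and hex-encode it byte by byte (in Python 3, iterating bytes already yields ints, which is what the TypeError fallback in _replacer handles). Names and output are illustrative only:

import struct


def raw_hex_id(obj):
    packed = struct.pack("@P", id(obj))     # native pointer-sized integer
    return "".join(hex(b) for b in packed)  # e.g. '0x300x250xf1...'


marker = object()
print(raw_hex_id(marker))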
20,260
pandas-dev/pandas
pandas/core/computation/scope.py
_get_pretty_string
def _get_pretty_string(obj): """Return a prettier version of obj Parameters ---------- obj : object Object to pretty print Returns ------- s : str Pretty print object repr """ sio = StringIO() pprint.pprint(obj, stream=sio) return sio.getvalue()
python
def _get_pretty_string(obj): """Return a prettier version of obj Parameters ---------- obj : object Object to pretty print Returns ------- s : str Pretty print object repr """ sio = StringIO() pprint.pprint(obj, stream=sio) return sio.getvalue()
[ "def", "_get_pretty_string", "(", "obj", ")", ":", "sio", "=", "StringIO", "(", ")", "pprint", ".", "pprint", "(", "obj", ",", "stream", "=", "sio", ")", "return", "sio", ".", "getvalue", "(", ")" ]
Return a prettier version of obj Parameters ---------- obj : object Object to pretty print Returns ------- s : str Pretty print object repr
[ "Return", "a", "prettier", "version", "of", "obj" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L63-L78
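A standalone sketch of the same pprint-to-StringIO pattern; the dictionary passed in is made up:

import pprint
from io import StringIO


def get_pretty_string(obj):
    sio = StringIO()
    pprint.pprint(obj, stream=sio)
    return sio.getvalue()


print(get_pretty_string({"resolvers": ["df", "local_dict"], "level": 2}))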
20,261
pandas-dev/pandas
pandas/core/computation/scope.py
Scope.resolve
def resolve(self, key, is_local): """Resolve a variable name in a possibly local context Parameters ---------- key : str A variable name is_local : bool Flag indicating whether the variable is local or not (prefixed with the '@' symbol) Returns ------- value : object The value of a particular variable """ try: # only look for locals in outer scope if is_local: return self.scope[key] # not a local variable so check in resolvers if we have them if self.has_resolvers: return self.resolvers[key] # if we're here that means that we have no locals and we also have # no resolvers assert not is_local and not self.has_resolvers return self.scope[key] except KeyError: try: # last ditch effort we look in temporaries # these are created when parsing indexing expressions # e.g., df[df > 0] return self.temps[key] except KeyError: raise compu.ops.UndefinedVariableError(key, is_local)
python
def resolve(self, key, is_local): """Resolve a variable name in a possibly local context Parameters ---------- key : str A variable name is_local : bool Flag indicating whether the variable is local or not (prefixed with the '@' symbol) Returns ------- value : object The value of a particular variable """ try: # only look for locals in outer scope if is_local: return self.scope[key] # not a local variable so check in resolvers if we have them if self.has_resolvers: return self.resolvers[key] # if we're here that means that we have no locals and we also have # no resolvers assert not is_local and not self.has_resolvers return self.scope[key] except KeyError: try: # last ditch effort we look in temporaries # these are created when parsing indexing expressions # e.g., df[df > 0] return self.temps[key] except KeyError: raise compu.ops.UndefinedVariableError(key, is_local)
[ "def", "resolve", "(", "self", ",", "key", ",", "is_local", ")", ":", "try", ":", "# only look for locals in outer scope", "if", "is_local", ":", "return", "self", ".", "scope", "[", "key", "]", "# not a local variable so check in resolvers if we have them", "if", "self", ".", "has_resolvers", ":", "return", "self", ".", "resolvers", "[", "key", "]", "# if we're here that means that we have no locals and we also have", "# no resolvers", "assert", "not", "is_local", "and", "not", "self", ".", "has_resolvers", "return", "self", ".", "scope", "[", "key", "]", "except", "KeyError", ":", "try", ":", "# last ditch effort we look in temporaries", "# these are created when parsing indexing expressions", "# e.g., df[df > 0]", "return", "self", ".", "temps", "[", "key", "]", "except", "KeyError", ":", "raise", "compu", ".", "ops", ".", "UndefinedVariableError", "(", "key", ",", "is_local", ")" ]
Resolve a variable name in a possibly local context

Parameters
----------
key : str
    A variable name
is_local : bool
    Flag indicating whether the variable is local or not (prefixed with
    the '@' symbol)

Returns
-------
value : object
    The value of a particular variable
[ "Resolve", "a", "variable", "name", "in", "a", "possibly", "local", "context" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L159-L195
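Scope.resolve is what ultimately backs the '@' prefix in DataFrame.query / pd.eval: local variables are looked up before resolvers and the enclosing scope. A user-level sketch with made-up data (the variable name is arbitrary):

import pandas as pd

df = pd.DataFrame({"price": [9.5, 10.2, 11.0]})
threshold = 10  # a local variable in the calling frame

# '@threshold' is resolved through the is_local branch shown above.
print(df.query("price > @threshold"))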
20,262
pandas-dev/pandas
pandas/core/computation/scope.py
Scope.swapkey
def swapkey(self, old_key, new_key, new_value=None): """Replace a variable name, with a potentially new value. Parameters ---------- old_key : str Current variable name to replace new_key : str New variable name to replace `old_key` with new_value : object Value to be replaced along with the possible renaming """ if self.has_resolvers: maps = self.resolvers.maps + self.scope.maps else: maps = self.scope.maps maps.append(self.temps) for mapping in maps: if old_key in mapping: mapping[new_key] = new_value return
python
def swapkey(self, old_key, new_key, new_value=None): """Replace a variable name, with a potentially new value. Parameters ---------- old_key : str Current variable name to replace new_key : str New variable name to replace `old_key` with new_value : object Value to be replaced along with the possible renaming """ if self.has_resolvers: maps = self.resolvers.maps + self.scope.maps else: maps = self.scope.maps maps.append(self.temps) for mapping in maps: if old_key in mapping: mapping[new_key] = new_value return
[ "def", "swapkey", "(", "self", ",", "old_key", ",", "new_key", ",", "new_value", "=", "None", ")", ":", "if", "self", ".", "has_resolvers", ":", "maps", "=", "self", ".", "resolvers", ".", "maps", "+", "self", ".", "scope", ".", "maps", "else", ":", "maps", "=", "self", ".", "scope", ".", "maps", "maps", ".", "append", "(", "self", ".", "temps", ")", "for", "mapping", "in", "maps", ":", "if", "old_key", "in", "mapping", ":", "mapping", "[", "new_key", "]", "=", "new_value", "return" ]
Replace a variable name, with a potentially new value. Parameters ---------- old_key : str Current variable name to replace new_key : str New variable name to replace `old_key` with new_value : object Value to be replaced along with the possible renaming
[ "Replace", "a", "variable", "name", "with", "a", "potentially", "new", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L197-L219
20,263
pandas-dev/pandas
pandas/core/computation/scope.py
Scope._get_vars
def _get_vars(self, stack, scopes): """Get specifically scoped variables from a list of stack frames. Parameters ---------- stack : list A list of stack frames as returned by ``inspect.stack()`` scopes : sequence of strings A sequence containing valid stack frame attribute names that evaluate to a dictionary. For example, ('locals', 'globals') """ variables = itertools.product(scopes, stack) for scope, (frame, _, _, _, _, _) in variables: try: d = getattr(frame, 'f_' + scope) self.scope = self.scope.new_child(d) finally: # won't remove it, but DECREF it # in Py3 this probably isn't necessary since frame won't be # scope after the loop del frame
python
def _get_vars(self, stack, scopes): """Get specifically scoped variables from a list of stack frames. Parameters ---------- stack : list A list of stack frames as returned by ``inspect.stack()`` scopes : sequence of strings A sequence containing valid stack frame attribute names that evaluate to a dictionary. For example, ('locals', 'globals') """ variables = itertools.product(scopes, stack) for scope, (frame, _, _, _, _, _) in variables: try: d = getattr(frame, 'f_' + scope) self.scope = self.scope.new_child(d) finally: # won't remove it, but DECREF it # in Py3 this probably isn't necessary since frame won't be # scope after the loop del frame
[ "def", "_get_vars", "(", "self", ",", "stack", ",", "scopes", ")", ":", "variables", "=", "itertools", ".", "product", "(", "scopes", ",", "stack", ")", "for", "scope", ",", "(", "frame", ",", "_", ",", "_", ",", "_", ",", "_", ",", "_", ")", "in", "variables", ":", "try", ":", "d", "=", "getattr", "(", "frame", ",", "'f_'", "+", "scope", ")", "self", ".", "scope", "=", "self", ".", "scope", ".", "new_child", "(", "d", ")", "finally", ":", "# won't remove it, but DECREF it", "# in Py3 this probably isn't necessary since frame won't be", "# scope after the loop", "del", "frame" ]
Get specifically scoped variables from a list of stack frames. Parameters ---------- stack : list A list of stack frames as returned by ``inspect.stack()`` scopes : sequence of strings A sequence containing valid stack frame attribute names that evaluate to a dictionary. For example, ('locals', 'globals')
[ "Get", "specifically", "scoped", "variables", "from", "a", "list", "of", "stack", "frames", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L221-L241
20,264
pandas-dev/pandas
pandas/core/computation/scope.py
Scope.update
def update(self, level): """Update the current scope by going back `level` levels. Parameters ---------- level : int or None, optional, default None """ sl = level + 1 # add sl frames to the scope starting with the # most distant and overwriting with more current # makes sure that we can capture variable scope stack = inspect.stack() try: self._get_vars(stack[:sl], scopes=['locals']) finally: del stack[:], stack
python
def update(self, level): """Update the current scope by going back `level` levels. Parameters ---------- level : int or None, optional, default None """ sl = level + 1 # add sl frames to the scope starting with the # most distant and overwriting with more current # makes sure that we can capture variable scope stack = inspect.stack() try: self._get_vars(stack[:sl], scopes=['locals']) finally: del stack[:], stack
[ "def", "update", "(", "self", ",", "level", ")", ":", "sl", "=", "level", "+", "1", "# add sl frames to the scope starting with the", "# most distant and overwriting with more current", "# makes sure that we can capture variable scope", "stack", "=", "inspect", ".", "stack", "(", ")", "try", ":", "self", ".", "_get_vars", "(", "stack", "[", ":", "sl", "]", ",", "scopes", "=", "[", "'locals'", "]", ")", "finally", ":", "del", "stack", "[", ":", "]", ",", "stack" ]
Update the current scope by going back `level` levels. Parameters ---------- level : int or None, optional, default None
[ "Update", "the", "current", "scope", "by", "going", "back", "level", "levels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L243-L260
20,265
pandas-dev/pandas
pandas/core/computation/scope.py
Scope.add_tmp
def add_tmp(self, value): """Add a temporary variable to the scope. Parameters ---------- value : object An arbitrary object to be assigned to a temporary variable. Returns ------- name : basestring The name of the temporary variable created. """ name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__, num=self.ntemps, hex_id=_raw_hex_id(self)) # add to inner most scope assert name not in self.temps self.temps[name] = value assert name in self.temps # only increment if the variable gets put in the scope return name
python
def add_tmp(self, value): """Add a temporary variable to the scope. Parameters ---------- value : object An arbitrary object to be assigned to a temporary variable. Returns ------- name : basestring The name of the temporary variable created. """ name = '{name}_{num}_{hex_id}'.format(name=type(value).__name__, num=self.ntemps, hex_id=_raw_hex_id(self)) # add to inner most scope assert name not in self.temps self.temps[name] = value assert name in self.temps # only increment if the variable gets put in the scope return name
[ "def", "add_tmp", "(", "self", ",", "value", ")", ":", "name", "=", "'{name}_{num}_{hex_id}'", ".", "format", "(", "name", "=", "type", "(", "value", ")", ".", "__name__", ",", "num", "=", "self", ".", "ntemps", ",", "hex_id", "=", "_raw_hex_id", "(", "self", ")", ")", "# add to inner most scope", "assert", "name", "not", "in", "self", ".", "temps", "self", ".", "temps", "[", "name", "]", "=", "value", "assert", "name", "in", "self", ".", "temps", "# only increment if the variable gets put in the scope", "return", "name" ]
Add a temporary variable to the scope. Parameters ---------- value : object An arbitrary object to be assigned to a temporary variable. Returns ------- name : basestring The name of the temporary variable created.
[ "Add", "a", "temporary", "variable", "to", "the", "scope", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L262-L285
20,266
pandas-dev/pandas
pandas/core/computation/scope.py
Scope.full_scope
def full_scope(self): """Return the full scope for use with passing to engines transparently as a mapping. Returns ------- vars : DeepChainMap All variables in this scope. """ maps = [self.temps] + self.resolvers.maps + self.scope.maps return DeepChainMap(*maps)
python
def full_scope(self): """Return the full scope for use with passing to engines transparently as a mapping. Returns ------- vars : DeepChainMap All variables in this scope. """ maps = [self.temps] + self.resolvers.maps + self.scope.maps return DeepChainMap(*maps)
[ "def", "full_scope", "(", "self", ")", ":", "maps", "=", "[", "self", ".", "temps", "]", "+", "self", ".", "resolvers", ".", "maps", "+", "self", ".", "scope", ".", "maps", "return", "DeepChainMap", "(", "*", "maps", ")" ]
Return the full scope for use with passing to engines transparently as a mapping. Returns ------- vars : DeepChainMap All variables in this scope.
[ "Return", "the", "full", "scope", "for", "use", "with", "passing", "to", "engines", "transparently", "as", "a", "mapping", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/scope.py#L293-L303
20,267
pandas-dev/pandas
pandas/io/sas/sasreader.py
read_sas
def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, chunksize=None, iterator=False): """ Read SAS files stored as either XPORT or SAS7BDAT format files. Parameters ---------- filepath_or_buffer : string or file-like object Path to the SAS file. format : string {'xport', 'sas7bdat'} or None If None, file format is inferred from file extension. If 'xport' or 'sas7bdat', uses the corresponding format. index : identifier of index column, defaults to None Identifier of column that should be used as index of the DataFrame. encoding : string, default is None Encoding for text data. If None, text data are stored as raw bytes. chunksize : int Read file `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. Returns ------- DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader """ if format is None: buffer_error_msg = ("If this is a buffer object rather " "than a string name, you must specify " "a format string") filepath_or_buffer = _stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): raise ValueError(buffer_error_msg) fname = filepath_or_buffer.lower() if fname.endswith(".xpt"): format = "xport" elif fname.endswith(".sas7bdat"): format = "sas7bdat" else: raise ValueError("unable to infer format of SAS file") if format.lower() == 'xport': from pandas.io.sas.sas_xport import XportReader reader = XportReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) elif format.lower() == 'sas7bdat': from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) else: raise ValueError('unknown SAS format') if iterator or chunksize: return reader data = reader.read() reader.close() return data
python
def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, chunksize=None, iterator=False): """ Read SAS files stored as either XPORT or SAS7BDAT format files. Parameters ---------- filepath_or_buffer : string or file-like object Path to the SAS file. format : string {'xport', 'sas7bdat'} or None If None, file format is inferred from file extension. If 'xport' or 'sas7bdat', uses the corresponding format. index : identifier of index column, defaults to None Identifier of column that should be used as index of the DataFrame. encoding : string, default is None Encoding for text data. If None, text data are stored as raw bytes. chunksize : int Read file `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. Returns ------- DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader """ if format is None: buffer_error_msg = ("If this is a buffer object rather " "than a string name, you must specify " "a format string") filepath_or_buffer = _stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): raise ValueError(buffer_error_msg) fname = filepath_or_buffer.lower() if fname.endswith(".xpt"): format = "xport" elif fname.endswith(".sas7bdat"): format = "sas7bdat" else: raise ValueError("unable to infer format of SAS file") if format.lower() == 'xport': from pandas.io.sas.sas_xport import XportReader reader = XportReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) elif format.lower() == 'sas7bdat': from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize) else: raise ValueError('unknown SAS format') if iterator or chunksize: return reader data = reader.read() reader.close() return data
[ "def", "read_sas", "(", "filepath_or_buffer", ",", "format", "=", "None", ",", "index", "=", "None", ",", "encoding", "=", "None", ",", "chunksize", "=", "None", ",", "iterator", "=", "False", ")", ":", "if", "format", "is", "None", ":", "buffer_error_msg", "=", "(", "\"If this is a buffer object rather \"", "\"than a string name, you must specify \"", "\"a format string\"", ")", "filepath_or_buffer", "=", "_stringify_path", "(", "filepath_or_buffer", ")", "if", "not", "isinstance", "(", "filepath_or_buffer", ",", "str", ")", ":", "raise", "ValueError", "(", "buffer_error_msg", ")", "fname", "=", "filepath_or_buffer", ".", "lower", "(", ")", "if", "fname", ".", "endswith", "(", "\".xpt\"", ")", ":", "format", "=", "\"xport\"", "elif", "fname", ".", "endswith", "(", "\".sas7bdat\"", ")", ":", "format", "=", "\"sas7bdat\"", "else", ":", "raise", "ValueError", "(", "\"unable to infer format of SAS file\"", ")", "if", "format", ".", "lower", "(", ")", "==", "'xport'", ":", "from", "pandas", ".", "io", ".", "sas", ".", "sas_xport", "import", "XportReader", "reader", "=", "XportReader", "(", "filepath_or_buffer", ",", "index", "=", "index", ",", "encoding", "=", "encoding", ",", "chunksize", "=", "chunksize", ")", "elif", "format", ".", "lower", "(", ")", "==", "'sas7bdat'", ":", "from", "pandas", ".", "io", ".", "sas", ".", "sas7bdat", "import", "SAS7BDATReader", "reader", "=", "SAS7BDATReader", "(", "filepath_or_buffer", ",", "index", "=", "index", ",", "encoding", "=", "encoding", ",", "chunksize", "=", "chunksize", ")", "else", ":", "raise", "ValueError", "(", "'unknown SAS format'", ")", "if", "iterator", "or", "chunksize", ":", "return", "reader", "data", "=", "reader", ".", "read", "(", ")", "reader", ".", "close", "(", ")", "return", "data" ]
Read SAS files stored as either XPORT or SAS7BDAT format files. Parameters ---------- filepath_or_buffer : string or file-like object Path to the SAS file. format : string {'xport', 'sas7bdat'} or None If None, file format is inferred from file extension. If 'xport' or 'sas7bdat', uses the corresponding format. index : identifier of index column, defaults to None Identifier of column that should be used as index of the DataFrame. encoding : string, default is None Encoding for text data. If None, text data are stored as raw bytes. chunksize : int Read file `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. Returns ------- DataFrame if iterator=False and chunksize=None, else SAS7BDATReader or XportReader
[ "Read", "SAS", "files", "stored", "as", "either", "XPORT", "or", "SAS7BDAT", "format", "files", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sas/sasreader.py#L7-L66
20,268
pandas-dev/pandas
pandas/core/series.py
_coerce_method
def _coerce_method(converter): """ Install the scalar coercion methods. """ def wrapper(self): if len(self) == 1: return converter(self.iloc[0]) raise TypeError("cannot convert the series to " "{0}".format(str(converter))) wrapper.__name__ = "__{name}__".format(name=converter.__name__) return wrapper
python
def _coerce_method(converter): """ Install the scalar coercion methods. """ def wrapper(self): if len(self) == 1: return converter(self.iloc[0]) raise TypeError("cannot convert the series to " "{0}".format(str(converter))) wrapper.__name__ = "__{name}__".format(name=converter.__name__) return wrapper
[ "def", "_coerce_method", "(", "converter", ")", ":", "def", "wrapper", "(", "self", ")", ":", "if", "len", "(", "self", ")", "==", "1", ":", "return", "converter", "(", "self", ".", "iloc", "[", "0", "]", ")", "raise", "TypeError", "(", "\"cannot convert the series to \"", "\"{0}\"", ".", "format", "(", "str", "(", "converter", ")", ")", ")", "wrapper", ".", "__name__", "=", "\"__{name}__\"", ".", "format", "(", "name", "=", "converter", ".", "__name__", ")", "return", "wrapper" ]
Install the scalar coercion methods.
[ "Install", "the", "scalar", "coercion", "methods", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L80-L92
20,269
pandas-dev/pandas
pandas/core/series.py
Series._init_dict
def _init_dict(self, data, index=None, dtype=None): """ Derive the "_data" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series index : Index or index-like, default None index for the new Series: if None, use dict keys dtype : dtype, default None dtype for the new Series: if None, infer from data Returns ------- _data : BlockManager for the new Series index : index for the new Series """ # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: keys, values = zip(*data.items()) values = list(values) elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. values = na_value_for_dtype(dtype) keys = index else: keys, values = [], [] # Input is now list-like, so rely on "standard" construction: s = Series(values, index=keys, dtype=dtype) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) elif not PY36 and not isinstance(data, OrderedDict) and data: # Need the `and data` to avoid sorting Series(None, index=[...]) # since that isn't really dict-like try: s = s.sort_index() except TypeError: pass return s._data, s.index
python
def _init_dict(self, data, index=None, dtype=None): """ Derive the "_data" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series index : Index or index-like, default None index for the new Series: if None, use dict keys dtype : dtype, default None dtype for the new Series: if None, infer from data Returns ------- _data : BlockManager for the new Series index : index for the new Series """ # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')] # raises KeyError), so we iterate the entire dict, and align if data: keys, values = zip(*data.items()) values = list(values) elif index is not None: # fastpath for Series(data=None). Just use broadcasting a scalar # instead of reindexing. values = na_value_for_dtype(dtype) keys = index else: keys, values = [], [] # Input is now list-like, so rely on "standard" construction: s = Series(values, index=keys, dtype=dtype) # Now we just make sure the order is respected, if any if data and index is not None: s = s.reindex(index, copy=False) elif not PY36 and not isinstance(data, OrderedDict) and data: # Need the `and data` to avoid sorting Series(None, index=[...]) # since that isn't really dict-like try: s = s.sort_index() except TypeError: pass return s._data, s.index
[ "def", "_init_dict", "(", "self", ",", "data", ",", "index", "=", "None", ",", "dtype", "=", "None", ")", ":", "# Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]", "# raises KeyError), so we iterate the entire dict, and align", "if", "data", ":", "keys", ",", "values", "=", "zip", "(", "*", "data", ".", "items", "(", ")", ")", "values", "=", "list", "(", "values", ")", "elif", "index", "is", "not", "None", ":", "# fastpath for Series(data=None). Just use broadcasting a scalar", "# instead of reindexing.", "values", "=", "na_value_for_dtype", "(", "dtype", ")", "keys", "=", "index", "else", ":", "keys", ",", "values", "=", "[", "]", ",", "[", "]", "# Input is now list-like, so rely on \"standard\" construction:", "s", "=", "Series", "(", "values", ",", "index", "=", "keys", ",", "dtype", "=", "dtype", ")", "# Now we just make sure the order is respected, if any", "if", "data", "and", "index", "is", "not", "None", ":", "s", "=", "s", ".", "reindex", "(", "index", ",", "copy", "=", "False", ")", "elif", "not", "PY36", "and", "not", "isinstance", "(", "data", ",", "OrderedDict", ")", "and", "data", ":", "# Need the `and data` to avoid sorting Series(None, index=[...])", "# since that isn't really dict-like", "try", ":", "s", "=", "s", ".", "sort_index", "(", ")", "except", "TypeError", ":", "pass", "return", "s", ".", "_data", ",", "s", ".", "index" ]
Derive the "_data" and "index" attributes of a new Series from a dictionary input. Parameters ---------- data : dict or dict-like Data used to populate the new Series index : Index or index-like, default None index for the new Series: if None, use dict keys dtype : dtype, default None dtype for the new Series: if None, infer from data Returns ------- _data : BlockManager for the new Series index : index for the new Series
[ "Derive", "the", "_data", "and", "index", "attributes", "of", "a", "new", "Series", "from", "a", "dictionary", "input", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L267-L312
20,270
pandas-dev/pandas
pandas/core/series.py
Series.from_array
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False): """ Construct Series from array. .. deprecated :: 0.23.0 Use pd.Series(..) constructor instead. """ warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.Series(..) " "constructor instead.", FutureWarning, stacklevel=2) if isinstance(arr, ABCSparseArray): from pandas.core.sparse.series import SparseSeries cls = SparseSeries return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath)
python
def from_array(cls, arr, index=None, name=None, dtype=None, copy=False, fastpath=False): """ Construct Series from array. .. deprecated :: 0.23.0 Use pd.Series(..) constructor instead. """ warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.Series(..) " "constructor instead.", FutureWarning, stacklevel=2) if isinstance(arr, ABCSparseArray): from pandas.core.sparse.series import SparseSeries cls = SparseSeries return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath)
[ "def", "from_array", "(", "cls", ",", "arr", ",", "index", "=", "None", ",", "name", "=", "None", ",", "dtype", "=", "None", ",", "copy", "=", "False", ",", "fastpath", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"'from_array' is deprecated and will be removed in a \"", "\"future version. Please use the pd.Series(..) \"", "\"constructor instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "if", "isinstance", "(", "arr", ",", "ABCSparseArray", ")", ":", "from", "pandas", ".", "core", ".", "sparse", ".", "series", "import", "SparseSeries", "cls", "=", "SparseSeries", "return", "cls", "(", "arr", ",", "index", "=", "index", ",", "name", "=", "name", ",", "dtype", "=", "dtype", ",", "copy", "=", "copy", ",", "fastpath", "=", "fastpath", ")" ]
Construct Series from array. .. deprecated :: 0.23.0 Use pd.Series(..) constructor instead.
[ "Construct", "Series", "from", "array", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L315-L330
20,271
pandas-dev/pandas
pandas/core/series.py
Series._set_axis
def _set_axis(self, axis, labels, fastpath=False): """ Override generic, we want to set the _typ here. """ if not fastpath: labels = ensure_index(labels) is_all_dates = labels.is_all_dates if is_all_dates: if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): try: labels = DatetimeIndex(labels) # need to set here because we changed the index if fastpath: self._data.set_axis(axis, labels) except (tslibs.OutOfBoundsDatetime, ValueError): # labels may exceeds datetime bounds, # or not be a DatetimeIndex pass self._set_subtyp(is_all_dates) object.__setattr__(self, '_index', labels) if not fastpath: self._data.set_axis(axis, labels)
python
def _set_axis(self, axis, labels, fastpath=False): """ Override generic, we want to set the _typ here. """ if not fastpath: labels = ensure_index(labels) is_all_dates = labels.is_all_dates if is_all_dates: if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): try: labels = DatetimeIndex(labels) # need to set here because we changed the index if fastpath: self._data.set_axis(axis, labels) except (tslibs.OutOfBoundsDatetime, ValueError): # labels may exceeds datetime bounds, # or not be a DatetimeIndex pass self._set_subtyp(is_all_dates) object.__setattr__(self, '_index', labels) if not fastpath: self._data.set_axis(axis, labels)
[ "def", "_set_axis", "(", "self", ",", "axis", ",", "labels", ",", "fastpath", "=", "False", ")", ":", "if", "not", "fastpath", ":", "labels", "=", "ensure_index", "(", "labels", ")", "is_all_dates", "=", "labels", ".", "is_all_dates", "if", "is_all_dates", ":", "if", "not", "isinstance", "(", "labels", ",", "(", "DatetimeIndex", ",", "PeriodIndex", ",", "TimedeltaIndex", ")", ")", ":", "try", ":", "labels", "=", "DatetimeIndex", "(", "labels", ")", "# need to set here because we changed the index", "if", "fastpath", ":", "self", ".", "_data", ".", "set_axis", "(", "axis", ",", "labels", ")", "except", "(", "tslibs", ".", "OutOfBoundsDatetime", ",", "ValueError", ")", ":", "# labels may exceeds datetime bounds,", "# or not be a DatetimeIndex", "pass", "self", ".", "_set_subtyp", "(", "is_all_dates", ")", "object", ".", "__setattr__", "(", "self", ",", "'_index'", ",", "labels", ")", "if", "not", "fastpath", ":", "self", ".", "_data", ".", "set_axis", "(", "axis", ",", "labels", ")" ]
Override generic, we want to set the _typ here.
[ "Override", "generic", "we", "want", "to", "set", "the", "_typ", "here", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L350-L376
20,272
pandas-dev/pandas
pandas/core/series.py
Series.asobject
def asobject(self): """ Return object Series which contains boxed values. .. deprecated :: 0.23.0 Use ``astype(object)`` instead. *this is an internal non-public method* """ warnings.warn("'asobject' is deprecated. Use 'astype(object)'" " instead", FutureWarning, stacklevel=2) return self.astype(object).values
python
def asobject(self): """ Return object Series which contains boxed values. .. deprecated :: 0.23.0 Use ``astype(object)`` instead. *this is an internal non-public method* """ warnings.warn("'asobject' is deprecated. Use 'astype(object)'" " instead", FutureWarning, stacklevel=2) return self.astype(object).values
[ "def", "asobject", "(", "self", ")", ":", "warnings", ".", "warn", "(", "\"'asobject' is deprecated. Use 'astype(object)'\"", "\" instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "astype", "(", "object", ")", ".", "values" ]
Return object Series which contains boxed values. .. deprecated :: 0.23.0 Use ``astype(object)`` instead. *this is an internal non-public method*
[ "Return", "object", "Series", "which", "contains", "boxed", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L493-L505
20,273
pandas-dev/pandas
pandas/core/series.py
Series.compress
def compress(self, condition, *args, **kwargs): """ Return selected slices of an array along given axis as a Series. .. deprecated:: 0.24.0 See Also -------- numpy.ndarray.compress """ msg = ("Series.compress(condition) is deprecated. " "Use 'Series[condition]' or " "'np.asarray(series).compress(condition)' instead.") warnings.warn(msg, FutureWarning, stacklevel=2) nv.validate_compress(args, kwargs) return self[condition]
python
def compress(self, condition, *args, **kwargs): """ Return selected slices of an array along given axis as a Series. .. deprecated:: 0.24.0 See Also -------- numpy.ndarray.compress """ msg = ("Series.compress(condition) is deprecated. " "Use 'Series[condition]' or " "'np.asarray(series).compress(condition)' instead.") warnings.warn(msg, FutureWarning, stacklevel=2) nv.validate_compress(args, kwargs) return self[condition]
[ "def", "compress", "(", "self", ",", "condition", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "msg", "=", "(", "\"Series.compress(condition) is deprecated. \"", "\"Use 'Series[condition]' or \"", "\"'np.asarray(series).compress(condition)' instead.\"", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "nv", ".", "validate_compress", "(", "args", ",", "kwargs", ")", "return", "self", "[", "condition", "]" ]
Return selected slices of an array along given axis as a Series. .. deprecated:: 0.24.0 See Also -------- numpy.ndarray.compress
[ "Return", "selected", "slices", "of", "an", "array", "along", "given", "axis", "as", "a", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L523-L538
20,274
pandas-dev/pandas
pandas/core/series.py
Series.view
def view(self, dtype=None): """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ return self._constructor(self._values.view(dtype), index=self.index).__finalize__(self)
python
def view(self, dtype=None): """ Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8 """ return self._constructor(self._values.view(dtype), index=self.index).__finalize__(self)
[ "def", "view", "(", "self", ",", "dtype", "=", "None", ")", ":", "return", "self", ".", "_constructor", "(", "self", ".", "_values", ".", "view", "(", "dtype", ")", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")" ]
Create a new view of the Series. This function will return a new Series with a view of the same underlying values in memory, optionally reinterpreted with a new data type. The new data type must preserve the same size in bytes as to not cause index misalignment. Parameters ---------- dtype : data type Data type object or one of their string representations. Returns ------- Series A new Series object as a view of the same data in memory. See Also -------- numpy.ndarray.view : Equivalent numpy function to create a new view of the same data in memory. Notes ----- Series are instantiated with ``dtype=float64`` by default. While ``numpy.ndarray.view()`` will return a view with the same data type as the original array, ``Series.view()`` (without specified dtype) will try using ``float64`` and may fail if the original data type size in bytes is not the same. Examples -------- >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') >>> s 0 -2 1 -1 2 0 3 1 4 2 dtype: int8 The 8 bit signed integer representation of `-1` is `0b11111111`, but the same bytes represent 255 if read as an 8 bit unsigned integer: >>> us = s.view('uint8') >>> us 0 254 1 255 2 0 3 1 4 2 dtype: uint8 The views share the same underlying values: >>> us[0] = 128 >>> s 0 -128 1 -1 2 0 3 1 4 2 dtype: int8
[ "Create", "a", "new", "view", "of", "the", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L598-L665
20,275
pandas-dev/pandas
pandas/core/series.py
Series._ixs
def _ixs(self, i, axis=0): """ Return the i-th value or values in the Series by location. Parameters ---------- i : int, slice, or sequence of integers Returns ------- scalar (int) or Series (slice, sequence) """ try: # dispatch to the values if we need values = self._values if isinstance(values, np.ndarray): return libindex.get_value_at(values, i) else: return values[i] except IndexError: raise except Exception: if isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) else: label = self.index[i] if isinstance(label, Index): return self.take(i, axis=axis, convert=True) else: return libindex.get_value_at(self, i)
python
def _ixs(self, i, axis=0): """ Return the i-th value or values in the Series by location. Parameters ---------- i : int, slice, or sequence of integers Returns ------- scalar (int) or Series (slice, sequence) """ try: # dispatch to the values if we need values = self._values if isinstance(values, np.ndarray): return libindex.get_value_at(values, i) else: return values[i] except IndexError: raise except Exception: if isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) else: label = self.index[i] if isinstance(label, Index): return self.take(i, axis=axis, convert=True) else: return libindex.get_value_at(self, i)
[ "def", "_ixs", "(", "self", ",", "i", ",", "axis", "=", "0", ")", ":", "try", ":", "# dispatch to the values if we need", "values", "=", "self", ".", "_values", "if", "isinstance", "(", "values", ",", "np", ".", "ndarray", ")", ":", "return", "libindex", ".", "get_value_at", "(", "values", ",", "i", ")", "else", ":", "return", "values", "[", "i", "]", "except", "IndexError", ":", "raise", "except", "Exception", ":", "if", "isinstance", "(", "i", ",", "slice", ")", ":", "indexer", "=", "self", ".", "index", ".", "_convert_slice_indexer", "(", "i", ",", "kind", "=", "'iloc'", ")", "return", "self", ".", "_get_values", "(", "indexer", ")", "else", ":", "label", "=", "self", ".", "index", "[", "i", "]", "if", "isinstance", "(", "label", ",", "Index", ")", ":", "return", "self", ".", "take", "(", "i", ",", "axis", "=", "axis", ",", "convert", "=", "True", ")", "else", ":", "return", "libindex", ".", "get_value_at", "(", "self", ",", "i", ")" ]
Return the i-th value or values in the Series by location. Parameters ---------- i : int, slice, or sequence of integers Returns ------- scalar (int) or Series (slice, sequence)
[ "Return", "the", "i", "-", "th", "value", "or", "values", "in", "the", "Series", "by", "location", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L824-L855
20,276
pandas-dev/pandas
pandas/core/series.py
Series.repeat
def repeat(self, repeats, axis=None): """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat(tuple(), dict(axis=axis)) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index).__finalize__(self)
python
def repeat(self, repeats, axis=None): """ Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object """ nv.validate_repeat(tuple(), dict(axis=axis)) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index).__finalize__(self)
[ "def", "repeat", "(", "self", ",", "repeats", ",", "axis", "=", "None", ")", ":", "nv", ".", "validate_repeat", "(", "tuple", "(", ")", ",", "dict", "(", "axis", "=", "axis", ")", ")", "new_index", "=", "self", ".", "index", ".", "repeat", "(", "repeats", ")", "new_values", "=", "self", ".", "_values", ".", "repeat", "(", "repeats", ")", "return", "self", ".", "_constructor", "(", "new_values", ",", "index", "=", "new_index", ")", ".", "__finalize__", "(", "self", ")" ]
Repeat elements of a Series. Returns a new Series where each element of the current Series is repeated consecutively a given number of times. Parameters ---------- repeats : int or array of ints The number of repetitions for each element. This should be a non-negative integer. Repeating 0 times will return an empty Series. axis : None Must be ``None``. Has no effect but is accepted for compatibility with numpy. Returns ------- Series Newly created Series with repeated elements. See Also -------- Index.repeat : Equivalent function for Index. numpy.repeat : Similar method for :class:`numpy.ndarray`. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object >>> s.repeat(2) 0 a 0 a 1 b 1 b 2 c 2 c dtype: object >>> s.repeat([1, 2, 3]) 0 a 1 b 1 b 2 c 2 c 2 c dtype: object
[ "Repeat", "elements", "of", "a", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1105-L1161
20,277
pandas-dev/pandas
pandas/core/series.py
Series.reset_index
def reset_index(self, level=None, drop=False, name=None, inplace=False): """ Generate a new DataFrame or Series with the index reset. This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). Returns ------- Series or DataFrame When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 To update the Series in place, without generating a new one set `inplace` to True. Note that it also requires ``drop=True``. >>> s.reset_index(inplace=True, drop=True) >>> s 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, 'inplace') if drop: new_index = ibase.default_index(len(self)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if inplace: self.index = new_index # set name if it was passed, otherwise, keep the previous name self.name = name or self.name else: return self._constructor(self._values.copy(), index=new_index).__finalize__(self) elif inplace: raise TypeError('Cannot reset_index inplace on a Series ' 'to create a DataFrame') else: df = self.to_frame(name) return df.reset_index(level=level, drop=drop)
python
def reset_index(self, level=None, drop=False, name=None, inplace=False): """ Generate a new DataFrame or Series with the index reset. This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). Returns ------- Series or DataFrame When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 To update the Series in place, without generating a new one set `inplace` to True. Note that it also requires ``drop=True``. >>> s.reset_index(inplace=True, drop=True) >>> s 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3 """ inplace = validate_bool_kwarg(inplace, 'inplace') if drop: new_index = ibase.default_index(len(self)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if inplace: self.index = new_index # set name if it was passed, otherwise, keep the previous name self.name = name or self.name else: return self._constructor(self._values.copy(), index=new_index).__finalize__(self) elif inplace: raise TypeError('Cannot reset_index inplace on a Series ' 'to create a DataFrame') else: df = self.to_frame(name) return df.reset_index(level=level, drop=drop)
[ "def", "reset_index", "(", "self", ",", "level", "=", "None", ",", "drop", "=", "False", ",", "name", "=", "None", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "drop", ":", "new_index", "=", "ibase", ".", "default_index", "(", "len", "(", "self", ")", ")", "if", "level", "is", "not", "None", ":", "if", "not", "isinstance", "(", "level", ",", "(", "tuple", ",", "list", ")", ")", ":", "level", "=", "[", "level", "]", "level", "=", "[", "self", ".", "index", ".", "_get_level_number", "(", "lev", ")", "for", "lev", "in", "level", "]", "if", "len", "(", "level", ")", "<", "self", ".", "index", ".", "nlevels", ":", "new_index", "=", "self", ".", "index", ".", "droplevel", "(", "level", ")", "if", "inplace", ":", "self", ".", "index", "=", "new_index", "# set name if it was passed, otherwise, keep the previous name", "self", ".", "name", "=", "name", "or", "self", ".", "name", "else", ":", "return", "self", ".", "_constructor", "(", "self", ".", "_values", ".", "copy", "(", ")", ",", "index", "=", "new_index", ")", ".", "__finalize__", "(", "self", ")", "elif", "inplace", ":", "raise", "TypeError", "(", "'Cannot reset_index inplace on a Series '", "'to create a DataFrame'", ")", "else", ":", "df", "=", "self", ".", "to_frame", "(", "name", ")", "return", "df", ".", "reset_index", "(", "level", "=", "level", ",", "drop", "=", "drop", ")" ]
Generate a new DataFrame or Series with the index reset. This is useful when the index needs to be treated as a column, or when the index is meaningless and needs to be reset to the default before another operation. Parameters ---------- level : int, str, tuple, or list, default optional For a Series with a MultiIndex, only remove the specified levels from the index. Removes all levels by default. drop : bool, default False Just reset the index, without inserting it as a column in the new DataFrame. name : object, optional The name to use for the column containing the original Series values. Uses ``self.name`` by default. This argument is ignored when `drop` is True. inplace : bool, default False Modify the Series in place (do not create a new object). Returns ------- Series or DataFrame When `drop` is False (the default), a DataFrame is returned. The newly created columns will come first in the DataFrame, followed by the original Series values. When `drop` is True, a `Series` is returned. In either case, if ``inplace=True``, no value is returned. See Also -------- DataFrame.reset_index: Analogous function for DataFrame. Examples -------- >>> s = pd.Series([1, 2, 3, 4], name='foo', ... index=pd.Index(['a', 'b', 'c', 'd'], name='idx')) Generate a DataFrame with default index. >>> s.reset_index() idx foo 0 a 1 1 b 2 2 c 3 3 d 4 To specify the name of the new column use `name`. >>> s.reset_index(name='values') idx values 0 a 1 1 b 2 2 c 3 3 d 4 To generate a new Series with the default set `drop` to True. >>> s.reset_index(drop=True) 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 To update the Series in place, without generating a new one set `inplace` to True. Note that it also requires ``drop=True``. >>> s.reset_index(inplace=True, drop=True) >>> s 0 1 1 2 2 3 3 4 Name: foo, dtype: int64 The `level` parameter is interesting for Series with a multi-level index. >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']), ... np.array(['one', 'two', 'one', 'two'])] >>> s2 = pd.Series( ... range(4), name='foo', ... index=pd.MultiIndex.from_arrays(arrays, ... names=['a', 'b'])) To remove a specific level from the Index, use `level`. >>> s2.reset_index(level='a') a foo b one bar 0 two bar 1 one baz 2 two baz 3 If `level` is not set, all levels are removed from the Index. >>> s2.reset_index() a b foo 0 bar one 0 1 bar two 1 2 baz one 2 3 baz two 3
[ "Generate", "a", "new", "DataFrame", "or", "Series", "with", "the", "index", "reset", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1235-L1365
20,278
pandas-dev/pandas
pandas/core/series.py
Series.to_string
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, index=True, length=False, dtype=False, name=False, max_rows=None): """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter(self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, max_rows=max_rows) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError("result must be of type unicode, type" " of result is {0!r}" "".format(result.__class__.__name__)) if buf is None: return result else: try: buf.write(result) except AttributeError: with open(buf, 'w') as f: f.write(result)
python
def to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, index=True, length=False, dtype=False, name=False, max_rows=None): """ Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. Returns ------- str or None String representation of Series if ``buf=None``, otherwise None. """ formatter = fmt.SeriesFormatter(self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, max_rows=max_rows) result = formatter.to_string() # catch contract violations if not isinstance(result, str): raise AssertionError("result must be of type unicode, type" " of result is {0!r}" "".format(result.__class__.__name__)) if buf is None: return result else: try: buf.write(result) except AttributeError: with open(buf, 'w') as f: f.write(result)
[ "def", "to_string", "(", "self", ",", "buf", "=", "None", ",", "na_rep", "=", "'NaN'", ",", "float_format", "=", "None", ",", "header", "=", "True", ",", "index", "=", "True", ",", "length", "=", "False", ",", "dtype", "=", "False", ",", "name", "=", "False", ",", "max_rows", "=", "None", ")", ":", "formatter", "=", "fmt", ".", "SeriesFormatter", "(", "self", ",", "name", "=", "name", ",", "length", "=", "length", ",", "header", "=", "header", ",", "index", "=", "index", ",", "dtype", "=", "dtype", ",", "na_rep", "=", "na_rep", ",", "float_format", "=", "float_format", ",", "max_rows", "=", "max_rows", ")", "result", "=", "formatter", ".", "to_string", "(", ")", "# catch contract violations", "if", "not", "isinstance", "(", "result", ",", "str", ")", ":", "raise", "AssertionError", "(", "\"result must be of type unicode, type\"", "\" of result is {0!r}\"", "\"\"", ".", "format", "(", "result", ".", "__class__", ".", "__name__", ")", ")", "if", "buf", "is", "None", ":", "return", "result", "else", ":", "try", ":", "buf", ".", "write", "(", "result", ")", "except", "AttributeError", ":", "with", "open", "(", "buf", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "result", ")" ]
Render a string representation of the Series. Parameters ---------- buf : StringIO-like, optional Buffer to write to. na_rep : str, optional String representation of NaN to use, default 'NaN'. float_format : one-parameter function, optional Formatter function to apply to columns' elements if they are floats, default None. header : bool, default True Add the Series header (index name). index : bool, optional Add index (row) labels, default True. length : bool, default False Add the Series length. dtype : bool, default False Add the Series dtype. name : bool, default False Add the Series name if not None. max_rows : int, optional Maximum number of rows to show before truncating. If None, show all. Returns ------- str or None String representation of Series if ``buf=None``, otherwise None.
[ "Render", "a", "string", "representation", "of", "the", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1386-L1441
20,279
pandas-dev/pandas
pandas/core/series.py
Series.to_frame
def to_frame(self, name=None): """ Convert Series to DataFrame. Parameters ---------- name : object, default None The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ if name is None: df = self._constructor_expanddim(self) else: df = self._constructor_expanddim({name: self}) return df
python
def to_frame(self, name=None): """ Convert Series to DataFrame. Parameters ---------- name : object, default None The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c """ if name is None: df = self._constructor_expanddim(self) else: df = self._constructor_expanddim({name: self}) return df
[ "def", "to_frame", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "df", "=", "self", ".", "_constructor_expanddim", "(", "self", ")", "else", ":", "df", "=", "self", ".", "_constructor_expanddim", "(", "{", "name", ":", "self", "}", ")", "return", "df" ]
Convert Series to DataFrame. Parameters ---------- name : object, default None The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. Examples -------- >>> s = pd.Series(["a", "b", "c"], ... name="vals") >>> s.to_frame() vals 0 a 1 b 2 c
[ "Convert", "Series", "to", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1520-L1550
20,280
pandas-dev/pandas
pandas/core/series.py
Series.to_sparse
def to_sparse(self, kind='block', fill_value=None): """ Convert Series to SparseSeries. Parameters ---------- kind : {'block', 'integer'}, default 'block' fill_value : float, defaults to NaN (missing) Value to use for filling NaN values. Returns ------- SparseSeries Sparse representation of the Series. """ # TODO: deprecate from pandas.core.sparse.series import SparseSeries values = SparseArray(self, kind=kind, fill_value=fill_value) return SparseSeries( values, index=self.index, name=self.name ).__finalize__(self)
python
def to_sparse(self, kind='block', fill_value=None): """ Convert Series to SparseSeries. Parameters ---------- kind : {'block', 'integer'}, default 'block' fill_value : float, defaults to NaN (missing) Value to use for filling NaN values. Returns ------- SparseSeries Sparse representation of the Series. """ # TODO: deprecate from pandas.core.sparse.series import SparseSeries values = SparseArray(self, kind=kind, fill_value=fill_value) return SparseSeries( values, index=self.index, name=self.name ).__finalize__(self)
[ "def", "to_sparse", "(", "self", ",", "kind", "=", "'block'", ",", "fill_value", "=", "None", ")", ":", "# TODO: deprecate", "from", "pandas", ".", "core", ".", "sparse", ".", "series", "import", "SparseSeries", "values", "=", "SparseArray", "(", "self", ",", "kind", "=", "kind", ",", "fill_value", "=", "fill_value", ")", "return", "SparseSeries", "(", "values", ",", "index", "=", "self", ".", "index", ",", "name", "=", "self", ".", "name", ")", ".", "__finalize__", "(", "self", ")" ]
Convert Series to SparseSeries. Parameters ---------- kind : {'block', 'integer'}, default 'block' fill_value : float, defaults to NaN (missing) Value to use for filling NaN values. Returns ------- SparseSeries Sparse representation of the Series.
[ "Convert", "Series", "to", "SparseSeries", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1552-L1573
20,281
pandas-dev/pandas
pandas/core/series.py
Series._set_name
def _set_name(self, name, inplace=False): """ Set the Series name. Parameters ---------- name : str inplace : bool whether to modify `self` directly or return a copy """ inplace = validate_bool_kwarg(inplace, 'inplace') ser = self if inplace else self.copy() ser.name = name return ser
python
def _set_name(self, name, inplace=False): """ Set the Series name. Parameters ---------- name : str inplace : bool whether to modify `self` directly or return a copy """ inplace = validate_bool_kwarg(inplace, 'inplace') ser = self if inplace else self.copy() ser.name = name return ser
[ "def", "_set_name", "(", "self", ",", "name", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "ser", "=", "self", "if", "inplace", "else", "self", ".", "copy", "(", ")", "ser", ".", "name", "=", "name", "return", "ser" ]
Set the Series name. Parameters ---------- name : str inplace : bool whether to modify `self` directly or return a copy
[ "Set", "the", "Series", "name", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1575-L1588
20,282
pandas-dev/pandas
pandas/core/series.py
Series.drop_duplicates
def drop_duplicates(self, keep='first', inplace=False): """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. Returns ------- Series Series with duplicates dropped. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Examples -------- Generate an Series with duplicated entries. >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. Setting the value of 'inplace' to ``True`` performs the operation inplace and returns ``None``. >>> s.drop_duplicates(keep=False, inplace=True) >>> s 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ return super().drop_duplicates(keep=keep, inplace=inplace)
python
def drop_duplicates(self, keep='first', inplace=False): """ Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. Returns ------- Series Series with duplicates dropped. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Examples -------- Generate an Series with duplicated entries. >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. Setting the value of 'inplace' to ``True`` performs the operation inplace and returns ``None``. >>> s.drop_duplicates(keep=False, inplace=True) >>> s 1 cow 3 beetle 5 hippo Name: animal, dtype: object """ return super().drop_duplicates(keep=keep, inplace=inplace)
[ "def", "drop_duplicates", "(", "self", ",", "keep", "=", "'first'", ",", "inplace", "=", "False", ")", ":", "return", "super", "(", ")", ".", "drop_duplicates", "(", "keep", "=", "keep", ",", "inplace", "=", "inplace", ")" ]
Return Series with duplicate values removed. Parameters ---------- keep : {'first', 'last', ``False``}, default 'first' - 'first' : Drop duplicates except for the first occurrence. - 'last' : Drop duplicates except for the last occurrence. - ``False`` : Drop all duplicates. inplace : bool, default ``False`` If ``True``, performs operation inplace and returns None. Returns ------- Series Series with duplicates dropped. See Also -------- Index.drop_duplicates : Equivalent method on Index. DataFrame.drop_duplicates : Equivalent method on DataFrame. Series.duplicated : Related method on Series, indicating duplicate Series values. Examples -------- Generate an Series with duplicated entries. >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], ... name='animal') >>> s 0 lama 1 cow 2 lama 3 beetle 4 lama 5 hippo Name: animal, dtype: object With the 'keep' parameter, the selection behaviour of duplicated values can be changed. The value 'first' keeps the first occurrence for each set of duplicated entries. The default value of keep is 'first'. >>> s.drop_duplicates() 0 lama 1 cow 3 beetle 5 hippo Name: animal, dtype: object The value 'last' for parameter 'keep' keeps the last occurrence for each set of duplicated entries. >>> s.drop_duplicates(keep='last') 1 cow 3 beetle 4 lama 5 hippo Name: animal, dtype: object The value ``False`` for parameter 'keep' discards all sets of duplicated entries. Setting the value of 'inplace' to ``True`` performs the operation inplace and returns ``None``. >>> s.drop_duplicates(keep=False, inplace=True) >>> s 1 cow 3 beetle 5 hippo Name: animal, dtype: object
[ "Return", "Series", "with", "duplicate", "values", "removed", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1720-L1792
20,283
pandas-dev/pandas
pandas/core/series.py
Series.idxmin
def idxmin(self, axis=0, skipna=True, *args, **kwargs): """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmin. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) i = nanops.nanargmin(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i]
python
def idxmin(self, axis=0, skipna=True, *args, **kwargs): """ Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmin. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan """ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) i = nanops.nanargmin(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i]
[ "def", "idxmin", "(", "self", ",", "axis", "=", "0", ",", "skipna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "skipna", "=", "nv", ".", "validate_argmin_with_skipna", "(", "skipna", ",", "args", ",", "kwargs", ")", "i", "=", "nanops", ".", "nanargmin", "(", "com", ".", "values_from_object", "(", "self", ")", ",", "skipna", "=", "skipna", ")", "if", "i", "==", "-", "1", ":", "return", "np", ".", "nan", "return", "self", ".", "index", "[", "i", "]" ]
Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmin. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan
[ "Return", "the", "row", "label", "of", "the", "minimum", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1870-L1938
20,284
pandas-dev/pandas
pandas/core/series.py
Series.idxmax
def idxmax(self, axis=0, skipna=True, *args, **kwargs): """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmax. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i]
python
def idxmax(self, axis=0, skipna=True, *args, **kwargs): """ Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmax. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i]
[ "def", "idxmax", "(", "self", ",", "axis", "=", "0", ",", "skipna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "skipna", "=", "nv", ".", "validate_argmax_with_skipna", "(", "skipna", ",", "args", ",", "kwargs", ")", "i", "=", "nanops", ".", "nanargmax", "(", "com", ".", "values_from_object", "(", "self", ")", ",", "skipna", "=", "skipna", ")", "if", "i", "==", "-", "1", ":", "return", "np", ".", "nan", "return", "self", ".", "index", "[", "i", "]" ]
Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmax. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the maximum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmax : Return indices of the maximum values along the given axis. DataFrame.idxmax : Return index of first occurrence of maximum over requested axis. Series.idxmin : Return index *label* of the first occurrence of minimum of values. Notes ----- This method is the Series version of ``ndarray.argmax``. This method returns the label of the maximum, while ``ndarray.argmax`` returns the position. To get the position, use ``series.values.argmax()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], ... index=['A', 'B', 'C', 'D', 'E']) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmax(skipna=False) nan
[ "Return", "the", "row", "label", "of", "the", "maximum", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1940-L2009
20,285
pandas-dev/pandas
pandas/core/series.py
Series.round
def round(self, decimals=0, *args, **kwargs): """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = com.values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result
python
def round(self, decimals=0, *args, **kwargs): """ Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64 """ nv.validate_round(args, kwargs) result = com.values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result
[ "def", "round", "(", "self", ",", "decimals", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_round", "(", "args", ",", "kwargs", ")", "result", "=", "com", ".", "values_from_object", "(", "self", ")", ".", "round", "(", "decimals", ")", "result", "=", "self", ".", "_constructor", "(", "result", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")", "return", "result" ]
Round each value in a Series to the given number of decimals. Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Series Rounded values of the Series. See Also -------- numpy.around : Round values of an np.array. DataFrame.round : Round values of a DataFrame. Examples -------- >>> s = pd.Series([0.1, 1.3, 2.7]) >>> s.round() 0 0.0 1 1.0 2 3.0 dtype: float64
[ "Round", "each", "value", "in", "a", "Series", "to", "the", "given", "number", "of", "decimals", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2033-L2067
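The round docstring notes that a negative decimals value rounds to the left of the decimal point but does not show it; a small illustrative example with arbitrary values:
>>> import pandas as pd
>>> s = pd.Series([123.456, 987.654])
>>> s.round(-2)   # round to the nearest hundred
0     100.0
1    1000.0
dtype: float64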
20,286
pandas-dev/pandas
pandas/core/series.py
Series.quantile
def quantile(self, q=0.5, interpolation='linear'): """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. versionadded:: 0.18.0 This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile numpy.percentile Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ self._check_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name return self._constructor(result, index=Float64Index(q), name=self.name) else: # scalar return result.iloc[0]
python
def quantile(self, q=0.5, interpolation='linear'): """ Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. versionadded:: 0.18.0 This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile numpy.percentile Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64 """ self._check_percentile(q) # We dispatch to DataFrame so that core.internals only has to worry # about 2D cases. df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name return self._constructor(result, index=Float64Index(q), name=self.name) else: # scalar return result.iloc[0]
[ "def", "quantile", "(", "self", ",", "q", "=", "0.5", ",", "interpolation", "=", "'linear'", ")", ":", "self", ".", "_check_percentile", "(", "q", ")", "# We dispatch to DataFrame so that core.internals only has to worry", "# about 2D cases.", "df", "=", "self", ".", "to_frame", "(", ")", "result", "=", "df", ".", "quantile", "(", "q", "=", "q", ",", "interpolation", "=", "interpolation", ",", "numeric_only", "=", "False", ")", "if", "result", ".", "ndim", "==", "2", ":", "result", "=", "result", ".", "iloc", "[", ":", ",", "0", "]", "if", "is_list_like", "(", "q", ")", ":", "result", ".", "name", "=", "self", ".", "name", "return", "self", ".", "_constructor", "(", "result", ",", "index", "=", "Float64Index", "(", "q", ")", ",", "name", "=", "self", ".", "name", ")", "else", ":", "# scalar", "return", "result", ".", "iloc", "[", "0", "]" ]
Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} .. versionadded:: 0.18.0 This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- float or Series If ``q`` is an array, a Series will be returned where the index is ``q`` and the values are the quantiles, otherwise a float will be returned. See Also -------- core.window.Rolling.quantile numpy.percentile Examples -------- >>> s = pd.Series([1, 2, 3, 4]) >>> s.quantile(.5) 2.5 >>> s.quantile([.25, .5, .75]) 0.25 1.75 0.50 2.50 0.75 3.25 dtype: float64
[ "Return", "value", "at", "the", "given", "quantile", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2069-L2132
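To make the interpolation choices listed above concrete, a hedged sketch on arbitrary data where the requested quantile falls between two observations (2 and 3):
>>> import pandas as pd
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(0.5)                                  # 'linear' (default): i + (j - i) * fraction
2.5
>>> s.quantile(0.5, interpolation='midpoint')        # (i + j) / 2
2.5
>>> float(s.quantile(0.5, interpolation='lower'))    # lower neighbour i
2.0
>>> float(s.quantile(0.5, interpolation='higher'))   # higher neighbour j
3.0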
20,287
pandas-dev/pandas
pandas/core/series.py
Series.corr
def corr(self, other, method='pearson', min_periods=None): """ Compute correlation with `other` Series, excluding missing values. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan if method in ['pearson', 'spearman', 'kendall'] or callable(method): return nanops.nancorr(this.values, other.values, method=method, min_periods=min_periods) raise ValueError("method must be either 'pearson', " "'spearman', 'kendall', or a callable, " "'{method}' was supplied".format(method=method))
python
def corr(self, other, method='pearson', min_periods=None): """ Compute correlation with `other` Series, excluding missing values. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3 """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan if method in ['pearson', 'spearman', 'kendall'] or callable(method): return nanops.nancorr(this.values, other.values, method=method, min_periods=min_periods) raise ValueError("method must be either 'pearson', " "'spearman', 'kendall', or a callable, " "'{method}' was supplied".format(method=method))
[ "def", "corr", "(", "self", ",", "other", ",", "method", "=", "'pearson'", ",", "min_periods", "=", "None", ")", ":", "this", ",", "other", "=", "self", ".", "align", "(", "other", ",", "join", "=", "'inner'", ",", "copy", "=", "False", ")", "if", "len", "(", "this", ")", "==", "0", ":", "return", "np", ".", "nan", "if", "method", "in", "[", "'pearson'", ",", "'spearman'", ",", "'kendall'", "]", "or", "callable", "(", "method", ")", ":", "return", "nanops", ".", "nancorr", "(", "this", ".", "values", ",", "other", ".", "values", ",", "method", "=", "method", ",", "min_periods", "=", "min_periods", ")", "raise", "ValueError", "(", "\"method must be either 'pearson', \"", "\"'spearman', 'kendall', or a callable, \"", "\"'{method}' was supplied\"", ".", "format", "(", "method", "=", "method", ")", ")" ]
Compute correlation with `other` Series, excluding missing values. Parameters ---------- other : Series Series with which to compute the correlation. method : {'pearson', 'kendall', 'spearman'} or callable * pearson : standard correlation coefficient * kendall : Kendall Tau correlation coefficient * spearman : Spearman rank correlation * callable: callable with input two 1d ndarrays and returning a float. Note that the returned matrix from corr will have 1 along the diagonals and will be symmetric regardless of the callable's behavior .. versionadded:: 0.24.0 min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Correlation with other. Examples -------- >>> def histogram_intersection(a, b): ... v = np.minimum(a, b).sum().round(decimals=1) ... return v >>> s1 = pd.Series([.2, .0, .6, .2]) >>> s2 = pd.Series([.3, .6, .0, .1]) >>> s1.corr(s2, method=histogram_intersection) 0.3
[ "Compute", "correlation", "with", "other", "Series", "excluding", "missing", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2134-L2180
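The corr example above only exercises a callable; for completeness, a small sketch of the built-in methods on made-up, perfectly anti-correlated data (results rounded to sidestep floating-point noise; 'spearman' additionally requires scipy):
>>> import pandas as pd
>>> s1 = pd.Series([1, 2, 3, 4])
>>> s2 = pd.Series([8, 6, 4, 2])
>>> round(s1.corr(s2), 6)                      # Pearson, the default method
-1.0
>>> round(s1.corr(s2, method='spearman'), 6)   # rank correlation agrees on monotonic data
-1.0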
20,288
pandas-dev/pandas
pandas/core/series.py
Series.cov
def cov(self, other, min_periods=None): """ Compute covariance with Series, excluding missing values. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan return nanops.nancov(this.values, other.values, min_periods=min_periods)
python
def cov(self, other, min_periods=None): """ Compute covariance with Series, excluding missing values. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874 """ this, other = self.align(other, join='inner', copy=False) if len(this) == 0: return np.nan return nanops.nancov(this.values, other.values, min_periods=min_periods)
[ "def", "cov", "(", "self", ",", "other", ",", "min_periods", "=", "None", ")", ":", "this", ",", "other", "=", "self", ".", "align", "(", "other", ",", "join", "=", "'inner'", ",", "copy", "=", "False", ")", "if", "len", "(", "this", ")", "==", "0", ":", "return", "np", ".", "nan", "return", "nanops", ".", "nancov", "(", "this", ".", "values", ",", "other", ".", "values", ",", "min_periods", "=", "min_periods", ")" ]
Compute covariance with Series, excluding missing values. Parameters ---------- other : Series Series with which to compute the covariance. min_periods : int, optional Minimum number of observations needed to have a valid result. Returns ------- float Covariance between Series and other normalized by N-1 (unbiased estimator). Examples -------- >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) >>> s1.cov(s2) -0.01685762652715874
[ "Compute", "covariance", "with", "Series", "excluding", "missing", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2182-L2210
20,289
pandas-dev/pandas
pandas/core/series.py
Series.dot
def dot(self, other): """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ from pandas.core.frame import DataFrame if isinstance(other, (Series, DataFrame)): common = self.index.union(other.index) if (len(common) > len(self.index) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception('Dot product shape mismatch, %s vs %s' % (lvals.shape, rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=other.columns).__finalize__(self) elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other))
python
def dot(self, other): """ Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each columns of a DataFrame, or the Series and each columns of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of Series and each rows of other if other is a DataFrame or a numpy.ndarray between the Series and each columns of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of series and other, element-wise. Notes ----- The Series and other has to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14]) """ from pandas.core.frame import DataFrame if isinstance(other, (Series, DataFrame)): common = self.index.union(other.index) if (len(common) > len(self.index) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(index=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception('Dot product shape mismatch, %s vs %s' % (lvals.shape, rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=other.columns).__finalize__(self) elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other))
[ "def", "dot", "(", "self", ",", "other", ")", ":", "from", "pandas", ".", "core", ".", "frame", "import", "DataFrame", "if", "isinstance", "(", "other", ",", "(", "Series", ",", "DataFrame", ")", ")", ":", "common", "=", "self", ".", "index", ".", "union", "(", "other", ".", "index", ")", "if", "(", "len", "(", "common", ")", ">", "len", "(", "self", ".", "index", ")", "or", "len", "(", "common", ")", ">", "len", "(", "other", ".", "index", ")", ")", ":", "raise", "ValueError", "(", "'matrices are not aligned'", ")", "left", "=", "self", ".", "reindex", "(", "index", "=", "common", ",", "copy", "=", "False", ")", "right", "=", "other", ".", "reindex", "(", "index", "=", "common", ",", "copy", "=", "False", ")", "lvals", "=", "left", ".", "values", "rvals", "=", "right", ".", "values", "else", ":", "lvals", "=", "self", ".", "values", "rvals", "=", "np", ".", "asarray", "(", "other", ")", "if", "lvals", ".", "shape", "[", "0", "]", "!=", "rvals", ".", "shape", "[", "0", "]", ":", "raise", "Exception", "(", "'Dot product shape mismatch, %s vs %s'", "%", "(", "lvals", ".", "shape", ",", "rvals", ".", "shape", ")", ")", "if", "isinstance", "(", "other", ",", "DataFrame", ")", ":", "return", "self", ".", "_constructor", "(", "np", ".", "dot", "(", "lvals", ",", "rvals", ")", ",", "index", "=", "other", ".", "columns", ")", ".", "__finalize__", "(", "self", ")", "elif", "isinstance", "(", "other", ",", "Series", ")", ":", "return", "np", ".", "dot", "(", "lvals", ",", "rvals", ")", "elif", "isinstance", "(", "rvals", ",", "np", ".", "ndarray", ")", ":", "return", "np", ".", "dot", "(", "lvals", ",", "rvals", ")", "else", ":", "# pragma: no cover", "raise", "TypeError", "(", "'unsupported type: %s'", "%", "type", "(", "other", ")", ")" ]
Compute the dot product between the Series and the columns of other. This method computes the dot product between the Series and another one, or the Series and each column of a DataFrame, or the Series and each column of an array. It can also be called using `self @ other` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the dot product with its columns. Returns ------- scalar, Series or numpy.ndarray Return the dot product of the Series and other if other is a Series, the Series of the dot product of the Series and each row of other if other is a DataFrame, or a numpy.ndarray of the dot product between the Series and each column of the numpy array. See Also -------- DataFrame.dot: Compute the matrix product with the DataFrame. Series.mul: Multiplication of Series and other, element-wise. Notes ----- The Series and other have to share the same index if other is a Series or a DataFrame. Examples -------- >>> s = pd.Series([0, 1, 2, 3]) >>> other = pd.Series([-1, 2, -3, 4]) >>> s.dot(other) 8 >>> s @ other 8 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(df) 0 24 1 14 dtype: int64 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) >>> s.dot(arr) array([24, 14])
[ "Compute", "the", "dot", "product", "between", "the", "Series", "and", "the", "columns", "of", "other", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2321-L2397
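The Notes section above requires a shared index, and the code raises ValueError('matrices are not aligned') when the union of the two indexes is larger than either one; an illustrative failure case with hypothetical labels:
>>> import pandas as pd
>>> s = pd.Series([0, 1, 2, 3])
>>> other = pd.Series([1, 2], index=['x', 'y'])
>>> s.dot(other)
Traceback (most recent call last):
...
ValueError: matrices are not aligned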
20,290
pandas-dev/pandas
pandas/core/series.py
Series.append
def append(self, to_append, ignore_index=False, verify_integrity=False): """ Concatenate two or more Series. Parameters ---------- to_append : Series or list/tuple of Series Series to append with self. ignore_index : bool, default False If True, do not use the index labels. .. versionadded:: 0.19.0 verify_integrity : bool, default False If True, raise Exception on creating index with duplicates. Returns ------- Series Concatenated Series. See Also -------- concat : General function to concatenate DataFrame, Series or Panel objects. Notes ----- Iteratively appending to a Series can be more computationally intensive than a single concatenate. A better solution is to append values to a list and then concatenate the list with the original Series all at once. Examples -------- >>> s1 = pd.Series([1, 2, 3]) >>> s2 = pd.Series([4, 5, 6]) >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5]) >>> s1.append(s2) 0 1 1 2 2 3 0 4 1 5 2 6 dtype: int64 >>> s1.append(s3) 0 1 1 2 2 3 3 4 4 5 5 6 dtype: int64 With `ignore_index` set to True: >>> s1.append(s2, ignore_index=True) 0 1 1 2 2 3 3 4 4 5 5 6 dtype: int64 With `verify_integrity` set to True: >>> s1.append(s2, verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: [0, 1, 2] """ from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] + to_append else: to_concat = [self, to_append] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity)
python
def append(self, to_append, ignore_index=False, verify_integrity=False): """ Concatenate two or more Series. Parameters ---------- to_append : Series or list/tuple of Series Series to append with self. ignore_index : bool, default False If True, do not use the index labels. .. versionadded:: 0.19.0 verify_integrity : bool, default False If True, raise Exception on creating index with duplicates. Returns ------- Series Concatenated Series. See Also -------- concat : General function to concatenate DataFrame, Series or Panel objects. Notes ----- Iteratively appending to a Series can be more computationally intensive than a single concatenate. A better solution is to append values to a list and then concatenate the list with the original Series all at once. Examples -------- >>> s1 = pd.Series([1, 2, 3]) >>> s2 = pd.Series([4, 5, 6]) >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5]) >>> s1.append(s2) 0 1 1 2 2 3 0 4 1 5 2 6 dtype: int64 >>> s1.append(s3) 0 1 1 2 2 3 3 4 4 5 5 6 dtype: int64 With `ignore_index` set to True: >>> s1.append(s2, ignore_index=True) 0 1 1 2 2 3 3 4 4 5 5 6 dtype: int64 With `verify_integrity` set to True: >>> s1.append(s2, verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: [0, 1, 2] """ from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] + to_append else: to_concat = [self, to_append] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity)
[ "def", "append", "(", "self", ",", "to_append", ",", "ignore_index", "=", "False", ",", "verify_integrity", "=", "False", ")", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "if", "isinstance", "(", "to_append", ",", "(", "list", ",", "tuple", ")", ")", ":", "to_concat", "=", "[", "self", "]", "+", "to_append", "else", ":", "to_concat", "=", "[", "self", ",", "to_append", "]", "return", "concat", "(", "to_concat", ",", "ignore_index", "=", "ignore_index", ",", "verify_integrity", "=", "verify_integrity", ")" ]
Concatenate two or more Series. Parameters ---------- to_append : Series or list/tuple of Series Series to append with self. ignore_index : bool, default False If True, do not use the index labels. .. versionadded:: 0.19.0 verify_integrity : bool, default False If True, raise Exception on creating index with duplicates. Returns ------- Series Concatenated Series. See Also -------- concat : General function to concatenate DataFrame, Series or Panel objects. Notes ----- Iteratively appending to a Series can be more computationally intensive than a single concatenate. A better solution is to append values to a list and then concatenate the list with the original Series all at once. Examples -------- >>> s1 = pd.Series([1, 2, 3]) >>> s2 = pd.Series([4, 5, 6]) >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5]) >>> s1.append(s2) 0 1 1 2 2 3 0 4 1 5 2 6 dtype: int64 >>> s1.append(s3) 0 1 1 2 2 3 3 4 4 5 5 6 dtype: int64 With `ignore_index` set to True: >>> s1.append(s2, ignore_index=True) 0 1 1 2 2 3 3 4 4 5 5 6 dtype: int64 With `verify_integrity` set to True: >>> s1.append(s2, verify_integrity=True) Traceback (most recent call last): ... ValueError: Indexes have overlapping values: [0, 1, 2]
[ "Concatenate", "two", "or", "more", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2420-L2501
20,291
pandas-dev/pandas
pandas/core/series.py
Series._binop
def _binop(self, other, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level Returns ------- Series """ if not isinstance(other, Series): raise AssertionError('Other operand must be Series') new_index = self.index this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join='outer', copy=False) new_index = this.index this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value) with np.errstate(all='ignore'): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) if func.__name__ in ['divmod', 'rdivmod']: ret = ops._construct_divmod_result(self, result, new_index, name) else: ret = ops._construct_result(self, result, new_index, name) return ret
python
def _binop(self, other, func, level=None, fill_value=None): """ Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level Returns ------- Series """ if not isinstance(other, Series): raise AssertionError('Other operand must be Series') new_index = self.index this = self if not self.index.equals(other.index): this, other = self.align(other, level=level, join='outer', copy=False) new_index = this.index this_vals, other_vals = ops.fill_binop(this.values, other.values, fill_value) with np.errstate(all='ignore'): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) if func.__name__ in ['divmod', 'rdivmod']: ret = ops._construct_divmod_result(self, result, new_index, name) else: ret = ops._construct_result(self, result, new_index, name) return ret
[ "def", "_binop", "(", "self", ",", "other", ",", "func", ",", "level", "=", "None", ",", "fill_value", "=", "None", ")", ":", "if", "not", "isinstance", "(", "other", ",", "Series", ")", ":", "raise", "AssertionError", "(", "'Other operand must be Series'", ")", "new_index", "=", "self", ".", "index", "this", "=", "self", "if", "not", "self", ".", "index", ".", "equals", "(", "other", ".", "index", ")", ":", "this", ",", "other", "=", "self", ".", "align", "(", "other", ",", "level", "=", "level", ",", "join", "=", "'outer'", ",", "copy", "=", "False", ")", "new_index", "=", "this", ".", "index", "this_vals", ",", "other_vals", "=", "ops", ".", "fill_binop", "(", "this", ".", "values", ",", "other", ".", "values", ",", "fill_value", ")", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "=", "func", "(", "this_vals", ",", "other_vals", ")", "name", "=", "ops", ".", "get_op_result_name", "(", "self", ",", "other", ")", "if", "func", ".", "__name__", "in", "[", "'divmod'", ",", "'rdivmod'", "]", ":", "ret", "=", "ops", ".", "_construct_divmod_result", "(", "self", ",", "result", ",", "new_index", ",", "name", ")", "else", ":", "ret", "=", "ops", ".", "_construct_result", "(", "self", ",", "result", ",", "new_index", ",", "name", ")", "return", "ret" ]
Perform generic binary operation with optional fill value. Parameters ---------- other : Series func : binary operator fill_value : float or object Value to substitute for NA/null values. If both Series are NA in a location, the result will be NA regardless of the passed fill value level : int or level name, default None Broadcast across a level, matching Index values on the passed MultiIndex level Returns ------- Series
[ "Perform", "generic", "binary", "operation", "with", "optional", "fill", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2503-L2545
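_binop has no Examples block; it is the helper that the flexible arithmetic methods (for instance Series.add) delegate to when the other operand is a Series, so a hedged sketch through that public entry point, with made-up values, shows the fill_value handling described above:
>>> import numpy as np
>>> import pandas as pd
>>> a = pd.Series([1.0, 2.0, np.nan], index=['x', 'y', 'z'])
>>> b = pd.Series([10.0, np.nan, 30.0], index=['x', 'y', 'z'])
>>> a.add(b, fill_value=0)   # a missing value on one side is replaced by 0 before the op
x    11.0
y     2.0
z    30.0
dtype: float64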
20,292
pandas-dev/pandas
pandas/core/series.py
Series.combine
def combine(self, other, func, fill_value=None): """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. >>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = [] for idx in new_index: lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all='ignore'): new_values.append(func(lv, rv)) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index with np.errstate(all='ignore'): new_values = [func(lv, other) for lv in self._values] new_name = self.name if is_categorical_dtype(self.values): pass elif is_extension_array_dtype(self.values): # The function can return something of any type, so check # if the type is compatible with the calling EA. try: new_values = self._values._from_sequence(new_values) except Exception: # https://github.com/pandas-dev/pandas/issues/22850 # pandas has no control over what 3rd-party ExtensionArrays # do in _values_from_sequence. We still want ops to work # though, so we catch any regular Exception. pass return self._constructor(new_values, index=new_index, name=new_name)
python
def combine(self, other, func, fill_value=None): """ Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. >>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64 """ if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): # If other is a Series, result is based on union of Series, # so do this element by element new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = [] for idx in new_index: lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) with np.errstate(all='ignore'): new_values.append(func(lv, rv)) else: # Assume that other is a scalar, so apply the function for # each element in the Series new_index = self.index with np.errstate(all='ignore'): new_values = [func(lv, other) for lv in self._values] new_name = self.name if is_categorical_dtype(self.values): pass elif is_extension_array_dtype(self.values): # The function can return something of any type, so check # if the type is compatible with the calling EA. try: new_values = self._values._from_sequence(new_values) except Exception: # https://github.com/pandas-dev/pandas/issues/22850 # pandas has no control over what 3rd-party ExtensionArrays # do in _values_from_sequence. We still want ops to work # though, so we catch any regular Exception. pass return self._constructor(new_values, index=new_index, name=new_name)
[ "def", "combine", "(", "self", ",", "other", ",", "func", ",", "fill_value", "=", "None", ")", ":", "if", "fill_value", "is", "None", ":", "fill_value", "=", "na_value_for_dtype", "(", "self", ".", "dtype", ",", "compat", "=", "False", ")", "if", "isinstance", "(", "other", ",", "Series", ")", ":", "# If other is a Series, result is based on union of Series,", "# so do this element by element", "new_index", "=", "self", ".", "index", ".", "union", "(", "other", ".", "index", ")", "new_name", "=", "ops", ".", "get_op_result_name", "(", "self", ",", "other", ")", "new_values", "=", "[", "]", "for", "idx", "in", "new_index", ":", "lv", "=", "self", ".", "get", "(", "idx", ",", "fill_value", ")", "rv", "=", "other", ".", "get", "(", "idx", ",", "fill_value", ")", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "new_values", ".", "append", "(", "func", "(", "lv", ",", "rv", ")", ")", "else", ":", "# Assume that other is a scalar, so apply the function for", "# each element in the Series", "new_index", "=", "self", ".", "index", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "new_values", "=", "[", "func", "(", "lv", ",", "other", ")", "for", "lv", "in", "self", ".", "_values", "]", "new_name", "=", "self", ".", "name", "if", "is_categorical_dtype", "(", "self", ".", "values", ")", ":", "pass", "elif", "is_extension_array_dtype", "(", "self", ".", "values", ")", ":", "# The function can return something of any type, so check", "# if the type is compatible with the calling EA.", "try", ":", "new_values", "=", "self", ".", "_values", ".", "_from_sequence", "(", "new_values", ")", "except", "Exception", ":", "# https://github.com/pandas-dev/pandas/issues/22850", "# pandas has no control over what 3rd-party ExtensionArrays", "# do in _values_from_sequence. We still want ops to work", "# though, so we catch any regular Exception.", "pass", "return", "self", ".", "_constructor", "(", "new_values", ",", "index", "=", "new_index", ",", "name", "=", "new_name", ")" ]
Combine the Series with a Series or scalar according to `func`. Combine the Series and `other` using `func` to perform elementwise selection for combined Series. `fill_value` is assumed when value is missing at some index from one of the two objects being combined. Parameters ---------- other : Series or scalar The value(s) to be combined with the `Series`. func : function Function that takes two scalars as inputs and returns an element. fill_value : scalar, optional The value to assume when an index is missing from one Series or the other. The default specifies to use the appropriate NaN value for the underlying dtype of the Series. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine_first : Combine Series values, choosing the calling Series' values first. Examples -------- Consider 2 Datasets ``s1`` and ``s2`` containing highest clocked speeds of different birds. >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) >>> s1 falcon 330.0 eagle 160.0 dtype: float64 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) >>> s2 falcon 345.0 eagle 200.0 duck 30.0 dtype: float64 Now, to combine the two datasets and view the highest speeds of the birds across the two datasets >>> s1.combine(s2, max) duck NaN eagle 200.0 falcon 345.0 dtype: float64 In the previous example, the resulting value for duck is missing, because the maximum of a NaN and a float is a NaN. So, in the example, we set ``fill_value=0``, so the maximum value returned will be the value from some dataset. >>> s1.combine(s2, max, fill_value=0) duck 30.0 eagle 200.0 falcon 345.0 dtype: float64
[ "Combine", "the", "Series", "with", "a", "Series", "or", "scalar", "according", "to", "func", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2547-L2650
20,293
pandas-dev/pandas
pandas/core/series.py
Series.combine_first
def combine_first(self, other): """ Combine Series values, choosing the calling Series's values first. Parameters ---------- other : Series The value(s) to be combined with the `Series`. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine : Perform elementwise operation on two Series using a given function. Notes ----- Result index will be the union of the two indexes. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4]) >>> s1.combine_first(s2) 0 1.0 1 4.0 dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if is_datetimelike(this) and not is_datetimelike(other): other = to_datetime(other) return this.where(notna(this), other)
python
def combine_first(self, other): """ Combine Series values, choosing the calling Series's values first. Parameters ---------- other : Series The value(s) to be combined with the `Series`. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine : Perform elementwise operation on two Series using a given function. Notes ----- Result index will be the union of the two indexes. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4]) >>> s1.combine_first(s2) 0 1.0 1 4.0 dtype: float64 """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) if is_datetimelike(this) and not is_datetimelike(other): other = to_datetime(other) return this.where(notna(this), other)
[ "def", "combine_first", "(", "self", ",", "other", ")", ":", "new_index", "=", "self", ".", "index", ".", "union", "(", "other", ".", "index", ")", "this", "=", "self", ".", "reindex", "(", "new_index", ",", "copy", "=", "False", ")", "other", "=", "other", ".", "reindex", "(", "new_index", ",", "copy", "=", "False", ")", "if", "is_datetimelike", "(", "this", ")", "and", "not", "is_datetimelike", "(", "other", ")", ":", "other", "=", "to_datetime", "(", "other", ")", "return", "this", ".", "where", "(", "notna", "(", "this", ")", ",", "other", ")" ]
Combine Series values, choosing the calling Series's values first. Parameters ---------- other : Series The value(s) to be combined with the `Series`. Returns ------- Series The result of combining the Series with the other object. See Also -------- Series.combine : Perform elementwise operation on two Series using a given function. Notes ----- Result index will be the union of the two indexes. Examples -------- >>> s1 = pd.Series([1, np.nan]) >>> s2 = pd.Series([3, 4]) >>> s1.combine_first(s2) 0 1.0 1 4.0 dtype: float64
[ "Combine", "Series", "values", "choosing", "the", "calling", "Series", "s", "values", "first", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2652-L2690
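A short sketch of combine_first: the calling Series wins wherever it is non-NA, and the result index is the union of both indexes. The labels below are illustrative.

import numpy as np
import pandas as pd

s1 = pd.Series([1.0, np.nan, 3.0], index=['a', 'b', 'c'])
s2 = pd.Series([10.0, 20.0, 30.0, 40.0], index=['a', 'b', 'c', 'd'])

# 'b' is filled from s2 because s1 is NaN there; 'd' appears only in s2,
# so it is carried over via the union of the two indexes
print(s1.combine_first(s2))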
20,294
pandas-dev/pandas
pandas/core/series.py
Series.update
def update(self, other): """ Modify Series in place using non-NA values from passed Series. Aligns on index. Parameters ---------- other : Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 """ other = other.reindex_like(self) mask = notna(other) self._data = self._data.putmask(mask=mask, new=other, inplace=True) self._maybe_update_cacher()
python
def update(self, other): """ Modify Series in place using non-NA values from passed Series. Aligns on index. Parameters ---------- other : Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64 """ other = other.reindex_like(self) mask = notna(other) self._data = self._data.putmask(mask=mask, new=other, inplace=True) self._maybe_update_cacher()
[ "def", "update", "(", "self", ",", "other", ")", ":", "other", "=", "other", ".", "reindex_like", "(", "self", ")", "mask", "=", "notna", "(", "other", ")", "self", ".", "_data", "=", "self", ".", "_data", ".", "putmask", "(", "mask", "=", "mask", ",", "new", "=", "other", ",", "inplace", "=", "True", ")", "self", ".", "_maybe_update_cacher", "(", ")" ]
Modify Series in place using non-NA values from passed Series. Aligns on index. Parameters ---------- other : Series Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series(['a', 'b', 'c']) >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If ``other`` contains NaNs the corresponding values are not updated in the original Series. >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, np.nan, 6])) >>> s 0 4 1 2 2 6 dtype: int64
[ "Modify", "Series", "in", "place", "using", "non", "-", "NA", "values", "from", "passed", "Series", ".", "Aligns", "on", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2692-L2742
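A small sketch of Series.update showing the two behaviors the docstring documents: alignment on index and skipping NaN values in the passed Series. The names are illustrative.

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c'])
patch = pd.Series([20.0, np.nan], index=['b', 'c'])

# update() mutates s in place and returns None; only 'b' changes because
# the NaN at 'c' in the patch is ignored
s.update(patch)
print(s)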
20,295
pandas-dev/pandas
pandas/core/series.py
Series.sort_values
def sort_values(self, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'}, default 0 Axis to direct sorting. The value 'index' is accepted for compatibility with DataFrame.sort_values. ascending : bool, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' is the only stable algorithm. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Returns ------- Series Series ordered by values. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values inplace >>> s.sort_values(ascending=False, inplace=True) >>> s 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object """ inplace = validate_bool_kwarg(inplace, 'inplace') # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError("This Series is a view of some other array, to " "sort in-place you must create a copy") def _try_kind_sort(arr): # easier to ask forgiveness than permission try: # if kind==mergesort, it can fail for object dtype return arr.argsort(kind=kind) except TypeError: # stable sort not available for object dtype # uses the argsort default quicksort return arr.argsort(kind='quicksort') arr = self._values sortedIdx = np.empty(len(self), dtype=np.int32) bad = isna(arr) good = ~bad idx = ibase.default_index(len(self)) argsorted = _try_kind_sort(arr[good]) if is_list_like(ascending): if len(ascending) != 1: raise ValueError('Length of ascending (%d) must be 1 ' 'for Series' % (len(ascending))) ascending = ascending[0] if not is_bool(ascending): raise ValueError('ascending must be boolean') if not ascending: argsorted = argsorted[::-1] if na_position == 'last': n = good.sum() sortedIdx[:n] = idx[good][argsorted] sortedIdx[n:] = idx[bad] elif na_position == 'first': n = bad.sum() sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] else: raise ValueError('invalid na_position: {!r}'.format(na_position)) result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx]) if inplace: self._update_inplace(result) else: return result.__finalize__(self)
python
def sort_values(self, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last'): """ Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'}, default 0 Axis to direct sorting. The value 'index' is accepted for compatibility with DataFrame.sort_values. ascending : bool, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' is the only stable algorithm. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Returns ------- Series Series ordered by values. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values inplace >>> s.sort_values(ascending=False, inplace=True) >>> s 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object """ inplace = validate_bool_kwarg(inplace, 'inplace') # Validate the axis parameter self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: raise ValueError("This Series is a view of some other array, to " "sort in-place you must create a copy") def _try_kind_sort(arr): # easier to ask forgiveness than permission try: # if kind==mergesort, it can fail for object dtype return arr.argsort(kind=kind) except TypeError: # stable sort not available for object dtype # uses the argsort default quicksort return arr.argsort(kind='quicksort') arr = self._values sortedIdx = np.empty(len(self), dtype=np.int32) bad = isna(arr) good = ~bad idx = ibase.default_index(len(self)) argsorted = _try_kind_sort(arr[good]) if is_list_like(ascending): if len(ascending) != 1: raise ValueError('Length of ascending (%d) must be 1 ' 'for Series' % (len(ascending))) ascending = ascending[0] if not is_bool(ascending): raise ValueError('ascending must be boolean') if not ascending: argsorted = argsorted[::-1] if na_position == 'last': n = good.sum() sortedIdx[:n] = idx[good][argsorted] sortedIdx[n:] = idx[bad] elif na_position == 'first': n = bad.sum() sortedIdx[n:] = idx[good][argsorted] sortedIdx[:n] = idx[bad] else: raise ValueError('invalid na_position: {!r}'.format(na_position)) result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx]) if inplace: self._update_inplace(result) else: return result.__finalize__(self)
[ "def", "sort_values", "(", "self", ",", "axis", "=", "0", ",", "ascending", "=", "True", ",", "inplace", "=", "False", ",", "kind", "=", "'quicksort'", ",", "na_position", "=", "'last'", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "# Validate the axis parameter", "self", ".", "_get_axis_number", "(", "axis", ")", "# GH 5856/5853", "if", "inplace", "and", "self", ".", "_is_cached", ":", "raise", "ValueError", "(", "\"This Series is a view of some other array, to \"", "\"sort in-place you must create a copy\"", ")", "def", "_try_kind_sort", "(", "arr", ")", ":", "# easier to ask forgiveness than permission", "try", ":", "# if kind==mergesort, it can fail for object dtype", "return", "arr", ".", "argsort", "(", "kind", "=", "kind", ")", "except", "TypeError", ":", "# stable sort not available for object dtype", "# uses the argsort default quicksort", "return", "arr", ".", "argsort", "(", "kind", "=", "'quicksort'", ")", "arr", "=", "self", ".", "_values", "sortedIdx", "=", "np", ".", "empty", "(", "len", "(", "self", ")", ",", "dtype", "=", "np", ".", "int32", ")", "bad", "=", "isna", "(", "arr", ")", "good", "=", "~", "bad", "idx", "=", "ibase", ".", "default_index", "(", "len", "(", "self", ")", ")", "argsorted", "=", "_try_kind_sort", "(", "arr", "[", "good", "]", ")", "if", "is_list_like", "(", "ascending", ")", ":", "if", "len", "(", "ascending", ")", "!=", "1", ":", "raise", "ValueError", "(", "'Length of ascending (%d) must be 1 '", "'for Series'", "%", "(", "len", "(", "ascending", ")", ")", ")", "ascending", "=", "ascending", "[", "0", "]", "if", "not", "is_bool", "(", "ascending", ")", ":", "raise", "ValueError", "(", "'ascending must be boolean'", ")", "if", "not", "ascending", ":", "argsorted", "=", "argsorted", "[", ":", ":", "-", "1", "]", "if", "na_position", "==", "'last'", ":", "n", "=", "good", ".", "sum", "(", ")", "sortedIdx", "[", ":", "n", "]", "=", "idx", "[", "good", "]", "[", "argsorted", "]", "sortedIdx", "[", "n", ":", "]", "=", "idx", "[", "bad", "]", "elif", "na_position", "==", "'first'", ":", "n", "=", "bad", ".", "sum", "(", ")", "sortedIdx", "[", "n", ":", "]", "=", "idx", "[", "good", "]", "[", "argsorted", "]", "sortedIdx", "[", ":", "n", "]", "=", "idx", "[", "bad", "]", "else", ":", "raise", "ValueError", "(", "'invalid na_position: {!r}'", ".", "format", "(", "na_position", ")", ")", "result", "=", "self", ".", "_constructor", "(", "arr", "[", "sortedIdx", "]", ",", "index", "=", "self", ".", "index", "[", "sortedIdx", "]", ")", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "result", ")", "else", ":", "return", "result", ".", "__finalize__", "(", "self", ")" ]
Sort by the values. Sort a Series in ascending or descending order by some criterion. Parameters ---------- axis : {0 or 'index'}, default 0 Axis to direct sorting. The value 'index' is accepted for compatibility with DataFrame.sort_values. ascending : bool, default True If True, sort values in ascending order, otherwise descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort' or 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' is the only stable algorithm. na_position : {'first' or 'last'}, default 'last' Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Returns ------- Series Series ordered by values. See Also -------- Series.sort_index : Sort by the Series indices. DataFrame.sort_values : Sort DataFrame by the values along either axis. DataFrame.sort_index : Sort DataFrame by indices. Examples -------- >>> s = pd.Series([np.nan, 1, 3, 10, 5]) >>> s 0 NaN 1 1.0 2 3.0 3 10.0 4 5.0 dtype: float64 Sort values ascending order (default behaviour) >>> s.sort_values(ascending=True) 1 1.0 2 3.0 4 5.0 3 10.0 0 NaN dtype: float64 Sort values descending order >>> s.sort_values(ascending=False) 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values inplace >>> s.sort_values(ascending=False, inplace=True) >>> s 3 10.0 4 5.0 2 3.0 1 1.0 0 NaN dtype: float64 Sort values putting NAs first >>> s.sort_values(na_position='first') 0 NaN 1 1.0 2 3.0 4 5.0 3 10.0 dtype: float64 Sort a series of strings >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) >>> s 0 z 1 b 2 d 3 a 4 c dtype: object >>> s.sort_values() 3 a 1 b 4 c 2 d 0 z dtype: object
[ "Sort", "by", "the", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2747-L2910
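A brief sketch of the sort_values keyword arguments described above; mergesort is the stable choice and na_position controls where NaNs land. The exact output assumes the 0.24-era behavior at this sha.

import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 1.0, 2.0])

# stable sort, with NaNs moved to the front instead of the default 'last'
print(s.sort_values(kind='mergesort', na_position='first'))

# without inplace=True the original Series keeps its order
print(s)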
20,296
pandas-dev/pandas
pandas/core/series.py
Series.sort_index
def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : int, default 0 Axis to direct sorting. This can only be 0 for Series. level : int, optional If not None, sort on values in specified index level(s). ascending : bool, default true Sort ascending vs. descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- Series The original Series sorted by the labels. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object Sort Inplace >>> s.sort_index(inplace=True) >>> s 1 c 2 b 3 a 4 d dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 """ # TODO: this can be combined with DataFrame.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') # Validate the axis parameter self._get_axis_number(axis) index = self.index if level is not None: new_index, indexer = index.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(index, MultiIndex): from pandas.core.sorting import lexsort_indexer labels = index._sort_levels_monotonic() indexer = lexsort_indexer(labels._get_codes_for_sorting(), orders=ascending, na_position=na_position) else: from pandas.core.sorting import nargsort # Check monotonic-ness before sort an index # GH11080 if ((ascending and index.is_monotonic_increasing) or (not ascending and index.is_monotonic_decreasing)): if inplace: return else: return self.copy() indexer = nargsort(index, kind=kind, ascending=ascending, na_position=na_position) indexer = ensure_platform_int(indexer) new_index = index.take(indexer) new_index = new_index._sort_levels_monotonic() new_values = self._values.take(indexer) result = self._constructor(new_values, index=new_index) if inplace: self._update_inplace(result) else: return result.__finalize__(self)
python
def sort_index(self, axis=0, level=None, ascending=True, inplace=False, kind='quicksort', na_position='last', sort_remaining=True): """ Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : int, default 0 Axis to direct sorting. This can only be 0 for Series. level : int, optional If not None, sort on values in specified index level(s). ascending : bool, default true Sort ascending vs. descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- Series The original Series sorted by the labels. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object Sort Inplace >>> s.sort_index(inplace=True) >>> s 1 c 2 b 3 a 4 d dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64 """ # TODO: this can be combined with DataFrame.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') # Validate the axis parameter self._get_axis_number(axis) index = self.index if level is not None: new_index, indexer = index.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(index, MultiIndex): from pandas.core.sorting import lexsort_indexer labels = index._sort_levels_monotonic() indexer = lexsort_indexer(labels._get_codes_for_sorting(), orders=ascending, na_position=na_position) else: from pandas.core.sorting import nargsort # Check monotonic-ness before sort an index # GH11080 if ((ascending and index.is_monotonic_increasing) or (not ascending and index.is_monotonic_decreasing)): if inplace: return else: return self.copy() indexer = nargsort(index, kind=kind, ascending=ascending, na_position=na_position) indexer = ensure_platform_int(indexer) new_index = index.take(indexer) new_index = new_index._sort_levels_monotonic() new_values = self._values.take(indexer) result = self._constructor(new_values, index=new_index) if inplace: self._update_inplace(result) else: return result.__finalize__(self)
[ "def", "sort_index", "(", "self", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "ascending", "=", "True", ",", "inplace", "=", "False", ",", "kind", "=", "'quicksort'", ",", "na_position", "=", "'last'", ",", "sort_remaining", "=", "True", ")", ":", "# TODO: this can be combined with DataFrame.sort_index impl as", "# almost identical", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "# Validate the axis parameter", "self", ".", "_get_axis_number", "(", "axis", ")", "index", "=", "self", ".", "index", "if", "level", "is", "not", "None", ":", "new_index", ",", "indexer", "=", "index", ".", "sortlevel", "(", "level", ",", "ascending", "=", "ascending", ",", "sort_remaining", "=", "sort_remaining", ")", "elif", "isinstance", "(", "index", ",", "MultiIndex", ")", ":", "from", "pandas", ".", "core", ".", "sorting", "import", "lexsort_indexer", "labels", "=", "index", ".", "_sort_levels_monotonic", "(", ")", "indexer", "=", "lexsort_indexer", "(", "labels", ".", "_get_codes_for_sorting", "(", ")", ",", "orders", "=", "ascending", ",", "na_position", "=", "na_position", ")", "else", ":", "from", "pandas", ".", "core", ".", "sorting", "import", "nargsort", "# Check monotonic-ness before sort an index", "# GH11080", "if", "(", "(", "ascending", "and", "index", ".", "is_monotonic_increasing", ")", "or", "(", "not", "ascending", "and", "index", ".", "is_monotonic_decreasing", ")", ")", ":", "if", "inplace", ":", "return", "else", ":", "return", "self", ".", "copy", "(", ")", "indexer", "=", "nargsort", "(", "index", ",", "kind", "=", "kind", ",", "ascending", "=", "ascending", ",", "na_position", "=", "na_position", ")", "indexer", "=", "ensure_platform_int", "(", "indexer", ")", "new_index", "=", "index", ".", "take", "(", "indexer", ")", "new_index", "=", "new_index", ".", "_sort_levels_monotonic", "(", ")", "new_values", "=", "self", ".", "_values", ".", "take", "(", "indexer", ")", "result", "=", "self", ".", "_constructor", "(", "new_values", ",", "index", "=", "new_index", ")", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "result", ")", "else", ":", "return", "result", ".", "__finalize__", "(", "self", ")" ]
Sort Series by index labels. Returns a new Series sorted by label if `inplace` argument is ``False``, otherwise updates the original series and returns None. Parameters ---------- axis : int, default 0 Axis to direct sorting. This can only be 0 for Series. level : int, optional If not None, sort on values in specified index level(s). ascending : bool, default true Sort ascending vs. descending. inplace : bool, default False If True, perform operation in-place. kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort' Choice of sorting algorithm. See also :func:`numpy.sort` for more information. 'mergesort' is the only stable algorithm. For DataFrames, this option is only applied when sorting on a single column or label. na_position : {'first', 'last'}, default 'last' If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. Not implemented for MultiIndex. sort_remaining : bool, default True If True and sorting by level and index is multilevel, sort by other levels too (in order) after sorting by specified level. Returns ------- Series The original Series sorted by the labels. See Also -------- DataFrame.sort_index: Sort DataFrame by the index. DataFrame.sort_values: Sort DataFrame by the value. Series.sort_values : Sort Series by the value. Examples -------- >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) >>> s.sort_index() 1 c 2 b 3 a 4 d dtype: object Sort Descending >>> s.sort_index(ascending=False) 4 d 3 a 2 b 1 c dtype: object Sort Inplace >>> s.sort_index(inplace=True) >>> s 1 c 2 b 3 a 4 d dtype: object By default NaNs are put at the end, but use `na_position` to place them at the beginning >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) >>> s.sort_index(na_position='first') NaN d 1.0 c 2.0 b 3.0 a dtype: object Specify index level to sort >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', ... 'baz', 'baz', 'bar', 'bar']), ... np.array(['two', 'one', 'two', 'one', ... 'two', 'one', 'two', 'one'])] >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) >>> s.sort_index(level=1) bar one 8 baz one 6 foo one 4 qux one 2 bar two 7 baz two 5 foo two 3 qux two 1 dtype: int64 Does not sort by remaining levels when sorting by levels >>> s.sort_index(level=1, sort_remaining=False) qux one 2 foo one 4 baz one 6 bar one 8 qux two 1 foo two 3 baz two 5 bar two 7 dtype: int64
[ "Sort", "Series", "by", "index", "labels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L2912-L3065
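A brief sketch of level-based sorting with sort_index on a MultiIndex; the level names and data are illustrative, and sort_remaining=True (the default) also orders the outer level within each inner group.

import pandas as pd

idx = pd.MultiIndex.from_product([['b', 'a'], [2, 1]],
                                 names=['outer', 'inner'])
s = pd.Series([1, 2, 3, 4], index=idx)

# level can be given by name as well as by position
print(s.sort_index(level='inner'))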
20,297
pandas-dev/pandas
pandas/core/series.py
Series.nlargest
def nlargest(self, n=5, keep='first'): """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Monserat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Monserat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
python
def nlargest(self, n=5, keep='first'): """ Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Monserat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Monserat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 """ return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest()
[ "def", "nlargest", "(", "self", ",", "n", "=", "5", ",", "keep", "=", "'first'", ")", ":", "return", "algorithms", ".", "SelectNSeries", "(", "self", ",", "n", "=", "n", ",", "keep", "=", "keep", ")", ".", "nlargest", "(", ")" ]
Return the largest `n` elements. Parameters ---------- n : int, default 5 Return this many descending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` largest values in the Series, sorted in decreasing order. See Also -------- Series.nsmallest: Get the `n` smallest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values(ascending=False).head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Malta": 434000, "Maldives": 434000, ... "Brunei": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Monserat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Malta 434000 Maldives 434000 Brunei 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Monserat 5200 dtype: int64 The `n` largest elements where ``n=5`` by default. >>> s.nlargest() France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3``. Default `keep` value is 'first' so Malta will be kept. >>> s.nlargest(3) France 65000000 Italy 59000000 Malta 434000 dtype: int64 The `n` largest elements where ``n=3`` and keeping the last duplicates. Brunei will be kept since it is the last with value 434000 based on the index order. >>> s.nlargest(3, keep='last') France 65000000 Italy 59000000 Brunei 434000 dtype: int64 The `n` largest elements where ``n=3`` with all duplicates kept. Note that the returned Series has five elements due to the three duplicates. >>> s.nlargest(3, keep='all') France 65000000 Italy 59000000 Malta 434000 Maldives 434000 Brunei 434000 dtype: int64
[ "Return", "the", "largest", "n", "elements", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3107-L3203
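A compact sketch of how keep interacts with ties at the cutoff in nlargest; the data are made up to force duplicates at the nth value.

import pandas as pd

s = pd.Series([5, 3, 3, 3, 1], index=list('abcde'))

# keep='first' (default) truncates to n rows by order of appearance
print(s.nlargest(2))

# keep='all' returns every row tied at the cutoff, so the result can be
# longer than n
print(s.nlargest(2, keep='all'))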
20,298
pandas-dev/pandas
pandas/core/series.py
Series.nsmallest
def nsmallest(self, n=5, keep='first'): """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Monserat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Monserat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Monserat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Monserat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Monserat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Monserat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
python
def nsmallest(self, n=5, keep='first'): """ Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Monserat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Monserat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Monserat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Monserat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Monserat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Monserat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64 """ return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest()
[ "def", "nsmallest", "(", "self", ",", "n", "=", "5", ",", "keep", "=", "'first'", ")", ":", "return", "algorithms", ".", "SelectNSeries", "(", "self", ",", "n", "=", "n", ",", "keep", "=", "keep", ")", ".", "nsmallest", "(", ")" ]
Return the smallest `n` elements. Parameters ---------- n : int, default 5 Return this many ascending sorted values. keep : {'first', 'last', 'all'}, default 'first' When there are duplicate values that cannot all fit in a Series of `n` elements: - ``first`` : return the first `n` occurrences in order of appearance. - ``last`` : return the last `n` occurrences in reverse order of appearance. - ``all`` : keep all occurrences. This can result in a Series of size larger than `n`. Returns ------- Series The `n` smallest values in the Series, sorted in increasing order. See Also -------- Series.nlargest: Get the `n` largest elements. Series.sort_values: Sort Series by values. Series.head: Return the first `n` rows. Notes ----- Faster than ``.sort_values().head(n)`` for small `n` relative to the size of the ``Series`` object. Examples -------- >>> countries_population = {"Italy": 59000000, "France": 65000000, ... "Brunei": 434000, "Malta": 434000, ... "Maldives": 434000, "Iceland": 337000, ... "Nauru": 11300, "Tuvalu": 11300, ... "Anguilla": 11300, "Monserat": 5200} >>> s = pd.Series(countries_population) >>> s Italy 59000000 France 65000000 Brunei 434000 Malta 434000 Maldives 434000 Iceland 337000 Nauru 11300 Tuvalu 11300 Anguilla 11300 Monserat 5200 dtype: int64 The `n` smallest elements where ``n=5`` by default. >>> s.nsmallest() Monserat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 Iceland 337000 dtype: int64 The `n` smallest elements where ``n=3``. Default `keep` value is 'first' so Nauru and Tuvalu will be kept. >>> s.nsmallest(3) Monserat 5200 Nauru 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` and keeping the last duplicates. Anguilla and Tuvalu will be kept since they are the last with value 11300 based on the index order. >>> s.nsmallest(3, keep='last') Monserat 5200 Anguilla 11300 Tuvalu 11300 dtype: int64 The `n` smallest elements where ``n=3`` with all duplicates kept. Note that the returned Series has four elements due to the three duplicates. >>> s.nsmallest(3, keep='all') Monserat 5200 Nauru 11300 Tuvalu 11300 Anguilla 11300 dtype: int64
[ "Return", "the", "smallest", "n", "elements", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3205-L3300
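A small sketch of the equivalence noted in the nsmallest docstring: the result matches sort_values().head(n) but avoids a full sort, which is the faster path for small n. The RandomState seed and sizes are arbitrary.

import numpy as np
import pandas as pd

s = pd.Series(np.random.RandomState(0).randint(0, 1000000, size=100000))

# the same three smallest values either way; nsmallest skips the full sort
assert s.nsmallest(3).tolist() == s.sort_values().head(3).tolist()
print(s.nsmallest(3))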
20,299
pandas-dev/pandas
pandas/core/series.py
Series.swaplevel
def swaplevel(self, i=-2, j=-1, copy=True): """ Swap levels i and j in a MultiIndex. Parameters ---------- i, j : int, str (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- Series Series with levels swapped in MultiIndex. .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. """ new_index = self.index.swaplevel(i, j) return self._constructor(self._values, index=new_index, copy=copy).__finalize__(self)
python
def swaplevel(self, i=-2, j=-1, copy=True): """ Swap levels i and j in a MultiIndex. Parameters ---------- i, j : int, str (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- Series Series with levels swapped in MultiIndex. .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. """ new_index = self.index.swaplevel(i, j) return self._constructor(self._values, index=new_index, copy=copy).__finalize__(self)
[ "def", "swaplevel", "(", "self", ",", "i", "=", "-", "2", ",", "j", "=", "-", "1", ",", "copy", "=", "True", ")", ":", "new_index", "=", "self", ".", "index", ".", "swaplevel", "(", "i", ",", "j", ")", "return", "self", ".", "_constructor", "(", "self", ".", "_values", ",", "index", "=", "new_index", ",", "copy", "=", "copy", ")", ".", "__finalize__", "(", "self", ")" ]
Swap levels i and j in a MultiIndex. Parameters ---------- i, j : int, str (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- Series Series with levels swapped in MultiIndex. .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index.
[ "Swap", "levels", "i", "and", "j", "in", "a", "MultiIndex", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3302-L3323
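A minimal sketch of swaplevel; the level names are illustrative, and with the defaults i=-2, j=-1 the two innermost levels are exchanged.

import pandas as pd

idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
                                names=['letter', 'number'])
s = pd.Series([10, 20, 30], index=idx)

# swap by the default positions, or name the levels explicitly
print(s.swaplevel())
print(s.swaplevel('letter', 'number'))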