column            type           min      max
id                int32          0        252k
repo              stringlengths  7        55
path              stringlengths  4        127
func_name         stringlengths  1        88
original_string   stringlengths  75       19.8k
language          stringclasses  1 value
code              stringlengths  75       19.8k
code_tokens       list           -        -
docstring         stringlengths  3        17.3k
docstring_tokens  list           -        -
sha               stringlengths  40       40
url               stringlengths  87       242
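A minimal sketch of loading and inspecting records with this schema. The rows below look like the CodeSearchNet Python corpus; the dataset name passed to `load_dataset` is an assumption, not something the dump states.

import pandas as pd  # noqa: F401  (the records below are pandas source)
from datasets import load_dataset

# Assumption: this dump corresponds to the "code_search_net" dataset
# (Python configuration) on the Hugging Face Hub.
ds = load_dataset("code_search_net", "python", split="train")

row = ds[20300]  # the records below start at id 20,300
print(row["repo"], row["path"], row["func_name"])
print(row["url"])          # permalink pinned to the `sha` field
print(row["code"][:200])   # `code` duplicates `original_string` verbatim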
20,300
pandas-dev/pandas
pandas/core/series.py
Series.map
python
def map(self, arg, na_action=None):
    """
    Map values of Series according to input correspondence.

    Used for substituting each value in a Series with another value,
    that may be derived from a function, a ``dict`` or
    a :class:`Series`.

    Parameters
    ----------
    arg : function, dict, or Series
        Mapping correspondence.
    na_action : {None, 'ignore'}, default None
        If 'ignore', propagate NaN values, without passing them to the
        mapping correspondence.

    Returns
    -------
    Series
        Same index as caller.

    See Also
    --------
    Series.apply : For applying more complex functions on a Series.
    DataFrame.apply : Apply a function row-/column-wise.
    DataFrame.applymap : Apply a function elementwise on a whole DataFrame.

    Notes
    -----
    When ``arg`` is a dictionary, values in Series that are not in the
    dictionary (as keys) are converted to ``NaN``. However, if the
    dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
    provides a method for default values), then this default is used
    rather than ``NaN``.

    Examples
    --------
    >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
    >>> s
    0       cat
    1       dog
    2       NaN
    3    rabbit
    dtype: object

    ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
    in the ``dict`` are converted to ``NaN``, unless the dict has a default
    value (e.g. ``defaultdict``):

    >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
    0    kitten
    1     puppy
    2       NaN
    3       NaN
    dtype: object

    It also accepts a function:

    >>> s.map('I am a {}'.format)
    0       I am a cat
    1       I am a dog
    2       I am a nan
    3    I am a rabbit
    dtype: object

    To avoid applying the function to missing values (and keep them as
    ``NaN``) ``na_action='ignore'`` can be used:

    >>> s.map('I am a {}'.format, na_action='ignore')
    0       I am a cat
    1       I am a dog
    2              NaN
    3    I am a rabbit
    dtype: object
    """
    new_values = super()._map_values(arg, na_action=na_action)
    return self._constructor(new_values,
                             index=self.index).__finalize__(self)
[ "def", "map", "(", "self", ",", "arg", ",", "na_action", "=", "None", ")", ":", "new_values", "=", "super", "(", ")", ".", "_map_values", "(", "arg", ",", "na_action", "=", "na_action", ")", "return", "self", ".", "_constructor", "(", "new_values", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")" ]
[ "Map", "values", "of", "Series", "according", "to", "input", "correspondence", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3394-L3472
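The Notes section above mentions ``__missing__``-based defaults, but none of the examples exercise them. A minimal sketch of that behaviour, assuming the documented semantics; the mapping values here are illustrative.

from collections import defaultdict

import numpy as np
import pandas as pd

s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])

# A dict subclass defining __missing__ (defaultdict does, via its
# default_factory) supplies a default instead of NaN for absent keys.
mapping = defaultdict(lambda: 'unknown', {'cat': 'kitten', 'dog': 'puppy'})
s.map(mapping)
# Unmatched values ('rabbit', and NaN unless na_action='ignore') come
# back as 'unknown' rather than NaN.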
20,301
pandas-dev/pandas
pandas/core/series.py
Series.apply
python
def apply(self, func, convert_dtype=True, args=(), **kwds):
    """
    Invoke function on values of Series.

    Can be ufunc (a NumPy function that applies to the entire Series)
    or a Python function that only works on single values.

    Parameters
    ----------
    func : function
        Python function or NumPy ufunc to apply.
    convert_dtype : bool, default True
        Try to find better dtype for elementwise function results. If
        False, leave as dtype=object.
    args : tuple
        Positional arguments passed to func after the series value.
    **kwds
        Additional keyword arguments passed to func.

    Returns
    -------
    Series or DataFrame
        If func returns a Series object the result will be a DataFrame.

    See Also
    --------
    Series.map: For element-wise operations.
    Series.agg: Only perform aggregating type operations.
    Series.transform: Only perform transforming type operations.

    Examples
    --------
    Create a series with typical summer temperatures for each city.

    >>> s = pd.Series([20, 21, 12],
    ...               index=['London', 'New York', 'Helsinki'])
    >>> s
    London      20
    New York    21
    Helsinki    12
    dtype: int64

    Square the values by defining a function and passing it as an
    argument to ``apply()``.

    >>> def square(x):
    ...     return x ** 2
    >>> s.apply(square)
    London      400
    New York    441
    Helsinki    144
    dtype: int64

    Square the values by passing an anonymous function as an
    argument to ``apply()``.

    >>> s.apply(lambda x: x ** 2)
    London      400
    New York    441
    Helsinki    144
    dtype: int64

    Define a custom function that needs additional positional
    arguments and pass these additional arguments using the
    ``args`` keyword.

    >>> def subtract_custom_value(x, custom_value):
    ...     return x - custom_value
    >>> s.apply(subtract_custom_value, args=(5,))
    London      15
    New York    16
    Helsinki     7
    dtype: int64

    Define a custom function that takes keyword arguments
    and pass these arguments to ``apply``.

    >>> def add_custom_values(x, **kwargs):
    ...     for month in kwargs:
    ...         x += kwargs[month]
    ...     return x
    >>> s.apply(add_custom_values, june=30, july=20, august=25)
    London      95
    New York    96
    Helsinki    87
    dtype: int64

    Use a function from the Numpy library.

    >>> s.apply(np.log)
    London      2.995732
    New York    3.044522
    Helsinki    2.484907
    dtype: float64
    """
    if len(self) == 0:
        return self._constructor(dtype=self.dtype,
                                 index=self.index).__finalize__(self)

    # dispatch to agg
    if isinstance(func, (list, dict)):
        return self.aggregate(func, *args, **kwds)

    # if we are a string, try to dispatch
    if isinstance(func, str):
        return self._try_aggregate_string_function(func, *args, **kwds)

    # handle ufuncs and lambdas
    if kwds or args and not isinstance(func, np.ufunc):
        def f(x):
            return func(x, *args, **kwds)
    else:
        f = func

    with np.errstate(all='ignore'):
        if isinstance(f, np.ufunc):
            return f(self)

        # row-wise access
        if is_extension_type(self.dtype):
            mapped = self._values.map(f)
        else:
            values = self.astype(object).values
            mapped = lib.map_infer(values, f, convert=convert_dtype)

    if len(mapped) and isinstance(mapped[0], Series):
        from pandas.core.frame import DataFrame
        return DataFrame(mapped.tolist(), index=self.index)
    else:
        return self._constructor(mapped,
                                 index=self.index).__finalize__(self)
[ "def", "apply", "(", "self", ",", "func", ",", "convert_dtype", "=", "True", ",", "args", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "return", "self", ".", "_constructor", "(", "dtype", "=", "self", ".", "dtype", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")", "# dispatch to agg", "if", "isinstance", "(", "func", ",", "(", "list", ",", "dict", ")", ")", ":", "return", "self", ".", "aggregate", "(", "func", ",", "*", "args", ",", "*", "*", "kwds", ")", "# if we are a string, try to dispatch", "if", "isinstance", "(", "func", ",", "str", ")", ":", "return", "self", ".", "_try_aggregate_string_function", "(", "func", ",", "*", "args", ",", "*", "*", "kwds", ")", "# handle ufuncs and lambdas", "if", "kwds", "or", "args", "and", "not", "isinstance", "(", "func", ",", "np", ".", "ufunc", ")", ":", "def", "f", "(", "x", ")", ":", "return", "func", "(", "x", ",", "*", "args", ",", "*", "*", "kwds", ")", "else", ":", "f", "=", "func", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "if", "isinstance", "(", "f", ",", "np", ".", "ufunc", ")", ":", "return", "f", "(", "self", ")", "# row-wise access", "if", "is_extension_type", "(", "self", ".", "dtype", ")", ":", "mapped", "=", "self", ".", "_values", ".", "map", "(", "f", ")", "else", ":", "values", "=", "self", ".", "astype", "(", "object", ")", ".", "values", "mapped", "=", "lib", ".", "map_infer", "(", "values", ",", "f", ",", "convert", "=", "convert_dtype", ")", "if", "len", "(", "mapped", ")", "and", "isinstance", "(", "mapped", "[", "0", "]", ",", "Series", ")", ":", "from", "pandas", ".", "core", ".", "frame", "import", "DataFrame", "return", "DataFrame", "(", "mapped", ".", "tolist", "(", ")", ",", "index", "=", "self", ".", "index", ")", "else", ":", "return", "self", ".", "_constructor", "(", "mapped", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")" ]
[ "Invoke", "function", "on", "values", "of", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3554-L3686
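The Returns section above notes that a Series-returning func yields a DataFrame, but no example shows it. A minimal sketch; the column names are illustrative.

import pandas as pd

s = pd.Series([20, 21, 12], index=['London', 'New York', 'Helsinki'])

# When func returns a Series, apply assembles the results row-wise into
# a DataFrame, one column per index label of the returned Series.
frame = s.apply(lambda x: pd.Series({'celsius': x,
                                     'fahrenheit': x * 9 / 5 + 32}))
print(frame)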
20,302
pandas-dev/pandas
pandas/core/series.py
Series._reduce
python
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
            filter_type=None, **kwds):
    """
    Perform a reduction operation.

    If we have an ndarray as a value, then simply perform the operation,
    otherwise delegate to the object.
    """
    delegate = self._values

    if axis is not None:
        self._get_axis_number(axis)

    if isinstance(delegate, Categorical):
        # TODO deprecate numeric_only argument for Categorical and use
        # skipna as well, see GH25303
        return delegate._reduce(name, numeric_only=numeric_only, **kwds)
    elif isinstance(delegate, ExtensionArray):
        # dispatch to ExtensionArray interface
        return delegate._reduce(name, skipna=skipna, **kwds)
    elif is_datetime64_dtype(delegate):
        # use DatetimeIndex implementation to handle skipna correctly
        delegate = DatetimeIndex(delegate)

    # dispatch to numpy arrays
    elif isinstance(delegate, np.ndarray):
        if numeric_only:
            raise NotImplementedError('Series.{0} does not implement '
                                      'numeric_only.'.format(name))
        with np.errstate(all='ignore'):
            return op(delegate, skipna=skipna, **kwds)

    # TODO(EA) dispatch to Index
    # remove once all internals extension types are
    # moved to ExtensionArrays
    return delegate._reduce(op=op, name=name, axis=axis, skipna=skipna,
                            numeric_only=numeric_only,
                            filter_type=filter_type, **kwds)
[ "def", "_reduce", "(", "self", ",", "op", ",", "name", ",", "axis", "=", "0", ",", "skipna", "=", "True", ",", "numeric_only", "=", "None", ",", "filter_type", "=", "None", ",", "*", "*", "kwds", ")", ":", "delegate", "=", "self", ".", "_values", "if", "axis", "is", "not", "None", ":", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "isinstance", "(", "delegate", ",", "Categorical", ")", ":", "# TODO deprecate numeric_only argument for Categorical and use", "# skipna as well, see GH25303", "return", "delegate", ".", "_reduce", "(", "name", ",", "numeric_only", "=", "numeric_only", ",", "*", "*", "kwds", ")", "elif", "isinstance", "(", "delegate", ",", "ExtensionArray", ")", ":", "# dispatch to ExtensionArray interface", "return", "delegate", ".", "_reduce", "(", "name", ",", "skipna", "=", "skipna", ",", "*", "*", "kwds", ")", "elif", "is_datetime64_dtype", "(", "delegate", ")", ":", "# use DatetimeIndex implementation to handle skipna correctly", "delegate", "=", "DatetimeIndex", "(", "delegate", ")", "# dispatch to numpy arrays", "elif", "isinstance", "(", "delegate", ",", "np", ".", "ndarray", ")", ":", "if", "numeric_only", ":", "raise", "NotImplementedError", "(", "'Series.{0} does not implement '", "'numeric_only.'", ".", "format", "(", "name", ")", ")", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "return", "op", "(", "delegate", ",", "skipna", "=", "skipna", ",", "*", "*", "kwds", ")", "# TODO(EA) dispatch to Index", "# remove once all internals extension types are", "# moved to ExtensionArrays", "return", "delegate", ".", "_reduce", "(", "op", "=", "op", ",", "name", "=", "name", ",", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "numeric_only", "=", "numeric_only", ",", "filter_type", "=", "filter_type", ",", "*", "*", "kwds", ")" ]
[ "Perform", "a", "reduction", "operation", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3688-L3725
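``_reduce`` is internal and carries no doctest; it is reached through the public reduction methods. A minimal sketch of that indirect path, assuming the dispatch described in the code above.

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, np.nan])

# Public reductions such as sum/mean/min funnel through Series._reduce,
# which dispatches on the backing array type (Categorical,
# ExtensionArray, datetime64 values, or plain ndarray).
print(s.sum())               # 3.0 -- NaN skipped, skipna defaults to True
print(s.sum(skipna=False))   # nan -- skipna is forwarded through _reduce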
20,303
pandas-dev/pandas
pandas/core/series.py
Series.rename
python
def rename(self, index=None, **kwargs):
    """
    Alter Series index labels or name.

    Function / dict values must be unique (1-to-1). Labels not contained in
    a dict / Series will be left as-is. Extra labels listed don't throw an
    error.

    Alternatively, change ``Series.name`` with a scalar value.

    See the :ref:`user guide <basics.rename>` for more.

    Parameters
    ----------
    index : scalar, hashable sequence, dict-like or function, optional
        dict-like or functions are transformations to apply to
        the index.
        Scalar or hashable sequence-like will alter the ``Series.name``
        attribute.
    copy : bool, default True
        Whether to copy underlying data.
    inplace : bool, default False
        Whether to return a new Series. If True then value of copy is
        ignored.
    level : int or level name, default None
        In case of a MultiIndex, only rename labels in the specified
        level.

    Returns
    -------
    Series
        Series with index labels or name altered.

    See Also
    --------
    Series.rename_axis : Set the name of the axis.

    Examples
    --------
    >>> s = pd.Series([1, 2, 3])
    >>> s
    0    1
    1    2
    2    3
    dtype: int64
    >>> s.rename("my_name")  # scalar, changes Series.name
    0    1
    1    2
    2    3
    Name: my_name, dtype: int64
    >>> s.rename(lambda x: x ** 2)  # function, changes labels
    0    1
    1    2
    4    3
    dtype: int64
    >>> s.rename({1: 3, 2: 5})  # mapping, changes labels
    0    1
    3    2
    5    3
    dtype: int64
    """
    kwargs['inplace'] = validate_bool_kwarg(kwargs.get('inplace', False),
                                            'inplace')

    non_mapping = is_scalar(index) or (is_list_like(index) and
                                       not is_dict_like(index))
    if non_mapping:
        return self._set_name(index, inplace=kwargs.get('inplace'))
    return super().rename(index=index, **kwargs)
[ "def", "rename", "(", "self", ",", "index", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'inplace'", "]", "=", "validate_bool_kwarg", "(", "kwargs", ".", "get", "(", "'inplace'", ",", "False", ")", ",", "'inplace'", ")", "non_mapping", "=", "is_scalar", "(", "index", ")", "or", "(", "is_list_like", "(", "index", ")", "and", "not", "is_dict_like", "(", "index", ")", ")", "if", "non_mapping", ":", "return", "self", ".", "_set_name", "(", "index", ",", "inplace", "=", "kwargs", ".", "get", "(", "'inplace'", ")", ")", "return", "super", "(", ")", ".", "rename", "(", "index", "=", "index", ",", "*", "*", "kwargs", ")" ]
[ "Alter", "Series", "index", "labels", "or", "name", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3753-L3821
20,304
pandas-dev/pandas
pandas/core/series.py
Series.reindex_axis
python
def reindex_axis(self, labels, axis=0, **kwargs):
    """
    Conform Series to new index with optional filling logic.

    .. deprecated:: 0.21.0
        Use ``Series.reindex`` instead.
    """
    # for compatibility with higher dims
    if axis != 0:
        raise ValueError("cannot reindex series on non-zero axis!")
    msg = ("'.reindex_axis' is deprecated and will be removed in a future "
           "version. Use '.reindex' instead.")
    warnings.warn(msg, FutureWarning, stacklevel=2)

    return self.reindex(index=labels, **kwargs)
[ "def", "reindex_axis", "(", "self", ",", "labels", ",", "axis", "=", "0", ",", "*", "*", "kwargs", ")", ":", "# for compatibility with higher dims", "if", "axis", "!=", "0", ":", "raise", "ValueError", "(", "\"cannot reindex series on non-zero axis!\"", ")", "msg", "=", "(", "\"'.reindex_axis' is deprecated and will be removed in a future \"", "\"version. Use '.reindex' instead.\"", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "reindex", "(", "index", "=", "labels", ",", "*", "*", "kwargs", ")" ]
[ "Conform", "Series", "to", "new", "index", "with", "optional", "filling", "logic", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3940-L3954
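The deprecated method above ships without examples. A minimal migration sketch; the labels are illustrative.

import pandas as pd

s = pd.Series([1, 2, 3], index=['a', 'b', 'c'])

# The deprecated spelling emits a FutureWarning and forwards to reindex:
#   s.reindex_axis(['a', 'c', 'd'])
# The supported equivalent is:
s.reindex(index=['a', 'c', 'd'])   # 'd' is filled with NaN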
20,305
pandas-dev/pandas
pandas/core/series.py
Series.memory_usage
python
def memory_usage(self, index=True, deep=False):
    """
    Return the memory usage of the Series.

    The memory usage can optionally include the contribution of
    the index and of elements of `object` dtype.

    Parameters
    ----------
    index : bool, default True
        Specifies whether to include the memory usage of the Series index.
    deep : bool, default False
        If True, introspect the data deeply by interrogating
        `object` dtypes for system-level memory consumption, and include
        it in the returned value.

    Returns
    -------
    int
        Bytes of memory consumed.

    See Also
    --------
    numpy.ndarray.nbytes : Total bytes consumed by the elements of the
        array.
    DataFrame.memory_usage : Bytes consumed by a DataFrame.

    Examples
    --------
    >>> s = pd.Series(range(3))
    >>> s.memory_usage()
    104

    Not including the index gives the size of the rest of the data, which
    is necessarily smaller:

    >>> s.memory_usage(index=False)
    24

    The memory footprint of `object` values is ignored by default:

    >>> s = pd.Series(["a", "b"])
    >>> s.values
    array(['a', 'b'], dtype=object)
    >>> s.memory_usage()
    96
    >>> s.memory_usage(deep=True)
    212
    """
    v = super().memory_usage(deep=deep)
    if index:
        v += self.index.memory_usage(deep=deep)
    return v
[ "def", "memory_usage", "(", "self", ",", "index", "=", "True", ",", "deep", "=", "False", ")", ":", "v", "=", "super", "(", ")", ".", "memory_usage", "(", "deep", "=", "deep", ")", "if", "index", ":", "v", "+=", "self", ".", "index", ".", "memory_usage", "(", "deep", "=", "deep", ")", "return", "v" ]
[ "Return", "the", "memory", "usage", "of", "the", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L3956-L4008
20,306
pandas-dev/pandas
pandas/core/series.py
Series.isin
python
def isin(self, values):
    """
    Check whether `values` are contained in Series.

    Return a boolean Series showing whether each element in the Series
    matches an element in the passed sequence of `values` exactly.

    Parameters
    ----------
    values : set or list-like
        The sequence of values to test. Passing in a single string will
        raise a ``TypeError``. Instead, turn a single string into a
        list of one element.

        .. versionadded:: 0.18.1

          Support for values as a set.

    Returns
    -------
    Series
        Series of booleans indicating if each element is in values.

    Raises
    ------
    TypeError
      * If `values` is a string

    See Also
    --------
    DataFrame.isin : Equivalent method on DataFrame.

    Examples
    --------
    >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
    ...                'hippo'], name='animal')
    >>> s.isin(['cow', 'lama'])
    0     True
    1     True
    2     True
    3    False
    4     True
    5    False
    Name: animal, dtype: bool

    Passing a single string as ``s.isin('lama')`` will raise an error. Use
    a list of one element instead:

    >>> s.isin(['lama'])
    0     True
    1    False
    2     True
    3    False
    4     True
    5    False
    Name: animal, dtype: bool
    """
    result = algorithms.isin(self, values)
    return self._constructor(result, index=self.index).__finalize__(self)
[ "def", "isin", "(", "self", ",", "values", ")", ":", "result", "=", "algorithms", ".", "isin", "(", "self", ",", "values", ")", "return", "self", ".", "_constructor", "(", "result", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")" ]
[ "Check", "whether", "values", "are", "contained", "in", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L4035-L4093
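The Raises section above documents the single-string TypeError but the examples only state it. A minimal sketch of handling it; the exact error message is not asserted here.

import pandas as pd

s = pd.Series(['lama', 'cow', 'lama'], name='animal')

# A bare string is rejected by the underlying algorithms.isin;
# wrap it in a one-element list instead.
try:
    s.isin('lama')
except TypeError as err:
    print(type(err).__name__)   # TypeError (message wording varies)

s.isin(['lama'])   # element-wise membership test, as documented above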
20,307
pandas-dev/pandas
pandas/core/series.py
Series.between
python
def between(self, left, right, inclusive=True):
    """
    Return boolean Series equivalent to left <= series <= right.

    This function returns a boolean vector containing `True` wherever the
    corresponding Series element is between the boundary values `left` and
    `right`. NA values are treated as `False`.

    Parameters
    ----------
    left : scalar
        Left boundary.
    right : scalar
        Right boundary.
    inclusive : bool, default True
        Include boundaries.

    Returns
    -------
    Series
        Series representing whether each element is between left and
        right (inclusive).

    See Also
    --------
    Series.gt : Greater than of series and other.
    Series.lt : Less than of series and other.

    Notes
    -----
    This function is equivalent to ``(left <= ser) & (ser <= right)``

    Examples
    --------
    >>> s = pd.Series([2, 0, 4, 8, np.nan])

    Boundary values are included by default:

    >>> s.between(1, 4)
    0     True
    1    False
    2     True
    3    False
    4    False
    dtype: bool

    With `inclusive` set to ``False`` boundary values are excluded:

    >>> s.between(1, 4, inclusive=False)
    0     True
    1    False
    2    False
    3    False
    4    False
    dtype: bool

    `left` and `right` can be any scalar value:

    >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve'])
    >>> s.between('Anna', 'Daniel')
    0    False
    1     True
    2     True
    3    False
    dtype: bool
    """
    if inclusive:
        lmask = self >= left
        rmask = self <= right
    else:
        lmask = self > left
        rmask = self < right

    return lmask & rmask
[ "def", "between", "(", "self", ",", "left", ",", "right", ",", "inclusive", "=", "True", ")", ":", "if", "inclusive", ":", "lmask", "=", "self", ">=", "left", "rmask", "=", "self", "<=", "right", "else", ":", "lmask", "=", "self", ">", "left", "rmask", "=", "self", "<", "right", "return", "lmask", "&", "rmask" ]
[ "Return", "boolean", "Series", "equivalent", "to", "left", "<", "=", "series", "<", "=", "right", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L4095-L4168
20,308
pandas-dev/pandas
pandas/core/series.py
Series.dropna
python
def dropna(self, axis=0, inplace=False, **kwargs):
    """
    Return a new Series with missing values removed.

    See the :ref:`User Guide <missing_data>` for more on which values are
    considered missing, and how to work with missing data.

    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        There is only one axis to drop values from.
    inplace : bool, default False
        If True, do operation inplace and return None.
    **kwargs
        Not in use.

    Returns
    -------
    Series
        Series with NA entries dropped from it.

    See Also
    --------
    Series.isna: Indicate missing values.
    Series.notna : Indicate existing (non-missing) values.
    Series.fillna : Replace missing values.
    DataFrame.dropna : Drop rows or columns which contain NA values.
    Index.dropna : Drop missing indices.

    Examples
    --------
    >>> ser = pd.Series([1., 2., np.nan])
    >>> ser
    0    1.0
    1    2.0
    2    NaN
    dtype: float64

    Drop NA values from a Series.

    >>> ser.dropna()
    0    1.0
    1    2.0
    dtype: float64

    Keep the Series with valid entries in the same variable.

    >>> ser.dropna(inplace=True)
    >>> ser
    0    1.0
    1    2.0
    dtype: float64

    Empty strings are not considered NA values. ``None`` is considered an
    NA value.

    >>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay'])
    >>> ser
    0       NaN
    1         2
    2       NaT
    3
    4      None
    5    I stay
    dtype: object
    >>> ser.dropna()
    1         2
    3
    5    I stay
    dtype: object
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    kwargs.pop('how', None)
    if kwargs:
        raise TypeError('dropna() got an unexpected keyword '
                        'argument "{0}"'.format(list(kwargs.keys())[0]))
    # Validate the axis parameter
    self._get_axis_number(axis or 0)

    if self._can_hold_na:
        result = remove_na_arraylike(self)
        if inplace:
            self._update_inplace(result)
        else:
            return result
    else:
        if inplace:
            # do nothing
            pass
        else:
            return self.copy()
[ "def", "dropna", "(", "self", ",", "axis", "=", "0", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "kwargs", ".", "pop", "(", "'how'", ",", "None", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "'dropna() got an unexpected keyword '", "'argument \"{0}\"'", ".", "format", "(", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "[", "0", "]", ")", ")", "# Validate the axis parameter", "self", ".", "_get_axis_number", "(", "axis", "or", "0", ")", "if", "self", ".", "_can_hold_na", ":", "result", "=", "remove_na_arraylike", "(", "self", ")", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "result", ")", "else", ":", "return", "result", "else", ":", "if", "inplace", ":", "# do nothing", "pass", "else", ":", "return", "self", ".", "copy", "(", ")" ]
[ "Return", "a", "new", "Series", "with", "missing", "values", "removed", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L4311-L4401
20,309
pandas-dev/pandas
pandas/core/series.py
Series.valid
python
def valid(self, inplace=False, **kwargs):
    """
    Return Series without null values.

    .. deprecated:: 0.23.0
        Use :meth:`Series.dropna` instead.
    """
    warnings.warn("Method .valid will be removed in a future version. "
                  "Use .dropna instead.", FutureWarning, stacklevel=2)
    return self.dropna(inplace=inplace, **kwargs)
[ "def", "valid", "(", "self", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "\"Method .valid will be removed in a future version. \"", "\"Use .dropna instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "dropna", "(", "inplace", "=", "inplace", ",", "*", "*", "kwargs", ")" ]
[ "Return", "Series", "without", "null", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L4403-L4412
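Like reindex_axis, the deprecated method above has no examples. A minimal migration sketch with illustrative values.

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, 3.0])

# .valid() only warns and delegates, so the supported call is dropna():
#   s.valid()    # FutureWarning, same result as below
s.dropna()       # 0    1.0
                 # 2    3.0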
20,310
pandas-dev/pandas
pandas/core/tools/numeric.py
to_numeric
python
def to_numeric(arg, errors='raise', downcast=None):
    """
    Convert argument to a numeric type.

    The default return dtype is `float64` or `int64`
    depending on the data supplied. Use the `downcast` parameter
    to obtain other dtypes.

    Please note that precision loss may occur if really large numbers
    are passed in. Due to the internal limitations of `ndarray`, if
    numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
    or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
    passed in, it is very likely they will be converted to float so that
    they can be stored in an `ndarray`. These warnings apply similarly to
    `Series` since it internally leverages `ndarray`.

    Parameters
    ----------
    arg : scalar, list, tuple, 1-d array, or Series
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaN
        - If 'ignore', then invalid parsing will return the input
    downcast : {'integer', 'signed', 'unsigned', 'float'}, default None
        If not None, and if the data has been successfully cast to a
        numerical dtype (or if the data was numeric to begin with),
        downcast that resulting data to the smallest numerical dtype
        possible according to the following rules:

        - 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
        - 'unsigned': smallest unsigned int dtype (min.: np.uint8)
        - 'float': smallest float dtype (min.: np.float32)

        As this behaviour is separate from the core conversion to
        numeric values, any errors raised during the downcasting
        will be surfaced regardless of the value of the 'errors' input.

        In addition, downcasting will only occur if the size
        of the resulting data's dtype is strictly larger than
        the dtype it is to be cast to, so if none of the dtypes
        checked satisfy that specification, no downcasting will be
        performed on the data.

        .. versionadded:: 0.19.0

    Returns
    -------
    ret : numeric if parsing succeeded.
        Return type depends on input.  Series if Series, otherwise ndarray.

    See Also
    --------
    DataFrame.astype : Cast argument to a specified dtype.
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    numpy.ndarray.astype : Cast a numpy array to a specified type.

    Examples
    --------
    Take separate series and convert to numeric, coercing when told to

    >>> s = pd.Series(['1.0', '2', -3])
    >>> pd.to_numeric(s)
    0    1.0
    1    2.0
    2   -3.0
    dtype: float64
    >>> pd.to_numeric(s, downcast='float')
    0    1.0
    1    2.0
    2   -3.0
    dtype: float32
    >>> pd.to_numeric(s, downcast='signed')
    0    1
    1    2
    2   -3
    dtype: int8
    >>> s = pd.Series(['apple', '1.0', '2', -3])
    >>> pd.to_numeric(s, errors='ignore')
    0    apple
    1      1.0
    2        2
    3       -3
    dtype: object
    >>> pd.to_numeric(s, errors='coerce')
    0    NaN
    1    1.0
    2    2.0
    3   -3.0
    dtype: float64
    """
    if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'):
        raise ValueError('invalid downcasting method provided')

    is_series = False
    is_index = False
    is_scalars = False

    if isinstance(arg, ABCSeries):
        is_series = True
        values = arg.values
    elif isinstance(arg, ABCIndexClass):
        is_index = True
        values = arg.asi8
        if values is None:
            values = arg.values
    elif isinstance(arg, (list, tuple)):
        values = np.array(arg, dtype='O')
    elif is_scalar(arg):
        if is_decimal(arg):
            return float(arg)
        if is_number(arg):
            return arg
        is_scalars = True
        values = np.array([arg], dtype='O')
    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a list, tuple, 1-d array, or Series')
    else:
        values = arg

    try:
        if is_numeric_dtype(values):
            pass
        elif is_datetime_or_timedelta_dtype(values):
            values = values.astype(np.int64)
        else:
            values = ensure_object(values)
            coerce_numeric = errors not in ('ignore', 'raise')
            values = lib.maybe_convert_numeric(values, set(),
                                               coerce_numeric=coerce_numeric)

    except Exception:
        if errors == 'raise':
            raise

    # attempt downcast only if the data has been successfully converted
    # to a numerical dtype and if a downcast method has been specified
    if downcast is not None and is_numeric_dtype(values):
        typecodes = None

        if downcast in ('integer', 'signed'):
            typecodes = np.typecodes['Integer']
        elif downcast == 'unsigned' and np.min(values) >= 0:
            typecodes = np.typecodes['UnsignedInteger']
        elif downcast == 'float':
            typecodes = np.typecodes['Float']

            # pandas support goes only to np.float32,
            # as float dtypes smaller than that are
            # extremely rare and not well supported
            float_32_char = np.dtype(np.float32).char
            float_32_ind = typecodes.index(float_32_char)
            typecodes = typecodes[float_32_ind:]

        if typecodes is not None:
            # from smallest to largest
            for dtype in typecodes:
                if np.dtype(dtype).itemsize <= values.dtype.itemsize:
                    values = maybe_downcast_to_dtype(values, dtype)

                    # successful conversion
                    if values.dtype == dtype:
                        break

    if is_series:
        return pd.Series(values, index=arg.index, name=arg.name)
    elif is_index:
        # because we want to coerce to numeric if possible,
        # do not use _shallow_copy_with_infer
        return pd.Index(values, name=arg.name)
    elif is_scalars:
        return values[0]
    else:
        return values
[ "def", "to_numeric", "(", "arg", ",", "errors", "=", "'raise'", ",", "downcast", "=", "None", ")", ":", "if", "downcast", "not", "in", "(", "None", ",", "'integer'", ",", "'signed'", ",", "'unsigned'", ",", "'float'", ")", ":", "raise", "ValueError", "(", "'invalid downcasting method provided'", ")", "is_series", "=", "False", "is_index", "=", "False", "is_scalars", "=", "False", "if", "isinstance", "(", "arg", ",", "ABCSeries", ")", ":", "is_series", "=", "True", "values", "=", "arg", ".", "values", "elif", "isinstance", "(", "arg", ",", "ABCIndexClass", ")", ":", "is_index", "=", "True", "values", "=", "arg", ".", "asi8", "if", "values", "is", "None", ":", "values", "=", "arg", ".", "values", "elif", "isinstance", "(", "arg", ",", "(", "list", ",", "tuple", ")", ")", ":", "values", "=", "np", ".", "array", "(", "arg", ",", "dtype", "=", "'O'", ")", "elif", "is_scalar", "(", "arg", ")", ":", "if", "is_decimal", "(", "arg", ")", ":", "return", "float", "(", "arg", ")", "if", "is_number", "(", "arg", ")", ":", "return", "arg", "is_scalars", "=", "True", "values", "=", "np", ".", "array", "(", "[", "arg", "]", ",", "dtype", "=", "'O'", ")", "elif", "getattr", "(", "arg", ",", "'ndim'", ",", "1", ")", ">", "1", ":", "raise", "TypeError", "(", "'arg must be a list, tuple, 1-d array, or Series'", ")", "else", ":", "values", "=", "arg", "try", ":", "if", "is_numeric_dtype", "(", "values", ")", ":", "pass", "elif", "is_datetime_or_timedelta_dtype", "(", "values", ")", ":", "values", "=", "values", ".", "astype", "(", "np", ".", "int64", ")", "else", ":", "values", "=", "ensure_object", "(", "values", ")", "coerce_numeric", "=", "errors", "not", "in", "(", "'ignore'", ",", "'raise'", ")", "values", "=", "lib", ".", "maybe_convert_numeric", "(", "values", ",", "set", "(", ")", ",", "coerce_numeric", "=", "coerce_numeric", ")", "except", "Exception", ":", "if", "errors", "==", "'raise'", ":", "raise", "# attempt downcast only if the data has been successfully converted", "# to a numerical dtype and if a downcast method has been specified", "if", "downcast", "is", "not", "None", "and", "is_numeric_dtype", "(", "values", ")", ":", "typecodes", "=", "None", "if", "downcast", "in", "(", "'integer'", ",", "'signed'", ")", ":", "typecodes", "=", "np", ".", "typecodes", "[", "'Integer'", "]", "elif", "downcast", "==", "'unsigned'", "and", "np", ".", "min", "(", "values", ")", ">=", "0", ":", "typecodes", "=", "np", ".", "typecodes", "[", "'UnsignedInteger'", "]", "elif", "downcast", "==", "'float'", ":", "typecodes", "=", "np", ".", "typecodes", "[", "'Float'", "]", "# pandas support goes only to np.float32,", "# as float dtypes smaller than that are", "# extremely rare and not well supported", "float_32_char", "=", "np", ".", "dtype", "(", "np", ".", "float32", ")", ".", "char", "float_32_ind", "=", "typecodes", ".", "index", "(", "float_32_char", ")", "typecodes", "=", "typecodes", "[", "float_32_ind", ":", "]", "if", "typecodes", "is", "not", "None", ":", "# from smallest to largest", "for", "dtype", "in", "typecodes", ":", "if", "np", ".", "dtype", "(", "dtype", ")", ".", "itemsize", "<=", "values", ".", "dtype", ".", "itemsize", ":", "values", "=", "maybe_downcast_to_dtype", "(", "values", ",", "dtype", ")", "# successful conversion", "if", "values", ".", "dtype", "==", "dtype", ":", "break", "if", "is_series", ":", "return", "pd", ".", "Series", "(", "values", ",", "index", "=", "arg", ".", "index", ",", "name", "=", "arg", ".", "name", ")", "elif", "is_index", ":", "# because we want to coerce to 
numeric if possible,", "# do not use _shallow_copy_with_infer", "return", "pd", ".", "Index", "(", "values", ",", "name", "=", "arg", ".", "name", ")", "elif", "is_scalars", ":", "return", "values", "[", "0", "]", "else", ":", "return", "values" ]
Convert argument to a numeric type. The default return dtype is `float64` or `int64` depending on the data supplied. Use the `downcast` parameter to obtain other dtypes. Please note that precision loss may occur if really large numbers are passed in. Due to the internal limitations of `ndarray`, if numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min) or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are passed in, it is very likely they will be converted to float so that they can be stored in an `ndarray`. These warnings apply similarly to `Series` since it internally leverages `ndarray`. Parameters ---------- arg : scalar, list, tuple, 1-d array, or Series errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaN - If 'ignore', then invalid parsing will return the input downcast : {'integer', 'signed', 'unsigned', 'float'}, default None If not None, and if the data has been successfully cast to a numerical dtype (or if the data was numeric to begin with), downcast that resulting data to the smallest numerical dtype possible according to the following rules: - 'integer' or 'signed': smallest signed int dtype (min.: np.int8) - 'unsigned': smallest unsigned int dtype (min.: np.uint8) - 'float': smallest float dtype (min.: np.float32) As this behaviour is separate from the core conversion to numeric values, any errors raised during the downcasting will be surfaced regardless of the value of the 'errors' input. In addition, downcasting will only occur if the size of the resulting data's dtype is strictly larger than the dtype it is to be cast to, so if none of the dtypes checked satisfy that specification, no downcasting will be performed on the data. .. versionadded:: 0.19.0 Returns ------- ret : numeric if parsing succeeded. Return type depends on input. Series if Series, otherwise ndarray. See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_datetime : Convert argument to datetime. to_timedelta : Convert argument to timedelta. numpy.ndarray.astype : Cast a numpy array to a specified type. Examples -------- Take separate series and convert to numeric, coercing when told to >>> s = pd.Series(['1.0', '2', -3]) >>> pd.to_numeric(s) 0 1.0 1 2.0 2 -3.0 dtype: float64 >>> pd.to_numeric(s, downcast='float') 0 1.0 1 2.0 2 -3.0 dtype: float32 >>> pd.to_numeric(s, downcast='signed') 0 1 1 2 2 -3 dtype: int8 >>> s = pd.Series(['apple', '1.0', '2', -3]) >>> pd.to_numeric(s, errors='ignore') 0 apple 1 1.0 2 2 3 -3 dtype: object >>> pd.to_numeric(s, errors='coerce') 0 NaN 1 1.0 2 2.0 3 -3.0 dtype: float64
[ "Convert", "argument", "to", "a", "numeric", "type", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/numeric.py#L14-L187
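A brief usage sketch of the downcast rules quoted in this record's docstring (illustrative only, using the public pd.to_numeric API):

import pandas as pd

s = pd.Series(["250", "3", "100"])
# All parsed values are >= 0, so the smallest unsigned dtype (uint8) is chosen.
print(pd.to_numeric(s, downcast="unsigned").dtype)  # uint8
# 'float' downcasts no further than float32, as the docstring notes.
print(pd.to_numeric(s, downcast="float").dtype)     # float32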
20,311
pandas-dev/pandas
pandas/core/arrays/sparse.py
_get_fill
def _get_fill(arr: ABCSparseArray) -> np.ndarray: """ Create a 0-dim ndarray containing the fill value Parameters ---------- arr : SparseArray Returns ------- fill_value : ndarray 0-dim ndarray with just the fill value. Notes ----- Coerce fill_value to arr dtype if possible; an int64 SparseArray can have NaN as fill_value if there are no missing values. """ try: return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) except ValueError: return np.asarray(arr.fill_value)
python
def _get_fill(arr: ABCSparseArray) -> np.ndarray: """ Create a 0-dim ndarray containing the fill value Parameters ---------- arr : SparseArray Returns ------- fill_value : ndarray 0-dim ndarray with just the fill value. Notes ----- Coerce fill_value to arr dtype if possible; an int64 SparseArray can have NaN as fill_value if there are no missing values. """ try: return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) except ValueError: return np.asarray(arr.fill_value)
[ "def", "_get_fill", "(", "arr", ":", "ABCSparseArray", ")", "->", "np", ".", "ndarray", ":", "try", ":", "return", "np", ".", "asarray", "(", "arr", ".", "fill_value", ",", "dtype", "=", "arr", ".", "dtype", ".", "subtype", ")", "except", "ValueError", ":", "return", "np", ".", "asarray", "(", "arr", ".", "fill_value", ")" ]
Create a 0-dim ndarray containing the fill value Parameters ---------- arr : SparseArray Returns ------- fill_value : ndarray 0-dim ndarray with just the fill value. Notes ----- Coerce fill_value to arr dtype if possible; an int64 SparseArray can have NaN as fill_value if there are no missing values.
[ "Create", "a", "0", "-", "dim", "ndarray", "containing", "the", "fill", "value" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L386-L407
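A minimal sketch of the coercion the Notes describe, assuming the public pandas.arrays.SparseArray API of this commit; the int64/NaN combination is the case where coercion fails and the fill value is left uncoerced:

import numpy as np
from pandas.arrays import SparseArray

arr = SparseArray([1, 2, 3], fill_value=np.nan)  # int64 subtype, NaN fill
try:
    fill = np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
except ValueError:
    # NaN cannot be cast to int64, so fall back to the uncoerced value.
    fill = np.asarray(arr.fill_value)
print(fill.ndim, fill.dtype)  # 0 float64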
20,312
pandas-dev/pandas
pandas/core/arrays/sparse.py
_sparse_array_op
def _sparse_array_op( left: ABCSparseArray, right: ABCSparseArray, op: Callable, name: str ) -> Any: """ Perform a binary operation between two arrays. Parameters ---------- left : Union[SparseArray, ndarray] right : Union[SparseArray, ndarray] op : Callable The binary operation to perform name str Name of the callable. Returns ------- SparseArray """ if name.startswith('__'): # For lookups in _libs.sparse we need non-dunder op name name = name[2:-2] # dtype used to find corresponding sparse method ltype = left.dtype.subtype rtype = right.dtype.subtype if not is_dtype_equal(ltype, rtype): subtype = find_common_type([ltype, rtype]) ltype = SparseDtype(subtype, left.fill_value) rtype = SparseDtype(subtype, right.fill_value) # TODO(GH-23092): pass copy=False. Need to fix astype_nansafe left = left.astype(ltype) right = right.astype(rtype) dtype = ltype.subtype else: dtype = ltype # dtype the result must have result_dtype = None if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0: with np.errstate(all='ignore'): result = op(left.get_values(), right.get_values()) fill = op(_get_fill(left), _get_fill(right)) if left.sp_index.ngaps == 0: index = left.sp_index else: index = right.sp_index elif left.sp_index.equals(right.sp_index): with np.errstate(all='ignore'): result = op(left.sp_values, right.sp_values) fill = op(_get_fill(left), _get_fill(right)) index = left.sp_index else: if name[0] == 'r': left, right = right, left name = name[1:] if name in ('and', 'or') and dtype == 'bool': opname = 'sparse_{name}_uint8'.format(name=name) # to make template simple, cast here left_sp_values = left.sp_values.view(np.uint8) right_sp_values = right.sp_values.view(np.uint8) result_dtype = np.bool else: opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype) left_sp_values = left.sp_values right_sp_values = right.sp_values sparse_op = getattr(splib, opname) with np.errstate(all='ignore'): result, index, fill = sparse_op( left_sp_values, left.sp_index, left.fill_value, right_sp_values, right.sp_index, right.fill_value) if result_dtype is None: result_dtype = result.dtype return _wrap_result(name, result, index, fill, dtype=result_dtype)
python
def _sparse_array_op( left: ABCSparseArray, right: ABCSparseArray, op: Callable, name: str ) -> Any: """ Perform a binary operation between two arrays. Parameters ---------- left : Union[SparseArray, ndarray] right : Union[SparseArray, ndarray] op : Callable The binary operation to perform name str Name of the callable. Returns ------- SparseArray """ if name.startswith('__'): # For lookups in _libs.sparse we need non-dunder op name name = name[2:-2] # dtype used to find corresponding sparse method ltype = left.dtype.subtype rtype = right.dtype.subtype if not is_dtype_equal(ltype, rtype): subtype = find_common_type([ltype, rtype]) ltype = SparseDtype(subtype, left.fill_value) rtype = SparseDtype(subtype, right.fill_value) # TODO(GH-23092): pass copy=False. Need to fix astype_nansafe left = left.astype(ltype) right = right.astype(rtype) dtype = ltype.subtype else: dtype = ltype # dtype the result must have result_dtype = None if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0: with np.errstate(all='ignore'): result = op(left.get_values(), right.get_values()) fill = op(_get_fill(left), _get_fill(right)) if left.sp_index.ngaps == 0: index = left.sp_index else: index = right.sp_index elif left.sp_index.equals(right.sp_index): with np.errstate(all='ignore'): result = op(left.sp_values, right.sp_values) fill = op(_get_fill(left), _get_fill(right)) index = left.sp_index else: if name[0] == 'r': left, right = right, left name = name[1:] if name in ('and', 'or') and dtype == 'bool': opname = 'sparse_{name}_uint8'.format(name=name) # to make template simple, cast here left_sp_values = left.sp_values.view(np.uint8) right_sp_values = right.sp_values.view(np.uint8) result_dtype = np.bool else: opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype) left_sp_values = left.sp_values right_sp_values = right.sp_values sparse_op = getattr(splib, opname) with np.errstate(all='ignore'): result, index, fill = sparse_op( left_sp_values, left.sp_index, left.fill_value, right_sp_values, right.sp_index, right.fill_value) if result_dtype is None: result_dtype = result.dtype return _wrap_result(name, result, index, fill, dtype=result_dtype)
[ "def", "_sparse_array_op", "(", "left", ":", "ABCSparseArray", ",", "right", ":", "ABCSparseArray", ",", "op", ":", "Callable", ",", "name", ":", "str", ")", "->", "Any", ":", "if", "name", ".", "startswith", "(", "'__'", ")", ":", "# For lookups in _libs.sparse we need non-dunder op name", "name", "=", "name", "[", "2", ":", "-", "2", "]", "# dtype used to find corresponding sparse method", "ltype", "=", "left", ".", "dtype", ".", "subtype", "rtype", "=", "right", ".", "dtype", ".", "subtype", "if", "not", "is_dtype_equal", "(", "ltype", ",", "rtype", ")", ":", "subtype", "=", "find_common_type", "(", "[", "ltype", ",", "rtype", "]", ")", "ltype", "=", "SparseDtype", "(", "subtype", ",", "left", ".", "fill_value", ")", "rtype", "=", "SparseDtype", "(", "subtype", ",", "right", ".", "fill_value", ")", "# TODO(GH-23092): pass copy=False. Need to fix astype_nansafe", "left", "=", "left", ".", "astype", "(", "ltype", ")", "right", "=", "right", ".", "astype", "(", "rtype", ")", "dtype", "=", "ltype", ".", "subtype", "else", ":", "dtype", "=", "ltype", "# dtype the result must have", "result_dtype", "=", "None", "if", "left", ".", "sp_index", ".", "ngaps", "==", "0", "or", "right", ".", "sp_index", ".", "ngaps", "==", "0", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "=", "op", "(", "left", ".", "get_values", "(", ")", ",", "right", ".", "get_values", "(", ")", ")", "fill", "=", "op", "(", "_get_fill", "(", "left", ")", ",", "_get_fill", "(", "right", ")", ")", "if", "left", ".", "sp_index", ".", "ngaps", "==", "0", ":", "index", "=", "left", ".", "sp_index", "else", ":", "index", "=", "right", ".", "sp_index", "elif", "left", ".", "sp_index", ".", "equals", "(", "right", ".", "sp_index", ")", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "=", "op", "(", "left", ".", "sp_values", ",", "right", ".", "sp_values", ")", "fill", "=", "op", "(", "_get_fill", "(", "left", ")", ",", "_get_fill", "(", "right", ")", ")", "index", "=", "left", ".", "sp_index", "else", ":", "if", "name", "[", "0", "]", "==", "'r'", ":", "left", ",", "right", "=", "right", ",", "left", "name", "=", "name", "[", "1", ":", "]", "if", "name", "in", "(", "'and'", ",", "'or'", ")", "and", "dtype", "==", "'bool'", ":", "opname", "=", "'sparse_{name}_uint8'", ".", "format", "(", "name", "=", "name", ")", "# to make template simple, cast here", "left_sp_values", "=", "left", ".", "sp_values", ".", "view", "(", "np", ".", "uint8", ")", "right_sp_values", "=", "right", ".", "sp_values", ".", "view", "(", "np", ".", "uint8", ")", "result_dtype", "=", "np", ".", "bool", "else", ":", "opname", "=", "'sparse_{name}_{dtype}'", ".", "format", "(", "name", "=", "name", ",", "dtype", "=", "dtype", ")", "left_sp_values", "=", "left", ".", "sp_values", "right_sp_values", "=", "right", ".", "sp_values", "sparse_op", "=", "getattr", "(", "splib", ",", "opname", ")", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", ",", "index", ",", "fill", "=", "sparse_op", "(", "left_sp_values", ",", "left", ".", "sp_index", ",", "left", ".", "fill_value", ",", "right_sp_values", ",", "right", ".", "sp_index", ",", "right", ".", "fill_value", ")", "if", "result_dtype", "is", "None", ":", "result_dtype", "=", "result", ".", "dtype", "return", "_wrap_result", "(", "name", ",", "result", ",", "index", ",", "fill", ",", "dtype", "=", "result_dtype", ")" ]
Perform a binary operation between two arrays. Parameters ---------- left : Union[SparseArray, ndarray] right : Union[SparseArray, ndarray] op : Callable The binary operation to perform name : str Name of the callable. Returns ------- SparseArray
[ "Perform", "a", "binary", "operation", "between", "two", "arrays", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L410-L495
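Illustrative only: the equal-sp_index fast path above is what an ordinary binary op between two aligned SparseArrays exercises.

from pandas.arrays import SparseArray

left = SparseArray([0, 0, 1, 2], fill_value=0)
right = SparseArray([0, 0, 10, 20], fill_value=0)
# Matching sparse indices: the op runs on sp_values plus the two fill values.
result = left + right
print(list(result))       # [0, 0, 11, 22]
print(result.fill_value)  # 0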
20,313
pandas-dev/pandas
pandas/core/arrays/sparse.py
_wrap_result
def _wrap_result(name, data, sparse_index, fill_value, dtype=None): """ wrap op result to have correct dtype """ if name.startswith('__'): # e.g. __eq__ --> eq name = name[2:-2] if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'): dtype = np.bool fill_value = lib.item_from_zerodim(fill_value) if is_bool_dtype(dtype): # fill_value may be np.bool_ fill_value = bool(fill_value) return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype)
python
def _wrap_result(name, data, sparse_index, fill_value, dtype=None): """ wrap op result to have correct dtype """ if name.startswith('__'): # e.g. __eq__ --> eq name = name[2:-2] if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'): dtype = np.bool fill_value = lib.item_from_zerodim(fill_value) if is_bool_dtype(dtype): # fill_value may be np.bool_ fill_value = bool(fill_value) return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype)
[ "def", "_wrap_result", "(", "name", ",", "data", ",", "sparse_index", ",", "fill_value", ",", "dtype", "=", "None", ")", ":", "if", "name", ".", "startswith", "(", "'__'", ")", ":", "# e.g. __eq__ --> eq", "name", "=", "name", "[", "2", ":", "-", "2", "]", "if", "name", "in", "(", "'eq'", ",", "'ne'", ",", "'lt'", ",", "'gt'", ",", "'le'", ",", "'ge'", ")", ":", "dtype", "=", "np", ".", "bool", "fill_value", "=", "lib", ".", "item_from_zerodim", "(", "fill_value", ")", "if", "is_bool_dtype", "(", "dtype", ")", ":", "# fill_value may be np.bool_", "fill_value", "=", "bool", "(", "fill_value", ")", "return", "SparseArray", "(", "data", ",", "sparse_index", "=", "sparse_index", ",", "fill_value", "=", "fill_value", ",", "dtype", "=", "dtype", ")" ]
wrap op result to have correct dtype
[ "wrap", "op", "result", "to", "have", "correct", "dtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L498-L517
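A quick illustration of the normalisation above: comparison ops come back as bool-dtype sparse arrays whose fill value has been reduced to a plain bool.

from pandas.arrays import SparseArray

arr = SparseArray([0, 0, 1, 2], fill_value=0)
res = arr > 0
print(res.dtype)       # Sparse[bool, False] -- comparisons force a bool dtype
print(res.fill_value)  # False -- item_from_zerodim + bool() applied above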
20,314
pandas-dev/pandas
pandas/core/arrays/sparse.py
_maybe_to_sparse
def _maybe_to_sparse(array): """ array must be SparseSeries or SparseArray """ if isinstance(array, ABCSparseSeries): array = array.values.copy() return array
python
def _maybe_to_sparse(array): """ array must be SparseSeries or SparseArray """ if isinstance(array, ABCSparseSeries): array = array.values.copy() return array
[ "def", "_maybe_to_sparse", "(", "array", ")", ":", "if", "isinstance", "(", "array", ",", "ABCSparseSeries", ")", ":", "array", "=", "array", ".", "values", ".", "copy", "(", ")", "return", "array" ]
array must be SparseSeries or SparseArray
[ "array", "must", "be", "SparseSeries", "or", "SparseArray" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L1803-L1809
20,315
pandas-dev/pandas
pandas/core/arrays/sparse.py
_sanitize_values
def _sanitize_values(arr): """ return an ndarray for our input, in a platform independent manner """ if hasattr(arr, 'values'): arr = arr.values else: # scalar if is_scalar(arr): arr = [arr] # ndarray if isinstance(arr, np.ndarray): pass elif is_list_like(arr) and len(arr) > 0: arr = maybe_convert_platform(arr) else: arr = np.asarray(arr) return arr
python
def _sanitize_values(arr): """ return an ndarray for our input, in a platform independent manner """ if hasattr(arr, 'values'): arr = arr.values else: # scalar if is_scalar(arr): arr = [arr] # ndarray if isinstance(arr, np.ndarray): pass elif is_list_like(arr) and len(arr) > 0: arr = maybe_convert_platform(arr) else: arr = np.asarray(arr) return arr
[ "def", "_sanitize_values", "(", "arr", ")", ":", "if", "hasattr", "(", "arr", ",", "'values'", ")", ":", "arr", "=", "arr", ".", "values", "else", ":", "# scalar", "if", "is_scalar", "(", "arr", ")", ":", "arr", "=", "[", "arr", "]", "# ndarray", "if", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", ":", "pass", "elif", "is_list_like", "(", "arr", ")", "and", "len", "(", "arr", ")", ">", "0", ":", "arr", "=", "maybe_convert_platform", "(", "arr", ")", "else", ":", "arr", "=", "np", ".", "asarray", "(", "arr", ")", "return", "arr" ]
return an ndarray for our input, in a platform independent manner
[ "return", "an", "ndarray", "for", "our", "input", "in", "a", "platform", "independent", "manner" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L1812-L1836
20,316
pandas-dev/pandas
pandas/core/arrays/sparse.py
make_sparse
def make_sparse(arr, kind='block', fill_value=None, dtype=None, copy=False): """ Convert ndarray to sparse format Parameters ---------- arr : ndarray kind : {'block', 'integer'} fill_value : NaN or another value dtype : np.dtype, optional copy : bool, default False Returns ------- (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar) """ arr = _sanitize_values(arr) if arr.ndim > 1: raise TypeError("expected dimension <= 1 data") if fill_value is None: fill_value = na_value_for_dtype(arr.dtype) if isna(fill_value): mask = notna(arr) else: # cast to object comparison to be safe if is_string_dtype(arr): arr = arr.astype(object) if is_object_dtype(arr.dtype): # element-wise equality check method in numpy doesn't treat # each element type, eg. 0, 0.0, and False are treated as # same. So we have to check the both of its type and value. mask = splib.make_mask_object_ndarray(arr, fill_value) else: mask = arr != fill_value length = len(arr) if length != len(mask): # the arr is a SparseArray indices = mask.sp_index.indices else: indices = mask.nonzero()[0].astype(np.int32) index = _make_index(length, indices, kind) sparsified_values = arr[mask] if dtype is not None: sparsified_values = astype_nansafe(sparsified_values, dtype=dtype) # TODO: copy return sparsified_values, index, fill_value
python
def make_sparse(arr, kind='block', fill_value=None, dtype=None, copy=False): """ Convert ndarray to sparse format Parameters ---------- arr : ndarray kind : {'block', 'integer'} fill_value : NaN or another value dtype : np.dtype, optional copy : bool, default False Returns ------- (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar) """ arr = _sanitize_values(arr) if arr.ndim > 1: raise TypeError("expected dimension <= 1 data") if fill_value is None: fill_value = na_value_for_dtype(arr.dtype) if isna(fill_value): mask = notna(arr) else: # cast to object comparison to be safe if is_string_dtype(arr): arr = arr.astype(object) if is_object_dtype(arr.dtype): # element-wise equality check method in numpy doesn't treat # each element type, eg. 0, 0.0, and False are treated as # same. So we have to check the both of its type and value. mask = splib.make_mask_object_ndarray(arr, fill_value) else: mask = arr != fill_value length = len(arr) if length != len(mask): # the arr is a SparseArray indices = mask.sp_index.indices else: indices = mask.nonzero()[0].astype(np.int32) index = _make_index(length, indices, kind) sparsified_values = arr[mask] if dtype is not None: sparsified_values = astype_nansafe(sparsified_values, dtype=dtype) # TODO: copy return sparsified_values, index, fill_value
[ "def", "make_sparse", "(", "arr", ",", "kind", "=", "'block'", ",", "fill_value", "=", "None", ",", "dtype", "=", "None", ",", "copy", "=", "False", ")", ":", "arr", "=", "_sanitize_values", "(", "arr", ")", "if", "arr", ".", "ndim", ">", "1", ":", "raise", "TypeError", "(", "\"expected dimension <= 1 data\"", ")", "if", "fill_value", "is", "None", ":", "fill_value", "=", "na_value_for_dtype", "(", "arr", ".", "dtype", ")", "if", "isna", "(", "fill_value", ")", ":", "mask", "=", "notna", "(", "arr", ")", "else", ":", "# cast to object comparison to be safe", "if", "is_string_dtype", "(", "arr", ")", ":", "arr", "=", "arr", ".", "astype", "(", "object", ")", "if", "is_object_dtype", "(", "arr", ".", "dtype", ")", ":", "# element-wise equality check method in numpy doesn't treat", "# each element type, eg. 0, 0.0, and False are treated as", "# same. So we have to check the both of its type and value.", "mask", "=", "splib", ".", "make_mask_object_ndarray", "(", "arr", ",", "fill_value", ")", "else", ":", "mask", "=", "arr", "!=", "fill_value", "length", "=", "len", "(", "arr", ")", "if", "length", "!=", "len", "(", "mask", ")", ":", "# the arr is a SparseArray", "indices", "=", "mask", ".", "sp_index", ".", "indices", "else", ":", "indices", "=", "mask", ".", "nonzero", "(", ")", "[", "0", "]", ".", "astype", "(", "np", ".", "int32", ")", "index", "=", "_make_index", "(", "length", ",", "indices", ",", "kind", ")", "sparsified_values", "=", "arr", "[", "mask", "]", "if", "dtype", "is", "not", "None", ":", "sparsified_values", "=", "astype_nansafe", "(", "sparsified_values", ",", "dtype", "=", "dtype", ")", "# TODO: copy", "return", "sparsified_values", ",", "index", ",", "fill_value" ]
Convert ndarray to sparse format Parameters ---------- arr : ndarray kind : {'block', 'integer'} fill_value : NaN or another value dtype : np.dtype, optional copy : bool, default False Returns ------- (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
[ "Convert", "ndarray", "to", "sparse", "format" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L1839-L1891
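The (sparse_values, index, fill_value) decomposition described above is visible on any SparseArray built from dense data; a small sketch using the public constructor:

import numpy as np
from pandas.arrays import SparseArray

dense = np.array([0, 0, 1, 0, 2])
arr = SparseArray(dense, fill_value=0, kind="integer")
print(arr.sp_values)         # [1 2] -- only the non-fill values are stored
print(arr.sp_index.indices)  # [2 4] -- their positions in the dense input
print(arr.fill_value)        # 0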
20,317
pandas-dev/pandas
pandas/core/arrays/sparse.py
SparseArray.density
def density(self): """ The percent of non-``fill_value`` points, as a decimal. Examples -------- >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6 """ r = float(self.sp_index.npoints) / float(self.sp_index.length) return r
python
def density(self): """ The percent of non-``fill_value`` points, as a decimal. Examples -------- >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6 """ r = float(self.sp_index.npoints) / float(self.sp_index.length) return r
[ "def", "density", "(", "self", ")", ":", "r", "=", "float", "(", "self", ".", "sp_index", ".", "npoints", ")", "/", "float", "(", "self", ".", "sp_index", ".", "length", ")", "return", "r" ]
The percent of non-``fill_value`` points, as a decimal. Examples -------- >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0) >>> s.density 0.6
[ "The", "percent", "of", "non", "-", "fill_value", "points", "as", "decimal", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L814-L825
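For the record, the ratio is just npoints / length, which is easy to confirm:

from pandas.arrays import SparseArray

arr = SparseArray([0, 0, 1, 1, 1], fill_value=0)
print(arr.sp_index.npoints, arr.sp_index.length)  # 3 5
print(arr.density)                                # 0.6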
20,318
pandas-dev/pandas
pandas/core/arrays/sparse.py
SparseArray.fillna
def fillna(self, value=None, method=None, limit=None): """ Fill missing values with `value`. Parameters ---------- value : scalar, optional method : str, optional .. warning:: Using 'method' will result in high memory use, as all `fill_value` methods will be converted to an in-memory ndarray limit : int, optional Returns ------- SparseArray Notes ----- When `value` is specified, the result's ``fill_value`` depends on ``self.fill_value``. The goal is to maintain low-memory use. If ``self.fill_value`` is NA, the result dtype will be ``SparseDtype(self.dtype, fill_value=value)``. This will preserve amount of memory used before and after filling. When ``self.fill_value`` is not NA, the result dtype will be ``self.dtype``. Again, this preserves the amount of memory used. """ if ((method is None and value is None) or (method is not None and value is not None)): raise ValueError("Must specify one of 'method' or 'value'.") elif method is not None: msg = "fillna with 'method' requires high memory usage." warnings.warn(msg, PerformanceWarning) filled = interpolate_2d(np.asarray(self), method=method, limit=limit) return type(self)(filled, fill_value=self.fill_value) else: new_values = np.where(isna(self.sp_values), value, self.sp_values) if self._null_fill_value: # This is essentially just updating the dtype. new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) else: new_dtype = self.dtype return self._simple_new(new_values, self._sparse_index, new_dtype)
python
def fillna(self, value=None, method=None, limit=None): """ Fill missing values with `value`. Parameters ---------- value : scalar, optional method : str, optional .. warning:: Using 'method' will result in high memory use, as all `fill_value` methods will be converted to an in-memory ndarray limit : int, optional Returns ------- SparseArray Notes ----- When `value` is specified, the result's ``fill_value`` depends on ``self.fill_value``. The goal is to maintain low-memory use. If ``self.fill_value`` is NA, the result dtype will be ``SparseDtype(self.dtype, fill_value=value)``. This will preserve amount of memory used before and after filling. When ``self.fill_value`` is not NA, the result dtype will be ``self.dtype``. Again, this preserves the amount of memory used. """ if ((method is None and value is None) or (method is not None and value is not None)): raise ValueError("Must specify one of 'method' or 'value'.") elif method is not None: msg = "fillna with 'method' requires high memory usage." warnings.warn(msg, PerformanceWarning) filled = interpolate_2d(np.asarray(self), method=method, limit=limit) return type(self)(filled, fill_value=self.fill_value) else: new_values = np.where(isna(self.sp_values), value, self.sp_values) if self._null_fill_value: # This is essentially just updating the dtype. new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) else: new_dtype = self.dtype return self._simple_new(new_values, self._sparse_index, new_dtype)
[ "def", "fillna", "(", "self", ",", "value", "=", "None", ",", "method", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "(", "(", "method", "is", "None", "and", "value", "is", "None", ")", "or", "(", "method", "is", "not", "None", "and", "value", "is", "not", "None", ")", ")", ":", "raise", "ValueError", "(", "\"Must specify one of 'method' or 'value'.\"", ")", "elif", "method", "is", "not", "None", ":", "msg", "=", "\"fillna with 'method' requires high memory usage.\"", "warnings", ".", "warn", "(", "msg", ",", "PerformanceWarning", ")", "filled", "=", "interpolate_2d", "(", "np", ".", "asarray", "(", "self", ")", ",", "method", "=", "method", ",", "limit", "=", "limit", ")", "return", "type", "(", "self", ")", "(", "filled", ",", "fill_value", "=", "self", ".", "fill_value", ")", "else", ":", "new_values", "=", "np", ".", "where", "(", "isna", "(", "self", ".", "sp_values", ")", ",", "value", ",", "self", ".", "sp_values", ")", "if", "self", ".", "_null_fill_value", ":", "# This is essentially just updating the dtype.", "new_dtype", "=", "SparseDtype", "(", "self", ".", "dtype", ".", "subtype", ",", "fill_value", "=", "value", ")", "else", ":", "new_dtype", "=", "self", ".", "dtype", "return", "self", ".", "_simple_new", "(", "new_values", ",", "self", ".", "_sparse_index", ",", "new_dtype", ")" ]
Fill missing values with `value`. Parameters ---------- value : scalar, optional method : str, optional .. warning:: Using 'method' will result in high memory use, as all `fill_value` methods will be converted to an in-memory ndarray limit : int, optional Returns ------- SparseArray Notes ----- When `value` is specified, the result's ``fill_value`` depends on ``self.fill_value``. The goal is to maintain low-memory use. If ``self.fill_value`` is NA, the result dtype will be ``SparseDtype(self.dtype, fill_value=value)``. This will preserve the amount of memory used before and after filling. When ``self.fill_value`` is not NA, the result dtype will be ``self.dtype``. Again, this preserves the amount of memory used.
[ "Fill", "missing", "values", "with", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L855-L908
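A sketch of the NA-fill-value branch described in the Notes: filling effectively just swaps the dtype's fill value, so the number of stored points stays the same.

import numpy as np
from pandas.arrays import SparseArray

arr = SparseArray([1.0, np.nan, np.nan, 2.0])
filled = arr.fillna(0.0)
print(list(filled))  # [1.0, 0.0, 0.0, 2.0]
print(filled.dtype)  # Sparse[float64, 0.0] -- same two stored points as before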
20,319
pandas-dev/pandas
pandas/core/arrays/sparse.py
SparseArray._first_fill_value_loc
def _first_fill_value_loc(self): """ Get the location of the first missing value. Returns ------- int """ if len(self) == 0 or self.sp_index.npoints == len(self): return -1 indices = self.sp_index.to_int_index().indices if not len(indices) or indices[0] > 0: return 0 diff = indices[1:] - indices[:-1] return np.searchsorted(diff, 2) + 1
python
def _first_fill_value_loc(self): """ Get the location of the first missing value. Returns ------- int """ if len(self) == 0 or self.sp_index.npoints == len(self): return -1 indices = self.sp_index.to_int_index().indices if not len(indices) or indices[0] > 0: return 0 diff = indices[1:] - indices[:-1] return np.searchsorted(diff, 2) + 1
[ "def", "_first_fill_value_loc", "(", "self", ")", ":", "if", "len", "(", "self", ")", "==", "0", "or", "self", ".", "sp_index", ".", "npoints", "==", "len", "(", "self", ")", ":", "return", "-", "1", "indices", "=", "self", ".", "sp_index", ".", "to_int_index", "(", ")", ".", "indices", "if", "not", "len", "(", "indices", ")", "or", "indices", "[", "0", "]", ">", "0", ":", "return", "0", "diff", "=", "indices", "[", "1", ":", "]", "-", "indices", "[", ":", "-", "1", "]", "return", "np", ".", "searchsorted", "(", "diff", ",", "2", ")", "+", "1" ]
Get the location of the first missing value. Returns ------- int
[ "Get", "the", "location", "of", "the", "first", "missing", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L939-L955
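The index arithmetic is easiest to see on a tiny example (a private helper, shown here only to illustrate the searchsorted trick):

import numpy as np
from pandas.arrays import SparseArray

arr = SparseArray([1.0, np.nan, 2.0])
# Stored points sit at positions 0 and 2; the first gap is position 1:
# diff of the indices is [2], searchsorted(diff, 2) == 0, plus 1 -> 1.
print(arr._first_fill_value_loc())  # 1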
20,320
pandas-dev/pandas
pandas/core/arrays/sparse.py
SparseArray.value_counts
def value_counts(self, dropna=True): """ Returns a Series containing counts of unique values. Parameters ---------- dropna : boolean, default True Don't include counts of NaN, even if NaN is in sp_values. Returns ------- counts : Series """ from pandas import Index, Series keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0: if self._null_fill_value and dropna: pass else: if self._null_fill_value: mask = isna(keys) else: mask = keys == self.fill_value if mask.any(): counts[mask] += fcounts else: keys = np.insert(keys, 0, self.fill_value) counts = np.insert(counts, 0, fcounts) if not isinstance(keys, ABCIndexClass): keys = Index(keys) result = Series(counts, index=keys) return result
python
def value_counts(self, dropna=True): """ Returns a Series containing counts of unique values. Parameters ---------- dropna : boolean, default True Don't include counts of NaN, even if NaN is in sp_values. Returns ------- counts : Series """ from pandas import Index, Series keys, counts = algos._value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0: if self._null_fill_value and dropna: pass else: if self._null_fill_value: mask = isna(keys) else: mask = keys == self.fill_value if mask.any(): counts[mask] += fcounts else: keys = np.insert(keys, 0, self.fill_value) counts = np.insert(counts, 0, fcounts) if not isinstance(keys, ABCIndexClass): keys = Index(keys) result = Series(counts, index=keys) return result
[ "def", "value_counts", "(", "self", ",", "dropna", "=", "True", ")", ":", "from", "pandas", "import", "Index", ",", "Series", "keys", ",", "counts", "=", "algos", ".", "_value_counts_arraylike", "(", "self", ".", "sp_values", ",", "dropna", "=", "dropna", ")", "fcounts", "=", "self", ".", "sp_index", ".", "ngaps", "if", "fcounts", ">", "0", ":", "if", "self", ".", "_null_fill_value", "and", "dropna", ":", "pass", "else", ":", "if", "self", ".", "_null_fill_value", ":", "mask", "=", "isna", "(", "keys", ")", "else", ":", "mask", "=", "keys", "==", "self", ".", "fill_value", "if", "mask", ".", "any", "(", ")", ":", "counts", "[", "mask", "]", "+=", "fcounts", "else", ":", "keys", "=", "np", ".", "insert", "(", "keys", ",", "0", ",", "self", ".", "fill_value", ")", "counts", "=", "np", ".", "insert", "(", "counts", ",", "0", ",", "fcounts", ")", "if", "not", "isinstance", "(", "keys", ",", "ABCIndexClass", ")", ":", "keys", "=", "Index", "(", "keys", ")", "result", "=", "Series", "(", "counts", ",", "index", "=", "keys", ")", "return", "result" ]
Returns a Series containing counts of unique values. Parameters ---------- dropna : boolean, default True Don't include counts of NaN, even if NaN is in sp_values. Returns ------- counts : Series
[ "Returns", "a", "Series", "containing", "counts", "of", "unique", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L979-L1015
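Sketch of the fill-count bookkeeping above: the sp_index.ngaps total is folded back in for the fill value, which is inserted first when it is not among the stored values.

from pandas.arrays import SparseArray

arr = SparseArray([0, 0, 1, 1, 2], fill_value=0)
print(arr.value_counts())
# 0    2   <- the two gaps, counted via sp_index.ngaps
# 1    2
# 2    1
# dtype: int64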
20,321
pandas-dev/pandas
pandas/core/arrays/sparse.py
SparseArray.astype
def astype(self, dtype=None, copy=True): """ Change the dtype of a SparseArray. The output will always be a SparseArray. To convert to a dense ndarray with a certain dtype, use :meth:`numpy.asarray`. Parameters ---------- dtype : np.dtype or ExtensionDtype For SparseDtype, this changes the dtype of ``self.sp_values`` and the ``self.fill_value``. For other dtypes, this only changes the dtype of ``self.sp_values``. copy : bool, default True Whether to ensure a copy is made, even if not necessary. Returns ------- SparseArray Examples -------- >>> arr = SparseArray([0, 0, 1, 2]) >>> arr [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) >>> arr.astype(np.dtype('int32')) [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Using a NumPy dtype with a different kind (e.g. float) will coerce just ``self.sp_values``. >>> arr.astype(np.dtype('float64')) ... # doctest: +NORMALIZE_WHITESPACE [0, 0, 1.0, 2.0] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Use a SparseDtype if you wish to be change the fill value as well. >>> arr.astype(SparseDtype("float64", fill_value=np.nan)) ... # doctest: +NORMALIZE_WHITESPACE [nan, nan, 1.0, 2.0] Fill: nan IntIndex Indices: array([2, 3], dtype=int32) """ dtype = self.dtype.update_dtype(dtype) subtype = dtype._subtype_with_str sp_values = astype_nansafe(self.sp_values, subtype, copy=copy) if sp_values is self.sp_values and copy: sp_values = sp_values.copy() return self._simple_new(sp_values, self.sp_index, dtype)
python
def astype(self, dtype=None, copy=True): """ Change the dtype of a SparseArray. The output will always be a SparseArray. To convert to a dense ndarray with a certain dtype, use :meth:`numpy.asarray`. Parameters ---------- dtype : np.dtype or ExtensionDtype For SparseDtype, this changes the dtype of ``self.sp_values`` and the ``self.fill_value``. For other dtypes, this only changes the dtype of ``self.sp_values``. copy : bool, default True Whether to ensure a copy is made, even if not necessary. Returns ------- SparseArray Examples -------- >>> arr = SparseArray([0, 0, 1, 2]) >>> arr [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) >>> arr.astype(np.dtype('int32')) [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Using a NumPy dtype with a different kind (e.g. float) will coerce just ``self.sp_values``. >>> arr.astype(np.dtype('float64')) ... # doctest: +NORMALIZE_WHITESPACE [0, 0, 1.0, 2.0] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Use a SparseDtype if you wish to be change the fill value as well. >>> arr.astype(SparseDtype("float64", fill_value=np.nan)) ... # doctest: +NORMALIZE_WHITESPACE [nan, nan, 1.0, 2.0] Fill: nan IntIndex Indices: array([2, 3], dtype=int32) """ dtype = self.dtype.update_dtype(dtype) subtype = dtype._subtype_with_str sp_values = astype_nansafe(self.sp_values, subtype, copy=copy) if sp_values is self.sp_values and copy: sp_values = sp_values.copy() return self._simple_new(sp_values, self.sp_index, dtype)
[ "def", "astype", "(", "self", ",", "dtype", "=", "None", ",", "copy", "=", "True", ")", ":", "dtype", "=", "self", ".", "dtype", ".", "update_dtype", "(", "dtype", ")", "subtype", "=", "dtype", ".", "_subtype_with_str", "sp_values", "=", "astype_nansafe", "(", "self", ".", "sp_values", ",", "subtype", ",", "copy", "=", "copy", ")", "if", "sp_values", "is", "self", ".", "sp_values", "and", "copy", ":", "sp_values", "=", "sp_values", ".", "copy", "(", ")", "return", "self", ".", "_simple_new", "(", "sp_values", ",", "self", ".", "sp_index", ",", "dtype", ")" ]
Change the dtype of a SparseArray. The output will always be a SparseArray. To convert to a dense ndarray with a certain dtype, use :meth:`numpy.asarray`. Parameters ---------- dtype : np.dtype or ExtensionDtype For SparseDtype, this changes the dtype of ``self.sp_values`` and the ``self.fill_value``. For other dtypes, this only changes the dtype of ``self.sp_values``. copy : bool, default True Whether to ensure a copy is made, even if not necessary. Returns ------- SparseArray Examples -------- >>> arr = SparseArray([0, 0, 1, 2]) >>> arr [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) >>> arr.astype(np.dtype('int32')) [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Using a NumPy dtype with a different kind (e.g. float) will coerce just ``self.sp_values``. >>> arr.astype(np.dtype('float64')) ... # doctest: +NORMALIZE_WHITESPACE [0, 0, 1.0, 2.0] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Use a SparseDtype if you wish to change the fill value as well. >>> arr.astype(SparseDtype("float64", fill_value=np.nan)) ... # doctest: +NORMALIZE_WHITESPACE [nan, nan, 1.0, 2.0] Fill: nan IntIndex Indices: array([2, 3], dtype=int32)
[ "Change", "the", "dtype", "of", "a", "SparseArray", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L1278-L1345
20,322
pandas-dev/pandas
pandas/core/arrays/sparse.py
SparseArray.all
def all(self, axis=None, *args, **kwargs): """ Tests whether all elements evaluate to True Returns ------- all : bool See Also -------- numpy.all """ nv.validate_all(args, kwargs) values = self.sp_values if len(values) != len(self) and not np.all(self.fill_value): return False return values.all()
python
def all(self, axis=None, *args, **kwargs): """ Tests whether all elements evaluate to True Returns ------- all : bool See Also -------- numpy.all """ nv.validate_all(args, kwargs) values = self.sp_values if len(values) != len(self) and not np.all(self.fill_value): return False return values.all()
[ "def", "all", "(", "self", ",", "axis", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_all", "(", "args", ",", "kwargs", ")", "values", "=", "self", ".", "sp_values", "if", "len", "(", "values", ")", "!=", "len", "(", "self", ")", "and", "not", "np", ".", "all", "(", "self", ".", "fill_value", ")", ":", "return", "False", "return", "values", ".", "all", "(", ")" ]
Tests whether all elements evaluate to True Returns ------- all : bool See Also -------- numpy.all
[ "Tests", "whether", "all", "elements", "evaluate", "True" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L1461-L1480
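Tiny check of the short-circuit above: a truthy fill value defers the answer to the stored values.

from pandas.arrays import SparseArray

arr = SparseArray([1, 1, 0], fill_value=1)
# fill_value (1) is truthy, so the result comes down to sp_values ([0]).
print(arr.all())  # False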
20,323
pandas-dev/pandas
pandas/core/arrays/sparse.py
SparseArray.any
def any(self, axis=0, *args, **kwargs): """ Tests whether at least one element evaluates to True Returns ------- any : bool See Also -------- numpy.any """ nv.validate_any(args, kwargs) values = self.sp_values if len(values) != len(self) and np.any(self.fill_value): return True return values.any().item()
python
def any(self, axis=0, *args, **kwargs): """ Tests whether at least one element evaluates to True Returns ------- any : bool See Also -------- numpy.any """ nv.validate_any(args, kwargs) values = self.sp_values if len(values) != len(self) and np.any(self.fill_value): return True return values.any().item()
[ "def", "any", "(", "self", ",", "axis", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_any", "(", "args", ",", "kwargs", ")", "values", "=", "self", ".", "sp_values", "if", "len", "(", "values", ")", "!=", "len", "(", "self", ")", "and", "np", ".", "any", "(", "self", ".", "fill_value", ")", ":", "return", "True", "return", "values", ".", "any", "(", ")", ".", "item", "(", ")" ]
Tests whether at least one element evaluates to True Returns ------- any : bool See Also -------- numpy.any
[ "Tests", "whether", "at", "least", "one", "of", "elements", "evaluate", "True" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/sparse.py#L1482-L1501
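And the mirror-image check for any:

from pandas.arrays import SparseArray

print(SparseArray([0, 0, 0], fill_value=0).any())  # False -- nothing truthy
print(SparseArray([0, 0, 3], fill_value=0).any())  # True  -- stored 3 is truthy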
20,324
pandas-dev/pandas
pandas/core/computation/expr.py
tokenize_string
def tokenize_string(source): """Tokenize a Python source code string. Parameters ---------- source : str A Python source code string """ line_reader = StringIO(source).readline token_generator = tokenize.generate_tokens(line_reader) # Loop over all tokens till a backtick (`) is found. # Then, take all tokens till the next backtick to form a backtick quoted # string. for toknum, tokval, _, _, _ in token_generator: if tokval == '`': tokval = " ".join(it.takewhile( lambda tokval: tokval != '`', map(operator.itemgetter(1), token_generator))) toknum = _BACKTICK_QUOTED_STRING yield toknum, tokval
python
def tokenize_string(source): """Tokenize a Python source code string. Parameters ---------- source : str A Python source code string """ line_reader = StringIO(source).readline token_generator = tokenize.generate_tokens(line_reader) # Loop over all tokens till a backtick (`) is found. # Then, take all tokens till the next backtick to form a backtick quoted # string. for toknum, tokval, _, _, _ in token_generator: if tokval == '`': tokval = " ".join(it.takewhile( lambda tokval: tokval != '`', map(operator.itemgetter(1), token_generator))) toknum = _BACKTICK_QUOTED_STRING yield toknum, tokval
[ "def", "tokenize_string", "(", "source", ")", ":", "line_reader", "=", "StringIO", "(", "source", ")", ".", "readline", "token_generator", "=", "tokenize", ".", "generate_tokens", "(", "line_reader", ")", "# Loop over all tokens till a backtick (`) is found.", "# Then, take all tokens till the next backtick to form a backtick quoted", "# string.", "for", "toknum", ",", "tokval", ",", "_", ",", "_", ",", "_", "in", "token_generator", ":", "if", "tokval", "==", "'`'", ":", "tokval", "=", "\" \"", ".", "join", "(", "it", ".", "takewhile", "(", "lambda", "tokval", ":", "tokval", "!=", "'`'", ",", "map", "(", "operator", ".", "itemgetter", "(", "1", ")", ",", "token_generator", ")", ")", ")", "toknum", "=", "_BACKTICK_QUOTED_STRING", "yield", "toknum", ",", "tokval" ]
Tokenize a Python source code string. Parameters ---------- source : str A Python source code string
[ "Tokenize", "a", "Python", "source", "code", "string", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L29-L49
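The backtick-grouping trick works because CPython's tokenizer emits '`' as an ERRORTOKEN rather than raising; a self-contained sketch of the same technique (the sentinel name and value are mine, not pandas'):

import itertools as it
import operator
import tokenize
from io import StringIO

BACKTICK_QUOTED_STRING = 100  # hypothetical sentinel token number

def tokenize_backticks(source):
    gen = tokenize.generate_tokens(StringIO(source).readline)
    for toknum, tokval, _, _, _ in gen:
        if tokval == '`':
            # Consume tokens up to the closing backtick, joining their values.
            tokval = " ".join(it.takewhile(
                lambda v: v != '`', map(operator.itemgetter(1), gen)))
            toknum = BACKTICK_QUOTED_STRING
        yield toknum, tokval

print(list(tokenize_backticks("`my col` > 5")))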
20,325
pandas-dev/pandas
pandas/core/computation/expr.py
_replace_booleans
def _replace_booleans(tok): """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise precedence is changed to boolean precedence. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values """ toknum, tokval = tok if toknum == tokenize.OP: if tokval == '&': return tokenize.NAME, 'and' elif tokval == '|': return tokenize.NAME, 'or' return toknum, tokval return toknum, tokval
python
def _replace_booleans(tok): """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise precedence is changed to boolean precedence. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values """ toknum, tokval = tok if toknum == tokenize.OP: if tokval == '&': return tokenize.NAME, 'and' elif tokval == '|': return tokenize.NAME, 'or' return toknum, tokval return toknum, tokval
[ "def", "_replace_booleans", "(", "tok", ")", ":", "toknum", ",", "tokval", "=", "tok", "if", "toknum", "==", "tokenize", ".", "OP", ":", "if", "tokval", "==", "'&'", ":", "return", "tokenize", ".", "NAME", ",", "'and'", "elif", "tokval", "==", "'|'", ":", "return", "tokenize", ".", "NAME", ",", "'or'", "return", "toknum", ",", "tokval", "return", "toknum", ",", "tokval" ]
Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise precedence is changed to boolean precedence. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input or token or the replacement values
[ "Replace", "&", "with", "and", "and", "|", "with", "or", "so", "that", "bitwise", "precedence", "is", "changed", "to", "boolean", "precedence", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L70-L91
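Round-tripped through untokenize, the substitution is easy to see (untokenize accepts 2-tuples; spacing in the output may differ slightly):

import tokenize
from io import StringIO

def replace_booleans(tok):
    toknum, tokval = tok
    if toknum == tokenize.OP and tokval == '&':
        return tokenize.NAME, 'and'
    if toknum == tokenize.OP and tokval == '|':
        return tokenize.NAME, 'or'
    return toknum, tokval

toks = tokenize.generate_tokens(StringIO("a & b | c").readline)
print(tokenize.untokenize(replace_booleans((n, v)) for n, v, *_ in toks))
# -> a and b or c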
20,326
pandas-dev/pandas
pandas/core/computation/expr.py
_replace_locals
def _replace_locals(tok): """Replace local variables with a syntactically valid name. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. """ toknum, tokval = tok if toknum == tokenize.OP and tokval == '@': return tokenize.OP, _LOCAL_TAG return toknum, tokval
python
def _replace_locals(tok): """Replace local variables with a syntactically valid name. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. """ toknum, tokval = tok if toknum == tokenize.OP and tokval == '@': return tokenize.OP, _LOCAL_TAG return toknum, tokval
[ "def", "_replace_locals", "(", "tok", ")", ":", "toknum", ",", "tokval", "=", "tok", "if", "toknum", "==", "tokenize", ".", "OP", "and", "tokval", "==", "'@'", ":", "return", "tokenize", ".", "OP", ",", "_LOCAL_TAG", "return", "toknum", ",", "tokval" ]
Replace local variables with a syntactically valid name. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
[ "Replace", "local", "variables", "with", "a", "syntactically", "valid", "name", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L94-L116
20,327
pandas-dev/pandas
pandas/core/computation/expr.py
_clean_spaces_backtick_quoted_names
def _clean_spaces_backtick_quoted_names(tok): """Clean up a column name if surrounded by backticks. Backtick quoted strings are indicated by a certain tokval value. If a string is a backtick quoted token it will be processed by :func:`_remove_spaces_column_name` so that the parser can find this string when the query is executed. See also :meth:`NDFrame._get_space_character_free_column_resolver`. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values """ toknum, tokval = tok if toknum == _BACKTICK_QUOTED_STRING: return tokenize.NAME, _remove_spaces_column_name(tokval) return toknum, tokval
python
def _clean_spaces_backtick_quoted_names(tok): """Clean up a column name if surrounded by backticks. Backtick quoted strings are indicated by a certain tokval value. If a string is a backtick quoted token it will be processed by :func:`_remove_spaces_column_name` so that the parser can find this string when the query is executed. See also :meth:`NDFrame._get_space_character_free_column_resolver`. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values """ toknum, tokval = tok if toknum == _BACKTICK_QUOTED_STRING: return tokenize.NAME, _remove_spaces_column_name(tokval) return toknum, tokval
[ "def", "_clean_spaces_backtick_quoted_names", "(", "tok", ")", ":", "toknum", ",", "tokval", "=", "tok", "if", "toknum", "==", "_BACKTICK_QUOTED_STRING", ":", "return", "tokenize", ".", "NAME", ",", "_remove_spaces_column_name", "(", "tokval", ")", "return", "toknum", ",", "tokval" ]
Clean up a column name if surrounded by backticks. Backtick quoted strings are indicated by a certain tokval value. If a string is a backtick quoted token it will be processed by :func:`_remove_spaces_column_name` so that the parser can find this string when the query is executed. See also :meth:`NDFrame._get_space_character_free_column_resolver`. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values
[ "Clean", "up", "a", "column", "name", "if", "surrounded", "by", "backticks", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L119-L141
20,328
pandas-dev/pandas
pandas/core/computation/expr.py
_preparse
def _preparse(source, f=_compose(_replace_locals, _replace_booleans, _rewrite_assign, _clean_spaces_backtick_quoted_names)): """Compose a collection of tokenization functions Parameters ---------- source : str A Python source code string f : callable This takes a tuple of (toknum, tokval) as its argument and returns a tuple with the same structure but possibly different elements. Defaults to the composition of ``_rewrite_assign``, ``_replace_booleans``, and ``_replace_locals``. Returns ------- s : str Valid Python source code Notes ----- The `f` parameter can be any callable that takes *and* returns input of the form ``(toknum, tokval)``, where ``toknum`` is one of the constants from the ``tokenize`` module and ``tokval`` is a string. """ assert callable(f), 'f must be callable' return tokenize.untokenize(lmap(f, tokenize_string(source)))
python
def _preparse(source, f=_compose(_replace_locals, _replace_booleans, _rewrite_assign, _clean_spaces_backtick_quoted_names)): """Compose a collection of tokenization functions Parameters ---------- source : str A Python source code string f : callable This takes a tuple of (toknum, tokval) as its argument and returns a tuple with the same structure but possibly different elements. Defaults to the composition of ``_rewrite_assign``, ``_replace_booleans``, and ``_replace_locals``. Returns ------- s : str Valid Python source code Notes ----- The `f` parameter can be any callable that takes *and* returns input of the form ``(toknum, tokval)``, where ``toknum`` is one of the constants from the ``tokenize`` module and ``tokval`` is a string. """ assert callable(f), 'f must be callable' return tokenize.untokenize(lmap(f, tokenize_string(source)))
[ "def", "_preparse", "(", "source", ",", "f", "=", "_compose", "(", "_replace_locals", ",", "_replace_booleans", ",", "_rewrite_assign", ",", "_clean_spaces_backtick_quoted_names", ")", ")", ":", "assert", "callable", "(", "f", ")", ",", "'f must be callable'", "return", "tokenize", ".", "untokenize", "(", "lmap", "(", "f", ",", "tokenize_string", "(", "source", ")", ")", ")" ]
Compose a collection of tokenization functions. Parameters ---------- source : str A Python source code string f : callable This takes a tuple of (toknum, tokval) as its argument and returns a tuple with the same structure but possibly different elements. Defaults to the composition of ``_rewrite_assign``, ``_replace_booleans``, ``_replace_locals``, and ``_clean_spaces_backtick_quoted_names``. Returns ------- s : str Valid Python source code Notes ----- The `f` parameter can be any callable that takes *and* returns input of the form ``(toknum, tokval)``, where ``toknum`` is one of the constants from the ``tokenize`` module and ``tokval`` is a string.
[ "Compose", "a", "collection", "of", "tokenization", "functions" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L155-L182
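A hedged sketch of the rewriting performed by the default composition; the import path is a private pandas internal (valid around this commit) and may change without notice.

from pandas.core.computation.expr import _preparse  # private API

# With the default composition, bitwise '&'/'|' become boolean 'and'/'or'
# and a bare '=' is rewritten to '=='; the exact spacing of the output
# depends on tokenize.untokenize.
print(_preparse("a & b | c = 1"))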
20,329
pandas-dev/pandas
pandas/core/computation/expr.py
_filter_nodes
def _filter_nodes(superclass, all_nodes=_all_nodes): """Filter out AST nodes that are subclasses of ``superclass``.""" node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass)) return frozenset(node_names)
python
def _filter_nodes(superclass, all_nodes=_all_nodes): """Filter out AST nodes that are subclasses of ``superclass``.""" node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass)) return frozenset(node_names)
[ "def", "_filter_nodes", "(", "superclass", ",", "all_nodes", "=", "_all_nodes", ")", ":", "node_names", "=", "(", "node", ".", "__name__", "for", "node", "in", "all_nodes", "if", "issubclass", "(", "node", ",", "superclass", ")", ")", "return", "frozenset", "(", "node_names", ")" ]
Filter out AST nodes that are subclasses of ``superclass``.
[ "Filter", "out", "AST", "nodes", "that", "are", "subclasses", "of", "superclass", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L200-L204
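The same filtering can be sketched with the stdlib alone; ``_all_nodes`` in pandas is an internal collection of ``ast`` classes, approximated here by scanning the module.

import ast

# Collect every ast class, then keep the names of those deriving from
# ast.expr -- mirroring _filter_nodes(ast.expr).
all_nodes = frozenset(obj for obj in vars(ast).values()
                      if isinstance(obj, type) and issubclass(obj, ast.AST))
expr_names = frozenset(node.__name__ for node in all_nodes
                       if issubclass(node, ast.expr))
print(sorted(expr_names)[:5])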
20,330
pandas-dev/pandas
pandas/core/computation/expr.py
_node_not_implemented
def _node_not_implemented(node_name, cls): """Return a function that raises a NotImplementedError with a passed node name. """ def f(self, *args, **kwargs): raise NotImplementedError("{name!r} nodes are not " "implemented".format(name=node_name)) return f
python
def _node_not_implemented(node_name, cls): """Return a function that raises a NotImplementedError with a passed node name. """ def f(self, *args, **kwargs): raise NotImplementedError("{name!r} nodes are not " "implemented".format(name=node_name)) return f
[ "def", "_node_not_implemented", "(", "node_name", ",", "cls", ")", ":", "def", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "raise", "NotImplementedError", "(", "\"{name!r} nodes are not \"", "\"implemented\"", ".", "format", "(", "name", "=", "node_name", ")", ")", "return", "f" ]
Return a function that raises a NotImplementedError with a passed node name.
[ "Return", "a", "function", "that", "raises", "a", "NotImplementedError", "with", "a", "passed", "node", "name", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L247-L255
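A usage sketch of the factory on a toy visitor class (private import path, valid around this commit; the ``cls`` argument is unused by the factory itself).

from pandas.core.computation.expr import _node_not_implemented  # private API

class ToyVisitor:
    pass

# Attach a generated visit_* method that always raises.
ToyVisitor.visit_Yield = _node_not_implemented('Yield', ToyVisitor)
try:
    ToyVisitor().visit_Yield()
except NotImplementedError as err:
    print(err)  # 'Yield' nodes are not implemented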
20,331
pandas-dev/pandas
pandas/core/computation/expr.py
disallow
def disallow(nodes): """Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- disallowed : callable """ def disallowed(cls): cls.unsupported_nodes = () for node in nodes: new_method = _node_not_implemented(node, cls) name = 'visit_{node}'.format(node=node) cls.unsupported_nodes += (name,) setattr(cls, name, new_method) return cls return disallowed
python
def disallow(nodes): """Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- disallowed : callable """ def disallowed(cls): cls.unsupported_nodes = () for node in nodes: new_method = _node_not_implemented(node, cls) name = 'visit_{node}'.format(node=node) cls.unsupported_nodes += (name,) setattr(cls, name, new_method) return cls return disallowed
[ "def", "disallow", "(", "nodes", ")", ":", "def", "disallowed", "(", "cls", ")", ":", "cls", ".", "unsupported_nodes", "=", "(", ")", "for", "node", "in", "nodes", ":", "new_method", "=", "_node_not_implemented", "(", "node", ",", "cls", ")", "name", "=", "'visit_{node}'", ".", "format", "(", "node", "=", "node", ")", "cls", ".", "unsupported_nodes", "+=", "(", "name", ",", ")", "setattr", "(", "cls", ",", "name", ",", "new_method", ")", "return", "cls", "return", "disallowed" ]
Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- disallowed : callable
[ "Decorator", "to", "disallow", "certain", "nodes", "from", "parsing", ".", "Raises", "a", "NotImplementedError", "instead", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L258-L274
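A sketch of applying the decorator to a toy class (again a private import path; the class and node names here are illustrative only).

from pandas.core.computation.expr import disallow  # private API

@disallow(('Yield', 'GeneratorExp'))
class ToyVisitor:
    pass

try:
    ToyVisitor().visit_GeneratorExp()
except NotImplementedError as err:
    print(err)  # 'GeneratorExp' nodes are not implemented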
20,332
pandas-dev/pandas
pandas/core/computation/expr.py
_op_maker
def _op_maker(op_class, op_symbol): """Return a function to create an op class with its symbol already passed. Returns ------- f : callable """ def f(self, node, *args, **kwargs): """Return a partial function with an Op subclass with an operator already passed. Returns ------- f : callable """ return partial(op_class, op_symbol, *args, **kwargs) return f
python
def _op_maker(op_class, op_symbol): """Return a function to create an op class with its symbol already passed. Returns ------- f : callable """ def f(self, node, *args, **kwargs): """Return a partial function with an Op subclass with an operator already passed. Returns ------- f : callable """ return partial(op_class, op_symbol, *args, **kwargs) return f
[ "def", "_op_maker", "(", "op_class", ",", "op_symbol", ")", ":", "def", "f", "(", "self", ",", "node", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Return a partial function with an Op subclass with an operator\n already passed.\n\n Returns\n -------\n f : callable\n \"\"\"", "return", "partial", "(", "op_class", ",", "op_symbol", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "f" ]
Return a function to create an op class with its symbol already passed. Returns ------- f : callable
[ "Return", "a", "function", "to", "create", "an", "op", "class", "with", "its", "symbol", "already", "passed", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L277-L294
20,333
pandas-dev/pandas
pandas/core/computation/expr.py
add_ops
def add_ops(op_classes): """Decorator to add default implementation of ops.""" def f(cls): for op_attr_name, op_class in op_classes.items(): ops = getattr(cls, '{name}_ops'.format(name=op_attr_name)) ops_map = getattr(cls, '{name}_op_nodes_map'.format( name=op_attr_name)) for op in ops: op_node = ops_map[op] if op_node is not None: made_op = _op_maker(op_class, op) setattr(cls, 'visit_{node}'.format(node=op_node), made_op) return cls return f
python
def add_ops(op_classes): """Decorator to add default implementation of ops.""" def f(cls): for op_attr_name, op_class in op_classes.items(): ops = getattr(cls, '{name}_ops'.format(name=op_attr_name)) ops_map = getattr(cls, '{name}_op_nodes_map'.format( name=op_attr_name)) for op in ops: op_node = ops_map[op] if op_node is not None: made_op = _op_maker(op_class, op) setattr(cls, 'visit_{node}'.format(node=op_node), made_op) return cls return f
[ "def", "add_ops", "(", "op_classes", ")", ":", "def", "f", "(", "cls", ")", ":", "for", "op_attr_name", ",", "op_class", "in", "op_classes", ".", "items", "(", ")", ":", "ops", "=", "getattr", "(", "cls", ",", "'{name}_ops'", ".", "format", "(", "name", "=", "op_attr_name", ")", ")", "ops_map", "=", "getattr", "(", "cls", ",", "'{name}_op_nodes_map'", ".", "format", "(", "name", "=", "op_attr_name", ")", ")", "for", "op", "in", "ops", ":", "op_node", "=", "ops_map", "[", "op", "]", "if", "op_node", "is", "not", "None", ":", "made_op", "=", "_op_maker", "(", "op_class", ",", "op", ")", "setattr", "(", "cls", ",", "'visit_{node}'", ".", "format", "(", "node", "=", "op_node", ")", ",", "made_op", ")", "return", "cls", "return", "f" ]
Decorator to add default implementation of ops.
[ "Decorator", "to", "add", "default", "implementation", "of", "ops", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L300-L313
20,334
pandas-dev/pandas
pandas/core/computation/expr.py
Expr.names
def names(self): """Get the names in an expression""" if is_term(self.terms): return frozenset([self.terms.name]) return frozenset(term.name for term in com.flatten(self.terms))
python
def names(self): """Get the names in an expression""" if is_term(self.terms): return frozenset([self.terms.name]) return frozenset(term.name for term in com.flatten(self.terms))
[ "def", "names", "(", "self", ")", ":", "if", "is_term", "(", "self", ".", "terms", ")", ":", "return", "frozenset", "(", "[", "self", ".", "terms", ".", "name", "]", ")", "return", "frozenset", "(", "term", ".", "name", "for", "term", "in", "com", ".", "flatten", "(", "self", ".", "terms", ")", ")" ]
Get the names in an expression
[ "Get", "the", "names", "in", "an", "expression" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expr.py#L749-L753
20,335
pandas-dev/pandas
pandas/core/indexes/timedeltas.py
_is_convertible_to_index
def _is_convertible_to_index(other): """ return a boolean indicating whether we can attempt conversion to a TimedeltaIndex """ if isinstance(other, TimedeltaIndex): return True elif (len(other) > 0 and other.inferred_type not in ('floating', 'mixed-integer', 'integer', 'mixed-integer-float', 'mixed')): return True return False
python
def _is_convertible_to_index(other): """ return a boolean indicating whether we can attempt conversion to a TimedeltaIndex """ if isinstance(other, TimedeltaIndex): return True elif (len(other) > 0 and other.inferred_type not in ('floating', 'mixed-integer', 'integer', 'mixed-integer-float', 'mixed')): return True return False
[ "def", "_is_convertible_to_index", "(", "other", ")", ":", "if", "isinstance", "(", "other", ",", "TimedeltaIndex", ")", ":", "return", "True", "elif", "(", "len", "(", "other", ")", ">", "0", "and", "other", ".", "inferred_type", "not", "in", "(", "'floating'", ",", "'mixed-integer'", ",", "'integer'", ",", "'mixed-integer-float'", ",", "'mixed'", ")", ")", ":", "return", "True", "return", "False" ]
return a boolean indicating whether we can attempt conversion to a TimedeltaIndex
[ "return", "a", "boolean", "whether", "I", "can", "attempt", "conversion", "to", "a", "TimedeltaIndex" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/timedeltas.py#L719-L729
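The check keys off ``Index.inferred_type``, which is public, so the decision can be previewed directly:

import pandas as pd

# String-like contents are eligible for TimedeltaIndex conversion;
# purely numeric contents fall in the rejected set above.
print(pd.Index(['1 day', '2 days']).inferred_type)  # 'string'
print(pd.Index([1.0, 2.0]).inferred_type)           # 'floating'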
20,336
pandas-dev/pandas
pandas/core/indexes/timedeltas.py
timedelta_range
def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): """ Return a fixed frequency TimedeltaIndex, with day as the default frequency Parameters ---------- start : string or timedelta-like, default None Left bound for generating timedeltas end : string or timedelta-like, default None Right bound for generating timedeltas periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H' name : string, default None Name of the resulting TimedeltaIndex closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Returns ------- rng : TimedeltaIndex Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.timedelta_range(start='1 day', periods=4) TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``closed`` parameter specifies which endpoint is included. The default behavior is to include both endpoints. >>> pd.timedelta_range(start='1 day', periods=4, closed='right') TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``freq`` parameter specifies the frequency of the TimedeltaIndex. Only fixed frequencies can be passed, non-fixed frequencies such as 'M' (month end) will raise. >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H') TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', '5 days 00:00:00'], dtype='timedelta64[ns]', freq=None) """ if freq is None and com._any_none(periods, start, end): freq = 'D' freq, freq_infer = dtl.maybe_infer_freq(freq) tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed) return TimedeltaIndex._simple_new(tdarr._data, freq=tdarr.freq, name=name)
python
def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): """ Return a fixed frequency TimedeltaIndex, with day as the default frequency Parameters ---------- start : string or timedelta-like, default None Left bound for generating timedeltas end : string or timedelta-like, default None Right bound for generating timedeltas periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H' name : string, default None Name of the resulting TimedeltaIndex closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Returns ------- rng : TimedeltaIndex Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.timedelta_range(start='1 day', periods=4) TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``closed`` parameter specifies which endpoint is included. The default behavior is to include both endpoints. >>> pd.timedelta_range(start='1 day', periods=4, closed='right') TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``freq`` parameter specifies the frequency of the TimedeltaIndex. Only fixed frequencies can be passed, non-fixed frequencies such as 'M' (month end) will raise. >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H') TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', '5 days 00:00:00'], dtype='timedelta64[ns]', freq=None) """ if freq is None and com._any_none(periods, start, end): freq = 'D' freq, freq_infer = dtl.maybe_infer_freq(freq) tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed) return TimedeltaIndex._simple_new(tdarr._data, freq=tdarr.freq, name=name)
[ "def", "timedelta_range", "(", "start", "=", "None", ",", "end", "=", "None", ",", "periods", "=", "None", ",", "freq", "=", "None", ",", "name", "=", "None", ",", "closed", "=", "None", ")", ":", "if", "freq", "is", "None", "and", "com", ".", "_any_none", "(", "periods", ",", "start", ",", "end", ")", ":", "freq", "=", "'D'", "freq", ",", "freq_infer", "=", "dtl", ".", "maybe_infer_freq", "(", "freq", ")", "tdarr", "=", "TimedeltaArray", ".", "_generate_range", "(", "start", ",", "end", ",", "periods", ",", "freq", ",", "closed", "=", "closed", ")", "return", "TimedeltaIndex", ".", "_simple_new", "(", "tdarr", ".", "_data", ",", "freq", "=", "tdarr", ".", "freq", ",", "name", "=", "name", ")" ]
Return a fixed frequency TimedeltaIndex, with day as the default frequency Parameters ---------- start : string or timedelta-like, default None Left bound for generating timedeltas end : string or timedelta-like, default None Right bound for generating timedeltas periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H' name : string, default None Name of the resulting TimedeltaIndex closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Returns ------- rng : TimedeltaIndex Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.timedelta_range(start='1 day', periods=4) TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``closed`` parameter specifies which endpoint is included. The default behavior is to include both endpoints. >>> pd.timedelta_range(start='1 day', periods=4, closed='right') TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``freq`` parameter specifies the frequency of the TimedeltaIndex. Only fixed frequencies can be passed, non-fixed frequencies such as 'M' (month end) will raise. >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H') TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', '5 days 00:00:00'], dtype='timedelta64[ns]', freq=None)
[ "Return", "a", "fixed", "frequency", "TimedeltaIndex", "with", "day", "as", "the", "default", "frequency" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/timedeltas.py#L732-L805
20,337
pandas-dev/pandas
pandas/core/indexes/frozen.py
FrozenList.union
def union(self, other): """ Returns a FrozenList with other concatenated to the end of self. Parameters ---------- other : array-like The array-like whose elements we are concatenating. Returns ------- union : FrozenList The collection with other concatenated to the end of self. """ if isinstance(other, tuple): other = list(other) return type(self)(super().__add__(other))
python
def union(self, other): """ Returns a FrozenList with other concatenated to the end of self. Parameters ---------- other : array-like The array-like whose elements we are concatenating. Returns ------- union : FrozenList The collection with other concatenated to the end of self. """ if isinstance(other, tuple): other = list(other) return type(self)(super().__add__(other))
[ "def", "union", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "tuple", ")", ":", "other", "=", "list", "(", "other", ")", "return", "type", "(", "self", ")", "(", "super", "(", ")", ".", "__add__", "(", "other", ")", ")" ]
Returns a FrozenList with other concatenated to the end of self. Parameters ---------- other : array-like The array-like whose elements we are concatenating. Returns ------- union : FrozenList The collection with other concatenated to the end of self.
[ "Returns", "a", "FrozenList", "with", "other", "concatenated", "to", "the", "end", "of", "self", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/frozen.py#L34-L50
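A small sketch; FrozenList is the immutable list pandas uses for things like ``MultiIndex.names`` (private import path, valid around this commit).

from pandas.core.indexes.frozen import FrozenList

names = FrozenList(['a', 'b'])
# union() returns a new FrozenList rather than mutating in place;
# a tuple argument is accepted and coerced to a list first.
print(names.union(('c',)))  # FrozenList(['a', 'b', 'c'])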
20,338
pandas-dev/pandas
pandas/core/indexes/frozen.py
FrozenList.difference
def difference(self, other): """ Returns a FrozenList with elements from other removed from self. Parameters ---------- other : array-like The array-like whose elements we are removing from self. Returns ------- diff : FrozenList The collection difference between self and other. """ other = set(other) temp = [x for x in self if x not in other] return type(self)(temp)
python
def difference(self, other): """ Returns a FrozenList with elements from other removed from self. Parameters ---------- other : array-like The array-like whose elements we are removing from self. Returns ------- diff : FrozenList The collection difference between self and other. """ other = set(other) temp = [x for x in self if x not in other] return type(self)(temp)
[ "def", "difference", "(", "self", ",", "other", ")", ":", "other", "=", "set", "(", "other", ")", "temp", "=", "[", "x", "for", "x", "in", "self", "if", "x", "not", "in", "other", "]", "return", "type", "(", "self", ")", "(", "temp", ")" ]
Returns a FrozenList with elements from other removed from self. Parameters ---------- other : array-like The array-like whose elements we are removing from self. Returns ------- diff : FrozenList The collection difference between self and other.
[ "Returns", "a", "FrozenList", "with", "elements", "from", "other", "removed", "from", "self", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/frozen.py#L52-L68
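A companion sketch for difference; note that the order of ``self`` is preserved while every occurrence of a removed member goes.

from pandas.core.indexes.frozen import FrozenList  # private API

names = FrozenList(['a', 'b', 'c'])
print(names.difference(['b']))  # FrozenList(['a', 'c'])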
20,339
pandas-dev/pandas
pandas/core/indexes/frozen.py
FrozenNDArray.searchsorted
def searchsorted(self, value, side="left", sorter=None): """ Find indices to insert `value` so as to maintain order. For full documentation, see `numpy.searchsorted` See Also -------- numpy.searchsorted : Equivalent function. """ # We are much more performant if the searched # indexer is the same type as the array. # # This doesn't matter for int64, but DOES # matter for smaller int dtypes. # # xref: https://github.com/numpy/numpy/issues/5370 try: value = self.dtype.type(value) except ValueError: pass return super().searchsorted(value, side=side, sorter=sorter)
python
def searchsorted(self, value, side="left", sorter=None): """ Find indices to insert `value` so as to maintain order. For full documentation, see `numpy.searchsorted` See Also -------- numpy.searchsorted : Equivalent function. """ # We are much more performant if the searched # indexer is the same type as the array. # # This doesn't matter for int64, but DOES # matter for smaller int dtypes. # # xref: https://github.com/numpy/numpy/issues/5370 try: value = self.dtype.type(value) except ValueError: pass return super().searchsorted(value, side=side, sorter=sorter)
[ "def", "searchsorted", "(", "self", ",", "value", ",", "side", "=", "\"left\"", ",", "sorter", "=", "None", ")", ":", "# We are much more performant if the searched", "# indexer is the same type as the array.", "#", "# This doesn't matter for int64, but DOES", "# matter for smaller int dtypes.", "#", "# xref: https://github.com/numpy/numpy/issues/5370", "try", ":", "value", "=", "self", ".", "dtype", ".", "type", "(", "value", ")", "except", "ValueError", ":", "pass", "return", "super", "(", ")", ".", "searchsorted", "(", "value", ",", "side", "=", "side", ",", "sorter", "=", "sorter", ")" ]
Find indices to insert `value` so as to maintain order. For full documentation, see `numpy.searchsorted` See Also -------- numpy.searchsorted : Equivalent function.
[ "Find", "indices", "to", "insert", "value", "so", "as", "to", "maintain", "order", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/frozen.py#L161-L184
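The performance note generalizes to plain numpy and can be sketched without the internal FrozenNDArray:

import numpy as np

arr = np.arange(100, dtype=np.int8)
# Searching with a scalar of the array's own dtype avoids the
# dtype-promotion slow path referenced above (numpy/numpy#5370).
print(arr.searchsorted(arr.dtype.type(7)))  # 7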
20,340
pandas-dev/pandas
pandas/core/internals/construction.py
arrays_to_mgr
def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ # figure out the index, if necessary if index is None: index = extract_index(arrays) else: index = ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays = _homogenize(arrays, index, dtype) # from BlockManager perspective axes = [ensure_index(columns), index] return create_block_manager_from_arrays(arrays, arr_names, axes)
python
def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ # figure out the index, if necessary if index is None: index = extract_index(arrays) else: index = ensure_index(index) # don't force copy because getting jammed in an ndarray anyway arrays = _homogenize(arrays, index, dtype) # from BlockManager perspective axes = [ensure_index(columns), index] return create_block_manager_from_arrays(arrays, arr_names, axes)
[ "def", "arrays_to_mgr", "(", "arrays", ",", "arr_names", ",", "index", ",", "columns", ",", "dtype", "=", "None", ")", ":", "# figure out the index, if necessary", "if", "index", "is", "None", ":", "index", "=", "extract_index", "(", "arrays", ")", "else", ":", "index", "=", "ensure_index", "(", "index", ")", "# don't force copy because getting jammed in an ndarray anyway", "arrays", "=", "_homogenize", "(", "arrays", ",", "index", ",", "dtype", ")", "# from BlockManager perspective", "axes", "=", "[", "ensure_index", "(", "columns", ")", ",", "index", "]", "return", "create_block_manager_from_arrays", "(", "arrays", ",", "arr_names", ",", "axes", ")" ]
Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases.
[ "Segregate", "Series", "based", "on", "type", "and", "coerce", "into", "matrices", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L41-L59
20,341
pandas-dev/pandas
pandas/core/internals/construction.py
masked_rec_array_to_mgr
def masked_rec_array_to_mgr(data, index, columns, dtype, copy): """ Extract from a masked rec array and create the manager. """ # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) if index is None: index = get_names_from_index(fdata) if index is None: index = ibase.default_index(len(data)) index = ensure_index(index) if columns is not None: columns = ensure_index(columns) arrays, arr_columns = to_arrays(fdata, columns) # fill if needed new_arrays = [] for fv, arr, col in zip(fill_value, arrays, arr_columns): mask = ma.getmaskarray(data[col]) if mask.any(): arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) arr[mask] = fv new_arrays.append(arr) # create the manager arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns) if columns is None: columns = arr_columns mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype) if copy: mgr = mgr.copy() return mgr
python
def masked_rec_array_to_mgr(data, index, columns, dtype, copy): """ Extract from a masked rec array and create the manager. """ # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) if index is None: index = get_names_from_index(fdata) if index is None: index = ibase.default_index(len(data)) index = ensure_index(index) if columns is not None: columns = ensure_index(columns) arrays, arr_columns = to_arrays(fdata, columns) # fill if needed new_arrays = [] for fv, arr, col in zip(fill_value, arrays, arr_columns): mask = ma.getmaskarray(data[col]) if mask.any(): arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) arr[mask] = fv new_arrays.append(arr) # create the manager arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns) if columns is None: columns = arr_columns mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype) if copy: mgr = mgr.copy() return mgr
[ "def", "masked_rec_array_to_mgr", "(", "data", ",", "index", ",", "columns", ",", "dtype", ",", "copy", ")", ":", "# essentially process a record array then fill it", "fill_value", "=", "data", ".", "fill_value", "fdata", "=", "ma", ".", "getdata", "(", "data", ")", "if", "index", "is", "None", ":", "index", "=", "get_names_from_index", "(", "fdata", ")", "if", "index", "is", "None", ":", "index", "=", "ibase", ".", "default_index", "(", "len", "(", "data", ")", ")", "index", "=", "ensure_index", "(", "index", ")", "if", "columns", "is", "not", "None", ":", "columns", "=", "ensure_index", "(", "columns", ")", "arrays", ",", "arr_columns", "=", "to_arrays", "(", "fdata", ",", "columns", ")", "# fill if needed", "new_arrays", "=", "[", "]", "for", "fv", ",", "arr", ",", "col", "in", "zip", "(", "fill_value", ",", "arrays", ",", "arr_columns", ")", ":", "mask", "=", "ma", ".", "getmaskarray", "(", "data", "[", "col", "]", ")", "if", "mask", ".", "any", "(", ")", ":", "arr", ",", "fv", "=", "maybe_upcast", "(", "arr", ",", "fill_value", "=", "fv", ",", "copy", "=", "True", ")", "arr", "[", "mask", "]", "=", "fv", "new_arrays", ".", "append", "(", "arr", ")", "# create the manager", "arrays", ",", "arr_columns", "=", "reorder_arrays", "(", "new_arrays", ",", "arr_columns", ",", "columns", ")", "if", "columns", "is", "None", ":", "columns", "=", "arr_columns", "mgr", "=", "arrays_to_mgr", "(", "arrays", ",", "arr_columns", ",", "index", ",", "columns", ",", "dtype", ")", "if", "copy", ":", "mgr", "=", "mgr", ".", "copy", "(", ")", "return", "mgr" ]
Extract from a masked rec array and create the manager.
[ "Extract", "from", "a", "masked", "rec", "array", "and", "create", "the", "manager", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L62-L98
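A public path that exercises this routine is constructing a DataFrame from a masked record array. A sketch, assuming masks created via ``numpy.ma.mrecords`` are carried through (the exact upcast dtype may vary):

import numpy as np
import pandas as pd
from numpy.ma import mrecords

rec = mrecords.fromarrays(
    [np.ma.array([1, 2], mask=[True, False]), [3.0, 4.0]],
    names=['a', 'b'])
# The masked integer cell is filled as NaN, so column 'a' is upcast to float.
print(pd.DataFrame(rec))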
20,342
pandas-dev/pandas
pandas/core/internals/construction.py
init_dict
def init_dict(data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if columns is not None: from pandas.core.series import Series arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isnull() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = object else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else data[k].copy(deep=True) for k in keys] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
python
def init_dict(data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if columns is not None: from pandas.core.series import Series arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isnull() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = object else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else data[k].copy(deep=True) for k in keys] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
[ "def", "init_dict", "(", "data", ",", "index", ",", "columns", ",", "dtype", "=", "None", ")", ":", "if", "columns", "is", "not", "None", ":", "from", "pandas", ".", "core", ".", "series", "import", "Series", "arrays", "=", "Series", "(", "data", ",", "index", "=", "columns", ",", "dtype", "=", "object", ")", "data_names", "=", "arrays", ".", "index", "missing", "=", "arrays", ".", "isnull", "(", ")", "if", "index", "is", "None", ":", "# GH10856", "# raise ValueError if only scalars in dict", "index", "=", "extract_index", "(", "arrays", "[", "~", "missing", "]", ")", "else", ":", "index", "=", "ensure_index", "(", "index", ")", "# no obvious \"empty\" int column", "if", "missing", ".", "any", "(", ")", "and", "not", "is_integer_dtype", "(", "dtype", ")", ":", "if", "dtype", "is", "None", "or", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "flexible", ")", ":", "# GH#1783", "nan_dtype", "=", "object", "else", ":", "nan_dtype", "=", "dtype", "val", "=", "construct_1d_arraylike_from_scalar", "(", "np", ".", "nan", ",", "len", "(", "index", ")", ",", "nan_dtype", ")", "arrays", ".", "loc", "[", "missing", "]", "=", "[", "val", "]", "*", "missing", ".", "sum", "(", ")", "else", ":", "keys", "=", "com", ".", "dict_keys_to_ordered_list", "(", "data", ")", "columns", "=", "data_names", "=", "Index", "(", "keys", ")", "# GH#24096 need copy to be deep for datetime64tz case", "# TODO: See if we can avoid these copies", "arrays", "=", "[", "data", "[", "k", "]", "if", "not", "is_datetime64tz_dtype", "(", "data", "[", "k", "]", ")", "else", "data", "[", "k", "]", ".", "copy", "(", "deep", "=", "True", ")", "for", "k", "in", "keys", "]", "return", "arrays_to_mgr", "(", "arrays", ",", "data_names", ",", "index", ",", "columns", ",", "dtype", "=", "dtype", ")" ]
Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases.
[ "Segregate", "Series", "based", "on", "type", "and", "coerce", "into", "matrices", ".", "Needs", "to", "handle", "a", "lot", "of", "exceptional", "cases", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L168-L204
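The missing-key handling is visible through the public constructor: columns requested but absent from the dict are materialized as all-NaN, per the ``missing`` branch above.

import pandas as pd

# 'b' is not in the dict, so it becomes a NaN-filled column.
print(pd.DataFrame({'a': [1, 2]}, columns=['a', 'b']))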
20,343
pandas-dev/pandas
pandas/core/internals/construction.py
to_arrays
def to_arrays(data, columns, coerce_float=False, dtype=None): """ Return list of arrays, columns. """ if isinstance(data, ABCDataFrame): if columns is not None: arrays = [data._ixs(i, axis=1).values for i, col in enumerate(data.columns) if col in columns] else: columns = data.columns arrays = [data._ixs(i, axis=1).values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: return [[]] * len(columns), columns return [], [] # columns if columns is not None else [] if isinstance(data[0], (list, tuple)): return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], abc.Mapping): return _list_of_dict_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], ABCSeries): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: columns = ibase.default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, ABCSeries, Index)) and data.dtype.names is not None): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns else: # last ditch effort data = lmap(tuple, data) return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
python
def to_arrays(data, columns, coerce_float=False, dtype=None): """ Return list of arrays, columns. """ if isinstance(data, ABCDataFrame): if columns is not None: arrays = [data._ixs(i, axis=1).values for i, col in enumerate(data.columns) if col in columns] else: columns = data.columns arrays = [data._ixs(i, axis=1).values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: return [[]] * len(columns), columns return [], [] # columns if columns is not None else [] if isinstance(data[0], (list, tuple)): return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], abc.Mapping): return _list_of_dict_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], ABCSeries): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: columns = ibase.default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, ABCSeries, Index)) and data.dtype.names is not None): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns else: # last ditch effort data = lmap(tuple, data) return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
[ "def", "to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "False", ",", "dtype", "=", "None", ")", ":", "if", "isinstance", "(", "data", ",", "ABCDataFrame", ")", ":", "if", "columns", "is", "not", "None", ":", "arrays", "=", "[", "data", ".", "_ixs", "(", "i", ",", "axis", "=", "1", ")", ".", "values", "for", "i", ",", "col", "in", "enumerate", "(", "data", ".", "columns", ")", "if", "col", "in", "columns", "]", "else", ":", "columns", "=", "data", ".", "columns", "arrays", "=", "[", "data", ".", "_ixs", "(", "i", ",", "axis", "=", "1", ")", ".", "values", "for", "i", "in", "range", "(", "len", "(", "columns", ")", ")", "]", "return", "arrays", ",", "columns", "if", "not", "len", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "columns", "=", "data", ".", "dtype", ".", "names", "if", "columns", "is", "not", "None", ":", "return", "[", "[", "]", "]", "*", "len", "(", "columns", ")", ",", "columns", "return", "[", "]", ",", "[", "]", "# columns if columns is not None else []", "if", "isinstance", "(", "data", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "_list_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")", "elif", "isinstance", "(", "data", "[", "0", "]", ",", "abc", ".", "Mapping", ")", ":", "return", "_list_of_dict_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")", "elif", "isinstance", "(", "data", "[", "0", "]", ",", "ABCSeries", ")", ":", "return", "_list_of_series_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")", "elif", "isinstance", "(", "data", "[", "0", "]", ",", "Categorical", ")", ":", "if", "columns", "is", "None", ":", "columns", "=", "ibase", ".", "default_index", "(", "len", "(", "data", ")", ")", "return", "data", ",", "columns", "elif", "(", "isinstance", "(", "data", ",", "(", "np", ".", "ndarray", ",", "ABCSeries", ",", "Index", ")", ")", "and", "data", ".", "dtype", ".", "names", "is", "not", "None", ")", ":", "columns", "=", "list", "(", "data", ".", "dtype", ".", "names", ")", "arrays", "=", "[", "data", "[", "k", "]", "for", "k", "in", "columns", "]", "return", "arrays", ",", "columns", "else", ":", "# last ditch effort", "data", "=", "lmap", "(", "tuple", ",", "data", ")", "return", "_list_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")" ]
Return list of arrays, columns.
[ "Return", "list", "of", "arrays", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L374-L418
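``DataFrame.from_records`` is a public entry point into this dispatch; a list of dicts takes the ``abc.Mapping`` branch:

import pandas as pd

# Keys are unioned across records; cells absent from a record become NaN.
print(pd.DataFrame.from_records([{'a': 1}, {'a': 2, 'b': 3}]))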
20,344
pandas-dev/pandas
pandas/core/internals/construction.py
sanitize_index
def sanitize_index(data, index, copy=False): """ Sanitize an index type to return an ndarray of the underlying, pass through a non-Index. """ if index is None: return data if len(data) != len(index): raise ValueError('Length of values does not match length of index') if isinstance(data, ABCIndexClass) and not copy: pass elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)): data = data._values if copy: data = data.copy() elif isinstance(data, np.ndarray): # coerce datetimelike types if data.dtype.kind in ['M', 'm']: data = sanitize_array(data, index, copy=copy) return data
python
def sanitize_index(data, index, copy=False): """ Sanitize an index type to return an ndarray of the underlying, pass through a non-Index. """ if index is None: return data if len(data) != len(index): raise ValueError('Length of values does not match length of index') if isinstance(data, ABCIndexClass) and not copy: pass elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)): data = data._values if copy: data = data.copy() elif isinstance(data, np.ndarray): # coerce datetimelike types if data.dtype.kind in ['M', 'm']: data = sanitize_array(data, index, copy=copy) return data
[ "def", "sanitize_index", "(", "data", ",", "index", ",", "copy", "=", "False", ")", ":", "if", "index", "is", "None", ":", "return", "data", "if", "len", "(", "data", ")", "!=", "len", "(", "index", ")", ":", "raise", "ValueError", "(", "'Length of values does not match length of index'", ")", "if", "isinstance", "(", "data", ",", "ABCIndexClass", ")", "and", "not", "copy", ":", "pass", "elif", "isinstance", "(", "data", ",", "(", "ABCPeriodIndex", ",", "ABCDatetimeIndex", ")", ")", ":", "data", "=", "data", ".", "_values", "if", "copy", ":", "data", "=", "data", ".", "copy", "(", ")", "elif", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "# coerce datetimelike types", "if", "data", ".", "dtype", ".", "kind", "in", "[", "'M'", ",", "'m'", "]", ":", "data", "=", "sanitize_array", "(", "data", ",", "index", ",", "copy", "=", "copy", ")", "return", "data" ]
Sanitize an index type to return an ndarray of the underlying, pass through a non-Index.
[ "Sanitize", "an", "index", "type", "to", "return", "an", "ndarray", "of", "the", "underlying", "pass", "through", "a", "non", "-", "Index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L501-L526
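The length check surfaces through ordinary column assignment; a sketch:

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
try:
    df['b'] = [1, 2, 3]  # routed through sanitize_index
except ValueError as err:
    print(err)  # Length of values does not match length of index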
20,345
pandas-dev/pandas
pandas/core/computation/eval.py
_check_engine
def _check_engine(engine): """Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine """ from pandas.core.computation.check import _NUMEXPR_INSTALLED if engine is None: if _NUMEXPR_INSTALLED: engine = 'numexpr' else: engine = 'python' if engine not in _engines: valid = list(_engines.keys()) raise KeyError('Invalid engine {engine!r} passed, valid engines are' ' {valid}'.format(engine=engine, valid=valid)) # TODO: validate this in a more general way (thinking of future engines # that won't necessarily be import-able) # Could potentially be done on engine instantiation if engine == 'numexpr': if not _NUMEXPR_INSTALLED: raise ImportError("'numexpr' is not installed or an " "unsupported version. Cannot use " "engine='numexpr' for query/eval " "if 'numexpr' is not installed") return engine
python
def _check_engine(engine): """Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine """ from pandas.core.computation.check import _NUMEXPR_INSTALLED if engine is None: if _NUMEXPR_INSTALLED: engine = 'numexpr' else: engine = 'python' if engine not in _engines: valid = list(_engines.keys()) raise KeyError('Invalid engine {engine!r} passed, valid engines are' ' {valid}'.format(engine=engine, valid=valid)) # TODO: validate this in a more general way (thinking of future engines # that won't necessarily be import-able) # Could potentially be done on engine instantiation if engine == 'numexpr': if not _NUMEXPR_INSTALLED: raise ImportError("'numexpr' is not installed or an " "unsupported version. Cannot use " "engine='numexpr' for query/eval " "if 'numexpr' is not installed") return engine
[ "def", "_check_engine", "(", "engine", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "check", "import", "_NUMEXPR_INSTALLED", "if", "engine", "is", "None", ":", "if", "_NUMEXPR_INSTALLED", ":", "engine", "=", "'numexpr'", "else", ":", "engine", "=", "'python'", "if", "engine", "not", "in", "_engines", ":", "valid", "=", "list", "(", "_engines", ".", "keys", "(", ")", ")", "raise", "KeyError", "(", "'Invalid engine {engine!r} passed, valid engines are'", "' {valid}'", ".", "format", "(", "engine", "=", "engine", ",", "valid", "=", "valid", ")", ")", "# TODO: validate this in a more general way (thinking of future engines", "# that won't necessarily be import-able)", "# Could potentially be done on engine instantiation", "if", "engine", "==", "'numexpr'", ":", "if", "not", "_NUMEXPR_INSTALLED", ":", "raise", "ImportError", "(", "\"'numexpr' is not installed or an \"", "\"unsupported version. Cannot use \"", "\"engine='numexpr' for query/eval \"", "\"if 'numexpr' is not installed\"", ")", "return", "engine" ]
Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine
[ "Make", "sure", "a", "valid", "engine", "is", "passed", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/eval.py#L17-L59
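Both outcomes are reachable from the public ``pd.eval``:

import pandas as pd

print(pd.eval('1 + 1', engine='python'))  # 2
try:
    pd.eval('1 + 1', engine='fortran')  # not a registered engine
except KeyError as err:
    print(err)  # Invalid engine 'fortran' passed, valid engines are [...]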
20,346
pandas-dev/pandas
pandas/core/computation/eval.py
_check_parser
def _check_parser(parser): """Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed """ from pandas.core.computation.expr import _parsers if parser not in _parsers: raise KeyError('Invalid parser {parser!r} passed, valid parsers are' ' {valid}'.format(parser=parser, valid=_parsers.keys()))
python
def _check_parser(parser): """Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed """ from pandas.core.computation.expr import _parsers if parser not in _parsers: raise KeyError('Invalid parser {parser!r} passed, valid parsers are' ' {valid}'.format(parser=parser, valid=_parsers.keys()))
[ "def", "_check_parser", "(", "parser", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "expr", "import", "_parsers", "if", "parser", "not", "in", "_parsers", ":", "raise", "KeyError", "(", "'Invalid parser {parser!r} passed, valid parsers are'", "' {valid}'", ".", "format", "(", "parser", "=", "parser", ",", "valid", "=", "_parsers", ".", "keys", "(", ")", ")", ")" ]
Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed
[ "Make", "sure", "a", "valid", "parser", "is", "passed", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/eval.py#L62-L78
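The same pattern applies to the parser argument:

import pandas as pd

try:
    pd.eval('1 + 1', parser='sql')  # not a registered parser
except KeyError as err:
    print(err)  # Invalid parser 'sql' passed, valid parsers are [...]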
20,347
pandas-dev/pandas
pandas/core/computation/eval.py
eval
def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly differently than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all of them have item assignment.
An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. """ from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError("multi-line expressions are only valid in the " "context of data, use DataFrame.eval") ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError("Multi-line expressions are only valid" " if all expressions contain an assignment") elif inplace: raise ValueError("Cannot operate inplace " "if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError("Cannot return a copy of the target") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
python
def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly differently than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all of them have item assignment.
An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method. See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation is recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. """ from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError("multi-line expressions are only valid in the " "context of data, use DataFrame.eval") ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError("Multi-line expressions are only valid" " if all expressions contain an assignment") elif inplace: raise ValueError("Cannot operate inplace " "if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError("Cannot return a copy of the target") else: target = env.target # TypeError is most commonly raised (e.g. int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
[ "def", "eval", "(", "expr", ",", "parser", "=", "'pandas'", ",", "engine", "=", "None", ",", "truediv", "=", "True", ",", "local_dict", "=", "None", ",", "global_dict", "=", "None", ",", "resolvers", "=", "(", ")", ",", "level", "=", "0", ",", "target", "=", "None", ",", "inplace", "=", "False", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "expr", "import", "Expr", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "if", "isinstance", "(", "expr", ",", "str", ")", ":", "_check_expression", "(", "expr", ")", "exprs", "=", "[", "e", ".", "strip", "(", ")", "for", "e", "in", "expr", ".", "splitlines", "(", ")", "if", "e", ".", "strip", "(", ")", "!=", "''", "]", "else", ":", "exprs", "=", "[", "expr", "]", "multi_line", "=", "len", "(", "exprs", ")", ">", "1", "if", "multi_line", "and", "target", "is", "None", ":", "raise", "ValueError", "(", "\"multi-line expressions are only valid in the \"", "\"context of data, use DataFrame.eval\"", ")", "ret", "=", "None", "first_expr", "=", "True", "target_modified", "=", "False", "for", "expr", "in", "exprs", ":", "expr", "=", "_convert_expression", "(", "expr", ")", "engine", "=", "_check_engine", "(", "engine", ")", "_check_parser", "(", "parser", ")", "_check_resolvers", "(", "resolvers", ")", "_check_for_locals", "(", "expr", ",", "level", ",", "parser", ")", "# get our (possibly passed-in) scope", "env", "=", "_ensure_scope", "(", "level", "+", "1", ",", "global_dict", "=", "global_dict", ",", "local_dict", "=", "local_dict", ",", "resolvers", "=", "resolvers", ",", "target", "=", "target", ")", "parsed_expr", "=", "Expr", "(", "expr", ",", "engine", "=", "engine", ",", "parser", "=", "parser", ",", "env", "=", "env", ",", "truediv", "=", "truediv", ")", "# construct the engine and evaluate the parsed expression", "eng", "=", "_engines", "[", "engine", "]", "eng_inst", "=", "eng", "(", "parsed_expr", ")", "ret", "=", "eng_inst", ".", "evaluate", "(", ")", "if", "parsed_expr", ".", "assigner", "is", "None", ":", "if", "multi_line", ":", "raise", "ValueError", "(", "\"Multi-line expressions are only valid\"", "\" if all expressions contain an assignment\"", ")", "elif", "inplace", ":", "raise", "ValueError", "(", "\"Cannot operate inplace \"", "\"if there is no assignment\"", ")", "# assign if needed", "assigner", "=", "parsed_expr", ".", "assigner", "if", "env", ".", "target", "is", "not", "None", "and", "assigner", "is", "not", "None", ":", "target_modified", "=", "True", "# if returning a copy, copy only on the first assignment", "if", "not", "inplace", "and", "first_expr", ":", "try", ":", "target", "=", "env", ".", "target", ".", "copy", "(", ")", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"Cannot return a copy of the target\"", ")", "else", ":", "target", "=", "env", ".", "target", "# TypeError is most commonly raised (e.g. int, list), but you", "# get IndexError if you try to do this assignment on np.ndarray.", "# we will ignore numpy warnings here; e.g. 
if trying", "# to use a non-numeric indexer", "try", ":", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", ":", "# TODO: Filter the warnings we actually care about here.", "target", "[", "assigner", "]", "=", "ret", "except", "(", "TypeError", ",", "IndexError", ")", ":", "raise", "ValueError", "(", "\"Cannot assign expression output to target\"", ")", "if", "not", "resolvers", ":", "resolvers", "=", "(", "{", "assigner", ":", "ret", "}", ",", ")", "else", ":", "# existing resolver needs updated to handle", "# case of mutating existing column in copy", "for", "resolver", "in", "resolvers", ":", "if", "assigner", "in", "resolver", ":", "resolver", "[", "assigner", "]", "=", "ret", "break", "else", ":", "resolvers", "+=", "(", "{", "assigner", ":", "ret", "}", ",", ")", "ret", "=", "None", "first_expr", "=", "False", "# We want to exclude `inplace=None` as being False.", "if", "inplace", "is", "False", ":", "return", "target", "if", "target_modified", "else", "ret" ]
Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly differently from standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3. local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all of its lines contain an assignment.
An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method. See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation is recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details.
[ "Evaluate", "a", "Python", "expression", "as", "a", "string", "using", "various", "backends", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/eval.py#L155-L350
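An illustrative usage sketch for the eval record above; the frame `df`, its column names, and the expected output are invented for this example:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# A single expression returns the computed result directly.
total = pd.eval("df.a + df.b")

# Multi-line expressions need a target and an assignment on every line;
# with inplace=False a mutated copy of `df` is returned and `df` is untouched.
out = pd.eval("c = df.a + df.b\nd = c - df.b", target=df, inplace=False)
print(list(out.columns))  # expected: ['a', 'b', 'c', 'd']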
20,348
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.from_arrays
def from_arrays(cls, arrays, sortorder=None, names=None): """ Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ error_msg = "Input must be a list / sequence of array-likes." if not is_list_like(arrays): raise TypeError(error_msg) elif is_iterator(arrays): arrays = list(arrays) # Check if elements of array are list-like for array in arrays: if not is_list_like(array): raise TypeError(error_msg) # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError('all arrays must be same length') from pandas.core.arrays.categorical import _factorize_from_iterables codes, levels = _factorize_from_iterables(arrays) if names is None: names = [getattr(arr, "name", None) for arr in arrays] return MultiIndex(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False)
python
def from_arrays(cls, arrays, sortorder=None, names=None): """ Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ error_msg = "Input must be a list / sequence of array-likes." if not is_list_like(arrays): raise TypeError(error_msg) elif is_iterator(arrays): arrays = list(arrays) # Check if elements of array are list-like for array in arrays: if not is_list_like(array): raise TypeError(error_msg) # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError('all arrays must be same length') from pandas.core.arrays.categorical import _factorize_from_iterables codes, levels = _factorize_from_iterables(arrays) if names is None: names = [getattr(arr, "name", None) for arr in arrays] return MultiIndex(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False)
[ "def", "from_arrays", "(", "cls", ",", "arrays", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "error_msg", "=", "\"Input must be a list / sequence of array-likes.\"", "if", "not", "is_list_like", "(", "arrays", ")", ":", "raise", "TypeError", "(", "error_msg", ")", "elif", "is_iterator", "(", "arrays", ")", ":", "arrays", "=", "list", "(", "arrays", ")", "# Check if elements of array are list-like", "for", "array", "in", "arrays", ":", "if", "not", "is_list_like", "(", "array", ")", ":", "raise", "TypeError", "(", "error_msg", ")", "# Check if lengths of all arrays are equal or not,", "# raise ValueError, if not", "for", "i", "in", "range", "(", "1", ",", "len", "(", "arrays", ")", ")", ":", "if", "len", "(", "arrays", "[", "i", "]", ")", "!=", "len", "(", "arrays", "[", "i", "-", "1", "]", ")", ":", "raise", "ValueError", "(", "'all arrays must be same length'", ")", "from", "pandas", ".", "core", ".", "arrays", ".", "categorical", "import", "_factorize_from_iterables", "codes", ",", "levels", "=", "_factorize_from_iterables", "(", "arrays", ")", "if", "names", "is", "None", ":", "names", "=", "[", "getattr", "(", "arr", ",", "\"name\"", ",", "None", ")", "for", "arr", "in", "arrays", "]", "return", "MultiIndex", "(", "levels", "=", "levels", ",", "codes", "=", "codes", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ",", "verify_integrity", "=", "False", ")" ]
Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color'])
[ "Convert", "arrays", "to", "MultiIndex", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L292-L350
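An illustrative sketch for from_arrays (data invented): per the `getattr(arr, "name", None)` fallback in the code above, level names are picked up from Series names when `names` is omitted.

import pandas as pd

number = pd.Series([1, 1, 2, 2], name='number')
color = pd.Series(['red', 'blue', 'red', 'blue'], name='color')

# names=None, so each array's .name attribute supplies the level name
mi = pd.MultiIndex.from_arrays([number, color])
print(mi.names)  # expected: ['number', 'color']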
20,349
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.from_tuples
def from_tuples(cls, tuples, sortorder=None, names=None): """ Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ if not is_list_like(tuples): raise TypeError('Input must be a list / sequence of tuple-likes.') elif is_iterator(tuples): tuples = list(tuples) if len(tuples) == 0: if names is None: msg = 'Cannot infer number of levels from empty list' raise TypeError(msg) arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = tuples._values arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrays = lzip(*tuples) return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
python
def from_tuples(cls, tuples, sortorder=None, names=None): """ Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ if not is_list_like(tuples): raise TypeError('Input must be a list / sequence of tuple-likes.') elif is_iterator(tuples): tuples = list(tuples) if len(tuples) == 0: if names is None: msg = 'Cannot infer number of levels from empty list' raise TypeError(msg) arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = tuples._values arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrays = lzip(*tuples) return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
[ "def", "from_tuples", "(", "cls", ",", "tuples", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "if", "not", "is_list_like", "(", "tuples", ")", ":", "raise", "TypeError", "(", "'Input must be a list / sequence of tuple-likes.'", ")", "elif", "is_iterator", "(", "tuples", ")", ":", "tuples", "=", "list", "(", "tuples", ")", "if", "len", "(", "tuples", ")", "==", "0", ":", "if", "names", "is", "None", ":", "msg", "=", "'Cannot infer number of levels from empty list'", "raise", "TypeError", "(", "msg", ")", "arrays", "=", "[", "[", "]", "]", "*", "len", "(", "names", ")", "elif", "isinstance", "(", "tuples", ",", "(", "np", ".", "ndarray", ",", "Index", ")", ")", ":", "if", "isinstance", "(", "tuples", ",", "Index", ")", ":", "tuples", "=", "tuples", ".", "_values", "arrays", "=", "list", "(", "lib", ".", "tuples_to_object_array", "(", "tuples", ")", ".", "T", ")", "elif", "isinstance", "(", "tuples", ",", "list", ")", ":", "arrays", "=", "list", "(", "lib", ".", "to_object_array_tuples", "(", "tuples", ")", ".", "T", ")", "else", ":", "arrays", "=", "lzip", "(", "*", "tuples", ")", "return", "MultiIndex", ".", "from_arrays", "(", "arrays", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ")" ]
Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color'])
[ "Convert", "list", "of", "tuples", "to", "MultiIndex", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L353-L407
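An illustrative sketch for from_tuples (data invented): the code above materializes iterators first, and accepts an empty input only when `names` pins down the number of levels.

import pandas as pd

tuples = [(1, 'red'), (1, 'blue'), (2, 'red'), (2, 'blue')]
mi = pd.MultiIndex.from_tuples(iter(tuples), names=('number', 'color'))

# Empty input works only because `names` fixes the level count.
empty = pd.MultiIndex.from_tuples([], names=('number', 'color'))
print(len(empty))  # expected: 0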
20,350
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.from_product
def from_product(cls, iterables, sortorder=None, names=None): """ Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color']) """ from pandas.core.arrays.categorical import _factorize_from_iterables from pandas.core.reshape.util import cartesian_product if not is_list_like(iterables): raise TypeError("Input must be a list / sequence of iterables.") elif is_iterator(iterables): iterables = list(iterables) codes, levels = _factorize_from_iterables(iterables) codes = cartesian_product(codes) return MultiIndex(levels, codes, sortorder=sortorder, names=names)
python
def from_product(cls, iterables, sortorder=None, names=None): """ Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color']) """ from pandas.core.arrays.categorical import _factorize_from_iterables from pandas.core.reshape.util import cartesian_product if not is_list_like(iterables): raise TypeError("Input must be a list / sequence of iterables.") elif is_iterator(iterables): iterables = list(iterables) codes, levels = _factorize_from_iterables(iterables) codes = cartesian_product(codes) return MultiIndex(levels, codes, sortorder=sortorder, names=names)
[ "def", "from_product", "(", "cls", ",", "iterables", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "arrays", ".", "categorical", "import", "_factorize_from_iterables", "from", "pandas", ".", "core", ".", "reshape", ".", "util", "import", "cartesian_product", "if", "not", "is_list_like", "(", "iterables", ")", ":", "raise", "TypeError", "(", "\"Input must be a list / sequence of iterables.\"", ")", "elif", "is_iterator", "(", "iterables", ")", ":", "iterables", "=", "list", "(", "iterables", ")", "codes", ",", "levels", "=", "_factorize_from_iterables", "(", "iterables", ")", "codes", "=", "cartesian_product", "(", "codes", ")", "return", "MultiIndex", "(", "levels", ",", "codes", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ")" ]
Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color'])
[ "Make", "a", "MultiIndex", "from", "the", "cartesian", "product", "of", "multiple", "iterables", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L410-L454
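An illustrative sketch for from_product (values invented): building the full cartesian grid is a common way to reindex sparse data onto every combination.

import pandas as pd

full = pd.MultiIndex.from_product([[2018, 2019], ['q1', 'q2']],
                                  names=['year', 'quarter'])
sparse = pd.Series([1.0, 2.0],
                   index=pd.MultiIndex.from_tuples([(2018, 'q1'), (2019, 'q2')],
                                                   names=['year', 'quarter']))
# Missing grid cells become NaN after reindexing onto the full product.
print(sparse.reindex(full))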
20,351
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.from_frame
def from_frame(cls, df, sortorder=None, names=None): """ Make a MultiIndex from a DataFrame. .. versionadded:: 0.24.0 Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation']) """ if not isinstance(df, ABCDataFrame): raise TypeError("Input must be a DataFrame") column_names, columns = lzip(*df.iteritems()) names = column_names if names is None else names return cls.from_arrays(columns, sortorder=sortorder, names=names)
python
def from_frame(cls, df, sortorder=None, names=None): """ Make a MultiIndex from a DataFrame. .. versionadded:: 0.24.0 Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation']) """ if not isinstance(df, ABCDataFrame): raise TypeError("Input must be a DataFrame") column_names, columns = lzip(*df.iteritems()) names = column_names if names is None else names return cls.from_arrays(columns, sortorder=sortorder, names=names)
[ "def", "from_frame", "(", "cls", ",", "df", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "if", "not", "isinstance", "(", "df", ",", "ABCDataFrame", ")", ":", "raise", "TypeError", "(", "\"Input must be a DataFrame\"", ")", "column_names", ",", "columns", "=", "lzip", "(", "*", "df", ".", "iteritems", "(", ")", ")", "names", "=", "column_names", "if", "names", "is", "None", "else", "names", "return", "cls", ".", "from_arrays", "(", "columns", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ")" ]
Make a MultiIndex from a DataFrame. .. versionadded:: 0.24.0 Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation'])
[ "Make", "a", "MultiIndex", "from", "a", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L457-L516
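An illustrative sketch for from_frame (data invented): column names become level names by default, and the round trip back through to_frame is assumed here for contrast.

import pandas as pd

df = pd.DataFrame({'state': ['HI', 'HI', 'NJ'],
                   'observation': ['Temp', 'Precip', 'Temp']})
mi = pd.MultiIndex.from_frame(df)
print(mi.names)  # expected: ['state', 'observation']

# Assumed round trip: recover the original columns as a plain frame.
df_again = mi.to_frame(index=False)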
20,352
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.set_levels
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True): """ Set new levels on MultiIndex. Defaults to returning a new index. Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[['a', 'b'], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) """ if is_list_like(levels) and not isinstance(levels, Index): levels = list(levels) if level is not None and not is_list_like(level): if not is_list_like(levels): raise TypeError("Levels must be list-like") if is_list_like(levels[0]): raise TypeError("Levels must be list-like") level = [level] levels = [levels] elif level is None or is_list_like(level): if not is_list_like(levels) or not is_list_like(levels[0]): raise TypeError("Levels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_levels(levels, level=level, validate=True, verify_integrity=verify_integrity) if not inplace: return idx
python
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True): """ Set new levels on MultiIndex. Defaults to returning a new index. Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[['a', 'b'], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) """ if is_list_like(levels) and not isinstance(levels, Index): levels = list(levels) if level is not None and not is_list_like(level): if not is_list_like(levels): raise TypeError("Levels must be list-like") if is_list_like(levels[0]): raise TypeError("Levels must be list-like") level = [level] levels = [levels] elif level is None or is_list_like(level): if not is_list_like(levels) or not is_list_like(levels[0]): raise TypeError("Levels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_levels(levels, level=level, validate=True, verify_integrity=verify_integrity) if not inplace: return idx
[ "def", "set_levels", "(", "self", ",", "levels", ",", "level", "=", "None", ",", "inplace", "=", "False", ",", "verify_integrity", "=", "True", ")", ":", "if", "is_list_like", "(", "levels", ")", "and", "not", "isinstance", "(", "levels", ",", "Index", ")", ":", "levels", "=", "list", "(", "levels", ")", "if", "level", "is", "not", "None", "and", "not", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "levels", ")", ":", "raise", "TypeError", "(", "\"Levels must be list-like\"", ")", "if", "is_list_like", "(", "levels", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Levels must be list-like\"", ")", "level", "=", "[", "level", "]", "levels", "=", "[", "levels", "]", "elif", "level", "is", "None", "or", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "levels", ")", "or", "not", "is_list_like", "(", "levels", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Levels must be list of lists-like\"", ")", "if", "inplace", ":", "idx", "=", "self", "else", ":", "idx", "=", "self", ".", "_shallow_copy", "(", ")", "idx", ".", "_reset_identity", "(", ")", "idx", ".", "_set_levels", "(", "levels", ",", "level", "=", "level", ",", "validate", "=", "True", ",", "verify_integrity", "=", "verify_integrity", ")", "if", "not", "inplace", ":", "return", "idx" ]
Set new levels on MultiIndex. Defaults to returning a new index. Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[['a', 'b'], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar'])
[ "Set", "new", "levels", "on", "MultiIndex", ".", "Defaults", "to", "returning", "new", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L599-L664
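An illustrative sketch for set_levels (data invented): a new index is returned unless inplace=True, and a single level can be addressed by name.

import pandas as pd

idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
                                 (2, 'one'), (2, 'two')],
                                names=['foo', 'bar'])
# Replace only the 'bar' level; codes are untouched, so rows keep position.
relabeled = idx.set_levels(['a', 'b'], level='bar')
print(list(relabeled.get_level_values('bar')))  # expected: ['a', 'b', 'a', 'b']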
20,353
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.set_codes
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True): """ Set new codes on MultiIndex. Defaults to returning new index. .. versionadded:: 0.24.0 New name for deprecated method `set_labels`. Parameters ---------- codes : sequence or list of sequence new codes to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) """ if level is not None and not is_list_like(level): if not is_list_like(codes): raise TypeError("Codes must be list-like") if is_list_like(codes[0]): raise TypeError("Codes must be list-like") level = [level] codes = [codes] elif level is None or is_list_like(level): if not is_list_like(codes) or not is_list_like(codes[0]): raise TypeError("Codes must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_codes(codes, level=level, verify_integrity=verify_integrity) if not inplace: return idx
python
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True): """ Set new codes on MultiIndex. Defaults to returning new index. .. versionadded:: 0.24.0 New name for deprecated method `set_labels`. Parameters ---------- codes : sequence or list of sequence new codes to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) """ if level is not None and not is_list_like(level): if not is_list_like(codes): raise TypeError("Codes must be list-like") if is_list_like(codes[0]): raise TypeError("Codes must be list-like") level = [level] codes = [codes] elif level is None or is_list_like(level): if not is_list_like(codes) or not is_list_like(codes[0]): raise TypeError("Codes must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_codes(codes, level=level, verify_integrity=verify_integrity) if not inplace: return idx
[ "def", "set_codes", "(", "self", ",", "codes", ",", "level", "=", "None", ",", "inplace", "=", "False", ",", "verify_integrity", "=", "True", ")", ":", "if", "level", "is", "not", "None", "and", "not", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "codes", ")", ":", "raise", "TypeError", "(", "\"Codes must be list-like\"", ")", "if", "is_list_like", "(", "codes", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Codes must be list-like\"", ")", "level", "=", "[", "level", "]", "codes", "=", "[", "codes", "]", "elif", "level", "is", "None", "or", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "codes", ")", "or", "not", "is_list_like", "(", "codes", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Codes must be list of lists-like\"", ")", "if", "inplace", ":", "idx", "=", "self", "else", ":", "idx", "=", "self", ".", "_shallow_copy", "(", ")", "idx", ".", "_reset_identity", "(", ")", "idx", ".", "_set_codes", "(", "codes", ",", "level", "=", "level", ",", "verify_integrity", "=", "verify_integrity", ")", "if", "not", "inplace", ":", "return", "idx" ]
Set new codes on MultiIndex. Defaults to returning new index. .. versionadded:: 0.24.0 New name for deprecated method `set_labels`. Parameters ---------- codes : sequence or list of sequence new codes to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar'])
[ "Set", "new", "codes", "on", "MultiIndex", ".", "Defaults", "to", "returning", "new", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L714-L779
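An illustrative sketch for set_codes (data invented): new codes re-point rows at the existing level values without changing the levels themselves.

import pandas as pd

idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
                                 (2, 'one'), (2, 'two')],
                                names=['foo', 'bar'])
# levels[0] is [1, 2], so codes [1, 0, 1, 0] yield values [2, 1, 2, 1].
swapped = idx.set_codes([1, 0, 1, 0], level=0)
print(list(swapped.get_level_values('foo')))  # expected: [2, 1, 2, 1]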
20,354
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.copy
def copy(self, names=None, dtype=None, levels=None, codes=None, deep=False, _set_identity=False, **kwargs): """ Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on the new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional codes : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. """ name = kwargs.get('name') names = self._validate_names(name=name, names=names, deep=deep) if deep: from copy import deepcopy if levels is None: levels = deepcopy(self.levels) if codes is None: codes = deepcopy(self.codes) else: if levels is None: levels = self.levels if codes is None: codes = self.codes return MultiIndex(levels=levels, codes=codes, names=names, sortorder=self.sortorder, verify_integrity=False, _set_identity=_set_identity)
python
def copy(self, names=None, dtype=None, levels=None, codes=None, deep=False, _set_identity=False, **kwargs): """ Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on the new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional codes : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. """ name = kwargs.get('name') names = self._validate_names(name=name, names=names, deep=deep) if deep: from copy import deepcopy if levels is None: levels = deepcopy(self.levels) if codes is None: codes = deepcopy(self.codes) else: if levels is None: levels = self.levels if codes is None: codes = self.codes return MultiIndex(levels=levels, codes=codes, names=names, sortorder=self.sortorder, verify_integrity=False, _set_identity=_set_identity)
[ "def", "copy", "(", "self", ",", "names", "=", "None", ",", "dtype", "=", "None", ",", "levels", "=", "None", ",", "codes", "=", "None", ",", "deep", "=", "False", ",", "_set_identity", "=", "False", ",", "*", "*", "kwargs", ")", ":", "name", "=", "kwargs", ".", "get", "(", "'name'", ")", "names", "=", "self", ".", "_validate_names", "(", "name", "=", "name", ",", "names", "=", "names", ",", "deep", "=", "deep", ")", "if", "deep", ":", "from", "copy", "import", "deepcopy", "if", "levels", "is", "None", ":", "levels", "=", "deepcopy", "(", "self", ".", "levels", ")", "if", "codes", "is", "None", ":", "codes", "=", "deepcopy", "(", "self", ".", "codes", ")", "else", ":", "if", "levels", "is", "None", ":", "levels", "=", "self", ".", "levels", "if", "codes", "is", "None", ":", "codes", "=", "self", ".", "codes", "return", "MultiIndex", "(", "levels", "=", "levels", ",", "codes", "=", "codes", ",", "names", "=", "names", ",", "sortorder", "=", "self", ".", "sortorder", ",", "verify_integrity", "=", "False", ",", "_set_identity", "=", "_set_identity", ")" ]
Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on the new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional codes : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects.
[ "Make", "a", "copy", "of", "this", "object", ".", "Names", "dtype", "levels", "and", "codes", "can", "be", "passed", "and", "will", "be", "set", "on", "new", "copy", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L782-L821
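An illustrative sketch for copy (data invented): levels and codes are shared unless deep=True, and names can be overridden on the copy.

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']], names=['n', 'c'])
shallow = mi.copy()                  # reuses levels/codes
deep = mi.copy(deep=True)            # deep-copies levels/codes
renamed = mi.copy(names=['x', 'y'])  # same data, new level names
print(renamed.names)                 # expected: ['x', 'y']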
20,355
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.view
def view(self, cls=None): """ this is defined as a copy with the same identity """ result = self.copy() result._id = self._id return result
python
def view(self, cls=None): """ this is defined as a copy with the same identity """ result = self.copy() result._id = self._id return result
[ "def", "view", "(", "self", ",", "cls", "=", "None", ")", ":", "result", "=", "self", ".", "copy", "(", ")", "result", ".", "_id", "=", "self", ".", "_id", "return", "result" ]
this is defined as a copy with the same identity
[ "this", "is", "defined", "as", "a", "copy", "with", "the", "same", "identity" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L827-L831
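An illustrative sketch for view (data invented): the shared identity is observable through the public Index.is_ check, which a plain copy is assumed not to preserve.

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
print(mi.is_(mi.view()))  # expected: True (identity token shared)
print(mi.is_(mi.copy()))  # expected: False (copy does not share identity)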
20,356
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex._is_memory_usage_qualified
def _is_memory_usage_qualified(self): """ return a boolean indicating whether we need a qualified .info display """ def f(l): return 'mixed' in l or 'string' in l or 'unicode' in l return any(f(l) for l in self._inferred_type_levels)
python
def _is_memory_usage_qualified(self): """ return a boolean indicating whether we need a qualified .info display """ def f(l): return 'mixed' in l or 'string' in l or 'unicode' in l return any(f(l) for l in self._inferred_type_levels)
[ "def", "_is_memory_usage_qualified", "(", "self", ")", ":", "def", "f", "(", "l", ")", ":", "return", "'mixed'", "in", "l", "or", "'string'", "in", "l", "or", "'unicode'", "in", "l", "return", "any", "(", "f", "(", "l", ")", "for", "l", "in", "self", ".", "_inferred_type_levels", ")" ]
return a boolean indicating whether we need a qualified .info display
[ "return", "a", "boolean", "if", "we", "need", "a", "qualified", ".", "info", "display" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L866-L870
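A hedged illustration of where this internal check surfaces (data invented): DataFrame.info() is assumed to append '+' to its memory line when an index level holds object-like (e.g. string) data.

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']])
pd.DataFrame({'v': [0, 1]}, index=mi).info()
# expected last line: "memory usage: ...+ bytes" because of the string level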
20,357
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex._nbytes
def _nbytes(self, deep=False): """ return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is an internal routine* """ # for implementations with no useful getsizeof (PyPy) objsize = 24 level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels) label_nbytes = sum(i.nbytes for i in self.codes) names_nbytes = sum(getsizeof(i, objsize) for i in self.names) result = level_nbytes + label_nbytes + names_nbytes # include our engine hashtable result += self._engine.sizeof(deep=deep) return result
python
def _nbytes(self, deep=False): """ return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is an internal routine* """ # for implementations with no useful getsizeof (PyPy) objsize = 24 level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels) label_nbytes = sum(i.nbytes for i in self.codes) names_nbytes = sum(getsizeof(i, objsize) for i in self.names) result = level_nbytes + label_nbytes + names_nbytes # include our engine hashtable result += self._engine.sizeof(deep=deep) return result
[ "def", "_nbytes", "(", "self", ",", "deep", "=", "False", ")", ":", "# for implementations with no useful getsizeof (PyPy)", "objsize", "=", "24", "level_nbytes", "=", "sum", "(", "i", ".", "memory_usage", "(", "deep", "=", "deep", ")", "for", "i", "in", "self", ".", "levels", ")", "label_nbytes", "=", "sum", "(", "i", ".", "nbytes", "for", "i", "in", "self", ".", "codes", ")", "names_nbytes", "=", "sum", "(", "getsizeof", "(", "i", ",", "objsize", ")", "for", "i", "in", "self", ".", "names", ")", "result", "=", "level_nbytes", "+", "label_nbytes", "+", "names_nbytes", "# include our engine hashtable", "result", "+=", "self", ".", "_engine", ".", "sizeof", "(", "deep", "=", "deep", ")", "return", "result" ]
return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is an internal routine*
[ "return", "the", "number", "of", "bytes", "in", "the", "underlying", "data", "deeply", "introspect", "the", "level", "data", "if", "deep", "=", "True" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L884-L905
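A hedged illustration (data invented): this internal sizing is assumed to back the public memory_usage(), whose deep=True variant also introspects object data in the levels.

import pandas as pd

mi = pd.MultiIndex.from_product([range(3), ['x', 'y']])
print(mi.memory_usage())           # levels + codes + names (+ engine table)
print(mi.memory_usage(deep=True))  # typically larger when levels hold strings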
20,358
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex._hashed_indexing_key
def _hashed_indexing_key(self, key): """ validate and return the hash for the provided key *this is internal, for use by the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels """ from pandas.core.util.hashing import hash_tuples, hash_tuple if not isinstance(key, tuple): return hash_tuples(key) if not len(key) == self.nlevels: raise KeyError def f(k, stringify): if stringify and not isinstance(k, str): k = str(k) return k key = tuple(f(k, stringify) for k, stringify in zip(key, self._have_mixed_levels)) return hash_tuple(key)
python
def _hashed_indexing_key(self, key): """ validate and return the hash for the provided key *this is internal, for use by the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels """ from pandas.core.util.hashing import hash_tuples, hash_tuple if not isinstance(key, tuple): return hash_tuples(key) if not len(key) == self.nlevels: raise KeyError def f(k, stringify): if stringify and not isinstance(k, str): k = str(k) return k key = tuple(f(k, stringify) for k, stringify in zip(key, self._have_mixed_levels)) return hash_tuple(key)
[ "def", "_hashed_indexing_key", "(", "self", ",", "key", ")", ":", "from", "pandas", ".", "core", ".", "util", ".", "hashing", "import", "hash_tuples", ",", "hash_tuple", "if", "not", "isinstance", "(", "key", ",", "tuple", ")", ":", "return", "hash_tuples", "(", "key", ")", "if", "not", "len", "(", "key", ")", "==", "self", ".", "nlevels", ":", "raise", "KeyError", "def", "f", "(", "k", ",", "stringify", ")", ":", "if", "stringify", "and", "not", "isinstance", "(", "k", ",", "str", ")", ":", "k", "=", "str", "(", "k", ")", "return", "k", "key", "=", "tuple", "(", "f", "(", "k", ",", "stringify", ")", "for", "k", ",", "stringify", "in", "zip", "(", "key", ",", "self", ".", "_have_mixed_levels", ")", ")", "return", "hash_tuple", "(", "key", ")" ]
validate and return the hash for the provided key *this is internal, for use by the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels
[ "validate", "and", "return", "the", "hash", "for", "the", "provided", "key" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1234-L1267
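The hashing helper this method builds on can be exercised directly; a small sketch using the internal pandas.core.util.hashing module (private API, so subject to change):

import pandas as pd
from pandas.core.util.hashing import hash_tuples

mi = pd.MultiIndex.from_arrays([list('abb'), [1, 2, 2]])
print(hash_tuples(mi))  # one uint64 hash per tuple in the index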
20,359
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex._get_level_values
def _get_level_values(self, level, unique=False):
    """
    Return vector of label values for requested level,
    equal to the length of the index

    **this is an internal method**

    Parameters
    ----------
    level : int
        level
    unique : bool, default False
        if True, drop duplicated values

    Returns
    -------
    values : ndarray
    """
    values = self.levels[level]
    level_codes = self.codes[level]
    if unique:
        level_codes = algos.unique(level_codes)
    filled = algos.take_1d(values._values, level_codes,
                           fill_value=values._na_value)
    values = values._shallow_copy(filled)
    return values
python
def _get_level_values(self, level, unique=False):
    """
    Return vector of label values for requested level,
    equal to the length of the index

    **this is an internal method**

    Parameters
    ----------
    level : int
        level
    unique : bool, default False
        if True, drop duplicated values

    Returns
    -------
    values : ndarray
    """
    values = self.levels[level]
    level_codes = self.codes[level]
    if unique:
        level_codes = algos.unique(level_codes)
    filled = algos.take_1d(values._values, level_codes,
                           fill_value=values._na_value)
    values = values._shallow_copy(filled)
    return values
[ "def", "_get_level_values", "(", "self", ",", "level", ",", "unique", "=", "False", ")", ":", "values", "=", "self", ".", "levels", "[", "level", "]", "level_codes", "=", "self", ".", "codes", "[", "level", "]", "if", "unique", ":", "level_codes", "=", "algos", ".", "unique", "(", "level_codes", ")", "filled", "=", "algos", ".", "take_1d", "(", "values", ".", "_values", ",", "level_codes", ",", "fill_value", "=", "values", ".", "_na_value", ")", "values", "=", "values", ".", "_shallow_copy", "(", "filled", ")", "return", "values" ]
Return vector of label values for requested level,
equal to the length of the index

**this is an internal method**

Parameters
----------
level : int
    level
unique : bool, default False
    if True, drop duplicated values

Returns
-------
values : ndarray
[ "Return", "vector", "of", "label", "values", "for", "requested", "level", "equal", "to", "the", "length", "of", "the", "index" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1357-L1382
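Both code paths are reachable from public methods: get_level_values for the default path, and (in this pandas version) Index.unique(level=...) for the unique=True path. A sketch:

import pandas as pd

mi = pd.MultiIndex.from_arrays([list('aabb'), list('xyxy')])
print(mi.get_level_values(0))  # Index(['a', 'a', 'b', 'b'], dtype='object')
print(mi.unique(level=0))      # Index(['a', 'b'], dtype='object')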
20,360
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.get_level_values
def get_level_values(self, level):
    """
    Return vector of label values for requested level,
    equal to the length of the index.

    Parameters
    ----------
    level : int or str
        ``level`` is either the integer position of the level in the
        MultiIndex, or the name of the level.

    Returns
    -------
    values : Index
        Values is a level of this MultiIndex converted to
        a single :class:`Index` (or subclass thereof).

    Examples
    --------
    Create a MultiIndex:

    >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
    >>> mi.names = ['level_1', 'level_2']

    Get level values by supplying level as either integer or name:

    >>> mi.get_level_values(0)
    Index(['a', 'b', 'c'], dtype='object', name='level_1')
    >>> mi.get_level_values('level_2')
    Index(['d', 'e', 'f'], dtype='object', name='level_2')
    """
    level = self._get_level_number(level)
    values = self._get_level_values(level)
    return values
python
def get_level_values(self, level):
    """
    Return vector of label values for requested level,
    equal to the length of the index.

    Parameters
    ----------
    level : int or str
        ``level`` is either the integer position of the level in the
        MultiIndex, or the name of the level.

    Returns
    -------
    values : Index
        Values is a level of this MultiIndex converted to
        a single :class:`Index` (or subclass thereof).

    Examples
    --------
    Create a MultiIndex:

    >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
    >>> mi.names = ['level_1', 'level_2']

    Get level values by supplying level as either integer or name:

    >>> mi.get_level_values(0)
    Index(['a', 'b', 'c'], dtype='object', name='level_1')
    >>> mi.get_level_values('level_2')
    Index(['d', 'e', 'f'], dtype='object', name='level_2')
    """
    level = self._get_level_number(level)
    values = self._get_level_values(level)
    return values
[ "def", "get_level_values", "(", "self", ",", "level", ")", ":", "level", "=", "self", ".", "_get_level_number", "(", "level", ")", "values", "=", "self", ".", "_get_level_values", "(", "level", ")", "return", "values" ]
Return vector of label values for requested level,
equal to the length of the index.

Parameters
----------
level : int or str
    ``level`` is either the integer position of the level in the
    MultiIndex, or the name of the level.

Returns
-------
values : Index
    Values is a level of this MultiIndex converted to
    a single :class:`Index` (or subclass thereof).

Examples
--------
Create a MultiIndex:

>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']

Get level values by supplying level as either integer or name:

>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
[ "Return", "vector", "of", "label", "values", "for", "requested", "level", "equal", "to", "the", "length", "of", "the", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1384-L1418
20,361
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.to_frame
def to_frame(self, index=True, name=None):
    """
    Create a DataFrame with the levels of the MultiIndex as columns.

    Column ordering is determined by the DataFrame constructor with data as
    a dict.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    index : boolean, default True
        Set the index of the returned DataFrame as the original MultiIndex.

    name : list / sequence of strings, optional
        The passed names should substitute index level names.

    Returns
    -------
    DataFrame : a DataFrame containing the original MultiIndex data.

    See Also
    --------
    DataFrame
    """
    from pandas import DataFrame
    if name is not None:
        if not is_list_like(name):
            raise TypeError("'name' must be a list / sequence "
                            "of column names.")

        if len(name) != len(self.levels):
            raise ValueError("'name' should have same length as "
                             "number of levels on index.")
        idx_names = name
    else:
        idx_names = self.names

    # Guarantee resulting column order
    result = DataFrame(
        OrderedDict([
            ((level if lvlname is None else lvlname),
             self._get_level_values(level))
            for lvlname, level in zip(idx_names, range(len(self.levels)))
        ]),
        copy=False
    )

    if index:
        result.index = self
    return result
python
def to_frame(self, index=True, name=None):
    """
    Create a DataFrame with the levels of the MultiIndex as columns.

    Column ordering is determined by the DataFrame constructor with data as
    a dict.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    index : boolean, default True
        Set the index of the returned DataFrame as the original MultiIndex.

    name : list / sequence of strings, optional
        The passed names should substitute index level names.

    Returns
    -------
    DataFrame : a DataFrame containing the original MultiIndex data.

    See Also
    --------
    DataFrame
    """
    from pandas import DataFrame
    if name is not None:
        if not is_list_like(name):
            raise TypeError("'name' must be a list / sequence "
                            "of column names.")

        if len(name) != len(self.levels):
            raise ValueError("'name' should have same length as "
                             "number of levels on index.")
        idx_names = name
    else:
        idx_names = self.names

    # Guarantee resulting column order
    result = DataFrame(
        OrderedDict([
            ((level if lvlname is None else lvlname),
             self._get_level_values(level))
            for lvlname, level in zip(idx_names, range(len(self.levels)))
        ]),
        copy=False
    )

    if index:
        result.index = self
    return result
[ "def", "to_frame", "(", "self", ",", "index", "=", "True", ",", "name", "=", "None", ")", ":", "from", "pandas", "import", "DataFrame", "if", "name", "is", "not", "None", ":", "if", "not", "is_list_like", "(", "name", ")", ":", "raise", "TypeError", "(", "\"'name' must be a list / sequence \"", "\"of column names.\"", ")", "if", "len", "(", "name", ")", "!=", "len", "(", "self", ".", "levels", ")", ":", "raise", "ValueError", "(", "\"'name' should have same length as \"", "\"number of levels on index.\"", ")", "idx_names", "=", "name", "else", ":", "idx_names", "=", "self", ".", "names", "# Guarantee resulting column order", "result", "=", "DataFrame", "(", "OrderedDict", "(", "[", "(", "(", "level", "if", "lvlname", "is", "None", "else", "lvlname", ")", ",", "self", ".", "_get_level_values", "(", "level", ")", ")", "for", "lvlname", ",", "level", "in", "zip", "(", "idx_names", ",", "range", "(", "len", "(", "self", ".", "levels", ")", ")", ")", "]", ")", ",", "copy", "=", "False", ")", "if", "index", ":", "result", ".", "index", "=", "self", "return", "result" ]
Create a DataFrame with the levels of the MultiIndex as columns.

Column ordering is determined by the DataFrame constructor with data as
a dict.

.. versionadded:: 0.24.0

Parameters
----------
index : boolean, default True
    Set the index of the returned DataFrame as the original MultiIndex.

name : list / sequence of strings, optional
    The passed names should substitute index level names.

Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.

See Also
--------
DataFrame
[ "Create", "a", "DataFrame", "with", "the", "levels", "of", "the", "MultiIndex", "as", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1433-L1484
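The docstring above carries no example; a minimal usage sketch:

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']], names=['num', 'let'])
df = mi.to_frame()                  # columns 'num' and 'let', index is mi
df = mi.to_frame(index=False)       # default RangeIndex instead
df = mi.to_frame(name=['n', 'l'])   # substitute the column names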
20,362
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.to_hierarchical
def to_hierarchical(self, n_repeat, n_shuffle=1):
    """
    Return a MultiIndex reshaped to conform to the
    shapes given by n_repeat and n_shuffle.

    .. deprecated:: 0.24.0

    Useful to replicate and rearrange a MultiIndex for combination
    with another Index with n_repeat items.

    Parameters
    ----------
    n_repeat : int
        Number of times to repeat the labels on self
    n_shuffle : int
        Controls the reordering of the labels. If the result is going
        to be an inner level in a MultiIndex, n_shuffle will need to be
        greater than one. The size of each label must be divisible by
        n_shuffle.

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
    ...                                  (2, 'one'), (2, 'two')])
    >>> idx.to_hierarchical(3)
    MultiIndex(levels=[[1, 2], ['one', 'two']],
               codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                      [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
    """
    levels = self.levels
    codes = [np.repeat(level_codes, n_repeat) for
             level_codes in self.codes]
    # Assumes that each level_codes is divisible by n_shuffle
    codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes]
    names = self.names
    warnings.warn("Method .to_hierarchical is deprecated and will "
                  "be removed in a future version",
                  FutureWarning, stacklevel=2)
    return MultiIndex(levels=levels, codes=codes, names=names)
python
def to_hierarchical(self, n_repeat, n_shuffle=1):
    """
    Return a MultiIndex reshaped to conform to the
    shapes given by n_repeat and n_shuffle.

    .. deprecated:: 0.24.0

    Useful to replicate and rearrange a MultiIndex for combination
    with another Index with n_repeat items.

    Parameters
    ----------
    n_repeat : int
        Number of times to repeat the labels on self
    n_shuffle : int
        Controls the reordering of the labels. If the result is going
        to be an inner level in a MultiIndex, n_shuffle will need to be
        greater than one. The size of each label must be divisible by
        n_shuffle.

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
    ...                                  (2, 'one'), (2, 'two')])
    >>> idx.to_hierarchical(3)
    MultiIndex(levels=[[1, 2], ['one', 'two']],
               codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                      [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
    """
    levels = self.levels
    codes = [np.repeat(level_codes, n_repeat) for
             level_codes in self.codes]
    # Assumes that each level_codes is divisible by n_shuffle
    codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes]
    names = self.names
    warnings.warn("Method .to_hierarchical is deprecated and will "
                  "be removed in a future version",
                  FutureWarning, stacklevel=2)
    return MultiIndex(levels=levels, codes=codes, names=names)
[ "def", "to_hierarchical", "(", "self", ",", "n_repeat", ",", "n_shuffle", "=", "1", ")", ":", "levels", "=", "self", ".", "levels", "codes", "=", "[", "np", ".", "repeat", "(", "level_codes", ",", "n_repeat", ")", "for", "level_codes", "in", "self", ".", "codes", "]", "# Assumes that each level_codes is divisible by n_shuffle", "codes", "=", "[", "x", ".", "reshape", "(", "n_shuffle", ",", "-", "1", ")", ".", "ravel", "(", "order", "=", "'F'", ")", "for", "x", "in", "codes", "]", "names", "=", "self", ".", "names", "warnings", ".", "warn", "(", "\"Method .to_hierarchical is deprecated and will \"", "\"be removed in a future version\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "MultiIndex", "(", "levels", "=", "levels", ",", "codes", "=", "codes", ",", "names", "=", "names", ")" ]
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.

.. deprecated:: 0.24.0

Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.

Parameters
----------
n_repeat : int
    Number of times to repeat the labels on self
n_shuffle : int
    Controls the reordering of the labels. If the result is going
    to be an inner level in a MultiIndex, n_shuffle will need to be
    greater than one. The size of each label must be divisible by
    n_shuffle.

Returns
-------
MultiIndex

Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
...                                  (2, 'one'), (2, 'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], ['one', 'two']],
           codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
                  [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
[ "Return", "a", "MultiIndex", "reshaped", "to", "conform", "to", "the", "shapes", "given", "by", "n_repeat", "and", "n_shuffle", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1486-L1528
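Since the method is deprecated, the n_repeat behavior (with the default n_shuffle=1) can be reproduced from the same building blocks the body above uses; a sketch:

import numpy as np
import pandas as pd

idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'),
                                 (2, 'one'), (2, 'two')])
# Same levels, each code array repeated elementwise, as in the body above
repeated = pd.MultiIndex(levels=idx.levels,
                         codes=[np.repeat(c, 3) for c in idx.codes],
                         names=idx.names)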
20,363
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.remove_unused_levels
def remove_unused_levels(self):
    """
    Create a new MultiIndex from the current that removes
    unused levels, meaning that they are not expressed in the labels.

    The resulting MultiIndex will have the same outward
    appearance, meaning the same .values and ordering. It will also
    be .equals() to the original.

    .. versionadded:: 0.20.0

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> i = pd.MultiIndex.from_product([range(2), list('ab')])
    >>> i
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]])

    >>> i[2:]
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[1, 1], [0, 1]])

    The 0 from the first level is not represented
    and can be removed

    >>> i[2:].remove_unused_levels()
    MultiIndex(levels=[[1], ['a', 'b']],
               codes=[[0, 0], [0, 1]])
    """
    new_levels = []
    new_codes = []

    changed = False
    for lev, level_codes in zip(self.levels, self.codes):

        # Since few levels are typically unused, bincount() is more
        # efficient than unique() - however it only accepts positive values
        # (and drops order):
        uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
        has_na = int(len(uniques) and (uniques[0] == -1))

        if len(uniques) != len(lev) + has_na:
            # We have unused levels
            changed = True

            # Recalculate uniques, now preserving order.
            # Can easily be cythonized by exploiting the already existing
            # "uniques" and stop parsing "level_codes" when all items
            # are found:
            uniques = algos.unique(level_codes)
            if has_na:
                na_idx = np.where(uniques == -1)[0]
                # Just ensure that -1 is in first position:
                uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]

            # codes get mapped from uniques to 0:len(uniques)
            # -1 (if present) is mapped to last position
            code_mapping = np.zeros(len(lev) + has_na)
            # ... and reassigned value -1:
            code_mapping[uniques] = np.arange(len(uniques)) - has_na

            level_codes = code_mapping[level_codes]

            # new levels are simple
            lev = lev.take(uniques[has_na:])

        new_levels.append(lev)
        new_codes.append(level_codes)

    result = self._shallow_copy()

    if changed:
        result._reset_identity()
        result._set_levels(new_levels, validate=False)
        result._set_codes(new_codes, validate=False)

    return result
python
def remove_unused_levels(self):
    """
    Create a new MultiIndex from the current that removes
    unused levels, meaning that they are not expressed in the labels.

    The resulting MultiIndex will have the same outward
    appearance, meaning the same .values and ordering. It will also
    be .equals() to the original.

    .. versionadded:: 0.20.0

    Returns
    -------
    MultiIndex

    Examples
    --------
    >>> i = pd.MultiIndex.from_product([range(2), list('ab')])
    >>> i
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]])

    >>> i[2:]
    MultiIndex(levels=[[0, 1], ['a', 'b']],
               codes=[[1, 1], [0, 1]])

    The 0 from the first level is not represented
    and can be removed

    >>> i[2:].remove_unused_levels()
    MultiIndex(levels=[[1], ['a', 'b']],
               codes=[[0, 0], [0, 1]])
    """
    new_levels = []
    new_codes = []

    changed = False
    for lev, level_codes in zip(self.levels, self.codes):

        # Since few levels are typically unused, bincount() is more
        # efficient than unique() - however it only accepts positive values
        # (and drops order):
        uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
        has_na = int(len(uniques) and (uniques[0] == -1))

        if len(uniques) != len(lev) + has_na:
            # We have unused levels
            changed = True

            # Recalculate uniques, now preserving order.
            # Can easily be cythonized by exploiting the already existing
            # "uniques" and stop parsing "level_codes" when all items
            # are found:
            uniques = algos.unique(level_codes)
            if has_na:
                na_idx = np.where(uniques == -1)[0]
                # Just ensure that -1 is in first position:
                uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]

            # codes get mapped from uniques to 0:len(uniques)
            # -1 (if present) is mapped to last position
            code_mapping = np.zeros(len(lev) + has_na)
            # ... and reassigned value -1:
            code_mapping[uniques] = np.arange(len(uniques)) - has_na

            level_codes = code_mapping[level_codes]

            # new levels are simple
            lev = lev.take(uniques[has_na:])

        new_levels.append(lev)
        new_codes.append(level_codes)

    result = self._shallow_copy()

    if changed:
        result._reset_identity()
        result._set_levels(new_levels, validate=False)
        result._set_codes(new_codes, validate=False)

    return result
[ "def", "remove_unused_levels", "(", "self", ")", ":", "new_levels", "=", "[", "]", "new_codes", "=", "[", "]", "changed", "=", "False", "for", "lev", ",", "level_codes", "in", "zip", "(", "self", ".", "levels", ",", "self", ".", "codes", ")", ":", "# Since few levels are typically unused, bincount() is more", "# efficient than unique() - however it only accepts positive values", "# (and drops order):", "uniques", "=", "np", ".", "where", "(", "np", ".", "bincount", "(", "level_codes", "+", "1", ")", ">", "0", ")", "[", "0", "]", "-", "1", "has_na", "=", "int", "(", "len", "(", "uniques", ")", "and", "(", "uniques", "[", "0", "]", "==", "-", "1", ")", ")", "if", "len", "(", "uniques", ")", "!=", "len", "(", "lev", ")", "+", "has_na", ":", "# We have unused levels", "changed", "=", "True", "# Recalculate uniques, now preserving order.", "# Can easily be cythonized by exploiting the already existing", "# \"uniques\" and stop parsing \"level_codes\" when all items", "# are found:", "uniques", "=", "algos", ".", "unique", "(", "level_codes", ")", "if", "has_na", ":", "na_idx", "=", "np", ".", "where", "(", "uniques", "==", "-", "1", ")", "[", "0", "]", "# Just ensure that -1 is in first position:", "uniques", "[", "[", "0", ",", "na_idx", "[", "0", "]", "]", "]", "=", "uniques", "[", "[", "na_idx", "[", "0", "]", ",", "0", "]", "]", "# codes get mapped from uniques to 0:len(uniques)", "# -1 (if present) is mapped to last position", "code_mapping", "=", "np", ".", "zeros", "(", "len", "(", "lev", ")", "+", "has_na", ")", "# ... and reassigned value -1:", "code_mapping", "[", "uniques", "]", "=", "np", ".", "arange", "(", "len", "(", "uniques", ")", ")", "-", "has_na", "level_codes", "=", "code_mapping", "[", "level_codes", "]", "# new levels are simple", "lev", "=", "lev", ".", "take", "(", "uniques", "[", "has_na", ":", "]", ")", "new_levels", ".", "append", "(", "lev", ")", "new_codes", ".", "append", "(", "level_codes", ")", "result", "=", "self", ".", "_shallow_copy", "(", ")", "if", "changed", ":", "result", ".", "_reset_identity", "(", ")", "result", ".", "_set_levels", "(", "new_levels", ",", "validate", "=", "False", ")", "result", ".", "_set_codes", "(", "new_codes", ",", "validate", "=", "False", ")", "return", "result" ]
Create a new MultiIndex from the current that removes
unused levels, meaning that they are not expressed in the labels.

The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.

.. versionadded:: 0.20.0

Returns
-------
MultiIndex

Examples
--------
>>> i = pd.MultiIndex.from_product([range(2), list('ab')])
>>> i
MultiIndex(levels=[[0, 1], ['a', 'b']],
           codes=[[0, 0, 1, 1], [0, 1, 0, 1]])

>>> i[2:]
MultiIndex(levels=[[0, 1], ['a', 'b']],
           codes=[[1, 1], [0, 1]])

The 0 from the first level is not represented
and can be removed

>>> i[2:].remove_unused_levels()
MultiIndex(levels=[[1], ['a', 'b']],
           codes=[[0, 0], [0, 1]])
[ "Create", "a", "new", "MultiIndex", "from", "the", "current", "that", "removes", "unused", "levels", "meaning", "that", "they", "are", "not", "expressed", "in", "the", "labels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1645-L1725
20,364
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex._assert_take_fillable
def _assert_take_fillable(self, values, indices, allow_fill=True,
                          fill_value=None, na_value=None):
    """ Internal method to handle NA filling of take """
    # only fill if we are passing a non-None fill_value
    if allow_fill and fill_value is not None:
        if (indices < -1).any():
            msg = ('When allow_fill=True and fill_value is not None, '
                   'all indices must be >= -1')
            raise ValueError(msg)
        taken = [lab.take(indices) for lab in self.codes]
        mask = indices == -1
        if mask.any():
            masked = []
            for new_label in taken:
                label_values = new_label.values()
                label_values[mask] = na_value
                masked.append(np.asarray(label_values))
            taken = masked
    else:
        taken = [lab.take(indices) for lab in self.codes]

    return taken
python
def _assert_take_fillable(self, values, indices, allow_fill=True,
                          fill_value=None, na_value=None):
    """ Internal method to handle NA filling of take """
    # only fill if we are passing a non-None fill_value
    if allow_fill and fill_value is not None:
        if (indices < -1).any():
            msg = ('When allow_fill=True and fill_value is not None, '
                   'all indices must be >= -1')
            raise ValueError(msg)
        taken = [lab.take(indices) for lab in self.codes]
        mask = indices == -1
        if mask.any():
            masked = []
            for new_label in taken:
                label_values = new_label.values()
                label_values[mask] = na_value
                masked.append(np.asarray(label_values))
            taken = masked
    else:
        taken = [lab.take(indices) for lab in self.codes]

    return taken
[ "def", "_assert_take_fillable", "(", "self", ",", "values", ",", "indices", ",", "allow_fill", "=", "True", ",", "fill_value", "=", "None", ",", "na_value", "=", "None", ")", ":", "# only fill if we are passing a non-None fill_value", "if", "allow_fill", "and", "fill_value", "is", "not", "None", ":", "if", "(", "indices", "<", "-", "1", ")", ".", "any", "(", ")", ":", "msg", "=", "(", "'When allow_fill=True and fill_value is not None, '", "'all indices must be >= -1'", ")", "raise", "ValueError", "(", "msg", ")", "taken", "=", "[", "lab", ".", "take", "(", "indices", ")", "for", "lab", "in", "self", ".", "codes", "]", "mask", "=", "indices", "==", "-", "1", "if", "mask", ".", "any", "(", ")", ":", "masked", "=", "[", "]", "for", "new_label", "in", "taken", ":", "label_values", "=", "new_label", ".", "values", "(", ")", "label_values", "[", "mask", "]", "=", "na_value", "masked", ".", "append", "(", "np", ".", "asarray", "(", "label_values", ")", ")", "taken", "=", "masked", "else", ":", "taken", "=", "[", "lab", ".", "take", "(", "indices", ")", "for", "lab", "in", "self", ".", "codes", "]", "return", "taken" ]
Internal method to handle NA filling of take
[ "Internal", "method", "to", "handle", "NA", "filling", "of", "take" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1806-L1826
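Seen from the public API, this is the machinery behind Index.take with allow_fill; a sketch:

import numpy as np
import pandas as pd

mi = pd.MultiIndex.from_arrays([list('ab'), [1, 2]])
# With allow_fill=True and a fill_value, -1 marks positions to fill
print(mi.take([0, -1], allow_fill=True, fill_value=np.nan))
# Without a fill_value, -1 keeps its numpy meaning: the last element
print(mi.take([0, -1]))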
20,365
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.append
def append(self, other):
    """
    Append a collection of Index objects together

    Parameters
    ----------
    other : Index or list/tuple of indices

    Returns
    -------
    appended : Index
    """
    if not isinstance(other, (list, tuple)):
        other = [other]

    if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
           for o in other):
        arrays = []
        for i in range(self.nlevels):
            label = self._get_level_values(i)
            appended = [o._get_level_values(i) for o in other]
            arrays.append(label.append(appended))
        return MultiIndex.from_arrays(arrays, names=self.names)

    to_concat = (self.values, ) + tuple(k._values for k in other)
    new_tuples = np.concatenate(to_concat)

    # if all(isinstance(x, MultiIndex) for x in other):
    try:
        return MultiIndex.from_tuples(new_tuples, names=self.names)
    except (TypeError, IndexError):
        return Index(new_tuples)
python
def append(self, other):
    """
    Append a collection of Index objects together

    Parameters
    ----------
    other : Index or list/tuple of indices

    Returns
    -------
    appended : Index
    """
    if not isinstance(other, (list, tuple)):
        other = [other]

    if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
           for o in other):
        arrays = []
        for i in range(self.nlevels):
            label = self._get_level_values(i)
            appended = [o._get_level_values(i) for o in other]
            arrays.append(label.append(appended))
        return MultiIndex.from_arrays(arrays, names=self.names)

    to_concat = (self.values, ) + tuple(k._values for k in other)
    new_tuples = np.concatenate(to_concat)

    # if all(isinstance(x, MultiIndex) for x in other):
    try:
        return MultiIndex.from_tuples(new_tuples, names=self.names)
    except (TypeError, IndexError):
        return Index(new_tuples)
[ "def", "append", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "(", "list", ",", "tuple", ")", ")", ":", "other", "=", "[", "other", "]", "if", "all", "(", "(", "isinstance", "(", "o", ",", "MultiIndex", ")", "and", "o", ".", "nlevels", ">=", "self", ".", "nlevels", ")", "for", "o", "in", "other", ")", ":", "arrays", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "nlevels", ")", ":", "label", "=", "self", ".", "_get_level_values", "(", "i", ")", "appended", "=", "[", "o", ".", "_get_level_values", "(", "i", ")", "for", "o", "in", "other", "]", "arrays", ".", "append", "(", "label", ".", "append", "(", "appended", ")", ")", "return", "MultiIndex", ".", "from_arrays", "(", "arrays", ",", "names", "=", "self", ".", "names", ")", "to_concat", "=", "(", "self", ".", "values", ",", ")", "+", "tuple", "(", "k", ".", "_values", "for", "k", "in", "other", ")", "new_tuples", "=", "np", ".", "concatenate", "(", "to_concat", ")", "# if all(isinstance(x, MultiIndex) for x in other):", "try", ":", "return", "MultiIndex", ".", "from_tuples", "(", "new_tuples", ",", "names", "=", "self", ".", "names", ")", "except", "(", "TypeError", ",", "IndexError", ")", ":", "return", "Index", "(", "new_tuples", ")" ]
Append a collection of Index objects together

Parameters
----------
other : Index or list/tuple of indices

Returns
-------
appended : Index
[ "Append", "a", "collection", "of", "Index", "options", "together" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1828-L1859
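A sketch of the two result types (the all-MultiIndex fast path versus the tuple fallback):

import pandas as pd

mi = pd.MultiIndex.from_arrays([list('ab'), [1, 2]])
print(mi.append(mi))               # still a MultiIndex
print(mi.append(pd.Index(['x'])))  # falls back to a flat Index of mixed values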
20,366
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.drop
def drop(self, codes, level=None, errors='raise'):
    """
    Make new MultiIndex with passed list of codes deleted

    Parameters
    ----------
    codes : array-like
        Must be a list of tuples
    level : int or level name, default None

    Returns
    -------
    dropped : MultiIndex
    """
    if level is not None:
        return self._drop_from_level(codes, level)

    try:
        if not isinstance(codes, (np.ndarray, Index)):
            codes = com.index_labels_to_array(codes)
        indexer = self.get_indexer(codes)
        mask = indexer == -1
        if mask.any():
            if errors != 'ignore':
                raise ValueError('codes %s not contained in axis'
                                 % codes[mask])
    except Exception:
        pass

    inds = []
    for level_codes in codes:
        try:
            loc = self.get_loc(level_codes)
            # get_loc returns either an integer, a slice, or a boolean
            # mask
            if isinstance(loc, int):
                inds.append(loc)
            elif isinstance(loc, slice):
                inds.extend(lrange(loc.start, loc.stop))
            elif com.is_bool_indexer(loc):
                if self.lexsort_depth == 0:
                    warnings.warn('dropping on a non-lexsorted multi-index'
                                  ' without a level parameter may impact '
                                  'performance.',
                                  PerformanceWarning,
                                  stacklevel=3)
                loc = loc.nonzero()[0]
                inds.extend(loc)
            else:
                msg = 'unsupported indexer of type {}'.format(type(loc))
                raise AssertionError(msg)
        except KeyError:
            if errors != 'ignore':
                raise

    return self.delete(inds)
python
def drop(self, codes, level=None, errors='raise'):
    """
    Make new MultiIndex with passed list of codes deleted

    Parameters
    ----------
    codes : array-like
        Must be a list of tuples
    level : int or level name, default None

    Returns
    -------
    dropped : MultiIndex
    """
    if level is not None:
        return self._drop_from_level(codes, level)

    try:
        if not isinstance(codes, (np.ndarray, Index)):
            codes = com.index_labels_to_array(codes)
        indexer = self.get_indexer(codes)
        mask = indexer == -1
        if mask.any():
            if errors != 'ignore':
                raise ValueError('codes %s not contained in axis'
                                 % codes[mask])
    except Exception:
        pass

    inds = []
    for level_codes in codes:
        try:
            loc = self.get_loc(level_codes)
            # get_loc returns either an integer, a slice, or a boolean
            # mask
            if isinstance(loc, int):
                inds.append(loc)
            elif isinstance(loc, slice):
                inds.extend(lrange(loc.start, loc.stop))
            elif com.is_bool_indexer(loc):
                if self.lexsort_depth == 0:
                    warnings.warn('dropping on a non-lexsorted multi-index'
                                  ' without a level parameter may impact '
                                  'performance.',
                                  PerformanceWarning,
                                  stacklevel=3)
                loc = loc.nonzero()[0]
                inds.extend(loc)
            else:
                msg = 'unsupported indexer of type {}'.format(type(loc))
                raise AssertionError(msg)
        except KeyError:
            if errors != 'ignore':
                raise

    return self.delete(inds)
[ "def", "drop", "(", "self", ",", "codes", ",", "level", "=", "None", ",", "errors", "=", "'raise'", ")", ":", "if", "level", "is", "not", "None", ":", "return", "self", ".", "_drop_from_level", "(", "codes", ",", "level", ")", "try", ":", "if", "not", "isinstance", "(", "codes", ",", "(", "np", ".", "ndarray", ",", "Index", ")", ")", ":", "codes", "=", "com", ".", "index_labels_to_array", "(", "codes", ")", "indexer", "=", "self", ".", "get_indexer", "(", "codes", ")", "mask", "=", "indexer", "==", "-", "1", "if", "mask", ".", "any", "(", ")", ":", "if", "errors", "!=", "'ignore'", ":", "raise", "ValueError", "(", "'codes %s not contained in axis'", "%", "codes", "[", "mask", "]", ")", "except", "Exception", ":", "pass", "inds", "=", "[", "]", "for", "level_codes", "in", "codes", ":", "try", ":", "loc", "=", "self", ".", "get_loc", "(", "level_codes", ")", "# get_loc returns either an integer, a slice, or a boolean", "# mask", "if", "isinstance", "(", "loc", ",", "int", ")", ":", "inds", ".", "append", "(", "loc", ")", "elif", "isinstance", "(", "loc", ",", "slice", ")", ":", "inds", ".", "extend", "(", "lrange", "(", "loc", ".", "start", ",", "loc", ".", "stop", ")", ")", "elif", "com", ".", "is_bool_indexer", "(", "loc", ")", ":", "if", "self", ".", "lexsort_depth", "==", "0", ":", "warnings", ".", "warn", "(", "'dropping on a non-lexsorted multi-index'", "' without a level parameter may impact '", "'performance.'", ",", "PerformanceWarning", ",", "stacklevel", "=", "3", ")", "loc", "=", "loc", ".", "nonzero", "(", ")", "[", "0", "]", "inds", ".", "extend", "(", "loc", ")", "else", ":", "msg", "=", "'unsupported indexer of type {}'", ".", "format", "(", "type", "(", "loc", ")", ")", "raise", "AssertionError", "(", "msg", ")", "except", "KeyError", ":", "if", "errors", "!=", "'ignore'", ":", "raise", "return", "self", ".", "delete", "(", "inds", ")" ]
Make new MultiIndex with passed list of codes deleted

Parameters
----------
codes : array-like
    Must be a list of tuples
level : int or level name, default None

Returns
-------
dropped : MultiIndex
[ "Make", "new", "MultiIndex", "with", "passed", "list", "of", "codes", "deleted" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1878-L1933
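A usage sketch covering both forms of the codes argument:

import pandas as pd

mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
print(mi.drop([('b', 'e')]))  # level=None: codes are full tuples
print(mi.drop('b', level=0))  # with a level: match labels in that level only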
20,367
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.swaplevel
def swaplevel(self, i=-2, j=-1):
    """
    Swap level i with level j.

    Calling this method does not change the ordering of the values.

    Parameters
    ----------
    i : int, str, default -2
        First level of index to be swapped. Can pass level name as string.
        Type of parameters can be mixed.
    j : int, str, default -1
        Second level of index to be swapped. Can pass level name as string.
        Type of parameters can be mixed.

    Returns
    -------
    MultiIndex
        A new MultiIndex.

    .. versionchanged:: 0.18.1
       The indexes ``i`` and ``j`` are now optional, and default to
       the two innermost levels of the index.

    See Also
    --------
    Series.swaplevel : Swap levels i and j in a MultiIndex.
    DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a
        particular axis.

    Examples
    --------
    >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
    ...                    codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
    >>> mi
    MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
    >>> mi.swaplevel(0, 1)
    MultiIndex(levels=[['bb', 'aa'], ['a', 'b']],
               codes=[[0, 1, 0, 1], [0, 0, 1, 1]])
    """
    new_levels = list(self.levels)
    new_codes = list(self.codes)
    new_names = list(self.names)

    i = self._get_level_number(i)
    j = self._get_level_number(j)

    new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
    new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
    new_names[i], new_names[j] = new_names[j], new_names[i]

    return MultiIndex(levels=new_levels, codes=new_codes,
                      names=new_names, verify_integrity=False)
python
def swaplevel(self, i=-2, j=-1):
    """
    Swap level i with level j.

    Calling this method does not change the ordering of the values.

    Parameters
    ----------
    i : int, str, default -2
        First level of index to be swapped. Can pass level name as string.
        Type of parameters can be mixed.
    j : int, str, default -1
        Second level of index to be swapped. Can pass level name as string.
        Type of parameters can be mixed.

    Returns
    -------
    MultiIndex
        A new MultiIndex.

    .. versionchanged:: 0.18.1
       The indexes ``i`` and ``j`` are now optional, and default to
       the two innermost levels of the index.

    See Also
    --------
    Series.swaplevel : Swap levels i and j in a MultiIndex.
    DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a
        particular axis.

    Examples
    --------
    >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
    ...                    codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
    >>> mi
    MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
               codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
    >>> mi.swaplevel(0, 1)
    MultiIndex(levels=[['bb', 'aa'], ['a', 'b']],
               codes=[[0, 1, 0, 1], [0, 0, 1, 1]])
    """
    new_levels = list(self.levels)
    new_codes = list(self.codes)
    new_names = list(self.names)

    i = self._get_level_number(i)
    j = self._get_level_number(j)

    new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
    new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
    new_names[i], new_names[j] = new_names[j], new_names[i]

    return MultiIndex(levels=new_levels, codes=new_codes,
                      names=new_names, verify_integrity=False)
[ "def", "swaplevel", "(", "self", ",", "i", "=", "-", "2", ",", "j", "=", "-", "1", ")", ":", "new_levels", "=", "list", "(", "self", ".", "levels", ")", "new_codes", "=", "list", "(", "self", ".", "codes", ")", "new_names", "=", "list", "(", "self", ".", "names", ")", "i", "=", "self", ".", "_get_level_number", "(", "i", ")", "j", "=", "self", ".", "_get_level_number", "(", "j", ")", "new_levels", "[", "i", "]", ",", "new_levels", "[", "j", "]", "=", "new_levels", "[", "j", "]", ",", "new_levels", "[", "i", "]", "new_codes", "[", "i", "]", ",", "new_codes", "[", "j", "]", "=", "new_codes", "[", "j", "]", ",", "new_codes", "[", "i", "]", "new_names", "[", "i", "]", ",", "new_names", "[", "j", "]", "=", "new_names", "[", "j", "]", ",", "new_names", "[", "i", "]", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "new_names", ",", "verify_integrity", "=", "False", ")" ]
Swap level i with level j.

Calling this method does not change the ordering of the values.

Parameters
----------
i : int, str, default -2
    First level of index to be swapped. Can pass level name as string.
    Type of parameters can be mixed.
j : int, str, default -1
    Second level of index to be swapped. Can pass level name as string.
    Type of parameters can be mixed.

Returns
-------
MultiIndex
    A new MultiIndex.

.. versionchanged:: 0.18.1
   The indexes ``i`` and ``j`` are now optional, and default to
   the two innermost levels of the index.

See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a
    particular axis.

Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
...                    codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
           codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi.swaplevel(0, 1)
MultiIndex(levels=[['bb', 'aa'], ['a', 'b']],
           codes=[[0, 1, 0, 1], [0, 0, 1, 1]])
[ "Swap", "level", "i", "with", "level", "j", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1945-L1999
20,368
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.reorder_levels
def reorder_levels(self, order):
    """
    Rearrange levels using input order. May not drop or duplicate levels

    Parameters
    ----------
    order : list of int or list of str
        Reference level by number (position) or by key (name).
    """
    order = [self._get_level_number(i) for i in order]
    if len(order) != self.nlevels:
        raise AssertionError('Length of order must be same as '
                             'number of levels (%d), got %d' %
                             (self.nlevels, len(order)))
    new_levels = [self.levels[i] for i in order]
    new_codes = [self.codes[i] for i in order]
    new_names = [self.names[i] for i in order]

    return MultiIndex(levels=new_levels, codes=new_codes,
                      names=new_names, verify_integrity=False)
python
def reorder_levels(self, order):
    """
    Rearrange levels using input order. May not drop or duplicate levels

    Parameters
    ----------
    order : list of int or list of str
        Reference level by number (position) or by key (name).
    """
    order = [self._get_level_number(i) for i in order]
    if len(order) != self.nlevels:
        raise AssertionError('Length of order must be same as '
                             'number of levels (%d), got %d' %
                             (self.nlevels, len(order)))
    new_levels = [self.levels[i] for i in order]
    new_codes = [self.codes[i] for i in order]
    new_names = [self.names[i] for i in order]

    return MultiIndex(levels=new_levels, codes=new_codes,
                      names=new_names, verify_integrity=False)
[ "def", "reorder_levels", "(", "self", ",", "order", ")", ":", "order", "=", "[", "self", ".", "_get_level_number", "(", "i", ")", "for", "i", "in", "order", "]", "if", "len", "(", "order", ")", "!=", "self", ".", "nlevels", ":", "raise", "AssertionError", "(", "'Length of order must be same as '", "'number of levels (%d), got %d'", "%", "(", "self", ".", "nlevels", ",", "len", "(", "order", ")", ")", ")", "new_levels", "=", "[", "self", ".", "levels", "[", "i", "]", "for", "i", "in", "order", "]", "new_codes", "=", "[", "self", ".", "codes", "[", "i", "]", "for", "i", "in", "order", "]", "new_names", "=", "[", "self", ".", "names", "[", "i", "]", "for", "i", "in", "order", "]", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "new_names", ",", "verify_integrity", "=", "False", ")" ]
Rearrange levels using input order. May not drop or duplicate levels

Parameters
----------
order : list of int or list of str
    Reference level by number (position) or by key (name).
[ "Rearrange", "levels", "using", "input", "order", ".", "May", "not", "drop", "or", "duplicate", "levels" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2001-L2018
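A usage sketch, reordering by position and by name:

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 2], ['a', 'b']], names=['x', 'y'])
print(mi.reorder_levels([1, 0]))      # by position
print(mi.reorder_levels(['y', 'x']))  # same result, by name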
20,369
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.sortlevel
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
    """
    Sort MultiIndex at the requested level. The result will respect the
    original ordering of the associated factor at that level.

    Parameters
    ----------
    level : list-like, int or str, default 0
        If a string is given, must be a name of the level
        If list-like must be names or ints of levels.
    ascending : boolean, default True
        False to sort in descending order
        Can also be a list to specify a directed ordering
    sort_remaining : bool, default True
        Sort by the remaining levels after level.

    Returns
    -------
    sorted_index : pd.MultiIndex
        Resulting index.
    indexer : np.ndarray
        Indices of output values in original index.
    """
    from pandas.core.sorting import indexer_from_factorized

    if isinstance(level, (str, int)):
        level = [level]
    level = [self._get_level_number(lev) for lev in level]
    sortorder = None

    # we have a directed ordering via ascending
    if isinstance(ascending, list):
        if not len(level) == len(ascending):
            raise ValueError("level must have same length as ascending")

        from pandas.core.sorting import lexsort_indexer
        indexer = lexsort_indexer([self.codes[lev] for lev in level],
                                  orders=ascending)

    # level ordering
    else:

        codes = list(self.codes)
        shape = list(self.levshape)

        # partition codes and shape
        primary = tuple(codes[lev] for lev in level)
        primshp = tuple(shape[lev] for lev in level)

        # Reverse sorted to retain the order of
        # smaller indices that needs to be removed
        for lev in sorted(level, reverse=True):
            codes.pop(lev)
            shape.pop(lev)

        if sort_remaining:
            primary += primary + tuple(codes)
            primshp += primshp + tuple(shape)
        else:
            sortorder = level[0]

        indexer = indexer_from_factorized(primary, primshp,
                                          compress=False)

        if not ascending:
            indexer = indexer[::-1]

    indexer = ensure_platform_int(indexer)
    new_codes = [level_codes.take(indexer) for level_codes in self.codes]

    new_index = MultiIndex(codes=new_codes, levels=self.levels,
                           names=self.names, sortorder=sortorder,
                           verify_integrity=False)

    return new_index, indexer
python
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
    """
    Sort MultiIndex at the requested level. The result will respect the
    original ordering of the associated factor at that level.

    Parameters
    ----------
    level : list-like, int or str, default 0
        If a string is given, must be a name of the level
        If list-like must be names or ints of levels.
    ascending : boolean, default True
        False to sort in descending order
        Can also be a list to specify a directed ordering
    sort_remaining : bool, default True
        Sort by the remaining levels after level.

    Returns
    -------
    sorted_index : pd.MultiIndex
        Resulting index.
    indexer : np.ndarray
        Indices of output values in original index.
    """
    from pandas.core.sorting import indexer_from_factorized

    if isinstance(level, (str, int)):
        level = [level]
    level = [self._get_level_number(lev) for lev in level]
    sortorder = None

    # we have a directed ordering via ascending
    if isinstance(ascending, list):
        if not len(level) == len(ascending):
            raise ValueError("level must have same length as ascending")

        from pandas.core.sorting import lexsort_indexer
        indexer = lexsort_indexer([self.codes[lev] for lev in level],
                                  orders=ascending)

    # level ordering
    else:

        codes = list(self.codes)
        shape = list(self.levshape)

        # partition codes and shape
        primary = tuple(codes[lev] for lev in level)
        primshp = tuple(shape[lev] for lev in level)

        # Reverse sorted to retain the order of
        # smaller indices that needs to be removed
        for lev in sorted(level, reverse=True):
            codes.pop(lev)
            shape.pop(lev)

        if sort_remaining:
            primary += primary + tuple(codes)
            primshp += primshp + tuple(shape)
        else:
            sortorder = level[0]

        indexer = indexer_from_factorized(primary, primshp,
                                          compress=False)

        if not ascending:
            indexer = indexer[::-1]

    indexer = ensure_platform_int(indexer)
    new_codes = [level_codes.take(indexer) for level_codes in self.codes]

    new_index = MultiIndex(codes=new_codes, levels=self.levels,
                           names=self.names, sortorder=sortorder,
                           verify_integrity=False)

    return new_index, indexer
[ "def", "sortlevel", "(", "self", ",", "level", "=", "0", ",", "ascending", "=", "True", ",", "sort_remaining", "=", "True", ")", ":", "from", "pandas", ".", "core", ".", "sorting", "import", "indexer_from_factorized", "if", "isinstance", "(", "level", ",", "(", "str", ",", "int", ")", ")", ":", "level", "=", "[", "level", "]", "level", "=", "[", "self", ".", "_get_level_number", "(", "lev", ")", "for", "lev", "in", "level", "]", "sortorder", "=", "None", "# we have a directed ordering via ascending", "if", "isinstance", "(", "ascending", ",", "list", ")", ":", "if", "not", "len", "(", "level", ")", "==", "len", "(", "ascending", ")", ":", "raise", "ValueError", "(", "\"level must have same length as ascending\"", ")", "from", "pandas", ".", "core", ".", "sorting", "import", "lexsort_indexer", "indexer", "=", "lexsort_indexer", "(", "[", "self", ".", "codes", "[", "lev", "]", "for", "lev", "in", "level", "]", ",", "orders", "=", "ascending", ")", "# level ordering", "else", ":", "codes", "=", "list", "(", "self", ".", "codes", ")", "shape", "=", "list", "(", "self", ".", "levshape", ")", "# partition codes and shape", "primary", "=", "tuple", "(", "codes", "[", "lev", "]", "for", "lev", "in", "level", ")", "primshp", "=", "tuple", "(", "shape", "[", "lev", "]", "for", "lev", "in", "level", ")", "# Reverse sorted to retain the order of", "# smaller indices that needs to be removed", "for", "lev", "in", "sorted", "(", "level", ",", "reverse", "=", "True", ")", ":", "codes", ".", "pop", "(", "lev", ")", "shape", ".", "pop", "(", "lev", ")", "if", "sort_remaining", ":", "primary", "+=", "primary", "+", "tuple", "(", "codes", ")", "primshp", "+=", "primshp", "+", "tuple", "(", "shape", ")", "else", ":", "sortorder", "=", "level", "[", "0", "]", "indexer", "=", "indexer_from_factorized", "(", "primary", ",", "primshp", ",", "compress", "=", "False", ")", "if", "not", "ascending", ":", "indexer", "=", "indexer", "[", ":", ":", "-", "1", "]", "indexer", "=", "ensure_platform_int", "(", "indexer", ")", "new_codes", "=", "[", "level_codes", ".", "take", "(", "indexer", ")", "for", "level_codes", "in", "self", ".", "codes", "]", "new_index", "=", "MultiIndex", "(", "codes", "=", "new_codes", ",", "levels", "=", "self", ".", "levels", ",", "names", "=", "self", ".", "names", ",", "sortorder", "=", "sortorder", ",", "verify_integrity", "=", "False", ")", "return", "new_index", ",", "indexer" ]
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.

Parameters
----------
level : list-like, int or str, default 0
    If a string is given, must be a name of the level
    If list-like must be names or ints of levels.
ascending : boolean, default True
    False to sort in descending order
    Can also be a list to specify a directed ordering
sort_remaining : bool, default True
    Sort by the remaining levels after level.

Returns
-------
sorted_index : pd.MultiIndex
    Resulting index.
indexer : np.ndarray
    Indices of output values in original index.
[ "Sort", "MultiIndex", "at", "the", "requested", "level", ".", "The", "result", "will", "respect", "the", "original", "ordering", "of", "the", "associated", "factor", "at", "that", "level", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2042-L2115
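A usage sketch showing the two-element return value:

import pandas as pd

mi = pd.MultiIndex.from_arrays([[2, 1, 2, 1], list('baab')])
sorted_mi, indexer = mi.sortlevel(0)
print(sorted_mi)  # rows ordered by level 0, then the remaining level
print(indexer)    # positions of the sorted rows in the original index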
20,370
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.slice_locs
def slice_locs(self, start=None, end=None, step=None, kind=None):
    """
    For an ordered MultiIndex, compute the slice locations for input
    labels.

    The input labels can be tuples representing partial levels, e.g.
    for a MultiIndex with 3 levels, you can pass a single value
    (corresponding to the first level), or a 1-, 2-, or 3-tuple.

    Parameters
    ----------
    start : label or tuple, default None
        If None, defaults to the beginning
    end : label or tuple
        If None, defaults to the end
    step : int or None
        Slice step
    kind : string, optional, defaults None

    Returns
    -------
    (start, end) : (int, int)

    Notes
    -----
    This method only works if the MultiIndex is properly lexsorted. So,
    if only the first 2 levels of a 3-level MultiIndex are lexsorted,
    you can only pass two levels to ``.slice_locs``.

    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
    ...                                names=['A', 'B'])

    Get the slice locations from the beginning of 'b' in the first level
    until the end of the multiindex:

    >>> mi.slice_locs(start='b')
    (1, 4)

    Like above, but stop at the end of 'b' in the first level and 'f' in
    the second level:

    >>> mi.slice_locs(start='b', end=('b', 'f'))
    (1, 3)

    See Also
    --------
    MultiIndex.get_loc : Get location for a label or a tuple of labels.
    MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                          sequence of such.
    """
    # This function adds nothing to its parent implementation (the magic
    # happens in get_slice_bound method), but it adds meaningful doc.
    return super().slice_locs(start, end, step, kind=kind)
python
def slice_locs(self, start=None, end=None, step=None, kind=None):
    """
    For an ordered MultiIndex, compute the slice locations for input
    labels.

    The input labels can be tuples representing partial levels, e.g.
    for a MultiIndex with 3 levels, you can pass a single value
    (corresponding to the first level), or a 1-, 2-, or 3-tuple.

    Parameters
    ----------
    start : label or tuple, default None
        If None, defaults to the beginning
    end : label or tuple
        If None, defaults to the end
    step : int or None
        Slice step
    kind : string, optional, defaults None

    Returns
    -------
    (start, end) : (int, int)

    Notes
    -----
    This method only works if the MultiIndex is properly lexsorted. So,
    if only the first 2 levels of a 3-level MultiIndex are lexsorted,
    you can only pass two levels to ``.slice_locs``.

    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
    ...                                names=['A', 'B'])

    Get the slice locations from the beginning of 'b' in the first level
    until the end of the multiindex:

    >>> mi.slice_locs(start='b')
    (1, 4)

    Like above, but stop at the end of 'b' in the first level and 'f' in
    the second level:

    >>> mi.slice_locs(start='b', end=('b', 'f'))
    (1, 3)

    See Also
    --------
    MultiIndex.get_loc : Get location for a label or a tuple of labels.
    MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                          sequence of such.
    """
    # This function adds nothing to its parent implementation (the magic
    # happens in get_slice_bound method), but it adds meaningful doc.
    return super().slice_locs(start, end, step, kind=kind)
[ "def", "slice_locs", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "step", "=", "None", ",", "kind", "=", "None", ")", ":", "# This function adds nothing to its parent implementation (the magic", "# happens in get_slice_bound method), but it adds meaningful doc.", "return", "super", "(", ")", ".", "slice_locs", "(", "start", ",", "end", ",", "step", ",", "kind", "=", "kind", ")" ]
For an ordered MultiIndex, compute the slice locations for input
labels.

The input labels can be tuples representing partial levels, e.g.
for a MultiIndex with 3 levels, you can pass a single value
(corresponding to the first level), or a 1-, 2-, or 3-tuple.

Parameters
----------
start : label or tuple, default None
    If None, defaults to the beginning
end : label or tuple
    If None, defaults to the end
step : int or None
    Slice step
kind : string, optional, defaults None

Returns
-------
(start, end) : (int, int)

Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.

Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
...                                names=['A', 'B'])

Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:

>>> mi.slice_locs(start='b')
(1, 4)

Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:

>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)

See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                      sequence of such.
[ "For", "an", "ordered", "MultiIndex", "compute", "the", "slice", "locations", "for", "input", "labels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2260-L2314
20,371
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.get_loc
def get_loc(self, key, method=None):
    """
    Get location for a label or a tuple of labels as
    an integer, slice or boolean mask.

    Parameters
    ----------
    key : label or tuple of labels (one for each level)
    method : None

    Returns
    -------
    loc : int, slice object or boolean mask
        If the key is past the lexsort depth, the return may be a
        boolean mask array, otherwise it is always a slice or int.

    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])

    >>> mi.get_loc('b')
    slice(1, 3, None)

    >>> mi.get_loc(('b', 'e'))
    1

    Notes
    -----
    The key cannot be a slice, list of same-level labels, a boolean mask,
    or a sequence of such. If you want to use those, use
    :meth:`MultiIndex.get_locs` instead.

    See Also
    --------
    Index.get_loc : The get_loc method for (single-level) index.
    MultiIndex.slice_locs : Get slice location given start label(s) and
                            end label(s).
    MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                          sequence of such.
    """
    if method is not None:
        raise NotImplementedError('only the default get_loc method is '
                                  'currently supported for MultiIndex')

    def _maybe_to_slice(loc):
        """convert integer indexer to boolean mask or slice if possible"""
        if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
            return loc

        loc = lib.maybe_indices_to_slice(loc, len(self))
        if isinstance(loc, slice):
            return loc

        mask = np.empty(len(self), dtype='bool')
        mask.fill(False)
        mask[loc] = True
        return mask

    if not isinstance(key, tuple):
        loc = self._get_level_indexer(key, level=0)
        return _maybe_to_slice(loc)

    keylen = len(key)
    if self.nlevels < keylen:
        raise KeyError('Key length ({0}) exceeds index depth ({1})'
                       ''.format(keylen, self.nlevels))

    if keylen == self.nlevels and self.is_unique:
        return self._engine.get_loc(key)

    # -- partial selection or non-unique index
    # break the key into 2 parts based on the lexsort_depth of the index;
    # the first part returns a continuous slice of the index; the 2nd part
    # needs linear search within the slice
    i = self.lexsort_depth
    lead_key, follow_key = key[:i], key[i:]
    start, stop = (self.slice_locs(lead_key, lead_key)
                   if lead_key else (0, len(self)))

    if start == stop:
        raise KeyError(key)

    if not follow_key:
        return slice(start, stop)

    warnings.warn('indexing past lexsort depth may impact performance.',
                  PerformanceWarning, stacklevel=10)

    loc = np.arange(start, stop, dtype='int64')

    for i, k in enumerate(follow_key, len(lead_key)):
        mask = self.codes[i][loc] == self.levels[i].get_loc(k)
        if not mask.all():
            loc = loc[mask]
        if not len(loc):
            raise KeyError(key)

    return (_maybe_to_slice(loc) if len(loc) != stop - start
            else slice(start, stop))
python
def get_loc(self, key, method=None):
    """
    Get location for a label or a tuple of labels as
    an integer, slice or boolean mask.

    Parameters
    ----------
    key : label or tuple of labels (one for each level)
    method : None

    Returns
    -------
    loc : int, slice object or boolean mask
        If the key is past the lexsort depth, the return may be a
        boolean mask array, otherwise it is always a slice or int.

    Examples
    --------
    >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])

    >>> mi.get_loc('b')
    slice(1, 3, None)

    >>> mi.get_loc(('b', 'e'))
    1

    Notes
    -----
    The key cannot be a slice, list of same-level labels, a boolean mask,
    or a sequence of such. If you want to use those, use
    :meth:`MultiIndex.get_locs` instead.

    See Also
    --------
    Index.get_loc : The get_loc method for (single-level) index.
    MultiIndex.slice_locs : Get slice location given start label(s) and
                            end label(s).
    MultiIndex.get_locs : Get location for a label/slice/list/mask or a
                          sequence of such.
    """
    if method is not None:
        raise NotImplementedError('only the default get_loc method is '
                                  'currently supported for MultiIndex')

    def _maybe_to_slice(loc):
        """convert integer indexer to boolean mask or slice if possible"""
        if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
            return loc

        loc = lib.maybe_indices_to_slice(loc, len(self))
        if isinstance(loc, slice):
            return loc

        mask = np.empty(len(self), dtype='bool')
        mask.fill(False)
        mask[loc] = True
        return mask

    if not isinstance(key, tuple):
        loc = self._get_level_indexer(key, level=0)
        return _maybe_to_slice(loc)

    keylen = len(key)
    if self.nlevels < keylen:
        raise KeyError('Key length ({0}) exceeds index depth ({1})'
                       ''.format(keylen, self.nlevels))

    if keylen == self.nlevels and self.is_unique:
        return self._engine.get_loc(key)

    # -- partial selection or non-unique index
    # break the key into 2 parts based on the lexsort_depth of the index;
    # the first part returns a continuous slice of the index; the 2nd part
    # needs linear search within the slice
    i = self.lexsort_depth
    lead_key, follow_key = key[:i], key[i:]
    start, stop = (self.slice_locs(lead_key, lead_key)
                   if lead_key else (0, len(self)))

    if start == stop:
        raise KeyError(key)

    if not follow_key:
        return slice(start, stop)

    warnings.warn('indexing past lexsort depth may impact performance.',
                  PerformanceWarning, stacklevel=10)

    loc = np.arange(start, stop, dtype='int64')

    for i, k in enumerate(follow_key, len(lead_key)):
        mask = self.codes[i][loc] == self.levels[i].get_loc(k)
        if not mask.all():
            loc = loc[mask]
        if not len(loc):
            raise KeyError(key)

    return (_maybe_to_slice(loc) if len(loc) != stop - start
            else slice(start, stop))
[ "def", "get_loc", "(", "self", ",", "key", ",", "method", "=", "None", ")", ":", "if", "method", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "'only the default get_loc method is '", "'currently supported for MultiIndex'", ")", "def", "_maybe_to_slice", "(", "loc", ")", ":", "\"\"\"convert integer indexer to boolean mask or slice if possible\"\"\"", "if", "not", "isinstance", "(", "loc", ",", "np", ".", "ndarray", ")", "or", "loc", ".", "dtype", "!=", "'int64'", ":", "return", "loc", "loc", "=", "lib", ".", "maybe_indices_to_slice", "(", "loc", ",", "len", "(", "self", ")", ")", "if", "isinstance", "(", "loc", ",", "slice", ")", ":", "return", "loc", "mask", "=", "np", ".", "empty", "(", "len", "(", "self", ")", ",", "dtype", "=", "'bool'", ")", "mask", ".", "fill", "(", "False", ")", "mask", "[", "loc", "]", "=", "True", "return", "mask", "if", "not", "isinstance", "(", "key", ",", "tuple", ")", ":", "loc", "=", "self", ".", "_get_level_indexer", "(", "key", ",", "level", "=", "0", ")", "return", "_maybe_to_slice", "(", "loc", ")", "keylen", "=", "len", "(", "key", ")", "if", "self", ".", "nlevels", "<", "keylen", ":", "raise", "KeyError", "(", "'Key length ({0}) exceeds index depth ({1})'", "''", ".", "format", "(", "keylen", ",", "self", ".", "nlevels", ")", ")", "if", "keylen", "==", "self", ".", "nlevels", "and", "self", ".", "is_unique", ":", "return", "self", ".", "_engine", ".", "get_loc", "(", "key", ")", "# -- partial selection or non-unique index", "# break the key into 2 parts based on the lexsort_depth of the index;", "# the first part returns a continuous slice of the index; the 2nd part", "# needs linear search within the slice", "i", "=", "self", ".", "lexsort_depth", "lead_key", ",", "follow_key", "=", "key", "[", ":", "i", "]", ",", "key", "[", "i", ":", "]", "start", ",", "stop", "=", "(", "self", ".", "slice_locs", "(", "lead_key", ",", "lead_key", ")", "if", "lead_key", "else", "(", "0", ",", "len", "(", "self", ")", ")", ")", "if", "start", "==", "stop", ":", "raise", "KeyError", "(", "key", ")", "if", "not", "follow_key", ":", "return", "slice", "(", "start", ",", "stop", ")", "warnings", ".", "warn", "(", "'indexing past lexsort depth may impact performance.'", ",", "PerformanceWarning", ",", "stacklevel", "=", "10", ")", "loc", "=", "np", ".", "arange", "(", "start", ",", "stop", ",", "dtype", "=", "'int64'", ")", "for", "i", ",", "k", "in", "enumerate", "(", "follow_key", ",", "len", "(", "lead_key", ")", ")", ":", "mask", "=", "self", ".", "codes", "[", "i", "]", "[", "loc", "]", "==", "self", ".", "levels", "[", "i", "]", ".", "get_loc", "(", "k", ")", "if", "not", "mask", ".", "all", "(", ")", ":", "loc", "=", "loc", "[", "mask", "]", "if", "not", "len", "(", "loc", ")", ":", "raise", "KeyError", "(", "key", ")", "return", "(", "_maybe_to_slice", "(", "loc", ")", "if", "len", "(", "loc", ")", "!=", "stop", "-", "start", "else", "slice", "(", "start", ",", "stop", ")", ")" ]
Get location for a label or a tuple of labels as an integer, slice or boolean mask. Parameters ---------- key : label or tuple of labels (one for each level) method : None Returns ------- loc : int, slice object or boolean mask If the key is past the lexsort depth, the return may be a boolean mask array, otherwise it is always a slice or int. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_loc('b') slice(1, 3, None) >>> mi.get_loc(('b', 'e')) 1 Notes ----- The key cannot be a slice, list of same-level labels, a boolean mask, or a sequence of such. If you want to use those, use :meth:`MultiIndex.get_locs` instead. See Also -------- Index.get_loc : The get_loc method for (single-level) index. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such.
[ "Get", "location", "for", "a", "label", "or", "a", "tuple", "of", "labels", "as", "an", "integer", "slice", "or", "boolean", "mask", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2347-L2445
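A minimal usage sketch of get_loc's three return types; the second index below is an assumed, non-lexsorted construction chosen to trigger the boolean-mask path:

import pandas as pd

mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
mi.get_loc('b')          # slice(1, 3, None): contiguous run of 'b'
mi.get_loc(('b', 'e'))   # 1: full-depth key on a unique index goes through the engine

# Non-contiguous matches come back as a boolean mask instead of a slice:
mi2 = pd.MultiIndex.from_arrays([list('aba'), list('xyz')])
mi2.get_loc('a')         # array([ True, False,  True])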
20,372
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.equal_levels
def equal_levels(self, other): """ Return True if the levels of both MultiIndex objects are the same """ if self.nlevels != other.nlevels: return False for i in range(self.nlevels): if not self.levels[i].equals(other.levels[i]): return False return True
python
def equal_levels(self, other): """ Return True if the levels of both MultiIndex objects are the same """ if self.nlevels != other.nlevels: return False for i in range(self.nlevels): if not self.levels[i].equals(other.levels[i]): return False return True
[ "def", "equal_levels", "(", "self", ",", "other", ")", ":", "if", "self", ".", "nlevels", "!=", "other", ".", "nlevels", ":", "return", "False", "for", "i", "in", "range", "(", "self", ".", "nlevels", ")", ":", "if", "not", "self", ".", "levels", "[", "i", "]", ".", "equals", "(", "other", ".", "levels", "[", "i", "]", ")", ":", "return", "False", "return", "True" ]
Return True if the levels of both MultiIndex objects are the same
[ "Return", "True", "if", "the", "levels", "of", "both", "MultiIndex", "objects", "are", "the", "same" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2878-L2889
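A minimal sketch of equal_levels: it compares only the level label pools, not the codes, so two indexes laying out the same levels in different row orders still compare True.

import pandas as pd

mi = pd.MultiIndex.from_product([['a', 'b'], [1, 2]])
other = pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])

mi.equal_levels(mi)         # True
mi.equal_levels(other)      # False: second level differs
mi.equal_levels(mi[::-1])   # True: slicing reorders codes but leaves levels intact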
20,373
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.union
def union(self, other, sort=None): """ Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- Index Examples -------- >>> index.union(index2) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self # TODO: Index.union returns other when `len(self)` is 0. uniq_tuples = lib.fast_unique_multiple([self._ndarray_values, other._ndarray_values], sort=sort) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
python
def union(self, other, sort=None): """ Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- Index Examples -------- >>> index.union(index2) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self # TODO: Index.union returns other when `len(self)` is 0. uniq_tuples = lib.fast_unique_multiple([self._ndarray_values, other._ndarray_values], sort=sort) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
[ "def", "union", "(", "self", ",", "other", ",", "sort", "=", "None", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", ",", "result_names", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "if", "len", "(", "other", ")", "==", "0", "or", "self", ".", "equals", "(", "other", ")", ":", "return", "self", "# TODO: Index.union returns other when `len(self)` is 0.", "uniq_tuples", "=", "lib", ".", "fast_unique_multiple", "(", "[", "self", ".", "_ndarray_values", ",", "other", ".", "_ndarray_values", "]", ",", "sort", "=", "sort", ")", "return", "MultiIndex", ".", "from_arrays", "(", "lzip", "(", "*", "uniq_tuples", ")", ",", "sortorder", "=", "0", ",", "names", "=", "result_names", ")" ]
Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- Index Examples -------- >>> index.union(index2)
[ "Form", "the", "union", "of", "two", "MultiIndex", "objects" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2891-L2937
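A sketch of the sort keyword on union, with assumed toy indexes: sort=None (the default) sorts the result, while sort=False keeps order of appearance (self's tuples first, then other's new tuples).

import pandas as pd

left = pd.MultiIndex.from_tuples([('b', 2), ('a', 1)])
right = pd.MultiIndex.from_tuples([('c', 3), ('a', 1)])

left.union(right)              # ('a', 1), ('b', 2), ('c', 3) -- sorted
left.union(right, sort=False)  # ('b', 2), ('a', 1), ('c', 3) -- appearance order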
20,374
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.intersection
def intersection(self, other, sort=False): """ Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match behaviour from before 0.24.0 Returns ------- Index """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if self.equals(other): return self self_tuples = self._ndarray_values other_tuples = other._ndarray_values uniq_tuples = set(self_tuples) & set(other_tuples) if sort is None: uniq_tuples = sorted(uniq_tuples) if len(uniq_tuples) == 0: return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
python
def intersection(self, other, sort=False): """ Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match behaviour from before 0.24.0 Returns ------- Index """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if self.equals(other): return self self_tuples = self._ndarray_values other_tuples = other._ndarray_values uniq_tuples = set(self_tuples) & set(other_tuples) if sort is None: uniq_tuples = sorted(uniq_tuples) if len(uniq_tuples) == 0: return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
[ "def", "intersection", "(", "self", ",", "other", ",", "sort", "=", "False", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", ",", "result_names", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "if", "self", ".", "equals", "(", "other", ")", ":", "return", "self", "self_tuples", "=", "self", ".", "_ndarray_values", "other_tuples", "=", "other", ".", "_ndarray_values", "uniq_tuples", "=", "set", "(", "self_tuples", ")", "&", "set", "(", "other_tuples", ")", "if", "sort", "is", "None", ":", "uniq_tuples", "=", "sorted", "(", "uniq_tuples", ")", "if", "len", "(", "uniq_tuples", ")", "==", "0", ":", "return", "MultiIndex", "(", "levels", "=", "self", ".", "levels", ",", "codes", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "names", "=", "result_names", ",", "verify_integrity", "=", "False", ")", "else", ":", "return", "MultiIndex", ".", "from_arrays", "(", "lzip", "(", "*", "uniq_tuples", ")", ",", "sortorder", "=", "0", ",", "names", "=", "result_names", ")" ]
Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match behaviour from before 0.24.0 Returns ------- Index
[ "Form", "the", "intersection", "of", "two", "MultiIndex", "objects", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2939-L2980
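A sketch of intersection: the implementation intersects Python sets, so with the default sort=False the element order of the result is not guaranteed; pass sort=None when a sorted result matters.

import pandas as pd

left = pd.MultiIndex.from_tuples([('b', 2), ('a', 1), ('c', 3)])
right = pd.MultiIndex.from_tuples([('c', 3), ('a', 1)])

left.intersection(right)             # ('a', 1) and ('c', 3), in arbitrary order
left.intersection(right, sort=None)  # ('a', 1), ('c', 3) -- sorted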
20,375
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.difference
def difference(self, other, sort=None): """ Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- diff : MultiIndex """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0: return self if self.equals(other): return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) difference = this.values.take(label_diff) if sort is None: difference = sorted(difference) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
python
def difference(self, other, sort=None): """ Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- diff : MultiIndex """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0: return self if self.equals(other): return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) difference = this.values.take(label_diff) if sort is None: difference = sorted(difference) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
[ "def", "difference", "(", "self", ",", "other", ",", "sort", "=", "None", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", ",", "result_names", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "if", "len", "(", "other", ")", "==", "0", ":", "return", "self", "if", "self", ".", "equals", "(", "other", ")", ":", "return", "MultiIndex", "(", "levels", "=", "self", ".", "levels", ",", "codes", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "names", "=", "result_names", ",", "verify_integrity", "=", "False", ")", "this", "=", "self", ".", "_get_unique_index", "(", ")", "indexer", "=", "this", ".", "get_indexer", "(", "other", ")", "indexer", "=", "indexer", ".", "take", "(", "(", "indexer", "!=", "-", "1", ")", ".", "nonzero", "(", ")", "[", "0", "]", ")", "label_diff", "=", "np", ".", "setdiff1d", "(", "np", ".", "arange", "(", "this", ".", "size", ")", ",", "indexer", ",", "assume_unique", "=", "True", ")", "difference", "=", "this", ".", "values", ".", "take", "(", "label_diff", ")", "if", "sort", "is", "None", ":", "difference", "=", "sorted", "(", "difference", ")", "if", "len", "(", "difference", ")", "==", "0", ":", "return", "MultiIndex", "(", "levels", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "codes", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "names", "=", "result_names", ",", "verify_integrity", "=", "False", ")", "else", ":", "return", "MultiIndex", ".", "from_tuples", "(", "difference", ",", "sortorder", "=", "0", ",", "names", "=", "result_names", ")" ]
Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- diff : MultiIndex
[ "Compute", "set", "difference", "of", "two", "MultiIndex", "objects" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2982-L3032
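A sketch of difference: the tuples of self absent from other, sorted under the default sort=None; duplicates in self are dropped because the computation runs on _get_unique_index.

import pandas as pd

left = pd.MultiIndex.from_tuples([('b', 2), ('a', 1), ('b', 2)])
right = pd.MultiIndex.from_tuples([('a', 1)])

left.difference(right)   # ('b', 2) only -- unique rows, sorted
left.difference(left)    # empty MultiIndex, result names preserved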
20,376
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.insert
def insert(self, loc, item): """ Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index """ # Pad the key with empty strings if lower levels of the key # aren't specified: if not isinstance(item, tuple): item = (item, ) + ('', ) * (self.nlevels - 1) elif len(item) != self.nlevels: raise ValueError('Item must have length equal to number of ' 'levels.') new_levels = [] new_codes = [] for k, level, level_codes in zip(item, self.levels, self.codes): if k not in level: # have to insert into level # must insert at end otherwise you have to recompute all the # other codes lev_loc = len(level) level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) new_levels.append(level) new_codes.append(np.insert( ensure_int64(level_codes), loc, lev_loc)) return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False)
python
def insert(self, loc, item): """ Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index """ # Pad the key with empty strings if lower levels of the key # aren't specified: if not isinstance(item, tuple): item = (item, ) + ('', ) * (self.nlevels - 1) elif len(item) != self.nlevels: raise ValueError('Item must have length equal to number of ' 'levels.') new_levels = [] new_codes = [] for k, level, level_codes in zip(item, self.levels, self.codes): if k not in level: # have to insert into level # must insert at end otherwise you have to recompute all the # other codes lev_loc = len(level) level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) new_levels.append(level) new_codes.append(np.insert( ensure_int64(level_codes), loc, lev_loc)) return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False)
[ "def", "insert", "(", "self", ",", "loc", ",", "item", ")", ":", "# Pad the key with empty strings if lower levels of the key", "# aren't specified:", "if", "not", "isinstance", "(", "item", ",", "tuple", ")", ":", "item", "=", "(", "item", ",", ")", "+", "(", "''", ",", ")", "*", "(", "self", ".", "nlevels", "-", "1", ")", "elif", "len", "(", "item", ")", "!=", "self", ".", "nlevels", ":", "raise", "ValueError", "(", "'Item must have length equal to number of '", "'levels.'", ")", "new_levels", "=", "[", "]", "new_codes", "=", "[", "]", "for", "k", ",", "level", ",", "level_codes", "in", "zip", "(", "item", ",", "self", ".", "levels", ",", "self", ".", "codes", ")", ":", "if", "k", "not", "in", "level", ":", "# have to insert into level", "# must insert at end otherwise you have to recompute all the", "# other codes", "lev_loc", "=", "len", "(", "level", ")", "level", "=", "level", ".", "insert", "(", "lev_loc", ",", "k", ")", "else", ":", "lev_loc", "=", "level", ".", "get_loc", "(", "k", ")", "new_levels", ".", "append", "(", "level", ")", "new_codes", ".", "append", "(", "np", ".", "insert", "(", "ensure_int64", "(", "level_codes", ")", ",", "loc", ",", "lev_loc", ")", ")", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "self", ".", "names", ",", "verify_integrity", "=", "False", ")" ]
Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index
[ "Make", "new", "MultiIndex", "inserting", "new", "item", "at", "location" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L3066-L3105
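A sketch of insert, including the padding branch: a scalar key is widened to a full-length tuple with empty strings for the unspecified levels, and a key of the wrong length raises.

import pandas as pd

mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])

mi.insert(1, ('c', 3))   # ('a', 1), ('c', 3), ('b', 2)
mi.insert(0, 'z')        # ('z', ''), ('a', 1), ('b', 2) -- scalar padded with ''
# mi.insert(0, ('a', 1, 9)) would raise ValueError: wrong key length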
20,377
pandas-dev/pandas
pandas/core/indexes/multi.py
MultiIndex.delete
def delete(self, loc): """ Make new index with passed location deleted Returns ------- new_index : MultiIndex """ new_codes = [np.delete(level_codes, loc) for level_codes in self.codes] return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False)
python
def delete(self, loc): """ Make new index with passed location deleted Returns ------- new_index : MultiIndex """ new_codes = [np.delete(level_codes, loc) for level_codes in self.codes] return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False)
[ "def", "delete", "(", "self", ",", "loc", ")", ":", "new_codes", "=", "[", "np", ".", "delete", "(", "level_codes", ",", "loc", ")", "for", "level_codes", "in", "self", ".", "codes", "]", "return", "MultiIndex", "(", "levels", "=", "self", ".", "levels", ",", "codes", "=", "new_codes", ",", "names", "=", "self", ".", "names", ",", "verify_integrity", "=", "False", ")" ]
Make new index with passed location deleted Returns ------- new_index : MultiIndex
[ "Make", "new", "index", "with", "passed", "location", "deleted" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L3107-L3117
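A sketch of delete: loc is forwarded to np.delete on each level's codes, so ints, lists, and slices all work; the level label pools themselves are untouched, which can leave unused levels behind.

import pandas as pd

mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2), ('c', 3)])

mi.delete(0)             # ('b', 2), ('c', 3)
mi.delete([0, 2])        # ('b', 2) only
mi.delete(0).levels[0]   # Index(['a', 'b', 'c'], dtype='object') -- 'a' still a level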
20,378
pandas-dev/pandas
pandas/core/algorithms.py
_ensure_data
def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """ # we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
python
def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """ # we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
[ "def", "_ensure_data", "(", "values", ",", "dtype", "=", "None", ")", ":", "# we check some simple dtypes first", "try", ":", "if", "is_object_dtype", "(", "dtype", ")", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "if", "is_bool_dtype", "(", "values", ")", "or", "is_bool_dtype", "(", "dtype", ")", ":", "# we are actually coercing to uint64", "# until our algos support uint8 directly (see TODO)", "return", "np", ".", "asarray", "(", "values", ")", ".", "astype", "(", "'uint64'", ")", ",", "'bool'", ",", "'uint64'", "elif", "is_signed_integer_dtype", "(", "values", ")", "or", "is_signed_integer_dtype", "(", "dtype", ")", ":", "return", "ensure_int64", "(", "values", ")", ",", "'int64'", ",", "'int64'", "elif", "(", "is_unsigned_integer_dtype", "(", "values", ")", "or", "is_unsigned_integer_dtype", "(", "dtype", ")", ")", ":", "return", "ensure_uint64", "(", "values", ")", ",", "'uint64'", ",", "'uint64'", "elif", "is_float_dtype", "(", "values", ")", "or", "is_float_dtype", "(", "dtype", ")", ":", "return", "ensure_float64", "(", "values", ")", ",", "'float64'", ",", "'float64'", "elif", "is_object_dtype", "(", "values", ")", "and", "dtype", "is", "None", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "elif", "is_complex_dtype", "(", "values", ")", "or", "is_complex_dtype", "(", "dtype", ")", ":", "# ignore the fact that we are casting to float", "# which discards complex parts", "with", "catch_warnings", "(", ")", ":", "simplefilter", "(", "\"ignore\"", ",", "np", ".", "ComplexWarning", ")", "values", "=", "ensure_float64", "(", "values", ")", "return", "values", ",", "'float64'", ",", "'float64'", "except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "# if we are trying to coerce to a dtype", "# and it is incompat this will fall thru to here", "return", "ensure_object", "(", "values", ")", ",", "'object'", ",", "'object'", "# datetimelike", "if", "(", "needs_i8_conversion", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", "or", "is_datetime64_any_dtype", "(", "dtype", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ")", ":", "if", "is_period_dtype", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "PeriodIndex", "values", "=", "PeriodIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "elif", "is_timedelta64_dtype", "(", "values", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "TimedeltaIndex", "values", "=", "TimedeltaIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "else", ":", "# Datetime", "from", "pandas", "import", "DatetimeIndex", "values", "=", "DatetimeIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "return", "values", ".", "asi8", ",", "dtype", ",", "'int64'", "elif", "(", "is_categorical_dtype", "(", "values", ")", "and", "(", "is_categorical_dtype", "(", "dtype", ")", "or", "dtype", "is", "None", ")", ")", ":", "values", "=", "getattr", "(", "values", ",", "'values'", ",", "values", ")", "values", "=", "values", ".", "codes", "dtype", "=", "'category'", "# we are actually coercing to int64", "# until our algos support int* directly (not all do)", "values", "=", "ensure_int64", "(", "values", ")", "return", "values", ",", "dtype", ",", "'int64'", "# we have failed, return object", "values", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", 
"np", ".", "object", ")", "return", "ensure_object", "(", "values", ")", ",", "'object'", ",", "'object'" ]
routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string)
[ "routine", "to", "ensure", "that", "our", "data", "is", "of", "the", "correct", "input", "dtype", "for", "lower", "-", "level", "routines" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L36-L127
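_ensure_data is private, so the sketch below imports it via the internal module path at this commit (an assumption, not a supported API); it simply makes the documented coercions visible.

import numpy as np
import pandas as pd
from pandas.core import algorithms as algos  # private module, path assumed stable

algos._ensure_data(np.array([True, False]))
# (array([1, 0], dtype=uint64), 'bool', 'uint64') -- bools ride on uint64 for now

algos._ensure_data(pd.to_datetime(['2019-01-01']))
# (i8 epoch values, datetime64[ns] dtype, 'int64') -- the datetimelike branch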
20,379
pandas-dev/pandas
pandas/core/algorithms.py
_reconstruct_data
def _reconstruct_data(values, dtype, original): """ reverse of _ensure_data Parameters ---------- values : ndarray dtype : pandas_dtype original : ndarray-like Returns ------- Index for extension types, otherwise ndarray casted to dtype """ from pandas import Index if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype): values = Index(original)._shallow_copy(values, name=None) elif is_bool_dtype(dtype): values = values.astype(dtype) # we only support object dtypes bool Index if isinstance(original, Index): values = values.astype(object) elif dtype is not None: values = values.astype(dtype) return values
python
def _reconstruct_data(values, dtype, original): """ reverse of _ensure_data Parameters ---------- values : ndarray dtype : pandas_dtype original : ndarray-like Returns ------- Index for extension types, otherwise ndarray casted to dtype """ from pandas import Index if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype): values = Index(original)._shallow_copy(values, name=None) elif is_bool_dtype(dtype): values = values.astype(dtype) # we only support object dtypes bool Index if isinstance(original, Index): values = values.astype(object) elif dtype is not None: values = values.astype(dtype) return values
[ "def", "_reconstruct_data", "(", "values", ",", "dtype", ",", "original", ")", ":", "from", "pandas", "import", "Index", "if", "is_extension_array_dtype", "(", "dtype", ")", ":", "values", "=", "dtype", ".", "construct_array_type", "(", ")", ".", "_from_sequence", "(", "values", ")", "elif", "is_datetime64tz_dtype", "(", "dtype", ")", "or", "is_period_dtype", "(", "dtype", ")", ":", "values", "=", "Index", "(", "original", ")", ".", "_shallow_copy", "(", "values", ",", "name", "=", "None", ")", "elif", "is_bool_dtype", "(", "dtype", ")", ":", "values", "=", "values", ".", "astype", "(", "dtype", ")", "# we only support object dtypes bool Index", "if", "isinstance", "(", "original", ",", "Index", ")", ":", "values", "=", "values", ".", "astype", "(", "object", ")", "elif", "dtype", "is", "not", "None", ":", "values", "=", "values", ".", "astype", "(", "dtype", ")", "return", "values" ]
reverse of _ensure_data Parameters ---------- values : ndarray dtype : pandas_dtype original : ndarray-like Returns ------- Index for extension types, otherwise ndarray casted to dtype
[ "reverse", "of", "_ensure_data" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L130-L158
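_reconstruct_data undoes the coercion above, so a round trip through both helpers should be the identity for supported dtypes; a minimal sketch on the bool branch (same private-path caveat as before):

import numpy as np
from pandas.core import algorithms as algos  # private module, path assumed stable

original = np.array([True, False])
vals, dtype, _ = algos._ensure_data(original)    # uint64 values, dtype 'bool'
algos._reconstruct_data(vals, dtype, original)   # array([ True, False]) again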
20,380
pandas-dev/pandas
pandas/core/algorithms.py
_ensure_arraylike
def _ensure_arraylike(values): """ ensure that we are arraylike if not already """ if not is_array_like(values): inferred = lib.infer_dtype(values, skipna=False) if inferred in ['mixed', 'string', 'unicode']: if isinstance(values, tuple): values = list(values) values = construct_1d_object_array_from_listlike(values) else: values = np.asarray(values) return values
python
def _ensure_arraylike(values): """ ensure that we are arraylike if not already """ if not is_array_like(values): inferred = lib.infer_dtype(values, skipna=False) if inferred in ['mixed', 'string', 'unicode']: if isinstance(values, tuple): values = list(values) values = construct_1d_object_array_from_listlike(values) else: values = np.asarray(values) return values
[ "def", "_ensure_arraylike", "(", "values", ")", ":", "if", "not", "is_array_like", "(", "values", ")", ":", "inferred", "=", "lib", ".", "infer_dtype", "(", "values", ",", "skipna", "=", "False", ")", "if", "inferred", "in", "[", "'mixed'", ",", "'string'", ",", "'unicode'", "]", ":", "if", "isinstance", "(", "values", ",", "tuple", ")", ":", "values", "=", "list", "(", "values", ")", "values", "=", "construct_1d_object_array_from_listlike", "(", "values", ")", "else", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "values" ]
ensure that we are arraylike if not already
[ "ensure", "that", "we", "are", "arraylike", "if", "not", "already" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L161-L173
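A sketch of why _ensure_arraylike routes string-like inputs through construct_1d_object_array_from_listlike: plain np.asarray would coerce to a fixed-width unicode dtype, while the hashtable algorithms want object.

import numpy as np
from pandas.core import algorithms as algos  # private module, path assumed stable

algos._ensure_arraylike(['a', 'b'])   # array(['a', 'b'], dtype=object)
np.asarray(['a', 'b'])                # array(['a', 'b'], dtype='<U1') -- what we avoid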
20,381
pandas-dev/pandas
pandas/core/algorithms.py
match
def match(to_match, values, na_sentinel=-1): """ Compute locations of to_match into values Parameters ---------- to_match : array-like values to find positions of values : array-like Unique set of values na_sentinel : int, default -1 Value to mark "not found" Returns ------- match : ndarray of integers """ values = com.asarray_tuplesafe(values) htable, _, values, dtype, ndtype = _get_hashtable_algo(values) to_match, _, _ = _ensure_data(to_match, dtype) table = htable(min(len(to_match), 1000000)) table.map_locations(values) result = table.lookup(to_match) if na_sentinel != -1: # replace but return a numpy array # use a Series because it handles dtype conversions properly from pandas import Series result = Series(result.ravel()).replace(-1, na_sentinel) result = result.values.reshape(result.shape) return result
python
def match(to_match, values, na_sentinel=-1): """ Compute locations of to_match into values Parameters ---------- to_match : array-like values to find positions of values : array-like Unique set of values na_sentinel : int, default -1 Value to mark "not found" Returns ------- match : ndarray of integers """ values = com.asarray_tuplesafe(values) htable, _, values, dtype, ndtype = _get_hashtable_algo(values) to_match, _, _ = _ensure_data(to_match, dtype) table = htable(min(len(to_match), 1000000)) table.map_locations(values) result = table.lookup(to_match) if na_sentinel != -1: # replace but return a numpy array # use a Series because it handles dtype conversions properly from pandas import Series result = Series(result.ravel()).replace(-1, na_sentinel) result = result.values.reshape(result.shape) return result
[ "def", "match", "(", "to_match", ",", "values", ",", "na_sentinel", "=", "-", "1", ")", ":", "values", "=", "com", ".", "asarray_tuplesafe", "(", "values", ")", "htable", ",", "_", ",", "values", ",", "dtype", ",", "ndtype", "=", "_get_hashtable_algo", "(", "values", ")", "to_match", ",", "_", ",", "_", "=", "_ensure_data", "(", "to_match", ",", "dtype", ")", "table", "=", "htable", "(", "min", "(", "len", "(", "to_match", ")", ",", "1000000", ")", ")", "table", ".", "map_locations", "(", "values", ")", "result", "=", "table", ".", "lookup", "(", "to_match", ")", "if", "na_sentinel", "!=", "-", "1", ":", "# replace but return a numpy array", "# use a Series because it handles dtype conversions properly", "from", "pandas", "import", "Series", "result", "=", "Series", "(", "result", ".", "ravel", "(", ")", ")", ".", "replace", "(", "-", "1", ",", "na_sentinel", ")", "result", "=", "result", ".", "values", ".", "reshape", "(", "result", ".", "shape", ")", "return", "result" ]
Compute locations of to_match into values Parameters ---------- to_match : array-like values to find positions of values : array-like Unique set of values na_sentinel : int, default -1 Value to mark "not found" Returns ------- match : ndarray of integers
[ "Compute", "locations", "of", "to_match", "into", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L238-L273
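A sketch of match on assumed toy inputs: the result holds, for each element of to_match, its position in values, with na_sentinel marking misses (same private-path caveat).

from pandas.core import algorithms as algos  # private module, path assumed stable

algos.match(['b', 'c', 'a', 'x'], ['a', 'b', 'c'])
# array([ 1,  2,  0, -1])

algos.match(['b', 'x'], ['a', 'b'], na_sentinel=-99)
# array([  1, -99]) -- misses rewritten via the Series.replace path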
20,382
pandas-dev/pandas
pandas/core/algorithms.py
unique
def unique(values): """ Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ values = _ensure_arraylike(values) if is_extension_array_dtype(values): # Dispatch to extension dtype's unique. return values.unique() original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) table = htable(len(values)) uniques = table.unique(values) uniques = _reconstruct_data(uniques, dtype, original) return uniques
python
def unique(values): """ Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ values = _ensure_arraylike(values) if is_extension_array_dtype(values): # Dispatch to extension dtype's unique. return values.unique() original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) table = htable(len(values)) uniques = table.unique(values) uniques = _reconstruct_data(uniques, dtype, original) return uniques
[ "def", "unique", "(", "values", ")", ":", "values", "=", "_ensure_arraylike", "(", "values", ")", "if", "is_extension_array_dtype", "(", "values", ")", ":", "# Dispatch to extension dtype's unique.", "return", "values", ".", "unique", "(", ")", "original", "=", "values", "htable", ",", "_", ",", "values", ",", "dtype", ",", "ndtype", "=", "_get_hashtable_algo", "(", "values", ")", "table", "=", "htable", "(", "len", "(", "values", ")", ")", "uniques", "=", "table", ".", "unique", "(", "values", ")", "uniques", "=", "_reconstruct_data", "(", "uniques", ",", "dtype", ",", "original", ")", "return", "uniques" ]
Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
[ "Hash", "table", "-", "based", "unique", ".", "Uniques", "are", "returned", "in", "order", "of", "appearance", ".", "This", "does", "NOT", "sort", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L276-L367
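One contrast worth making explicit next to the examples above: unlike numpy's unique, pd.unique never sorts, so the two can disagree on order.

import numpy as np
import pandas as pd

pd.unique([3, 1, 3, 2])   # array([3, 1, 2]) -- order of first appearance
np.unique([3, 1, 3, 2])   # array([1, 2, 3]) -- always sorted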
20,383
pandas-dev/pandas
pandas/core/algorithms.py
isin
def isin(comps, values): """ Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps """ if not is_list_like(comps): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{comps_type}]" .format(comps_type=type(comps).__name__)) if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) if is_categorical_dtype(comps): # TODO(extension) # handle categoricals return comps._values.isin(values) comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) # faster for larger cases to use np.in1d f = lambda x, y: htable.ismember_object(x, values) # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1000000 and not is_object_dtype(comps): f = lambda x, y: np.in1d(x, y) elif is_integer_dtype(comps): try: values = values.astype('int64', copy=False) comps = comps.astype('int64', copy=False) f = lambda x, y: htable.ismember_int64(x, y) except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) elif is_float_dtype(comps): try: values = values.astype('float64', copy=False) comps = comps.astype('float64', copy=False) f = lambda x, y: htable.ismember_float64(x, y) except (TypeError, ValueError): values = values.astype(object) comps = comps.astype(object) return f(comps, values)
python
def isin(comps, values): """ Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps """ if not is_list_like(comps): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{comps_type}]" .format(comps_type=type(comps).__name__)) if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) if is_categorical_dtype(comps): # TODO(extension) # handle categoricals return comps._values.isin(values) comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) # faster for larger cases to use np.in1d f = lambda x, y: htable.ismember_object(x, values) # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1000000 and not is_object_dtype(comps): f = lambda x, y: np.in1d(x, y) elif is_integer_dtype(comps): try: values = values.astype('int64', copy=False) comps = comps.astype('int64', copy=False) f = lambda x, y: htable.ismember_int64(x, y) except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) elif is_float_dtype(comps): try: values = values.astype('float64', copy=False) comps = comps.astype('float64', copy=False) f = lambda x, y: htable.ismember_float64(x, y) except (TypeError, ValueError): values = values.astype(object) comps = comps.astype(object) return f(comps, values)
[ "def", "isin", "(", "comps", ",", "values", ")", ":", "if", "not", "is_list_like", "(", "comps", ")", ":", "raise", "TypeError", "(", "\"only list-like objects are allowed to be passed\"", "\" to isin(), you passed a [{comps_type}]\"", ".", "format", "(", "comps_type", "=", "type", "(", "comps", ")", ".", "__name__", ")", ")", "if", "not", "is_list_like", "(", "values", ")", ":", "raise", "TypeError", "(", "\"only list-like objects are allowed to be passed\"", "\" to isin(), you passed a [{values_type}]\"", ".", "format", "(", "values_type", "=", "type", "(", "values", ")", ".", "__name__", ")", ")", "if", "not", "isinstance", "(", "values", ",", "(", "ABCIndex", ",", "ABCSeries", ",", "np", ".", "ndarray", ")", ")", ":", "values", "=", "construct_1d_object_array_from_listlike", "(", "list", "(", "values", ")", ")", "if", "is_categorical_dtype", "(", "comps", ")", ":", "# TODO(extension)", "# handle categoricals", "return", "comps", ".", "_values", ".", "isin", "(", "values", ")", "comps", "=", "com", ".", "values_from_object", "(", "comps", ")", "comps", ",", "dtype", ",", "_", "=", "_ensure_data", "(", "comps", ")", "values", ",", "_", ",", "_", "=", "_ensure_data", "(", "values", ",", "dtype", "=", "dtype", ")", "# faster for larger cases to use np.in1d", "f", "=", "lambda", "x", ",", "y", ":", "htable", ".", "ismember_object", "(", "x", ",", "values", ")", "# GH16012", "# Ensure np.in1d doesn't get object types or it *may* throw an exception", "if", "len", "(", "comps", ")", ">", "1000000", "and", "not", "is_object_dtype", "(", "comps", ")", ":", "f", "=", "lambda", "x", ",", "y", ":", "np", ".", "in1d", "(", "x", ",", "y", ")", "elif", "is_integer_dtype", "(", "comps", ")", ":", "try", ":", "values", "=", "values", ".", "astype", "(", "'int64'", ",", "copy", "=", "False", ")", "comps", "=", "comps", ".", "astype", "(", "'int64'", ",", "copy", "=", "False", ")", "f", "=", "lambda", "x", ",", "y", ":", "htable", ".", "ismember_int64", "(", "x", ",", "y", ")", "except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "values", "=", "values", ".", "astype", "(", "object", ")", "comps", "=", "comps", ".", "astype", "(", "object", ")", "elif", "is_float_dtype", "(", "comps", ")", ":", "try", ":", "values", "=", "values", ".", "astype", "(", "'float64'", ",", "copy", "=", "False", ")", "comps", "=", "comps", ".", "astype", "(", "'float64'", ",", "copy", "=", "False", ")", "f", "=", "lambda", "x", ",", "y", ":", "htable", ".", "ismember_float64", "(", "x", ",", "y", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "values", "=", "values", ".", "astype", "(", "object", ")", "comps", "=", "comps", ".", "astype", "(", "object", ")", "return", "f", "(", "comps", ",", "values", ")" ]
Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps
[ "Compute", "the", "isin", "boolean", "array" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L373-L434
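A sketch of isin at the array level (the same routine backs Series.isin): it returns a boolean array aligned with comps, and values may be any list-like (same private-path caveat).

from pandas.core import algorithms as algos  # private module, path assumed stable

algos.isin([1, 2, 3], [2, 4])   # array([False,  True, False])
algos.isin(['a', 'b'], {'b'})   # array([False,  True]) -- sets work as values too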
20,384
pandas-dev/pandas
pandas/core/algorithms.py
_factorize_array
def _factorize_array(values, na_sentinel=-1, size_hint=None, na_value=None): """Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray na_sentinel : int, default -1 size_hint : int, optional Passed through to the hashtable's 'get_labels' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). Returns ------- labels, uniques : ndarray """ (hash_klass, _), values = _get_data_algo(values, _hashtables) table = hash_klass(size_hint or len(values)) uniques, labels = table.factorize(values, na_sentinel=na_sentinel, na_value=na_value) labels = ensure_platform_int(labels) return labels, uniques
python
def _factorize_array(values, na_sentinel=-1, size_hint=None, na_value=None): """Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray na_sentinel : int, default -1 size_hint : int, optional Passed through to the hashtable's 'get_labels' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). Returns ------- labels, uniques : ndarray """ (hash_klass, _), values = _get_data_algo(values, _hashtables) table = hash_klass(size_hint or len(values)) uniques, labels = table.factorize(values, na_sentinel=na_sentinel, na_value=na_value) labels = ensure_platform_int(labels) return labels, uniques
[ "def", "_factorize_array", "(", "values", ",", "na_sentinel", "=", "-", "1", ",", "size_hint", "=", "None", ",", "na_value", "=", "None", ")", ":", "(", "hash_klass", ",", "_", ")", ",", "values", "=", "_get_data_algo", "(", "values", ",", "_hashtables", ")", "table", "=", "hash_klass", "(", "size_hint", "or", "len", "(", "values", ")", ")", "uniques", ",", "labels", "=", "table", ".", "factorize", "(", "values", ",", "na_sentinel", "=", "na_sentinel", ",", "na_value", "=", "na_value", ")", "labels", "=", "ensure_platform_int", "(", "labels", ")", "return", "labels", ",", "uniques" ]
Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray na_sentinel : int, default -1 size_hint : int, optional Passed through to the hashtable's 'get_labels' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). Returns ------- labels, uniques : ndarray
[ "Factorize", "an", "array", "-", "like", "to", "labels", "and", "uniques", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L437-L466
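A quick sketch of the observable behavior through the public pd.factorize, which wraps this helper (assuming the 0.24-era pandas pinned by the sha above):

import numpy as np
import pandas as pd

codes, uniques = pd.factorize(np.array(['b', 'a', 'b', np.nan], dtype=object))
print(codes)    # [ 0  1  0 -1] -- missing values get the na_sentinel, -1
print(uniques)  # ['b' 'a']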
20,385
pandas-dev/pandas
pandas/core/algorithms.py
value_counts
def value_counts(values, sort=True, ascending=False, normalize=False, bins=None, dropna=True): """ Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order normalize : boolean, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN Returns ------- value_counts : Series """ from pandas.core.series import Series, Index name = getattr(values, 'name', None) if bins is not None: try: from pandas.core.reshape.tile import cut values = Series(values) ii = cut(values, bins, include_lowest=True) except TypeError: raise TypeError("bins argument only works with numeric data.") # count, remove nulls (from the index), and sort the bins result = ii.value_counts(dropna=dropna) result = result[result.index.notna()] result.index = result.index.astype('interval') result = result.sort_index() # if we are dropna and we have NO values if dropna and (result.values == 0).all(): result = result.iloc[0:0] # normalizing is by len of all (regardless of dropna) counts = np.array([len(ii)]) else: if is_extension_array_dtype(values) or is_sparse(values): # handle Categorical and sparse, result = Series(values)._values.value_counts(dropna=dropna) result.name = name counts = result.values else: keys, counts = _value_counts_arraylike(values, dropna) if not isinstance(keys, Index): keys = Index(keys) result = Series(counts, index=keys, name=name) if sort: result = result.sort_values(ascending=ascending) if normalize: result = result / float(counts.sum()) return result
python
def value_counts(values, sort=True, ascending=False, normalize=False, bins=None, dropna=True): """ Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order normalize : boolean, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN Returns ------- value_counts : Series """ from pandas.core.series import Series, Index name = getattr(values, 'name', None) if bins is not None: try: from pandas.core.reshape.tile import cut values = Series(values) ii = cut(values, bins, include_lowest=True) except TypeError: raise TypeError("bins argument only works with numeric data.") # count, remove nulls (from the index), and sort the bins result = ii.value_counts(dropna=dropna) result = result[result.index.notna()] result.index = result.index.astype('interval') result = result.sort_index() # if we are dropna and we have NO values if dropna and (result.values == 0).all(): result = result.iloc[0:0] # normalizing is by len of all (regardless of dropna) counts = np.array([len(ii)]) else: if is_extension_array_dtype(values) or is_sparse(values): # handle Categorical and sparse, result = Series(values)._values.value_counts(dropna=dropna) result.name = name counts = result.values else: keys, counts = _value_counts_arraylike(values, dropna) if not isinstance(keys, Index): keys = Index(keys) result = Series(counts, index=keys, name=name) if sort: result = result.sort_values(ascending=ascending) if normalize: result = result / float(counts.sum()) return result
[ "def", "value_counts", "(", "values", ",", "sort", "=", "True", ",", "ascending", "=", "False", ",", "normalize", "=", "False", ",", "bins", "=", "None", ",", "dropna", "=", "True", ")", ":", "from", "pandas", ".", "core", ".", "series", "import", "Series", ",", "Index", "name", "=", "getattr", "(", "values", ",", "'name'", ",", "None", ")", "if", "bins", "is", "not", "None", ":", "try", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "tile", "import", "cut", "values", "=", "Series", "(", "values", ")", "ii", "=", "cut", "(", "values", ",", "bins", ",", "include_lowest", "=", "True", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"bins argument only works with numeric data.\"", ")", "# count, remove nulls (from the index), and but the bins", "result", "=", "ii", ".", "value_counts", "(", "dropna", "=", "dropna", ")", "result", "=", "result", "[", "result", ".", "index", ".", "notna", "(", ")", "]", "result", ".", "index", "=", "result", ".", "index", ".", "astype", "(", "'interval'", ")", "result", "=", "result", ".", "sort_index", "(", ")", "# if we are dropna and we have NO values", "if", "dropna", "and", "(", "result", ".", "values", "==", "0", ")", ".", "all", "(", ")", ":", "result", "=", "result", ".", "iloc", "[", "0", ":", "0", "]", "# normalizing is by len of all (regardless of dropna)", "counts", "=", "np", ".", "array", "(", "[", "len", "(", "ii", ")", "]", ")", "else", ":", "if", "is_extension_array_dtype", "(", "values", ")", "or", "is_sparse", "(", "values", ")", ":", "# handle Categorical and sparse,", "result", "=", "Series", "(", "values", ")", ".", "_values", ".", "value_counts", "(", "dropna", "=", "dropna", ")", "result", ".", "name", "=", "name", "counts", "=", "result", ".", "values", "else", ":", "keys", ",", "counts", "=", "_value_counts_arraylike", "(", "values", ",", "dropna", ")", "if", "not", "isinstance", "(", "keys", ",", "Index", ")", ":", "keys", "=", "Index", "(", "keys", ")", "result", "=", "Series", "(", "counts", ",", "index", "=", "keys", ",", "name", "=", "name", ")", "if", "sort", ":", "result", "=", "result", ".", "sort_values", "(", "ascending", "=", "ascending", ")", "if", "normalize", ":", "result", "=", "result", "/", "float", "(", "counts", ".", "sum", "(", ")", ")", "return", "result" ]
Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order normalize : boolean, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN Returns ------- value_counts : Series
[ "Compute", "a", "histogram", "of", "the", "counts", "of", "non", "-", "null", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L649-L720
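A short sketch of the main options, via the public Series.value_counts that this function backs (assuming the 0.24-era pandas pinned by the sha above):

import numpy as np
import pandas as pd

s = pd.Series([1, 1, 2, np.nan])
print(s.value_counts())                # 1.0 -> 2, 2.0 -> 1; NaN dropped by default
print(s.value_counts(dropna=False))    # NaN counted as well
print(s.value_counts(normalize=True))  # relative frequencies instead of counts
print(s.value_counts(bins=2))          # two half-open bins via pd.cut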
20,386
pandas-dev/pandas
pandas/core/algorithms.py
duplicated
def duplicated(values, keep='first'): """ Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray """ values, dtype, ndtype = _ensure_data(values) f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype)) return f(values, keep=keep)
python
def duplicated(values, keep='first'): """ Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray """ values, dtype, ndtype = _ensure_data(values) f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype)) return f(values, keep=keep)
[ "def", "duplicated", "(", "values", ",", "keep", "=", "'first'", ")", ":", "values", ",", "dtype", ",", "ndtype", "=", "_ensure_data", "(", "values", ")", "f", "=", "getattr", "(", "htable", ",", "\"duplicated_{dtype}\"", ".", "format", "(", "dtype", "=", "ndtype", ")", ")", "return", "f", "(", "values", ",", "keep", "=", "keep", ")" ]
Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray
[ "Return", "boolean", "ndarray", "denoting", "duplicate", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L766-L790
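A minimal sketch via the public Series.duplicated, which dispatches to this function (assuming the 0.24-era pandas pinned by the sha above):

import pandas as pd

s = pd.Series(['a', 'b', 'a', 'a'])
print(s.duplicated().tolist())             # [False, False, True, True]
print(s.duplicated(keep='last').tolist())  # [True, False, True, False]
print(s.duplicated(keep=False).tolist())   # [True, False, True, True]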
20,387
pandas-dev/pandas
pandas/core/algorithms.py
rank
def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): """ Rank the values along a given axis. Parameters ---------- values : array-like Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which ties are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they are ranked at the top ascending : boolean, default True Whether or not the elements should be ranked in ascending order. pct : boolean, default False Whether or not to display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). """ if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) elif values.ndim == 2: f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) else: raise TypeError("Arrays with ndim > 2 are not supported.") return ranks
python
def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): """ Rank the values along a given axis. Parameters ---------- values : array-like Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which ties are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they are ranked at the top ascending : boolean, default True Whether or not the elements should be ranked in ascending order. pct : boolean, default False Whether or not to display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). """ if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) elif values.ndim == 2: f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) else: raise TypeError("Arrays with ndim > 2 are not supported.") return ranks
[ "def", "rank", "(", "values", ",", "axis", "=", "0", ",", "method", "=", "'average'", ",", "na_option", "=", "'keep'", ",", "ascending", "=", "True", ",", "pct", "=", "False", ")", ":", "if", "values", ".", "ndim", "==", "1", ":", "f", ",", "values", "=", "_get_data_algo", "(", "values", ",", "_rank1d_functions", ")", "ranks", "=", "f", "(", "values", ",", "ties_method", "=", "method", ",", "ascending", "=", "ascending", ",", "na_option", "=", "na_option", ",", "pct", "=", "pct", ")", "elif", "values", ".", "ndim", "==", "2", ":", "f", ",", "values", "=", "_get_data_algo", "(", "values", ",", "_rank2d_functions", ")", "ranks", "=", "f", "(", "values", ",", "axis", "=", "axis", ",", "ties_method", "=", "method", ",", "ascending", "=", "ascending", ",", "na_option", "=", "na_option", ",", "pct", "=", "pct", ")", "else", ":", "raise", "TypeError", "(", "\"Array with ndim > 2 are not supported.\"", ")", "return", "ranks" ]
Rank the values along a given axis. Parameters ---------- values : array-like Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which ties are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they are ranked at the top ascending : boolean, default True Whether or not the elements should be ranked in ascending order. pct : boolean, default False Whether or not to display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
[ "Rank", "the", "values", "along", "a", "given", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L838-L874
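A minimal sketch via the public Series.rank, which funnels into this function (assuming the 0.24-era pandas pinned by the sha above):

import numpy as np
import pandas as pd

s = pd.Series([7, 2, 2, np.nan])
print(s.rank().tolist())              # [3.0, 1.5, 1.5, nan] -- ties averaged, NaN kept
print(s.rank(method='min').tolist())  # [3.0, 1.0, 1.0, nan]
print(s.rank(pct=True).tolist())      # [1.0, 0.5, 0.5, nan] -- rank / number of valid values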
20,388
pandas-dev/pandas
pandas/core/algorithms.py
checked_add_with_arr
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None): """ Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : array addend. b : array or scalar addend. arr_mask : boolean array or None array indicating which elements to exclude from checking b_mask : boolean array or boolean or None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value. """ # For performance reasons, we broadcast 'b' to the new array 'b2' # so that it has the same size as 'arr'. b2 = np.broadcast_to(b, arr.shape) if b_mask is not None: # We do the same broadcasting for b_mask as well. b2_mask = np.broadcast_to(b_mask, arr.shape) else: b2_mask = None # For elements that are NaN, regardless of their value, we should # ignore whether they overflow or not when doing the checked add. if arr_mask is not None and b2_mask is not None: not_nan = np.logical_not(arr_mask | b2_mask) elif arr_mask is not None: not_nan = np.logical_not(arr_mask) elif b_mask is not None: not_nan = np.logical_not(b2_mask) else: not_nan = np.empty(arr.shape, dtype=bool) not_nan.fill(True) # gh-14324: For each element in 'arr' and its corresponding element # in 'b2', we check the sign of the element in 'b2'. If it is positive, # we then check whether its sum with the element in 'arr' exceeds # np.iinfo(np.int64).max. If so, we have an overflow error. If # it is negative, we then check whether its sum with the element in # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow # error as well. mask1 = b2 > 0 mask2 = b2 < 0 if not mask1.any(): to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any() elif not mask2.any(): to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any() else: to_raise = (((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or ((np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]).any()) if to_raise: raise OverflowError("Overflow in int64 addition") return arr + b
python
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None): """ Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : array addend. b : array or scalar addend. arr_mask : boolean array or None array indicating which elements to exclude from checking b_mask : boolean array or boolean or None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value. """ # For performance reasons, we broadcast 'b' to the new array 'b2' # so that it has the same size as 'arr'. b2 = np.broadcast_to(b, arr.shape) if b_mask is not None: # We do the same broadcasting for b_mask as well. b2_mask = np.broadcast_to(b_mask, arr.shape) else: b2_mask = None # For elements that are NaN, regardless of their value, we should # ignore whether they overflow or not when doing the checked add. if arr_mask is not None and b2_mask is not None: not_nan = np.logical_not(arr_mask | b2_mask) elif arr_mask is not None: not_nan = np.logical_not(arr_mask) elif b_mask is not None: not_nan = np.logical_not(b2_mask) else: not_nan = np.empty(arr.shape, dtype=bool) not_nan.fill(True) # gh-14324: For each element in 'arr' and its corresponding element # in 'b2', we check the sign of the element in 'b2'. If it is positive, # we then check whether its sum with the element in 'arr' exceeds # np.iinfo(np.int64).max. If so, we have an overflow error. If # it is negative, we then check whether its sum with the element in # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow # error as well. mask1 = b2 > 0 mask2 = b2 < 0 if not mask1.any(): to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any() elif not mask2.any(): to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any() else: to_raise = (((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or ((np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]).any()) if to_raise: raise OverflowError("Overflow in int64 addition") return arr + b
[ "def", "checked_add_with_arr", "(", "arr", ",", "b", ",", "arr_mask", "=", "None", ",", "b_mask", "=", "None", ")", ":", "# For performance reasons, we broadcast 'b' to the new array 'b2'", "# so that it has the same size as 'arr'.", "b2", "=", "np", ".", "broadcast_to", "(", "b", ",", "arr", ".", "shape", ")", "if", "b_mask", "is", "not", "None", ":", "# We do the same broadcasting for b_mask as well.", "b2_mask", "=", "np", ".", "broadcast_to", "(", "b_mask", ",", "arr", ".", "shape", ")", "else", ":", "b2_mask", "=", "None", "# For elements that are NaN, regardless of their value, we should", "# ignore whether they overflow or not when doing the checked add.", "if", "arr_mask", "is", "not", "None", "and", "b2_mask", "is", "not", "None", ":", "not_nan", "=", "np", ".", "logical_not", "(", "arr_mask", "|", "b2_mask", ")", "elif", "arr_mask", "is", "not", "None", ":", "not_nan", "=", "np", ".", "logical_not", "(", "arr_mask", ")", "elif", "b_mask", "is", "not", "None", ":", "not_nan", "=", "np", ".", "logical_not", "(", "b2_mask", ")", "else", ":", "not_nan", "=", "np", ".", "empty", "(", "arr", ".", "shape", ",", "dtype", "=", "bool", ")", "not_nan", ".", "fill", "(", "True", ")", "# gh-14324: For each element in 'arr' and its corresponding element", "# in 'b2', we check the sign of the element in 'b2'. If it is positive,", "# we then check whether its sum with the element in 'arr' exceeds", "# np.iinfo(np.int64).max. If so, we have an overflow error. If it", "# it is negative, we then check whether its sum with the element in", "# 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow", "# error as well.", "mask1", "=", "b2", ">", "0", "mask2", "=", "b2", "<", "0", "if", "not", "mask1", ".", "any", "(", ")", ":", "to_raise", "=", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "min", "-", "b2", ">", "arr", ")", "&", "not_nan", ")", ".", "any", "(", ")", "elif", "not", "mask2", ".", "any", "(", ")", ":", "to_raise", "=", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "max", "-", "b2", "<", "arr", ")", "&", "not_nan", ")", ".", "any", "(", ")", "else", ":", "to_raise", "=", "(", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "max", "-", "b2", "[", "mask1", "]", "<", "arr", "[", "mask1", "]", ")", "&", "not_nan", "[", "mask1", "]", ")", ".", "any", "(", ")", "or", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "min", "-", "b2", "[", "mask2", "]", ">", "arr", "[", "mask2", "]", ")", "&", "not_nan", "[", "mask2", "]", ")", ".", "any", "(", ")", ")", "if", "to_raise", ":", "raise", "OverflowError", "(", "\"Overflow in int64 addition\"", ")", "return", "arr", "+", "b" ]
Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : array addend. b : array or scalar addend. arr_mask : boolean array or None array indicating which elements to exclude from checking b_mask : boolean array or boolean or None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value.
[ "Perform", "array", "addition", "that", "checks", "for", "underflow", "and", "overflow", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L877-L948
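A minimal sketch calling the private helper directly (assuming the 0.24-era module path pinned by the sha above; this is internal API):

import numpy as np
from pandas.core.algorithms import checked_add_with_arr

arr = np.array([1, np.iinfo(np.int64).max], dtype=np.int64)
print(checked_add_with_arr(arr, 0))  # [1 9223372036854775807] -- no overflow, plain add
try:
    checked_add_with_arr(arr, 1)     # int64 max + 1 trips the sign-based check
except OverflowError as e:
    print(e)                         # Overflow in int64 addition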
20,389
pandas-dev/pandas
pandas/core/algorithms.py
quantile
def quantile(x, q, interpolation_method='fraction'): """ Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. - lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """ x = np.asarray(x) mask = isna(x) x = x[~mask] values = np.sort(x) def _interpolate(a, b, fraction): """Returns the point at the given fraction between a and b, where 'fraction' must be between 0 and 1. """ return a + (b - a) * fraction def _get_score(at): if len(values) == 0: return np.nan idx = at * (len(values) - 1) if idx % 1 == 0: score = values[int(idx)] else: if interpolation_method == 'fraction': score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) elif interpolation_method == 'lower': score = values[int(np.floor(idx))] elif interpolation_method == 'higher': score = values[int(np.ceil(idx))] else: raise ValueError("interpolation_method can only be 'fraction' " ", 'lower' or 'higher'") return score if is_scalar(q): return _get_score(q) else: q = np.asarray(q, np.float64) return algos.arrmap_float64(q, _get_score)
python
def quantile(x, q, interpolation_method='fraction'): """ Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. - lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """ x = np.asarray(x) mask = isna(x) x = x[~mask] values = np.sort(x) def _interpolate(a, b, fraction): """Returns the point at the given fraction between a and b, where 'fraction' must be between 0 and 1. """ return a + (b - a) * fraction def _get_score(at): if len(values) == 0: return np.nan idx = at * (len(values) - 1) if idx % 1 == 0: score = values[int(idx)] else: if interpolation_method == 'fraction': score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) elif interpolation_method == 'lower': score = values[int(np.floor(idx))] elif interpolation_method == 'higher': score = values[int(np.ceil(idx))] else: raise ValueError("interpolation_method can only be 'fraction' " ", 'lower' or 'higher'") return score if is_scalar(q): return _get_score(q) else: q = np.asarray(q, np.float64) return algos.arrmap_float64(q, _get_score)
[ "def", "quantile", "(", "x", ",", "q", ",", "interpolation_method", "=", "'fraction'", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "mask", "=", "isna", "(", "x", ")", "x", "=", "x", "[", "~", "mask", "]", "values", "=", "np", ".", "sort", "(", "x", ")", "def", "_interpolate", "(", "a", ",", "b", ",", "fraction", ")", ":", "\"\"\"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n \"\"\"", "return", "a", "+", "(", "b", "-", "a", ")", "*", "fraction", "def", "_get_score", "(", "at", ")", ":", "if", "len", "(", "values", ")", "==", "0", ":", "return", "np", ".", "nan", "idx", "=", "at", "*", "(", "len", "(", "values", ")", "-", "1", ")", "if", "idx", "%", "1", "==", "0", ":", "score", "=", "values", "[", "int", "(", "idx", ")", "]", "else", ":", "if", "interpolation_method", "==", "'fraction'", ":", "score", "=", "_interpolate", "(", "values", "[", "int", "(", "idx", ")", "]", ",", "values", "[", "int", "(", "idx", ")", "+", "1", "]", ",", "idx", "%", "1", ")", "elif", "interpolation_method", "==", "'lower'", ":", "score", "=", "values", "[", "np", ".", "floor", "(", "idx", ")", "]", "elif", "interpolation_method", "==", "'higher'", ":", "score", "=", "values", "[", "np", ".", "ceil", "(", "idx", ")", "]", "else", ":", "raise", "ValueError", "(", "\"interpolation_method can only be 'fraction' \"", "\", 'lower' or 'higher'\"", ")", "return", "score", "if", "is_scalar", "(", "q", ")", ":", "return", "_get_score", "(", "q", ")", "else", ":", "q", "=", "np", ".", "asarray", "(", "q", ",", "np", ".", "float64", ")", "return", "algos", ".", "arrmap_float64", "(", "q", ",", "_get_score", ")" ]
Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. - lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5
[ "Compute", "sample", "quantile", "or", "quantiles", "of", "the", "input", "array", ".", "For", "example", "q", "=", "0", ".", "5", "computes", "the", "median", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L966-L1043
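Note that despite the docstring's "percentile" wording, `q` here is a fraction in [0, 1] (q=0.5 is the median). A minimal sketch calling the private helper directly (assuming the 0.24-era module path pinned by the sha above):

import numpy as np
from pandas.core.algorithms import quantile

x = np.array([1.0, 2.0, 3.0, 4.0])
print(quantile(x, 0.5))           # 2.5 -- idx = 0.5 * 3 = 1.5, interpolated between 2 and 3
print(quantile(x, [0.25, 0.75]))  # [1.75 3.25] -- an array q yields an array of scores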
20,390
pandas-dev/pandas
pandas/core/sparse/scipy_sparse.py
_sparse_series_to_coo
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. """ import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels >= 2') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo ' 'transformation.') # to keep things simple, only rely on integer indexing (not labels) row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels) sparse_matrix = scipy.sparse.coo_matrix( (v, (i, j)), shape=(len(rows), len(columns))) return sparse_matrix, rows, columns
python
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. """ import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels >= 2') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo ' 'transformation.') # to keep things simple, only rely on integer indexing (not labels) row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels) sparse_matrix = scipy.sparse.coo_matrix( (v, (i, j)), shape=(len(rows), len(columns))) return sparse_matrix, rows, columns
[ "def", "_sparse_series_to_coo", "(", "ss", ",", "row_levels", "=", "(", "0", ",", ")", ",", "column_levels", "=", "(", "1", ",", ")", ",", "sort_labels", "=", "False", ")", ":", "import", "scipy", ".", "sparse", "if", "ss", ".", "index", ".", "nlevels", "<", "2", ":", "raise", "ValueError", "(", "'to_coo requires MultiIndex with nlevels > 2'", ")", "if", "not", "ss", ".", "index", ".", "is_unique", ":", "raise", "ValueError", "(", "'Duplicate index entries are not allowed in to_coo '", "'transformation.'", ")", "# to keep things simple, only rely on integer indexing (not labels)", "row_levels", "=", "[", "ss", ".", "index", ".", "_get_level_number", "(", "x", ")", "for", "x", "in", "row_levels", "]", "column_levels", "=", "[", "ss", ".", "index", ".", "_get_level_number", "(", "x", ")", "for", "x", "in", "column_levels", "]", "v", ",", "i", ",", "j", ",", "rows", ",", "columns", "=", "_to_ijv", "(", "ss", ",", "row_levels", "=", "row_levels", ",", "column_levels", "=", "column_levels", ",", "sort_labels", "=", "sort_labels", ")", "sparse_matrix", "=", "scipy", ".", "sparse", ".", "coo_matrix", "(", "(", "v", ",", "(", "i", ",", "j", ")", ")", ",", "shape", "=", "(", "len", "(", "rows", ")", ",", "len", "(", "columns", ")", ")", ")", "return", "sparse_matrix", ",", "rows", ",", "columns" ]
Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels.
[ "Convert", "a", "SparseSeries", "to", "a", "scipy", ".", "sparse", ".", "coo_matrix", "using", "index", "levels", "row_levels", "column_levels", "as", "the", "row", "and", "column", "labels", "respectively", ".", "Returns", "the", "sparse_matrix", "row", "and", "column", "labels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/scipy_sparse.py#L93-L118
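A minimal sketch via the public SparseSeries.to_coo entry point (assuming the 0.24-era pandas pinned by the sha above and an installed scipy; SparseSeries has since been removed):

import numpy as np
import pandas as pd

s = pd.Series([3.0, np.nan, 2.0],
              index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)])).to_sparse()
A, rows, cols = s.to_coo(row_levels=[0], column_levels=[1])
print(A.todense())  # 3.0 at (0, 0), 2.0 at (1, 1); NaN (the fill value) is simply absent
print(rows, cols)   # [0, 1] [0, 1]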
20,391
pandas-dev/pandas
pandas/core/sparse/scipy_sparse.py
_coo_to_sparse_series
def _coo_to_sparse_series(A, dense_index=False): """ Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor. """ s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) s = s.sort_index() s = s.to_sparse() # TODO: specify kind? if dense_index: # is there a better constructor method to use here? i = range(A.shape[0]) j = range(A.shape[1]) ind = MultiIndex.from_product([i, j]) s = s.reindex(ind) return s
python
def _coo_to_sparse_series(A, dense_index=False): """ Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor. """ s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) s = s.sort_index() s = s.to_sparse() # TODO: specify kind? if dense_index: # is there a better constructor method to use here? i = range(A.shape[0]) j = range(A.shape[1]) ind = MultiIndex.from_product([i, j]) s = s.reindex(ind) return s
[ "def", "_coo_to_sparse_series", "(", "A", ",", "dense_index", "=", "False", ")", ":", "s", "=", "Series", "(", "A", ".", "data", ",", "MultiIndex", ".", "from_arrays", "(", "(", "A", ".", "row", ",", "A", ".", "col", ")", ")", ")", "s", "=", "s", ".", "sort_index", "(", ")", "s", "=", "s", ".", "to_sparse", "(", ")", "# TODO: specify kind?", "if", "dense_index", ":", "# is there a better constructor method to use here?", "i", "=", "range", "(", "A", ".", "shape", "[", "0", "]", ")", "j", "=", "range", "(", "A", ".", "shape", "[", "1", "]", ")", "ind", "=", "MultiIndex", ".", "from_product", "(", "[", "i", ",", "j", "]", ")", "s", "=", "s", ".", "reindex", "(", "ind", ")", "return", "s" ]
Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor.
[ "Convert", "a", "scipy", ".", "sparse", ".", "coo_matrix", "to", "a", "SparseSeries", ".", "Use", "the", "defaults", "given", "in", "the", "SparseSeries", "constructor", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/scipy_sparse.py#L121-L135
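The inverse direction, sketched via the public SparseSeries.from_coo (same 0.24-era assumptions as above; requires scipy):

import pandas as pd
from scipy import sparse

A = sparse.coo_matrix(([3.0, 2.0], ([0, 1], [0, 1])), shape=(2, 2))
ss = pd.SparseSeries.from_coo(A)                           # wraps the helper above
print(ss)                                                  # (row, col) MultiIndex -> value
print(len(pd.SparseSeries.from_coo(A, dense_index=True)))  # 4 -- full row x col product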
20,392
pandas-dev/pandas
pandas/core/arrays/datetimes.py
_to_M8
def _to_M8(key, tz=None): """ Timestamp-like => dt64 """ if not isinstance(key, Timestamp): # this also converts strings key = Timestamp(key) if key.tzinfo is not None and tz is not None: # Don't tz_localize(None) if key is already tz-aware key = key.tz_convert(tz) else: key = key.tz_localize(tz) return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)
python
def _to_M8(key, tz=None): """ Timestamp-like => dt64 """ if not isinstance(key, Timestamp): # this also converts strings key = Timestamp(key) if key.tzinfo is not None and tz is not None: # Don't tz_localize(None) if key is already tz-aware key = key.tz_convert(tz) else: key = key.tz_localize(tz) return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)
[ "def", "_to_M8", "(", "key", ",", "tz", "=", "None", ")", ":", "if", "not", "isinstance", "(", "key", ",", "Timestamp", ")", ":", "# this also converts strings", "key", "=", "Timestamp", "(", "key", ")", "if", "key", ".", "tzinfo", "is", "not", "None", "and", "tz", "is", "not", "None", ":", "# Don't tz_localize(None) if key is already tz-aware", "key", "=", "key", ".", "tz_convert", "(", "tz", ")", "else", ":", "key", "=", "key", ".", "tz_localize", "(", "tz", ")", "return", "np", ".", "int64", "(", "conversion", ".", "pydt_to_i8", "(", "key", ")", ")", ".", "view", "(", "_NS_DTYPE", ")" ]
Timestamp-like => dt64
[ "Timestamp", "-", "like", "=", ">", "dt64" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L72-L85
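A minimal sketch calling the private helper directly (assuming the 0.24-era module path pinned by the sha above):

from pandas.core.arrays.datetimes import _to_M8

m8 = _to_M8('2019-01-01')    # string is converted through Timestamp first
print(m8, m8.dtype)          # 2019-01-01T00:00:00.000000000 datetime64[ns]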
20,393
pandas-dev/pandas
pandas/core/arrays/datetimes.py
_dt_array_cmp
def _dt_array_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented other = lib.item_from_zerodim(other) if isinstance(other, (datetime, np.datetime64, str)): if isinstance(other, (datetime, np.datetime64)): # GH#18435 strings get a pass from tzawareness compat self._assert_tzawareness_compat(other) try: other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp return ops.invalid_comparison(self, other, op) result = op(self.asi8, other.view('i8')) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: if isinstance(other, list): try: other = type(self)._from_sequence(other) except ValueError: other = np.array(other, dtype=np.object_) elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArray)): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. return ops.invalid_comparison(self, other, op) if is_object_dtype(other): # We have to use _comp_method_OBJECT_ARRAY instead of numpy # comparison otherwise it would fail to raise when # comparing tz-aware and tz-naive with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.astype(object), other) o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. is_timedelta64_dtype(other) return ops.invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): other = other.array if (is_datetime64_dtype(other) and not is_datetime64_ns_dtype(other) or not hasattr(other, 'asi8')): # e.g. other.dtype == 'datetime64[s]' # or an object-dtype ndarray other = type(self)._from_sequence(other) result = op(self.view('i8'), other.view('i8')) o_mask = other._isnan result = com.values_from_object(result) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
python
def _dt_array_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented other = lib.item_from_zerodim(other) if isinstance(other, (datetime, np.datetime64, str)): if isinstance(other, (datetime, np.datetime64)): # GH#18435 strings get a pass from tzawareness compat self._assert_tzawareness_compat(other) try: other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp return ops.invalid_comparison(self, other, op) result = op(self.asi8, other.view('i8')) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: if isinstance(other, list): try: other = type(self)._from_sequence(other) except ValueError: other = np.array(other, dtype=np.object_) elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArray)): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. return ops.invalid_comparison(self, other, op) if is_object_dtype(other): # We have to use _comp_method_OBJECT_ARRAY instead of numpy # comparison otherwise it would fail to raise when # comparing tz-aware and tz-naive with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.astype(object), other) o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. is_timedelta64_dtype(other) return ops.invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): other = other.array if (is_datetime64_dtype(other) and not is_datetime64_ns_dtype(other) or not hasattr(other, 'asi8')): # e.g. other.dtype == 'datetime64[s]' # or an object-dtype ndarray other = type(self)._from_sequence(other) result = op(self.view('i8'), other.view('i8')) o_mask = other._isnan result = com.values_from_object(result) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
[ "def", "_dt_array_cmp", "(", "cls", ",", "op", ")", ":", "opname", "=", "'__{name}__'", ".", "format", "(", "name", "=", "op", ".", "__name__", ")", "nat_result", "=", "opname", "==", "'__ne__'", "def", "wrapper", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "(", "ABCDataFrame", ",", "ABCSeries", ",", "ABCIndexClass", ")", ")", ":", "return", "NotImplemented", "other", "=", "lib", ".", "item_from_zerodim", "(", "other", ")", "if", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ",", "str", ")", ")", ":", "if", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ")", ")", ":", "# GH#18435 strings get a pass from tzawareness compat", "self", ".", "_assert_tzawareness_compat", "(", "other", ")", "try", ":", "other", "=", "_to_M8", "(", "other", ",", "tz", "=", "self", ".", "tz", ")", "except", "ValueError", ":", "# string that cannot be parsed to Timestamp", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "result", "=", "op", "(", "self", ".", "asi8", ",", "other", ".", "view", "(", "'i8'", ")", ")", "if", "isna", "(", "other", ")", ":", "result", ".", "fill", "(", "nat_result", ")", "elif", "lib", ".", "is_scalar", "(", "other", ")", "or", "np", ".", "ndim", "(", "other", ")", "==", "0", ":", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "elif", "len", "(", "other", ")", "!=", "len", "(", "self", ")", ":", "raise", "ValueError", "(", "\"Lengths must match\"", ")", "else", ":", "if", "isinstance", "(", "other", ",", "list", ")", ":", "try", ":", "other", "=", "type", "(", "self", ")", ".", "_from_sequence", "(", "other", ")", "except", "ValueError", ":", "other", "=", "np", ".", "array", "(", "other", ",", "dtype", "=", "np", ".", "object_", ")", "elif", "not", "isinstance", "(", "other", ",", "(", "np", ".", "ndarray", ",", "ABCIndexClass", ",", "ABCSeries", ",", "DatetimeArray", ")", ")", ":", "# Following Timestamp convention, __eq__ is all-False", "# and __ne__ is all True, others raise TypeError.", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "if", "is_object_dtype", "(", "other", ")", ":", "# We have to use _comp_method_OBJECT_ARRAY instead of numpy", "# comparison otherwise it would fail to raise when", "# comparing tz-aware and tz-naive", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "=", "ops", ".", "_comp_method_OBJECT_ARRAY", "(", "op", ",", "self", ".", "astype", "(", "object", ")", ",", "other", ")", "o_mask", "=", "isna", "(", "other", ")", "elif", "not", "(", "is_datetime64_dtype", "(", "other", ")", "or", "is_datetime64tz_dtype", "(", "other", ")", ")", ":", "# e.g. is_timedelta64_dtype(other)", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "else", ":", "self", ".", "_assert_tzawareness_compat", "(", "other", ")", "if", "isinstance", "(", "other", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "other", "=", "other", ".", "array", "if", "(", "is_datetime64_dtype", "(", "other", ")", "and", "not", "is_datetime64_ns_dtype", "(", "other", ")", "or", "not", "hasattr", "(", "other", ",", "'asi8'", ")", ")", ":", "# e.g. 
other.dtype == 'datetime64[s]'", "# or an object-dtype ndarray", "other", "=", "type", "(", "self", ")", ".", "_from_sequence", "(", "other", ")", "result", "=", "op", "(", "self", ".", "view", "(", "'i8'", ")", ",", "other", ".", "view", "(", "'i8'", ")", ")", "o_mask", "=", "other", ".", "_isnan", "result", "=", "com", ".", "values_from_object", "(", "result", ")", "if", "o_mask", ".", "any", "(", ")", ":", "result", "[", "o_mask", "]", "=", "nat_result", "if", "self", ".", "_hasnans", ":", "result", "[", "self", ".", "_isnan", "]", "=", "nat_result", "return", "result", "return", "compat", ".", "set_function_name", "(", "wrapper", ",", "opname", ",", "cls", ")" ]
Wrap comparison operations to convert datetime-like to datetime64
[ "Wrap", "comparison", "operations", "to", "convert", "datetime", "-", "like", "to", "datetime64" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L126-L207
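The wrapper surfaces through ordinary DatetimeIndex comparisons; a short sketch (assuming the 0.24-era pandas pinned by the sha above):

import pandas as pd

dti = pd.date_range('2019-01-01', periods=3)
print((dti == '2019-01-02').tolist())  # [False, True, False] -- string parsed via _to_M8
print((dti == 'not a date').tolist())  # [False, False, False] -- unparseable string: all-False __eq__
print((dti != pd.NaT).tolist())        # [True, True, True] -- NaT comparisons filled with nat_result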
20,394
pandas-dev/pandas
pandas/core/arrays/datetimes.py
objects_to_datetime64ns
def objects_to_datetime64ns(data, dayfirst, yearfirst, utc=False, errors="raise", require_iso8601=False, allow_object=False): """ Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert timezone-aware timestamps to UTC errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes """ assert errors in ["raise", "ignore", "coerce"] # if str-dtype, convert data = np.array(data, copy=False, dtype=np.object_) try: result, tz_parsed = tslib.array_to_datetime( data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601 ) except ValueError as e: try: values, tz_parsed = conversion.datetime_to_datetime64(data) # If tzaware, these values represent unix timestamps, so we # return them as i8 to distinguish from wall times return values.view('i8'), tz_parsed except (ValueError, TypeError): raise e if tz_parsed is not None: # We can take a shortcut since the datetime64 numpy array # is in UTC # Return i8 values to denote unix timestamps return result.view('i8'), tz_parsed elif is_datetime64_dtype(result): # returning M8[ns] denotes wall-times; since tz is None # the distinction is a thin one return result, tz_parsed elif is_object_dtype(result): # GH#23675 when called via `pd.to_datetime`, returning an object-dtype # array is allowed. When called via `pd.DatetimeIndex`, we can # only accept datetime64 dtype, so raise TypeError if object-dtype # is returned, as that indicates the values can be recognized as # datetimes but they have conflicting timezones/awareness if allow_object: return result, tz_parsed raise TypeError(result) else: # pragma: no cover # GH#23675 this TypeError should never be hit, whereas the TypeError # in the object-dtype branch above is reachable. raise TypeError(result)
python
def objects_to_datetime64ns(data, dayfirst, yearfirst, utc=False, errors="raise", require_iso8601=False, allow_object=False): """ Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert timezone-aware timestamps to UTC errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes """ assert errors in ["raise", "ignore", "coerce"] # if str-dtype, convert data = np.array(data, copy=False, dtype=np.object_) try: result, tz_parsed = tslib.array_to_datetime( data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601 ) except ValueError as e: try: values, tz_parsed = conversion.datetime_to_datetime64(data) # If tzaware, these values represent unix timestamps, so we # return them as i8 to distinguish from wall times return values.view('i8'), tz_parsed except (ValueError, TypeError): raise e if tz_parsed is not None: # We can take a shortcut since the datetime64 numpy array # is in UTC # Return i8 values to denote unix timestamps return result.view('i8'), tz_parsed elif is_datetime64_dtype(result): # returning M8[ns] denotes wall-times; since tz is None # the distinction is a thin one return result, tz_parsed elif is_object_dtype(result): # GH#23675 when called via `pd.to_datetime`, returning an object-dtype # array is allowed. When called via `pd.DatetimeIndex`, we can # only accept datetime64 dtype, so raise TypeError if object-dtype # is returned, as that indicates the values can be recognized as # datetimes but they have conflicting timezones/awareness if allow_object: return result, tz_parsed raise TypeError(result) else: # pragma: no cover # GH#23675 this TypeError should never be hit, whereas the TypeError # in the object-dtype branch above is reachable. raise TypeError(result)
[ "def", "objects_to_datetime64ns", "(", "data", ",", "dayfirst", ",", "yearfirst", ",", "utc", "=", "False", ",", "errors", "=", "\"raise\"", ",", "require_iso8601", "=", "False", ",", "allow_object", "=", "False", ")", ":", "assert", "errors", "in", "[", "\"raise\"", ",", "\"ignore\"", ",", "\"coerce\"", "]", "# if str-dtype, convert", "data", "=", "np", ".", "array", "(", "data", ",", "copy", "=", "False", ",", "dtype", "=", "np", ".", "object_", ")", "try", ":", "result", ",", "tz_parsed", "=", "tslib", ".", "array_to_datetime", "(", "data", ",", "errors", "=", "errors", ",", "utc", "=", "utc", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ",", "require_iso8601", "=", "require_iso8601", ")", "except", "ValueError", "as", "e", ":", "try", ":", "values", ",", "tz_parsed", "=", "conversion", ".", "datetime_to_datetime64", "(", "data", ")", "# If tzaware, these values represent unix timestamps, so we", "# return them as i8 to distinguish from wall times", "return", "values", ".", "view", "(", "'i8'", ")", ",", "tz_parsed", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise", "e", "if", "tz_parsed", "is", "not", "None", ":", "# We can take a shortcut since the datetime64 numpy array", "# is in UTC", "# Return i8 values to denote unix timestamps", "return", "result", ".", "view", "(", "'i8'", ")", ",", "tz_parsed", "elif", "is_datetime64_dtype", "(", "result", ")", ":", "# returning M8[ns] denotes wall-times; since tz is None", "# the distinction is a thin one", "return", "result", ",", "tz_parsed", "elif", "is_object_dtype", "(", "result", ")", ":", "# GH#23675 when called via `pd.to_datetime`, returning an object-dtype", "# array is allowed. When called via `pd.DatetimeIndex`, we can", "# only accept datetime64 dtype, so raise TypeError if object-dtype", "# is returned, as that indicates the values can be recognized as", "# datetimes but they have conflicting timezones/awareness", "if", "allow_object", ":", "return", "result", ",", "tz_parsed", "raise", "TypeError", "(", "result", ")", "else", ":", "# pragma: no cover", "# GH#23675 this TypeError should never be hit, whereas the TypeError", "# in the object-dtype branch above is reachable.", "raise", "TypeError", "(", "result", ")" ]
Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert timezone-aware timestamps to UTC errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes
[ "Convert", "data", "to", "array", "of", "timestamps", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1803-L1877
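A minimal sketch calling the private helper directly (assuming the 0.24-era module path pinned by the sha above):

import numpy as np
from pandas.core.arrays.datetimes import objects_to_datetime64ns

data = np.array(['2019-01-01', '2019-01-02'], dtype=object)
result, tz = objects_to_datetime64ns(data, dayfirst=False, yearfirst=False)
print(result.dtype, tz)  # datetime64[ns] None -- naive strings come back as wall times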
20,395
pandas-dev/pandas
pandas/core/arrays/datetimes.py
maybe_convert_dtype
def maybe_convert_dtype(data, copy): """ Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDtype data is passed """ if is_float_dtype(data): # Note: we must cast to datetime64[ns] here in order to treat these # as wall-times instead of UTC timestamps. data = data.astype(_NS_DTYPE) copy = False # TODO: deprecate this behavior to instead treat symmetrically # with integer dtypes. See discussion in GH#23675 elif is_timedelta64_dtype(data): warnings.warn("Passing timedelta64-dtype data is deprecated, will " "raise a TypeError in a future version", FutureWarning, stacklevel=5) data = data.view(_NS_DTYPE) elif is_period_dtype(data): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError("Passing PeriodDtype data is invalid. " "Use `data.to_timestamp()` instead") elif is_categorical_dtype(data): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. the categories are timedelta64s data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_type(data) and not is_datetime64tz_dtype(data): # Includes categorical # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False return data, copy
python
def maybe_convert_dtype(data, copy): """ Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDtype data is passed """ if is_float_dtype(data): # Note: we must cast to datetime64[ns] here in order to treat these # as wall-times instead of UTC timestamps. data = data.astype(_NS_DTYPE) copy = False # TODO: deprecate this behavior to instead treat symmetrically # with integer dtypes. See discussion in GH#23675 elif is_timedelta64_dtype(data): warnings.warn("Passing timedelta64-dtype data is deprecated, will " "raise a TypeError in a future version", FutureWarning, stacklevel=5) data = data.view(_NS_DTYPE) elif is_period_dtype(data): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError("Passing PeriodDtype data is invalid. " "Use `data.to_timestamp()` instead") elif is_categorical_dtype(data): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. the categories are timedelta64s data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_type(data) and not is_datetime64tz_dtype(data): # Includes categorical # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False return data, copy
[ "def", "maybe_convert_dtype", "(", "data", ",", "copy", ")", ":", "if", "is_float_dtype", "(", "data", ")", ":", "# Note: we must cast to datetime64[ns] here in order to treat these", "# as wall-times instead of UTC timestamps.", "data", "=", "data", ".", "astype", "(", "_NS_DTYPE", ")", "copy", "=", "False", "# TODO: deprecate this behavior to instead treat symmetrically", "# with integer dtypes. See discussion in GH#23675", "elif", "is_timedelta64_dtype", "(", "data", ")", ":", "warnings", ".", "warn", "(", "\"Passing timedelta64-dtype data is deprecated, will \"", "\"raise a TypeError in a future version\"", ",", "FutureWarning", ",", "stacklevel", "=", "5", ")", "data", "=", "data", ".", "view", "(", "_NS_DTYPE", ")", "elif", "is_period_dtype", "(", "data", ")", ":", "# Note: without explicitly raising here, PeriodIndex", "# test_setops.test_join_does_not_recur fails", "raise", "TypeError", "(", "\"Passing PeriodDtype data is invalid. \"", "\"Use `data.to_timestamp()` instead\"", ")", "elif", "is_categorical_dtype", "(", "data", ")", ":", "# GH#18664 preserve tz in going DTI->Categorical->DTI", "# TODO: cases where we need to do another pass through this func,", "# e.g. the categories are timedelta64s", "data", "=", "data", ".", "categories", ".", "take", "(", "data", ".", "codes", ",", "fill_value", "=", "NaT", ")", ".", "_values", "copy", "=", "False", "elif", "is_extension_type", "(", "data", ")", "and", "not", "is_datetime64tz_dtype", "(", "data", ")", ":", "# Includes categorical", "# TODO: We have no tests for these", "data", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "np", ".", "object_", ")", "copy", "=", "False", "return", "data", ",", "copy" ]
Convert data based on dtype conventions, issuing deprecation warnings
or errors where appropriate.

Parameters
----------
data : np.ndarray or pd.Index
copy : bool

Returns
-------
data : np.ndarray or pd.Index
copy : bool

Raises
------
TypeError : PeriodDtype data is passed
[ "Convert", "data", "based", "on", "dtype", "conventions", "issuing", "deprecation", "warnings", "or", "errors", "where", "appropriate", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1880-L1932
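A minimal sketch of the float-dtype branch above, assuming the internal module path recorded in this entry (pandas/core/arrays/datetimes.py) is importable; this is not public API.

import numpy as np
from pandas.core.arrays.datetimes import maybe_convert_dtype  # internal helper

arr = np.array([1.5e18, 1.6e18])               # float64 epoch-like values
out, copy = maybe_convert_dtype(arr, copy=True)
print(out.dtype, copy)                         # datetime64[ns] False
# Per the comment in the source, floats are cast so they are treated as
# wall times rather than UTC timestamps.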
20,396
pandas-dev/pandas
pandas/core/arrays/datetimes.py
maybe_infer_tz
def maybe_infer_tz(tz, inferred_tz):
    """
    If a timezone is inferred from data, check that it is compatible with
    the user-provided timezone, if any.

    Parameters
    ----------
    tz : tzinfo or None
    inferred_tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if both timezones are present but do not match
    """
    if tz is None:
        tz = inferred_tz
    elif inferred_tz is None:
        pass
    elif not timezones.tz_compare(tz, inferred_tz):
        raise TypeError('data is already tz-aware {inferred_tz}, unable to '
                        'set specified tz: {tz}'
                        .format(inferred_tz=inferred_tz, tz=tz))
    return tz
python
def maybe_infer_tz(tz, inferred_tz):
    """
    If a timezone is inferred from data, check that it is compatible with
    the user-provided timezone, if any.

    Parameters
    ----------
    tz : tzinfo or None
    inferred_tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if both timezones are present but do not match
    """
    if tz is None:
        tz = inferred_tz
    elif inferred_tz is None:
        pass
    elif not timezones.tz_compare(tz, inferred_tz):
        raise TypeError('data is already tz-aware {inferred_tz}, unable to '
                        'set specified tz: {tz}'
                        .format(inferred_tz=inferred_tz, tz=tz))
    return tz
[ "def", "maybe_infer_tz", "(", "tz", ",", "inferred_tz", ")", ":", "if", "tz", "is", "None", ":", "tz", "=", "inferred_tz", "elif", "inferred_tz", "is", "None", ":", "pass", "elif", "not", "timezones", ".", "tz_compare", "(", "tz", ",", "inferred_tz", ")", ":", "raise", "TypeError", "(", "'data is already tz-aware {inferred_tz}, unable to '", "'set specified tz: {tz}'", ".", "format", "(", "inferred_tz", "=", "inferred_tz", ",", "tz", "=", "tz", ")", ")", "return", "tz" ]
If a timezone is inferred from data, check that it is compatible with
the user-provided timezone, if any.

Parameters
----------
tz : tzinfo or None
inferred_tz : tzinfo or None

Returns
-------
tz : tzinfo or None

Raises
------
TypeError : if both timezones are present but do not match
[ "If", "a", "timezone", "is", "inferred", "from", "data", "check", "that", "it", "is", "compatible", "with", "the", "user", "-", "provided", "timezone", "if", "any", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1938-L1964
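A hedged sketch of the three branches above, assuming the internal import path from this entry; pytz supplies the tzinfo objects.

import pytz
from pandas.core.arrays.datetimes import maybe_infer_tz  # internal helper

print(maybe_infer_tz(None, pytz.UTC))   # UTC -- inferred tz fills a missing tz
print(maybe_infer_tz(pytz.UTC, None))   # UTC -- explicit tz kept, nothing inferred
try:
    maybe_infer_tz(pytz.timezone('US/Eastern'), pytz.UTC)
except TypeError as err:
    print(err)   # both present but different -> TypeError, per the docstring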
20,397
pandas-dev/pandas
pandas/core/arrays/datetimes.py
validate_tz_from_dtype
def validate_tz_from_dtype(dtype, tz):
    """
    If the given dtype is a DatetimeTZDtype, extract the implied
    tzinfo object from it and check that it does not conflict with the given
    tz.

    Parameters
    ----------
    dtype : dtype, str
    tz : None, tzinfo

    Returns
    -------
    tz : consensus tzinfo

    Raises
    ------
    ValueError : on tzinfo mismatch
    """
    if dtype is not None:
        if isinstance(dtype, str):
            try:
                dtype = DatetimeTZDtype.construct_from_string(dtype)
            except TypeError:
                # Things like `datetime64[ns]`, which is OK for the
                # constructors, but also nonsense, which should be validated
                # but not by us. We *do* allow non-existent tz errors to
                # go through
                pass
        dtz = getattr(dtype, 'tz', None)
        if dtz is not None:
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a dtype"
                                 " with a tz")
            tz = dtz

        if tz is not None and is_datetime64_dtype(dtype):
            # We also need to check for the case where the user passed a
            #  tz-naive dtype (i.e. datetime64[ns])
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a "
                                 "timezone-naive dtype (i.e. datetime64[ns])")

    return tz
python
def validate_tz_from_dtype(dtype, tz):
    """
    If the given dtype is a DatetimeTZDtype, extract the implied
    tzinfo object from it and check that it does not conflict with the given
    tz.

    Parameters
    ----------
    dtype : dtype, str
    tz : None, tzinfo

    Returns
    -------
    tz : consensus tzinfo

    Raises
    ------
    ValueError : on tzinfo mismatch
    """
    if dtype is not None:
        if isinstance(dtype, str):
            try:
                dtype = DatetimeTZDtype.construct_from_string(dtype)
            except TypeError:
                # Things like `datetime64[ns]`, which is OK for the
                # constructors, but also nonsense, which should be validated
                # but not by us. We *do* allow non-existent tz errors to
                # go through
                pass
        dtz = getattr(dtype, 'tz', None)
        if dtz is not None:
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a dtype"
                                 " with a tz")
            tz = dtz

        if tz is not None and is_datetime64_dtype(dtype):
            # We also need to check for the case where the user passed a
            #  tz-naive dtype (i.e. datetime64[ns])
            if tz is not None and not timezones.tz_compare(tz, dtz):
                raise ValueError("cannot supply both a tz and a "
                                 "timezone-naive dtype (i.e. datetime64[ns])")

    return tz
[ "def", "validate_tz_from_dtype", "(", "dtype", ",", "tz", ")", ":", "if", "dtype", "is", "not", "None", ":", "if", "isinstance", "(", "dtype", ",", "str", ")", ":", "try", ":", "dtype", "=", "DatetimeTZDtype", ".", "construct_from_string", "(", "dtype", ")", "except", "TypeError", ":", "# Things like `datetime64[ns]`, which is OK for the", "# constructors, but also nonsense, which should be validated", "# but not by us. We *do* allow non-existent tz errors to", "# go through", "pass", "dtz", "=", "getattr", "(", "dtype", ",", "'tz'", ",", "None", ")", "if", "dtz", "is", "not", "None", ":", "if", "tz", "is", "not", "None", "and", "not", "timezones", ".", "tz_compare", "(", "tz", ",", "dtz", ")", ":", "raise", "ValueError", "(", "\"cannot supply both a tz and a dtype\"", "\" with a tz\"", ")", "tz", "=", "dtz", "if", "tz", "is", "not", "None", "and", "is_datetime64_dtype", "(", "dtype", ")", ":", "# We also need to check for the case where the user passed a", "# tz-naive dtype (i.e. datetime64[ns])", "if", "tz", "is", "not", "None", "and", "not", "timezones", ".", "tz_compare", "(", "tz", ",", "dtz", ")", ":", "raise", "ValueError", "(", "\"cannot supply both a tz and a \"", "\"timezone-naive dtype (i.e. datetime64[ns])\"", ")", "return", "tz" ]
If the given dtype is a DatetimeTZDtype, extract the implied
tzinfo object from it and check that it does not conflict with the given
tz.

Parameters
----------
dtype : dtype, str
tz : None, tzinfo

Returns
-------
tz : consensus tzinfo

Raises
------
ValueError : on tzinfo mismatch
[ "If", "the", "given", "dtype", "is", "a", "DatetimeTZDtype", "extract", "the", "implied", "tzinfo", "object", "from", "it", "and", "check", "that", "it", "does", "not", "conflict", "with", "the", "given", "tz", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2008-L2051
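A hedged sketch of the extraction and mismatch paths above. pd.DatetimeTZDtype is public pandas API; the helper's import path is an assumption from this entry.

import pytz
import pandas as pd
from pandas.core.arrays.datetimes import validate_tz_from_dtype  # internal helper

print(validate_tz_from_dtype(pd.DatetimeTZDtype(tz='UTC'), None))  # UTC, from the dtype
try:
    validate_tz_from_dtype(pd.DatetimeTZDtype(tz='UTC'),
                           pytz.timezone('US/Eastern'))
except ValueError as err:
    print(err)   # tz conflicts with the dtype's own tz -> ValueError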
20,398
pandas-dev/pandas
pandas/core/arrays/datetimes.py
_infer_tz_from_endpoints
def _infer_tz_from_endpoints(start, end, tz):
    """
    If a timezone is not explicitly given via `tz`, see if one can
    be inferred from the `start` and `end` endpoints.  If more than one
    of these inputs provides a timezone, require that they all agree.

    Parameters
    ----------
    start : Timestamp
    end : Timestamp
    tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if start and end timezones do not agree
    """
    try:
        inferred_tz = timezones.infer_tzinfo(start, end)
    except Exception:
        raise TypeError('Start and end cannot both be tz-aware with '
                        'different timezones')

    inferred_tz = timezones.maybe_get_tz(inferred_tz)
    tz = timezones.maybe_get_tz(tz)

    if tz is not None and inferred_tz is not None:
        if not timezones.tz_compare(inferred_tz, tz):
            raise AssertionError("Inferred time zone not equal to passed "
                                 "time zone")

    elif inferred_tz is not None:
        tz = inferred_tz

    return tz
python
def _infer_tz_from_endpoints(start, end, tz):
    """
    If a timezone is not explicitly given via `tz`, see if one can
    be inferred from the `start` and `end` endpoints.  If more than one
    of these inputs provides a timezone, require that they all agree.

    Parameters
    ----------
    start : Timestamp
    end : Timestamp
    tz : tzinfo or None

    Returns
    -------
    tz : tzinfo or None

    Raises
    ------
    TypeError : if start and end timezones do not agree
    """
    try:
        inferred_tz = timezones.infer_tzinfo(start, end)
    except Exception:
        raise TypeError('Start and end cannot both be tz-aware with '
                        'different timezones')

    inferred_tz = timezones.maybe_get_tz(inferred_tz)
    tz = timezones.maybe_get_tz(tz)

    if tz is not None and inferred_tz is not None:
        if not timezones.tz_compare(inferred_tz, tz):
            raise AssertionError("Inferred time zone not equal to passed "
                                 "time zone")

    elif inferred_tz is not None:
        tz = inferred_tz

    return tz
[ "def", "_infer_tz_from_endpoints", "(", "start", ",", "end", ",", "tz", ")", ":", "try", ":", "inferred_tz", "=", "timezones", ".", "infer_tzinfo", "(", "start", ",", "end", ")", "except", "Exception", ":", "raise", "TypeError", "(", "'Start and end cannot both be tz-aware with '", "'different timezones'", ")", "inferred_tz", "=", "timezones", ".", "maybe_get_tz", "(", "inferred_tz", ")", "tz", "=", "timezones", ".", "maybe_get_tz", "(", "tz", ")", "if", "tz", "is", "not", "None", "and", "inferred_tz", "is", "not", "None", ":", "if", "not", "timezones", ".", "tz_compare", "(", "inferred_tz", ",", "tz", ")", ":", "raise", "AssertionError", "(", "\"Inferred time zone not equal to passed \"", "\"time zone\"", ")", "elif", "inferred_tz", "is", "not", "None", ":", "tz", "=", "inferred_tz", "return", "tz" ]
If a timezone is not explicitly given via `tz`, see if one can
be inferred from the `start` and `end` endpoints.  If more than one
of these inputs provides a timezone, require that they all agree.

Parameters
----------
start : Timestamp
end : Timestamp
tz : tzinfo or None

Returns
-------
tz : tzinfo or None

Raises
------
TypeError : if start and end timezones do not agree
[ "If", "a", "timezone", "is", "not", "explicitly", "given", "via", "tz", "see", "if", "one", "can", "be", "inferred", "from", "the", "start", "and", "end", "endpoints", ".", "If", "more", "than", "one", "of", "these", "inputs", "provides", "a", "timezone", "require", "that", "they", "all", "agree", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2054-L2091
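A hedged sketch of the inference path above; the leading underscore marks the function as internal, and the import path is an assumption from this entry.

from pandas import Timestamp
from pandas.core.arrays.datetimes import _infer_tz_from_endpoints  # internal helper

start = Timestamp('2019-01-01', tz='UTC')
end = Timestamp('2019-01-02', tz='UTC')
print(_infer_tz_from_endpoints(start, end, None))   # UTC, inferred from both endpoints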
20,399
pandas-dev/pandas
pandas/core/arrays/datetimes.py
_maybe_localize_point
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
    """
    Localize a start or end Timestamp to the timezone of the corresponding
    start or end Timestamp

    Parameters
    ----------
    ts : start or end Timestamp to potentially localize
    is_none : argument that should be None
    is_not_none : argument that should not be None
    freq : Tick, DateOffset, or None
    tz : str, timezone object or None

    Returns
    -------
    ts : Timestamp
    """
    # Make sure start and end are timezone localized if:
    # 1) freq = a Timedelta-like frequency (Tick)
    # 2) freq = None i.e. generating a linspaced range
    if isinstance(freq, Tick) or freq is None:
        localize_args = {'tz': tz, 'ambiguous': False}
    else:
        localize_args = {'tz': None}
    if is_none is None and is_not_none is not None:
        ts = ts.tz_localize(**localize_args)
    return ts
python
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz):
    """
    Localize a start or end Timestamp to the timezone of the corresponding
    start or end Timestamp

    Parameters
    ----------
    ts : start or end Timestamp to potentially localize
    is_none : argument that should be None
    is_not_none : argument that should not be None
    freq : Tick, DateOffset, or None
    tz : str, timezone object or None

    Returns
    -------
    ts : Timestamp
    """
    # Make sure start and end are timezone localized if:
    # 1) freq = a Timedelta-like frequency (Tick)
    # 2) freq = None i.e. generating a linspaced range
    if isinstance(freq, Tick) or freq is None:
        localize_args = {'tz': tz, 'ambiguous': False}
    else:
        localize_args = {'tz': None}
    if is_none is None and is_not_none is not None:
        ts = ts.tz_localize(**localize_args)
    return ts
[ "def", "_maybe_localize_point", "(", "ts", ",", "is_none", ",", "is_not_none", ",", "freq", ",", "tz", ")", ":", "# Make sure start and end are timezone localized if:", "# 1) freq = a Timedelta-like frequency (Tick)", "# 2) freq = None i.e. generating a linspaced range", "if", "isinstance", "(", "freq", ",", "Tick", ")", "or", "freq", "is", "None", ":", "localize_args", "=", "{", "'tz'", ":", "tz", ",", "'ambiguous'", ":", "False", "}", "else", ":", "localize_args", "=", "{", "'tz'", ":", "None", "}", "if", "is_none", "is", "None", "and", "is_not_none", "is", "not", "None", ":", "ts", "=", "ts", ".", "tz_localize", "(", "*", "*", "localize_args", ")", "return", "ts" ]
Localize a start or end Timestamp to the timezone of the corresponding
start or end Timestamp

Parameters
----------
ts : start or end Timestamp to potentially localize
is_none : argument that should be None
is_not_none : argument that should not be None
freq : Tick, DateOffset, or None
tz : str, timezone object or None

Returns
-------
ts : Timestamp
[ "Localize", "a", "start", "or", "end", "Timestamp", "to", "the", "timezone", "of", "the", "corresponding", "start", "or", "end", "Timestamp" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2114-L2140
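Since this helper is private, the localization it describes is easiest to observe indirectly through pd.date_range, which exercises the same Tick-frequency rule; a minimal sketch:

import pandas as pd

# freq 'D' is a Tick, so a naive start gets localized to the requested tz
idx = pd.date_range(start='2019-01-01', periods=2, freq='D', tz='US/Eastern')
print(idx[0])   # 2019-01-01 00:00:00-05:00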