partition
stringclasses
3 values
func_name
stringlengths
1
134
docstring
stringlengths
1
46.9k
path
stringlengths
4
223
original_string
stringlengths
75
104k
code
stringlengths
75
104k
docstring_tokens
listlengths
1
1.97k
repo
stringlengths
7
55
language
stringclasses
1 value
url
stringlengths
87
315
code_tokens
listlengths
19
28.4k
sha
stringlengths
40
40
train
masked_rec_array_to_mgr
Extract from a masked rec array and create the manager.
pandas/core/internals/construction.py
def masked_rec_array_to_mgr(data, index, columns, dtype, copy): """ Extract from a masked rec array and create the manager. """ # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) if index is None: index = get_names_from_index(fdata) if index is None: index = ibase.default_index(len(data)) index = ensure_index(index) if columns is not None: columns = ensure_index(columns) arrays, arr_columns = to_arrays(fdata, columns) # fill if needed new_arrays = [] for fv, arr, col in zip(fill_value, arrays, arr_columns): mask = ma.getmaskarray(data[col]) if mask.any(): arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) arr[mask] = fv new_arrays.append(arr) # create the manager arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns) if columns is None: columns = arr_columns mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype) if copy: mgr = mgr.copy() return mgr
def masked_rec_array_to_mgr(data, index, columns, dtype, copy): """ Extract from a masked rec array and create the manager. """ # essentially process a record array then fill it fill_value = data.fill_value fdata = ma.getdata(data) if index is None: index = get_names_from_index(fdata) if index is None: index = ibase.default_index(len(data)) index = ensure_index(index) if columns is not None: columns = ensure_index(columns) arrays, arr_columns = to_arrays(fdata, columns) # fill if needed new_arrays = [] for fv, arr, col in zip(fill_value, arrays, arr_columns): mask = ma.getmaskarray(data[col]) if mask.any(): arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) arr[mask] = fv new_arrays.append(arr) # create the manager arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns) if columns is None: columns = arr_columns mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype) if copy: mgr = mgr.copy() return mgr
[ "Extract", "from", "a", "masked", "rec", "array", "and", "create", "the", "manager", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L62-L98
[ "def", "masked_rec_array_to_mgr", "(", "data", ",", "index", ",", "columns", ",", "dtype", ",", "copy", ")", ":", "# essentially process a record array then fill it", "fill_value", "=", "data", ".", "fill_value", "fdata", "=", "ma", ".", "getdata", "(", "data", ")", "if", "index", "is", "None", ":", "index", "=", "get_names_from_index", "(", "fdata", ")", "if", "index", "is", "None", ":", "index", "=", "ibase", ".", "default_index", "(", "len", "(", "data", ")", ")", "index", "=", "ensure_index", "(", "index", ")", "if", "columns", "is", "not", "None", ":", "columns", "=", "ensure_index", "(", "columns", ")", "arrays", ",", "arr_columns", "=", "to_arrays", "(", "fdata", ",", "columns", ")", "# fill if needed", "new_arrays", "=", "[", "]", "for", "fv", ",", "arr", ",", "col", "in", "zip", "(", "fill_value", ",", "arrays", ",", "arr_columns", ")", ":", "mask", "=", "ma", ".", "getmaskarray", "(", "data", "[", "col", "]", ")", "if", "mask", ".", "any", "(", ")", ":", "arr", ",", "fv", "=", "maybe_upcast", "(", "arr", ",", "fill_value", "=", "fv", ",", "copy", "=", "True", ")", "arr", "[", "mask", "]", "=", "fv", "new_arrays", ".", "append", "(", "arr", ")", "# create the manager", "arrays", ",", "arr_columns", "=", "reorder_arrays", "(", "new_arrays", ",", "arr_columns", ",", "columns", ")", "if", "columns", "is", "None", ":", "columns", "=", "arr_columns", "mgr", "=", "arrays_to_mgr", "(", "arrays", ",", "arr_columns", ",", "index", ",", "columns", ",", "dtype", ")", "if", "copy", ":", "mgr", "=", "mgr", ".", "copy", "(", ")", "return", "mgr" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
init_dict
Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases.
pandas/core/internals/construction.py
def init_dict(data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if columns is not None: from pandas.core.series import Series arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isnull() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = object else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else data[k].copy(deep=True) for k in keys] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def init_dict(data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if columns is not None: from pandas.core.series import Series arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isnull() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = object else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else data[k].copy(deep=True) for k in keys] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
[ "Segregate", "Series", "based", "on", "type", "and", "coerce", "into", "matrices", ".", "Needs", "to", "handle", "a", "lot", "of", "exceptional", "cases", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L168-L204
[ "def", "init_dict", "(", "data", ",", "index", ",", "columns", ",", "dtype", "=", "None", ")", ":", "if", "columns", "is", "not", "None", ":", "from", "pandas", ".", "core", ".", "series", "import", "Series", "arrays", "=", "Series", "(", "data", ",", "index", "=", "columns", ",", "dtype", "=", "object", ")", "data_names", "=", "arrays", ".", "index", "missing", "=", "arrays", ".", "isnull", "(", ")", "if", "index", "is", "None", ":", "# GH10856", "# raise ValueError if only scalars in dict", "index", "=", "extract_index", "(", "arrays", "[", "~", "missing", "]", ")", "else", ":", "index", "=", "ensure_index", "(", "index", ")", "# no obvious \"empty\" int column", "if", "missing", ".", "any", "(", ")", "and", "not", "is_integer_dtype", "(", "dtype", ")", ":", "if", "dtype", "is", "None", "or", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "flexible", ")", ":", "# GH#1783", "nan_dtype", "=", "object", "else", ":", "nan_dtype", "=", "dtype", "val", "=", "construct_1d_arraylike_from_scalar", "(", "np", ".", "nan", ",", "len", "(", "index", ")", ",", "nan_dtype", ")", "arrays", ".", "loc", "[", "missing", "]", "=", "[", "val", "]", "*", "missing", ".", "sum", "(", ")", "else", ":", "keys", "=", "com", ".", "dict_keys_to_ordered_list", "(", "data", ")", "columns", "=", "data_names", "=", "Index", "(", "keys", ")", "# GH#24096 need copy to be deep for datetime64tz case", "# TODO: See if we can avoid these copies", "arrays", "=", "[", "data", "[", "k", "]", "if", "not", "is_datetime64tz_dtype", "(", "data", "[", "k", "]", ")", "else", "data", "[", "k", "]", ".", "copy", "(", "deep", "=", "True", ")", "for", "k", "in", "keys", "]", "return", "arrays_to_mgr", "(", "arrays", ",", "data_names", ",", "index", ",", "columns", ",", "dtype", "=", "dtype", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
to_arrays
Return list of arrays, columns.
pandas/core/internals/construction.py
def to_arrays(data, columns, coerce_float=False, dtype=None): """ Return list of arrays, columns. """ if isinstance(data, ABCDataFrame): if columns is not None: arrays = [data._ixs(i, axis=1).values for i, col in enumerate(data.columns) if col in columns] else: columns = data.columns arrays = [data._ixs(i, axis=1).values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: return [[]] * len(columns), columns return [], [] # columns if columns is not None else [] if isinstance(data[0], (list, tuple)): return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], abc.Mapping): return _list_of_dict_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], ABCSeries): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: columns = ibase.default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, ABCSeries, Index)) and data.dtype.names is not None): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns else: # last ditch effort data = lmap(tuple, data) return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
def to_arrays(data, columns, coerce_float=False, dtype=None): """ Return list of arrays, columns. """ if isinstance(data, ABCDataFrame): if columns is not None: arrays = [data._ixs(i, axis=1).values for i, col in enumerate(data.columns) if col in columns] else: columns = data.columns arrays = [data._ixs(i, axis=1).values for i in range(len(columns))] return arrays, columns if not len(data): if isinstance(data, np.ndarray): columns = data.dtype.names if columns is not None: return [[]] * len(columns), columns return [], [] # columns if columns is not None else [] if isinstance(data[0], (list, tuple)): return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], abc.Mapping): return _list_of_dict_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], ABCSeries): return _list_of_series_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: columns = ibase.default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, ABCSeries, Index)) and data.dtype.names is not None): columns = list(data.dtype.names) arrays = [data[k] for k in columns] return arrays, columns else: # last ditch effort data = lmap(tuple, data) return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype)
[ "Return", "list", "of", "arrays", "columns", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L374-L418
[ "def", "to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "False", ",", "dtype", "=", "None", ")", ":", "if", "isinstance", "(", "data", ",", "ABCDataFrame", ")", ":", "if", "columns", "is", "not", "None", ":", "arrays", "=", "[", "data", ".", "_ixs", "(", "i", ",", "axis", "=", "1", ")", ".", "values", "for", "i", ",", "col", "in", "enumerate", "(", "data", ".", "columns", ")", "if", "col", "in", "columns", "]", "else", ":", "columns", "=", "data", ".", "columns", "arrays", "=", "[", "data", ".", "_ixs", "(", "i", ",", "axis", "=", "1", ")", ".", "values", "for", "i", "in", "range", "(", "len", "(", "columns", ")", ")", "]", "return", "arrays", ",", "columns", "if", "not", "len", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "columns", "=", "data", ".", "dtype", ".", "names", "if", "columns", "is", "not", "None", ":", "return", "[", "[", "]", "]", "*", "len", "(", "columns", ")", ",", "columns", "return", "[", "]", ",", "[", "]", "# columns if columns is not None else []", "if", "isinstance", "(", "data", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "_list_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")", "elif", "isinstance", "(", "data", "[", "0", "]", ",", "abc", ".", "Mapping", ")", ":", "return", "_list_of_dict_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")", "elif", "isinstance", "(", "data", "[", "0", "]", ",", "ABCSeries", ")", ":", "return", "_list_of_series_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")", "elif", "isinstance", "(", "data", "[", "0", "]", ",", "Categorical", ")", ":", "if", "columns", "is", "None", ":", "columns", "=", "ibase", ".", "default_index", "(", "len", "(", "data", ")", ")", "return", "data", ",", "columns", "elif", "(", "isinstance", "(", 
"data", ",", "(", "np", ".", "ndarray", ",", "ABCSeries", ",", "Index", ")", ")", "and", "data", ".", "dtype", ".", "names", "is", "not", "None", ")", ":", "columns", "=", "list", "(", "data", ".", "dtype", ".", "names", ")", "arrays", "=", "[", "data", "[", "k", "]", "for", "k", "in", "columns", "]", "return", "arrays", ",", "columns", "else", ":", "# last ditch effort", "data", "=", "lmap", "(", "tuple", ",", "data", ")", "return", "_list_to_arrays", "(", "data", ",", "columns", ",", "coerce_float", "=", "coerce_float", ",", "dtype", "=", "dtype", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
sanitize_index
Sanitize an index type to return an ndarray of the underlying, pass through a non-Index.
pandas/core/internals/construction.py
def sanitize_index(data, index, copy=False): """ Sanitize an index type to return an ndarray of the underlying, pass through a non-Index. """ if index is None: return data if len(data) != len(index): raise ValueError('Length of values does not match length of index') if isinstance(data, ABCIndexClass) and not copy: pass elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)): data = data._values if copy: data = data.copy() elif isinstance(data, np.ndarray): # coerce datetimelike types if data.dtype.kind in ['M', 'm']: data = sanitize_array(data, index, copy=copy) return data
def sanitize_index(data, index, copy=False): """ Sanitize an index type to return an ndarray of the underlying, pass through a non-Index. """ if index is None: return data if len(data) != len(index): raise ValueError('Length of values does not match length of index') if isinstance(data, ABCIndexClass) and not copy: pass elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)): data = data._values if copy: data = data.copy() elif isinstance(data, np.ndarray): # coerce datetimelike types if data.dtype.kind in ['M', 'm']: data = sanitize_array(data, index, copy=copy) return data
[ "Sanitize", "an", "index", "type", "to", "return", "an", "ndarray", "of", "the", "underlying", "pass", "through", "a", "non", "-", "Index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L501-L526
[ "def", "sanitize_index", "(", "data", ",", "index", ",", "copy", "=", "False", ")", ":", "if", "index", "is", "None", ":", "return", "data", "if", "len", "(", "data", ")", "!=", "len", "(", "index", ")", ":", "raise", "ValueError", "(", "'Length of values does not match length of index'", ")", "if", "isinstance", "(", "data", ",", "ABCIndexClass", ")", "and", "not", "copy", ":", "pass", "elif", "isinstance", "(", "data", ",", "(", "ABCPeriodIndex", ",", "ABCDatetimeIndex", ")", ")", ":", "data", "=", "data", ".", "_values", "if", "copy", ":", "data", "=", "data", ".", "copy", "(", ")", "elif", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "# coerce datetimelike types", "if", "data", ".", "dtype", ".", "kind", "in", "[", "'M'", ",", "'m'", "]", ":", "data", "=", "sanitize_array", "(", "data", ",", "index", ",", "copy", "=", "copy", ")", "return", "data" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
sanitize_array
Sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified.
pandas/core/internals/construction.py
def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): """ Sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified. """ if dtype is not None: dtype = pandas_dtype(dtype) if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() data = extract_array(data, extract_numpy=True) # GH#846 if isinstance(data, np.ndarray): if dtype is not None: subarr = np.array(data, copy=False) # possibility of nan -> garbage if is_float_dtype(data.dtype) and is_integer_dtype(dtype): try: subarr = _try_cast(data, True, dtype, copy, True) except ValueError: if copy: subarr = data.copy() else: subarr = _try_cast(data, True, dtype, copy, raise_cast_failure) elif isinstance(data, Index): # don't coerce Index types # e.g. indexes can have different conversions (so don't fast path # them) # GH#6140 subarr = sanitize_index(data, index, copy=copy) else: # we will try to copy be-definition here subarr = _try_cast(data, True, dtype, copy, raise_cast_failure) elif isinstance(data, ExtensionArray): if isinstance(data, ABCPandasArray): # We don't want to let people put our PandasArray wrapper # (the output of Series/Index.array), into a Series. So # we explicitly unwrap it here. subarr = data.to_numpy() else: subarr = data # everything else in this block must also handle ndarray's, # becuase we've unwrapped PandasArray into an ndarray. 
if dtype is not None: subarr = data.astype(dtype) if copy: subarr = data.copy() return subarr elif isinstance(data, (list, tuple)) and len(data) > 0: if dtype is not None: try: subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) except Exception: if raise_cast_failure: # pragma: no cover raise subarr = np.array(data, dtype=object, copy=copy) subarr = lib.maybe_convert_objects(subarr) else: subarr = maybe_convert_platform(data) subarr = maybe_cast_to_datetime(subarr, dtype) elif isinstance(data, range): # GH#16804 arr = np.arange(data.start, data.stop, data.step, dtype='int64') subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure) else: subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) # scalar like, GH if getattr(subarr, 'ndim', 0) == 0: if isinstance(data, list): # pragma: no cover subarr = np.array(data, dtype=object) elif index is not None: value = data # figure out the dtype from the value (upcast if necessary) if dtype is None: dtype, value = infer_dtype_from_scalar(value) else: # need to possibly convert the value here value = maybe_cast_to_datetime(value, dtype) subarr = construct_1d_arraylike_from_scalar( value, len(index), dtype) else: return subarr.item() # the result that we want elif subarr.ndim == 1: if index is not None: # a 1-element ndarray if len(subarr) != len(index) and len(subarr) == 1: subarr = construct_1d_arraylike_from_scalar( subarr[0], len(index), subarr.dtype) elif subarr.ndim > 1: if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: subarr = com.asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. 
if issubclass(subarr.dtype.type, str): # GH#16605 # If not empty convert the data to dtype # GH#19853: If data is a scalar, subarr has already the result if not lib.is_scalar(data): if not np.all(isna(data)): data = np.array(data, dtype=dtype, copy=False) subarr = np.array(data, dtype=object, copy=copy) if is_object_dtype(subarr.dtype) and dtype != 'object': inferred = lib.infer_dtype(subarr, skipna=False) if inferred == 'period': try: subarr = period_array(subarr) except IncompatibleFrequency: pass return subarr
def sanitize_array(data, index, dtype=None, copy=False, raise_cast_failure=False): """ Sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified. """ if dtype is not None: dtype = pandas_dtype(dtype) if isinstance(data, ma.MaskedArray): mask = ma.getmaskarray(data) if mask.any(): data, fill_value = maybe_upcast(data, copy=True) data.soften_mask() # set hardmask False if it was True data[mask] = fill_value else: data = data.copy() data = extract_array(data, extract_numpy=True) # GH#846 if isinstance(data, np.ndarray): if dtype is not None: subarr = np.array(data, copy=False) # possibility of nan -> garbage if is_float_dtype(data.dtype) and is_integer_dtype(dtype): try: subarr = _try_cast(data, True, dtype, copy, True) except ValueError: if copy: subarr = data.copy() else: subarr = _try_cast(data, True, dtype, copy, raise_cast_failure) elif isinstance(data, Index): # don't coerce Index types # e.g. indexes can have different conversions (so don't fast path # them) # GH#6140 subarr = sanitize_index(data, index, copy=copy) else: # we will try to copy be-definition here subarr = _try_cast(data, True, dtype, copy, raise_cast_failure) elif isinstance(data, ExtensionArray): if isinstance(data, ABCPandasArray): # We don't want to let people put our PandasArray wrapper # (the output of Series/Index.array), into a Series. So # we explicitly unwrap it here. subarr = data.to_numpy() else: subarr = data # everything else in this block must also handle ndarray's, # becuase we've unwrapped PandasArray into an ndarray. 
if dtype is not None: subarr = data.astype(dtype) if copy: subarr = data.copy() return subarr elif isinstance(data, (list, tuple)) and len(data) > 0: if dtype is not None: try: subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) except Exception: if raise_cast_failure: # pragma: no cover raise subarr = np.array(data, dtype=object, copy=copy) subarr = lib.maybe_convert_objects(subarr) else: subarr = maybe_convert_platform(data) subarr = maybe_cast_to_datetime(subarr, dtype) elif isinstance(data, range): # GH#16804 arr = np.arange(data.start, data.stop, data.step, dtype='int64') subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure) else: subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) # scalar like, GH if getattr(subarr, 'ndim', 0) == 0: if isinstance(data, list): # pragma: no cover subarr = np.array(data, dtype=object) elif index is not None: value = data # figure out the dtype from the value (upcast if necessary) if dtype is None: dtype, value = infer_dtype_from_scalar(value) else: # need to possibly convert the value here value = maybe_cast_to_datetime(value, dtype) subarr = construct_1d_arraylike_from_scalar( value, len(index), dtype) else: return subarr.item() # the result that we want elif subarr.ndim == 1: if index is not None: # a 1-element ndarray if len(subarr) != len(index) and len(subarr) == 1: subarr = construct_1d_arraylike_from_scalar( subarr[0], len(index), subarr.dtype) elif subarr.ndim > 1: if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: subarr = com.asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. 
if issubclass(subarr.dtype.type, str): # GH#16605 # If not empty convert the data to dtype # GH#19853: If data is a scalar, subarr has already the result if not lib.is_scalar(data): if not np.all(isna(data)): data = np.array(data, dtype=dtype, copy=False) subarr = np.array(data, dtype=object, copy=copy) if is_object_dtype(subarr.dtype) and dtype != 'object': inferred = lib.infer_dtype(subarr, skipna=False) if inferred == 'period': try: subarr = period_array(subarr) except IncompatibleFrequency: pass return subarr
[ "Sanitize", "input", "data", "to", "an", "ndarray", "copy", "if", "specified", "coerce", "to", "the", "dtype", "if", "specified", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/construction.py#L529-L672
[ "def", "sanitize_array", "(", "data", ",", "index", ",", "dtype", "=", "None", ",", "copy", "=", "False", ",", "raise_cast_failure", "=", "False", ")", ":", "if", "dtype", "is", "not", "None", ":", "dtype", "=", "pandas_dtype", "(", "dtype", ")", "if", "isinstance", "(", "data", ",", "ma", ".", "MaskedArray", ")", ":", "mask", "=", "ma", ".", "getmaskarray", "(", "data", ")", "if", "mask", ".", "any", "(", ")", ":", "data", ",", "fill_value", "=", "maybe_upcast", "(", "data", ",", "copy", "=", "True", ")", "data", ".", "soften_mask", "(", ")", "# set hardmask False if it was True", "data", "[", "mask", "]", "=", "fill_value", "else", ":", "data", "=", "data", ".", "copy", "(", ")", "data", "=", "extract_array", "(", "data", ",", "extract_numpy", "=", "True", ")", "# GH#846", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "if", "dtype", "is", "not", "None", ":", "subarr", "=", "np", ".", "array", "(", "data", ",", "copy", "=", "False", ")", "# possibility of nan -> garbage", "if", "is_float_dtype", "(", "data", ".", "dtype", ")", "and", "is_integer_dtype", "(", "dtype", ")", ":", "try", ":", "subarr", "=", "_try_cast", "(", "data", ",", "True", ",", "dtype", ",", "copy", ",", "True", ")", "except", "ValueError", ":", "if", "copy", ":", "subarr", "=", "data", ".", "copy", "(", ")", "else", ":", "subarr", "=", "_try_cast", "(", "data", ",", "True", ",", "dtype", ",", "copy", ",", "raise_cast_failure", ")", "elif", "isinstance", "(", "data", ",", "Index", ")", ":", "# don't coerce Index types", "# e.g. 
indexes can have different conversions (so don't fast path", "# them)", "# GH#6140", "subarr", "=", "sanitize_index", "(", "data", ",", "index", ",", "copy", "=", "copy", ")", "else", ":", "# we will try to copy be-definition here", "subarr", "=", "_try_cast", "(", "data", ",", "True", ",", "dtype", ",", "copy", ",", "raise_cast_failure", ")", "elif", "isinstance", "(", "data", ",", "ExtensionArray", ")", ":", "if", "isinstance", "(", "data", ",", "ABCPandasArray", ")", ":", "# We don't want to let people put our PandasArray wrapper", "# (the output of Series/Index.array), into a Series. So", "# we explicitly unwrap it here.", "subarr", "=", "data", ".", "to_numpy", "(", ")", "else", ":", "subarr", "=", "data", "# everything else in this block must also handle ndarray's,", "# becuase we've unwrapped PandasArray into an ndarray.", "if", "dtype", "is", "not", "None", ":", "subarr", "=", "data", ".", "astype", "(", "dtype", ")", "if", "copy", ":", "subarr", "=", "data", ".", "copy", "(", ")", "return", "subarr", "elif", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", "and", "len", "(", "data", ")", ">", "0", ":", "if", "dtype", "is", "not", "None", ":", "try", ":", "subarr", "=", "_try_cast", "(", "data", ",", "False", ",", "dtype", ",", "copy", ",", "raise_cast_failure", ")", "except", "Exception", ":", "if", "raise_cast_failure", ":", "# pragma: no cover", "raise", "subarr", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "object", ",", "copy", "=", "copy", ")", "subarr", "=", "lib", ".", "maybe_convert_objects", "(", "subarr", ")", "else", ":", "subarr", "=", "maybe_convert_platform", "(", "data", ")", "subarr", "=", "maybe_cast_to_datetime", "(", "subarr", ",", "dtype", ")", "elif", "isinstance", "(", "data", ",", "range", ")", ":", "# GH#16804", "arr", "=", "np", ".", "arange", "(", "data", ".", "start", ",", "data", ".", "stop", ",", "data", ".", "step", ",", "dtype", "=", "'int64'", ")", "subarr", "=", "_try_cast", "(", "arr", ",", 
"False", ",", "dtype", ",", "copy", ",", "raise_cast_failure", ")", "else", ":", "subarr", "=", "_try_cast", "(", "data", ",", "False", ",", "dtype", ",", "copy", ",", "raise_cast_failure", ")", "# scalar like, GH", "if", "getattr", "(", "subarr", ",", "'ndim'", ",", "0", ")", "==", "0", ":", "if", "isinstance", "(", "data", ",", "list", ")", ":", "# pragma: no cover", "subarr", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "object", ")", "elif", "index", "is", "not", "None", ":", "value", "=", "data", "# figure out the dtype from the value (upcast if necessary)", "if", "dtype", "is", "None", ":", "dtype", ",", "value", "=", "infer_dtype_from_scalar", "(", "value", ")", "else", ":", "# need to possibly convert the value here", "value", "=", "maybe_cast_to_datetime", "(", "value", ",", "dtype", ")", "subarr", "=", "construct_1d_arraylike_from_scalar", "(", "value", ",", "len", "(", "index", ")", ",", "dtype", ")", "else", ":", "return", "subarr", ".", "item", "(", ")", "# the result that we want", "elif", "subarr", ".", "ndim", "==", "1", ":", "if", "index", "is", "not", "None", ":", "# a 1-element ndarray", "if", "len", "(", "subarr", ")", "!=", "len", "(", "index", ")", "and", "len", "(", "subarr", ")", "==", "1", ":", "subarr", "=", "construct_1d_arraylike_from_scalar", "(", "subarr", "[", "0", "]", ",", "len", "(", "index", ")", ",", "subarr", ".", "dtype", ")", "elif", "subarr", ".", "ndim", ">", "1", ":", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "raise", "Exception", "(", "'Data must be 1-dimensional'", ")", "else", ":", "subarr", "=", "com", ".", "asarray_tuplesafe", "(", "data", ",", "dtype", "=", "dtype", ")", "# This is to prevent mixed-type Series getting all casted to", "# NumPy string type, e.g. 
NaN --> '-1#IND'.", "if", "issubclass", "(", "subarr", ".", "dtype", ".", "type", ",", "str", ")", ":", "# GH#16605", "# If not empty convert the data to dtype", "# GH#19853: If data is a scalar, subarr has already the result", "if", "not", "lib", ".", "is_scalar", "(", "data", ")", ":", "if", "not", "np", ".", "all", "(", "isna", "(", "data", ")", ")", ":", "data", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "dtype", ",", "copy", "=", "False", ")", "subarr", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "object", ",", "copy", "=", "copy", ")", "if", "is_object_dtype", "(", "subarr", ".", "dtype", ")", "and", "dtype", "!=", "'object'", ":", "inferred", "=", "lib", ".", "infer_dtype", "(", "subarr", ",", "skipna", "=", "False", ")", "if", "inferred", "==", "'period'", ":", "try", ":", "subarr", "=", "period_array", "(", "subarr", ")", "except", "IncompatibleFrequency", ":", "pass", "return", "subarr" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_check_engine
Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine
pandas/core/computation/eval.py
def _check_engine(engine): """Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine """ from pandas.core.computation.check import _NUMEXPR_INSTALLED if engine is None: if _NUMEXPR_INSTALLED: engine = 'numexpr' else: engine = 'python' if engine not in _engines: valid = list(_engines.keys()) raise KeyError('Invalid engine {engine!r} passed, valid engines are' ' {valid}'.format(engine=engine, valid=valid)) # TODO: validate this in a more general way (thinking of future engines # that won't necessarily be import-able) # Could potentially be done on engine instantiation if engine == 'numexpr': if not _NUMEXPR_INSTALLED: raise ImportError("'numexpr' is not installed or an " "unsupported version. Cannot use " "engine='numexpr' for query/eval " "if 'numexpr' is not installed") return engine
def _check_engine(engine): """Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine """ from pandas.core.computation.check import _NUMEXPR_INSTALLED if engine is None: if _NUMEXPR_INSTALLED: engine = 'numexpr' else: engine = 'python' if engine not in _engines: valid = list(_engines.keys()) raise KeyError('Invalid engine {engine!r} passed, valid engines are' ' {valid}'.format(engine=engine, valid=valid)) # TODO: validate this in a more general way (thinking of future engines # that won't necessarily be import-able) # Could potentially be done on engine instantiation if engine == 'numexpr': if not _NUMEXPR_INSTALLED: raise ImportError("'numexpr' is not installed or an " "unsupported version. Cannot use " "engine='numexpr' for query/eval " "if 'numexpr' is not installed") return engine
[ "Make", "sure", "a", "valid", "engine", "is", "passed", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/eval.py#L17-L59
[ "def", "_check_engine", "(", "engine", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "check", "import", "_NUMEXPR_INSTALLED", "if", "engine", "is", "None", ":", "if", "_NUMEXPR_INSTALLED", ":", "engine", "=", "'numexpr'", "else", ":", "engine", "=", "'python'", "if", "engine", "not", "in", "_engines", ":", "valid", "=", "list", "(", "_engines", ".", "keys", "(", ")", ")", "raise", "KeyError", "(", "'Invalid engine {engine!r} passed, valid engines are'", "' {valid}'", ".", "format", "(", "engine", "=", "engine", ",", "valid", "=", "valid", ")", ")", "# TODO: validate this in a more general way (thinking of future engines", "# that won't necessarily be import-able)", "# Could potentially be done on engine instantiation", "if", "engine", "==", "'numexpr'", ":", "if", "not", "_NUMEXPR_INSTALLED", ":", "raise", "ImportError", "(", "\"'numexpr' is not installed or an \"", "\"unsupported version. Cannot use \"", "\"engine='numexpr' for query/eval \"", "\"if 'numexpr' is not installed\"", ")", "return", "engine" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_check_parser
Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed
pandas/core/computation/eval.py
def _check_parser(parser): """Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed """ from pandas.core.computation.expr import _parsers if parser not in _parsers: raise KeyError('Invalid parser {parser!r} passed, valid parsers are' ' {valid}'.format(parser=parser, valid=_parsers.keys()))
def _check_parser(parser): """Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed """ from pandas.core.computation.expr import _parsers if parser not in _parsers: raise KeyError('Invalid parser {parser!r} passed, valid parsers are' ' {valid}'.format(parser=parser, valid=_parsers.keys()))
[ "Make", "sure", "a", "valid", "parser", "is", "passed", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/eval.py#L62-L78
[ "def", "_check_parser", "(", "parser", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "expr", "import", "_parsers", "if", "parser", "not", "in", "_parsers", ":", "raise", "KeyError", "(", "'Invalid parser {parser!r} passed, valid parsers are'", "' {valid}'", ".", "format", "(", "parser", "=", "parser", ",", "valid", "=", "_parsers", ".", "keys", "(", ")", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
eval
Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. 
global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. 
- Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details.
pandas/core/computation/eval.py
def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. 
truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. 
- Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. """ from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError("multi-line expressions are only valid in the " "context of data, use DataFrame.eval") ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError("Multi-line expressions are only valid" " if all expressions contain an assignment") elif inplace: raise ValueError("Cannot operate inplace " "if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError("Cannot return a copy of the target") else: target = env.target # TypeError is most commonly raised (e.g. 
int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. 
truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. 
- Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. """ from pandas.core.computation.expr import Expr inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError("multi-line expressions are only valid in the " "context of data, use DataFrame.eval") ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) _check_for_locals(expr, level, parser) # get our (possibly passed-in) scope env = _ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, truediv=truediv) # construct the engine and evaluate the parsed expression eng = _engines[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError("Multi-line expressions are only valid" " if all expressions contain an assignment") elif inplace: raise ValueError("Cannot operate inplace " "if there is no assignment") # assign if needed assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True # if returning a copy, copy only on the first assignment if not inplace and first_expr: try: target = env.target.copy() except AttributeError: raise ValueError("Cannot return a copy of the target") else: target = env.target # TypeError is most commonly raised (e.g. 
int, list), but you # get IndexError if you try to do this assignment on np.ndarray. # we will ignore numpy warnings here; e.g. if trying # to use a non-numeric indexer try: with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. target[assigner] = ret except (TypeError, IndexError): raise ValueError("Cannot assign expression output to target") if not resolvers: resolvers = ({assigner: ret},) else: # existing resolver needs updated to handle # case of mutating existing column in copy for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False # We want to exclude `inplace=None` as being False. if inplace is False: return target if target_modified else ret
[ "Evaluate", "a", "Python", "expression", "as", "a", "string", "using", "various", "backends", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/eval.py#L155-L350
[ "def", "eval", "(", "expr", ",", "parser", "=", "'pandas'", ",", "engine", "=", "None", ",", "truediv", "=", "True", ",", "local_dict", "=", "None", ",", "global_dict", "=", "None", ",", "resolvers", "=", "(", ")", ",", "level", "=", "0", ",", "target", "=", "None", ",", "inplace", "=", "False", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "expr", "import", "Expr", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "\"inplace\"", ")", "if", "isinstance", "(", "expr", ",", "str", ")", ":", "_check_expression", "(", "expr", ")", "exprs", "=", "[", "e", ".", "strip", "(", ")", "for", "e", "in", "expr", ".", "splitlines", "(", ")", "if", "e", ".", "strip", "(", ")", "!=", "''", "]", "else", ":", "exprs", "=", "[", "expr", "]", "multi_line", "=", "len", "(", "exprs", ")", ">", "1", "if", "multi_line", "and", "target", "is", "None", ":", "raise", "ValueError", "(", "\"multi-line expressions are only valid in the \"", "\"context of data, use DataFrame.eval\"", ")", "ret", "=", "None", "first_expr", "=", "True", "target_modified", "=", "False", "for", "expr", "in", "exprs", ":", "expr", "=", "_convert_expression", "(", "expr", ")", "engine", "=", "_check_engine", "(", "engine", ")", "_check_parser", "(", "parser", ")", "_check_resolvers", "(", "resolvers", ")", "_check_for_locals", "(", "expr", ",", "level", ",", "parser", ")", "# get our (possibly passed-in) scope", "env", "=", "_ensure_scope", "(", "level", "+", "1", ",", "global_dict", "=", "global_dict", ",", "local_dict", "=", "local_dict", ",", "resolvers", "=", "resolvers", ",", "target", "=", "target", ")", "parsed_expr", "=", "Expr", "(", "expr", ",", "engine", "=", "engine", ",", "parser", "=", "parser", ",", "env", "=", "env", ",", "truediv", "=", "truediv", ")", "# construct the engine and evaluate the parsed expression", "eng", "=", "_engines", "[", "engine", "]", "eng_inst", "=", "eng", "(", "parsed_expr", ")", "ret", "=", "eng_inst", ".", "evaluate", "(", ")", "if", 
"parsed_expr", ".", "assigner", "is", "None", ":", "if", "multi_line", ":", "raise", "ValueError", "(", "\"Multi-line expressions are only valid\"", "\" if all expressions contain an assignment\"", ")", "elif", "inplace", ":", "raise", "ValueError", "(", "\"Cannot operate inplace \"", "\"if there is no assignment\"", ")", "# assign if needed", "assigner", "=", "parsed_expr", ".", "assigner", "if", "env", ".", "target", "is", "not", "None", "and", "assigner", "is", "not", "None", ":", "target_modified", "=", "True", "# if returning a copy, copy only on the first assignment", "if", "not", "inplace", "and", "first_expr", ":", "try", ":", "target", "=", "env", ".", "target", ".", "copy", "(", ")", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"Cannot return a copy of the target\"", ")", "else", ":", "target", "=", "env", ".", "target", "# TypeError is most commonly raised (e.g. int, list), but you", "# get IndexError if you try to do this assignment on np.ndarray.", "# we will ignore numpy warnings here; e.g. 
if trying", "# to use a non-numeric indexer", "try", ":", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", ":", "# TODO: Filter the warnings we actually care about here.", "target", "[", "assigner", "]", "=", "ret", "except", "(", "TypeError", ",", "IndexError", ")", ":", "raise", "ValueError", "(", "\"Cannot assign expression output to target\"", ")", "if", "not", "resolvers", ":", "resolvers", "=", "(", "{", "assigner", ":", "ret", "}", ",", ")", "else", ":", "# existing resolver needs updated to handle", "# case of mutating existing column in copy", "for", "resolver", "in", "resolvers", ":", "if", "assigner", "in", "resolver", ":", "resolver", "[", "assigner", "]", "=", "ret", "break", "else", ":", "resolvers", "+=", "(", "{", "assigner", ":", "ret", "}", ",", ")", "ret", "=", "None", "first_expr", "=", "False", "# We want to exclude `inplace=None` as being False.", "if", "inplace", "is", "False", ":", "return", "target", "if", "target_modified", "else", "ret" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndexUIntEngine._codes_to_ints
Transform combination(s) of uint64 in one uint64 (each), in a strictly monotonic way (i.e. respecting the lexicographic order of integer combinations): see BaseMultiIndexCodesEngine documentation. Parameters ---------- codes : 1- or 2-dimensional array of dtype uint64 Combinations of integers (one per row) Returns ------ int_keys : scalar or 1-dimensional array, of dtype uint64 Integer(s) representing one combination (each).
pandas/core/indexes/multi.py
def _codes_to_ints(self, codes): """ Transform combination(s) of uint64 in one uint64 (each), in a strictly monotonic way (i.e. respecting the lexicographic order of integer combinations): see BaseMultiIndexCodesEngine documentation. Parameters ---------- codes : 1- or 2-dimensional array of dtype uint64 Combinations of integers (one per row) Returns ------ int_keys : scalar or 1-dimensional array, of dtype uint64 Integer(s) representing one combination (each). """ # Shift the representation of each level by the pre-calculated number # of bits: codes <<= self.offsets # Now sum and OR are in fact interchangeable. This is a simple # composition of the (disjunct) significant bits of each level (i.e. # each column in "codes") in a single positive integer: if codes.ndim == 1: # Single key return np.bitwise_or.reduce(codes) # Multiple keys return np.bitwise_or.reduce(codes, axis=1)
def _codes_to_ints(self, codes): """ Transform combination(s) of uint64 in one uint64 (each), in a strictly monotonic way (i.e. respecting the lexicographic order of integer combinations): see BaseMultiIndexCodesEngine documentation. Parameters ---------- codes : 1- or 2-dimensional array of dtype uint64 Combinations of integers (one per row) Returns ------ int_keys : scalar or 1-dimensional array, of dtype uint64 Integer(s) representing one combination (each). """ # Shift the representation of each level by the pre-calculated number # of bits: codes <<= self.offsets # Now sum and OR are in fact interchangeable. This is a simple # composition of the (disjunct) significant bits of each level (i.e. # each column in "codes") in a single positive integer: if codes.ndim == 1: # Single key return np.bitwise_or.reduce(codes) # Multiple keys return np.bitwise_or.reduce(codes, axis=1)
[ "Transform", "combination", "(", "s", ")", "of", "uint64", "in", "one", "uint64", "(", "each", ")", "in", "a", "strictly", "monotonic", "way", "(", "i", ".", "e", ".", "respecting", "the", "lexicographic", "order", "of", "integer", "combinations", ")", ":", "see", "BaseMultiIndexCodesEngine", "documentation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L49-L77
[ "def", "_codes_to_ints", "(", "self", ",", "codes", ")", ":", "# Shift the representation of each level by the pre-calculated number", "# of bits:", "codes", "<<=", "self", ".", "offsets", "# Now sum and OR are in fact interchangeable. This is a simple", "# composition of the (disjunct) significant bits of each level (i.e.", "# each column in \"codes\") in a single positive integer:", "if", "codes", ".", "ndim", "==", "1", ":", "# Single key", "return", "np", ".", "bitwise_or", ".", "reduce", "(", "codes", ")", "# Multiple keys", "return", "np", ".", "bitwise_or", ".", "reduce", "(", "codes", ",", "axis", "=", "1", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.from_arrays
Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color'])
pandas/core/indexes/multi.py
def from_arrays(cls, arrays, sortorder=None, names=None): """ Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ error_msg = "Input must be a list / sequence of array-likes." if not is_list_like(arrays): raise TypeError(error_msg) elif is_iterator(arrays): arrays = list(arrays) # Check if elements of array are list-like for array in arrays: if not is_list_like(array): raise TypeError(error_msg) # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError('all arrays must be same length') from pandas.core.arrays.categorical import _factorize_from_iterables codes, levels = _factorize_from_iterables(arrays) if names is None: names = [getattr(arr, "name", None) for arr in arrays] return MultiIndex(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False)
def from_arrays(cls, arrays, sortorder=None, names=None): """ Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ error_msg = "Input must be a list / sequence of array-likes." if not is_list_like(arrays): raise TypeError(error_msg) elif is_iterator(arrays): arrays = list(arrays) # Check if elements of array are list-like for array in arrays: if not is_list_like(array): raise TypeError(error_msg) # Check if lengths of all arrays are equal or not, # raise ValueError, if not for i in range(1, len(arrays)): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError('all arrays must be same length') from pandas.core.arrays.categorical import _factorize_from_iterables codes, levels = _factorize_from_iterables(arrays) if names is None: names = [getattr(arr, "name", None) for arr in arrays] return MultiIndex(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False)
[ "Convert", "arrays", "to", "MultiIndex", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L292-L350
[ "def", "from_arrays", "(", "cls", ",", "arrays", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "error_msg", "=", "\"Input must be a list / sequence of array-likes.\"", "if", "not", "is_list_like", "(", "arrays", ")", ":", "raise", "TypeError", "(", "error_msg", ")", "elif", "is_iterator", "(", "arrays", ")", ":", "arrays", "=", "list", "(", "arrays", ")", "# Check if elements of array are list-like", "for", "array", "in", "arrays", ":", "if", "not", "is_list_like", "(", "array", ")", ":", "raise", "TypeError", "(", "error_msg", ")", "# Check if lengths of all arrays are equal or not,", "# raise ValueError, if not", "for", "i", "in", "range", "(", "1", ",", "len", "(", "arrays", ")", ")", ":", "if", "len", "(", "arrays", "[", "i", "]", ")", "!=", "len", "(", "arrays", "[", "i", "-", "1", "]", ")", ":", "raise", "ValueError", "(", "'all arrays must be same length'", ")", "from", "pandas", ".", "core", ".", "arrays", ".", "categorical", "import", "_factorize_from_iterables", "codes", ",", "levels", "=", "_factorize_from_iterables", "(", "arrays", ")", "if", "names", "is", "None", ":", "names", "=", "[", "getattr", "(", "arr", ",", "\"name\"", ",", "None", ")", "for", "arr", "in", "arrays", "]", "return", "MultiIndex", "(", "levels", "=", "levels", ",", "codes", "=", "codes", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.from_tuples
Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color'])
pandas/core/indexes/multi.py
def from_tuples(cls, tuples, sortorder=None, names=None): """ Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ if not is_list_like(tuples): raise TypeError('Input must be a list / sequence of tuple-likes.') elif is_iterator(tuples): tuples = list(tuples) if len(tuples) == 0: if names is None: msg = 'Cannot infer number of levels from empty list' raise TypeError(msg) arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = tuples._values arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrays = lzip(*tuples) return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
def from_tuples(cls, tuples, sortorder=None, names=None): """ Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """ if not is_list_like(tuples): raise TypeError('Input must be a list / sequence of tuple-likes.') elif is_iterator(tuples): tuples = list(tuples) if len(tuples) == 0: if names is None: msg = 'Cannot infer number of levels from empty list' raise TypeError(msg) arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = tuples._values arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrays = lzip(*tuples) return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
[ "Convert", "list", "of", "tuples", "to", "MultiIndex", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L353-L407
[ "def", "from_tuples", "(", "cls", ",", "tuples", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "if", "not", "is_list_like", "(", "tuples", ")", ":", "raise", "TypeError", "(", "'Input must be a list / sequence of tuple-likes.'", ")", "elif", "is_iterator", "(", "tuples", ")", ":", "tuples", "=", "list", "(", "tuples", ")", "if", "len", "(", "tuples", ")", "==", "0", ":", "if", "names", "is", "None", ":", "msg", "=", "'Cannot infer number of levels from empty list'", "raise", "TypeError", "(", "msg", ")", "arrays", "=", "[", "[", "]", "]", "*", "len", "(", "names", ")", "elif", "isinstance", "(", "tuples", ",", "(", "np", ".", "ndarray", ",", "Index", ")", ")", ":", "if", "isinstance", "(", "tuples", ",", "Index", ")", ":", "tuples", "=", "tuples", ".", "_values", "arrays", "=", "list", "(", "lib", ".", "tuples_to_object_array", "(", "tuples", ")", ".", "T", ")", "elif", "isinstance", "(", "tuples", ",", "list", ")", ":", "arrays", "=", "list", "(", "lib", ".", "to_object_array_tuples", "(", "tuples", ")", ".", "T", ")", "else", ":", "arrays", "=", "lzip", "(", "*", "tuples", ")", "return", "MultiIndex", ".", "from_arrays", "(", "arrays", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.from_product
Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color'])
pandas/core/indexes/multi.py
def from_product(cls, iterables, sortorder=None, names=None): """ Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color']) """ from pandas.core.arrays.categorical import _factorize_from_iterables from pandas.core.reshape.util import cartesian_product if not is_list_like(iterables): raise TypeError("Input must be a list / sequence of iterables.") elif is_iterator(iterables): iterables = list(iterables) codes, levels = _factorize_from_iterables(iterables) codes = cartesian_product(codes) return MultiIndex(levels, codes, sortorder=sortorder, names=names)
def from_product(cls, iterables, sortorder=None, names=None): """ Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color']) """ from pandas.core.arrays.categorical import _factorize_from_iterables from pandas.core.reshape.util import cartesian_product if not is_list_like(iterables): raise TypeError("Input must be a list / sequence of iterables.") elif is_iterator(iterables): iterables = list(iterables) codes, levels = _factorize_from_iterables(iterables) codes = cartesian_product(codes) return MultiIndex(levels, codes, sortorder=sortorder, names=names)
[ "Make", "a", "MultiIndex", "from", "the", "cartesian", "product", "of", "multiple", "iterables", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L410-L454
[ "def", "from_product", "(", "cls", ",", "iterables", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "arrays", ".", "categorical", "import", "_factorize_from_iterables", "from", "pandas", ".", "core", ".", "reshape", ".", "util", "import", "cartesian_product", "if", "not", "is_list_like", "(", "iterables", ")", ":", "raise", "TypeError", "(", "\"Input must be a list / sequence of iterables.\"", ")", "elif", "is_iterator", "(", "iterables", ")", ":", "iterables", "=", "list", "(", "iterables", ")", "codes", ",", "levels", "=", "_factorize_from_iterables", "(", "iterables", ")", "codes", "=", "cartesian_product", "(", "codes", ")", "return", "MultiIndex", "(", "levels", ",", "codes", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.from_frame
Make a MultiIndex from a DataFrame. .. versionadded:: 0.24.0 Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation'])
pandas/core/indexes/multi.py
def from_frame(cls, df, sortorder=None, names=None): """ Make a MultiIndex from a DataFrame. .. versionadded:: 0.24.0 Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation']) """ if not isinstance(df, ABCDataFrame): raise TypeError("Input must be a DataFrame") column_names, columns = lzip(*df.iteritems()) names = column_names if names is None else names return cls.from_arrays(columns, sortorder=sortorder, names=names)
def from_frame(cls, df, sortorder=None, names=None): """ Make a MultiIndex from a DataFrame. .. versionadded:: 0.24.0 Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation']) """ if not isinstance(df, ABCDataFrame): raise TypeError("Input must be a DataFrame") column_names, columns = lzip(*df.iteritems()) names = column_names if names is None else names return cls.from_arrays(columns, sortorder=sortorder, names=names)
[ "Make", "a", "MultiIndex", "from", "a", "DataFrame", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L457-L516
[ "def", "from_frame", "(", "cls", ",", "df", ",", "sortorder", "=", "None", ",", "names", "=", "None", ")", ":", "if", "not", "isinstance", "(", "df", ",", "ABCDataFrame", ")", ":", "raise", "TypeError", "(", "\"Input must be a DataFrame\"", ")", "column_names", ",", "columns", "=", "lzip", "(", "*", "df", ".", "iteritems", "(", ")", ")", "names", "=", "column_names", "if", "names", "is", "None", "else", "names", "return", "cls", ".", "from_arrays", "(", "columns", ",", "sortorder", "=", "sortorder", ",", "names", "=", "names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.set_levels
Set new levels on MultiIndex. Defaults to returning new index. Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[['a', 'b'], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar'])
pandas/core/indexes/multi.py
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True): """ Set new levels on MultiIndex. Defaults to returning new index. Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[['a', 'b'], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) """ if is_list_like(levels) and not isinstance(levels, Index): levels = list(levels) if level is not None and not is_list_like(level): if not is_list_like(levels): raise TypeError("Levels must be list-like") if is_list_like(levels[0]): raise TypeError("Levels must be list-like") level = [level] levels = [levels] elif level is None or is_list_like(level): if not is_list_like(levels) or not is_list_like(levels[0]): raise TypeError("Levels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_levels(levels, level=level, validate=True, verify_integrity=verify_integrity) if not inplace: return idx
def set_levels(self, levels, level=None, inplace=False, verify_integrity=True): """ Set new levels on MultiIndex. Defaults to returning new index. Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[['a', 'b'], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) """ if is_list_like(levels) and not isinstance(levels, Index): levels = list(levels) if level is not None and not is_list_like(level): if not is_list_like(levels): raise TypeError("Levels must be list-like") if is_list_like(levels[0]): raise TypeError("Levels must be list-like") level = [level] levels = [levels] elif level is None or is_list_like(level): if not is_list_like(levels) or not is_list_like(levels[0]): raise TypeError("Levels must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_levels(levels, level=level, validate=True, verify_integrity=verify_integrity) if not inplace: return idx
[ "Set", "new", "levels", "on", "MultiIndex", ".", "Defaults", "to", "returning", "new", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L599-L664
[ "def", "set_levels", "(", "self", ",", "levels", ",", "level", "=", "None", ",", "inplace", "=", "False", ",", "verify_integrity", "=", "True", ")", ":", "if", "is_list_like", "(", "levels", ")", "and", "not", "isinstance", "(", "levels", ",", "Index", ")", ":", "levels", "=", "list", "(", "levels", ")", "if", "level", "is", "not", "None", "and", "not", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "levels", ")", ":", "raise", "TypeError", "(", "\"Levels must be list-like\"", ")", "if", "is_list_like", "(", "levels", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Levels must be list-like\"", ")", "level", "=", "[", "level", "]", "levels", "=", "[", "levels", "]", "elif", "level", "is", "None", "or", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "levels", ")", "or", "not", "is_list_like", "(", "levels", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Levels must be list of lists-like\"", ")", "if", "inplace", ":", "idx", "=", "self", "else", ":", "idx", "=", "self", ".", "_shallow_copy", "(", ")", "idx", ".", "_reset_identity", "(", ")", "idx", ".", "_set_levels", "(", "levels", ",", "level", "=", "level", ",", "validate", "=", "True", ",", "verify_integrity", "=", "verify_integrity", ")", "if", "not", "inplace", ":", "return", "idx" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.set_codes
Set new codes on MultiIndex. Defaults to returning new index. .. versionadded:: 0.24.0 New name for deprecated method `set_labels`. Parameters ---------- codes : sequence or list of sequence new codes to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar'])
pandas/core/indexes/multi.py
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True): """ Set new codes on MultiIndex. Defaults to returning new index. .. versionadded:: 0.24.0 New name for deprecated method `set_labels`. Parameters ---------- codes : sequence or list of sequence new codes to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) """ if level is not None and not is_list_like(level): if not is_list_like(codes): raise TypeError("Codes must be list-like") if is_list_like(codes[0]): raise TypeError("Codes must be list-like") level = [level] codes = [codes] elif level is None or is_list_like(level): if not is_list_like(codes) or not is_list_like(codes[0]): raise TypeError("Codes must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_codes(codes, level=level, verify_integrity=verify_integrity) if not inplace: return idx
def set_codes(self, codes, level=None, inplace=False, verify_integrity=True): """ Set new codes on MultiIndex. Defaults to returning new index. .. versionadded:: 0.24.0 New name for deprecated method `set_labels`. Parameters ---------- codes : sequence or list of sequence new codes to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) """ if level is not None and not is_list_like(level): if not is_list_like(codes): raise TypeError("Codes must be list-like") if is_list_like(codes[0]): raise TypeError("Codes must be list-like") level = [level] codes = [codes] elif level is None or is_list_like(level): if not is_list_like(codes) or not is_list_like(codes[0]): raise TypeError("Codes must be list of lists-like") if inplace: idx = self else: idx = self._shallow_copy() idx._reset_identity() idx._set_codes(codes, level=level, verify_integrity=verify_integrity) if not inplace: return idx
[ "Set", "new", "codes", "on", "MultiIndex", ".", "Defaults", "to", "returning", "new", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L714-L779
[ "def", "set_codes", "(", "self", ",", "codes", ",", "level", "=", "None", ",", "inplace", "=", "False", ",", "verify_integrity", "=", "True", ")", ":", "if", "level", "is", "not", "None", "and", "not", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "codes", ")", ":", "raise", "TypeError", "(", "\"Codes must be list-like\"", ")", "if", "is_list_like", "(", "codes", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Codes must be list-like\"", ")", "level", "=", "[", "level", "]", "codes", "=", "[", "codes", "]", "elif", "level", "is", "None", "or", "is_list_like", "(", "level", ")", ":", "if", "not", "is_list_like", "(", "codes", ")", "or", "not", "is_list_like", "(", "codes", "[", "0", "]", ")", ":", "raise", "TypeError", "(", "\"Codes must be list of lists-like\"", ")", "if", "inplace", ":", "idx", "=", "self", "else", ":", "idx", "=", "self", ".", "_shallow_copy", "(", ")", "idx", ".", "_reset_identity", "(", ")", "idx", ".", "_set_codes", "(", "codes", ",", "level", "=", "level", ",", "verify_integrity", "=", "verify_integrity", ")", "if", "not", "inplace", ":", "return", "idx" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.copy
Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional codes : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects.
pandas/core/indexes/multi.py
def copy(self, names=None, dtype=None, levels=None, codes=None, deep=False, _set_identity=False, **kwargs): """ Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional codes : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. """ name = kwargs.get('name') names = self._validate_names(name=name, names=names, deep=deep) if deep: from copy import deepcopy if levels is None: levels = deepcopy(self.levels) if codes is None: codes = deepcopy(self.codes) else: if levels is None: levels = self.levels if codes is None: codes = self.codes return MultiIndex(levels=levels, codes=codes, names=names, sortorder=self.sortorder, verify_integrity=False, _set_identity=_set_identity)
def copy(self, names=None, dtype=None, levels=None, codes=None, deep=False, _set_identity=False, **kwargs): """ Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional codes : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. """ name = kwargs.get('name') names = self._validate_names(name=name, names=names, deep=deep) if deep: from copy import deepcopy if levels is None: levels = deepcopy(self.levels) if codes is None: codes = deepcopy(self.codes) else: if levels is None: levels = self.levels if codes is None: codes = self.codes return MultiIndex(levels=levels, codes=codes, names=names, sortorder=self.sortorder, verify_integrity=False, _set_identity=_set_identity)
[ "Make", "a", "copy", "of", "this", "object", ".", "Names", "dtype", "levels", "and", "codes", "can", "be", "passed", "and", "will", "be", "set", "on", "new", "copy", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L782-L821
[ "def", "copy", "(", "self", ",", "names", "=", "None", ",", "dtype", "=", "None", ",", "levels", "=", "None", ",", "codes", "=", "None", ",", "deep", "=", "False", ",", "_set_identity", "=", "False", ",", "*", "*", "kwargs", ")", ":", "name", "=", "kwargs", ".", "get", "(", "'name'", ")", "names", "=", "self", ".", "_validate_names", "(", "name", "=", "name", ",", "names", "=", "names", ",", "deep", "=", "deep", ")", "if", "deep", ":", "from", "copy", "import", "deepcopy", "if", "levels", "is", "None", ":", "levels", "=", "deepcopy", "(", "self", ".", "levels", ")", "if", "codes", "is", "None", ":", "codes", "=", "deepcopy", "(", "self", ".", "codes", ")", "else", ":", "if", "levels", "is", "None", ":", "levels", "=", "self", ".", "levels", "if", "codes", "is", "None", ":", "codes", "=", "self", ".", "codes", "return", "MultiIndex", "(", "levels", "=", "levels", ",", "codes", "=", "codes", ",", "names", "=", "names", ",", "sortorder", "=", "self", ".", "sortorder", ",", "verify_integrity", "=", "False", ",", "_set_identity", "=", "_set_identity", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.view
this is defined as a copy with the same identity
pandas/core/indexes/multi.py
def view(self, cls=None): """ this is defined as a copy with the same identity """ result = self.copy() result._id = self._id return result
def view(self, cls=None): """ this is defined as a copy with the same identity """ result = self.copy() result._id = self._id return result
[ "this", "is", "defined", "as", "a", "copy", "with", "the", "same", "identity" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L827-L831
[ "def", "view", "(", "self", ",", "cls", "=", "None", ")", ":", "result", "=", "self", ".", "copy", "(", ")", "result", ".", "_id", "=", "self", ".", "_id", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._is_memory_usage_qualified
return a boolean if we need a qualified .info display
pandas/core/indexes/multi.py
def _is_memory_usage_qualified(self): """ return a boolean if we need a qualified .info display """ def f(l): return 'mixed' in l or 'string' in l or 'unicode' in l return any(f(l) for l in self._inferred_type_levels)
def _is_memory_usage_qualified(self): """ return a boolean if we need a qualified .info display """ def f(l): return 'mixed' in l or 'string' in l or 'unicode' in l return any(f(l) for l in self._inferred_type_levels)
[ "return", "a", "boolean", "if", "we", "need", "a", "qualified", ".", "info", "display" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L866-L870
[ "def", "_is_memory_usage_qualified", "(", "self", ")", ":", "def", "f", "(", "l", ")", ":", "return", "'mixed'", "in", "l", "or", "'string'", "in", "l", "or", "'unicode'", "in", "l", "return", "any", "(", "f", "(", "l", ")", "for", "l", "in", "self", ".", "_inferred_type_levels", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._nbytes
return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is in internal routine*
pandas/core/indexes/multi.py
def _nbytes(self, deep=False): """ return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is in internal routine* """ # for implementations with no useful getsizeof (PyPy) objsize = 24 level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels) label_nbytes = sum(i.nbytes for i in self.codes) names_nbytes = sum(getsizeof(i, objsize) for i in self.names) result = level_nbytes + label_nbytes + names_nbytes # include our engine hashtable result += self._engine.sizeof(deep=deep) return result
def _nbytes(self, deep=False): """ return the number of bytes in the underlying data deeply introspect the level data if deep=True include the engine hashtable *this is in internal routine* """ # for implementations with no useful getsizeof (PyPy) objsize = 24 level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels) label_nbytes = sum(i.nbytes for i in self.codes) names_nbytes = sum(getsizeof(i, objsize) for i in self.names) result = level_nbytes + label_nbytes + names_nbytes # include our engine hashtable result += self._engine.sizeof(deep=deep) return result
[ "return", "the", "number", "of", "bytes", "in", "the", "underlying", "data", "deeply", "introspect", "the", "level", "data", "if", "deep", "=", "True" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L884-L905
[ "def", "_nbytes", "(", "self", ",", "deep", "=", "False", ")", ":", "# for implementations with no useful getsizeof (PyPy)", "objsize", "=", "24", "level_nbytes", "=", "sum", "(", "i", ".", "memory_usage", "(", "deep", "=", "deep", ")", "for", "i", "in", "self", ".", "levels", ")", "label_nbytes", "=", "sum", "(", "i", ".", "nbytes", "for", "i", "in", "self", ".", "codes", ")", "names_nbytes", "=", "sum", "(", "getsizeof", "(", "i", ",", "objsize", ")", "for", "i", "in", "self", ".", "names", ")", "result", "=", "level_nbytes", "+", "label_nbytes", "+", "names_nbytes", "# include our engine hashtable", "result", "+=", "self", ".", "_engine", ".", "sizeof", "(", "deep", "=", "deep", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._format_attrs
Return a list of tuples of the (attr,formatted_value)
pandas/core/indexes/multi.py
def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value) """ attrs = [ ('levels', ibase.default_pprint(self._levels, max_seq_items=False)), ('codes', ibase.default_pprint(self._codes, max_seq_items=False))] if com._any_not_none(*self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: attrs.append(('sortorder', ibase.default_pprint(self.sortorder))) return attrs
def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value) """ attrs = [ ('levels', ibase.default_pprint(self._levels, max_seq_items=False)), ('codes', ibase.default_pprint(self._codes, max_seq_items=False))] if com._any_not_none(*self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: attrs.append(('sortorder', ibase.default_pprint(self.sortorder))) return attrs
[ "Return", "a", "list", "of", "tuples", "of", "the", "(", "attr", "formatted_value", ")" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L910-L923
[ "def", "_format_attrs", "(", "self", ")", ":", "attrs", "=", "[", "(", "'levels'", ",", "ibase", ".", "default_pprint", "(", "self", ".", "_levels", ",", "max_seq_items", "=", "False", ")", ")", ",", "(", "'codes'", ",", "ibase", ".", "default_pprint", "(", "self", ".", "_codes", ",", "max_seq_items", "=", "False", ")", ")", "]", "if", "com", ".", "_any_not_none", "(", "*", "self", ".", "names", ")", ":", "attrs", ".", "append", "(", "(", "'names'", ",", "ibase", ".", "default_pprint", "(", "self", ".", "names", ")", ")", ")", "if", "self", ".", "sortorder", "is", "not", "None", ":", "attrs", ".", "append", "(", "(", "'sortorder'", ",", "ibase", ".", "default_pprint", "(", "self", ".", "sortorder", ")", ")", ")", "return", "attrs" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._set_names
Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None validate : boolean, default True validate that the names match level lengths Raises ------ TypeError if each name is not hashable. Notes ----- sets names on levels. WARNING: mutates! Note that you generally want to set this *after* changing levels, so that it only acts on copies
pandas/core/indexes/multi.py
def _set_names(self, names, level=None, validate=True): """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None validate : boolean, default True validate that the names match level lengths Raises ------ TypeError if each name is not hashable. Notes ----- sets names on levels. WARNING: mutates! Note that you generally want to set this *after* changing levels, so that it only acts on copies """ # GH 15110 # Don't allow a single string for names in a MultiIndex if names is not None and not is_list_like(names): raise ValueError('Names should be list-like for a MultiIndex') names = list(names) if validate and level is not None and len(names) != len(level): raise ValueError('Length of names must match length of level.') if validate and level is None and len(names) != self.nlevels: raise ValueError('Length of names must match number of levels in ' 'MultiIndex.') if level is None: level = range(self.nlevels) else: level = [self._get_level_number(l) for l in level] # set the name for l, name in zip(level, names): if name is not None: # GH 20527 # All items in 'names' need to be hashable: if not is_hashable(name): raise TypeError('{}.name must be a hashable type' .format(self.__class__.__name__)) self.levels[l].rename(name, inplace=True)
def _set_names(self, names, level=None, validate=True): """ Set new names on index. Each name has to be a hashable type. Parameters ---------- values : str or sequence name(s) to set level : int, level name, or sequence of int/level names (default None) If the index is a MultiIndex (hierarchical), level(s) to set (None for all levels). Otherwise level must be None validate : boolean, default True validate that the names match level lengths Raises ------ TypeError if each name is not hashable. Notes ----- sets names on levels. WARNING: mutates! Note that you generally want to set this *after* changing levels, so that it only acts on copies """ # GH 15110 # Don't allow a single string for names in a MultiIndex if names is not None and not is_list_like(names): raise ValueError('Names should be list-like for a MultiIndex') names = list(names) if validate and level is not None and len(names) != len(level): raise ValueError('Length of names must match length of level.') if validate and level is None and len(names) != self.nlevels: raise ValueError('Length of names must match number of levels in ' 'MultiIndex.') if level is None: level = range(self.nlevels) else: level = [self._get_level_number(l) for l in level] # set the name for l, name in zip(level, names): if name is not None: # GH 20527 # All items in 'names' need to be hashable: if not is_hashable(name): raise TypeError('{}.name must be a hashable type' .format(self.__class__.__name__)) self.levels[l].rename(name, inplace=True)
[ "Set", "new", "names", "on", "index", ".", "Each", "name", "has", "to", "be", "a", "hashable", "type", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1026-L1076
[ "def", "_set_names", "(", "self", ",", "names", ",", "level", "=", "None", ",", "validate", "=", "True", ")", ":", "# GH 15110", "# Don't allow a single string for names in a MultiIndex", "if", "names", "is", "not", "None", "and", "not", "is_list_like", "(", "names", ")", ":", "raise", "ValueError", "(", "'Names should be list-like for a MultiIndex'", ")", "names", "=", "list", "(", "names", ")", "if", "validate", "and", "level", "is", "not", "None", "and", "len", "(", "names", ")", "!=", "len", "(", "level", ")", ":", "raise", "ValueError", "(", "'Length of names must match length of level.'", ")", "if", "validate", "and", "level", "is", "None", "and", "len", "(", "names", ")", "!=", "self", ".", "nlevels", ":", "raise", "ValueError", "(", "'Length of names must match number of levels in '", "'MultiIndex.'", ")", "if", "level", "is", "None", ":", "level", "=", "range", "(", "self", ".", "nlevels", ")", "else", ":", "level", "=", "[", "self", ".", "_get_level_number", "(", "l", ")", "for", "l", "in", "level", "]", "# set the name", "for", "l", ",", "name", "in", "zip", "(", "level", ",", "names", ")", ":", "if", "name", "is", "not", "None", ":", "# GH 20527", "# All items in 'names' need to be hashable:", "if", "not", "is_hashable", "(", "name", ")", ":", "raise", "TypeError", "(", "'{}.name must be a hashable type'", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "levels", "[", "l", "]", ".", "rename", "(", "name", ",", "inplace", "=", "True", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.is_monotonic_increasing
return if the index is monotonic increasing (only equal or increasing) values.
pandas/core/indexes/multi.py
def is_monotonic_increasing(self): """ return if the index is monotonic increasing (only equal or increasing) values. """ # reversed() because lexsort() wants the most significant key last. values = [self._get_level_values(i).values for i in reversed(range(len(self.levels)))] try: sort_order = np.lexsort(values) return Index(sort_order).is_monotonic except TypeError: # we have mixed types and np.lexsort is not happy return Index(self.values).is_monotonic
def is_monotonic_increasing(self): """ return if the index is monotonic increasing (only equal or increasing) values. """ # reversed() because lexsort() wants the most significant key last. values = [self._get_level_values(i).values for i in reversed(range(len(self.levels)))] try: sort_order = np.lexsort(values) return Index(sort_order).is_monotonic except TypeError: # we have mixed types and np.lexsort is not happy return Index(self.values).is_monotonic
[ "return", "if", "the", "index", "is", "monotonic", "increasing", "(", "only", "equal", "or", "increasing", ")", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1192-L1207
[ "def", "is_monotonic_increasing", "(", "self", ")", ":", "# reversed() because lexsort() wants the most significant key last.", "values", "=", "[", "self", ".", "_get_level_values", "(", "i", ")", ".", "values", "for", "i", "in", "reversed", "(", "range", "(", "len", "(", "self", ".", "levels", ")", ")", ")", "]", "try", ":", "sort_order", "=", "np", ".", "lexsort", "(", "values", ")", "return", "Index", "(", "sort_order", ")", ".", "is_monotonic", "except", "TypeError", ":", "# we have mixed types and np.lexsort is not happy", "return", "Index", "(", "self", ".", "values", ")", ".", "is_monotonic" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._hashed_indexing_key
validate and return the hash for the provided key *this is internal for use for the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels
pandas/core/indexes/multi.py
def _hashed_indexing_key(self, key): """ validate and return the hash for the provided key *this is internal for use for the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels """ from pandas.core.util.hashing import hash_tuples, hash_tuple if not isinstance(key, tuple): return hash_tuples(key) if not len(key) == self.nlevels: raise KeyError def f(k, stringify): if stringify and not isinstance(k, str): k = str(k) return k key = tuple(f(k, stringify) for k, stringify in zip(key, self._have_mixed_levels)) return hash_tuple(key)
def _hashed_indexing_key(self, key): """ validate and return the hash for the provided key *this is internal for use for the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels """ from pandas.core.util.hashing import hash_tuples, hash_tuple if not isinstance(key, tuple): return hash_tuples(key) if not len(key) == self.nlevels: raise KeyError def f(k, stringify): if stringify and not isinstance(k, str): k = str(k) return k key = tuple(f(k, stringify) for k, stringify in zip(key, self._have_mixed_levels)) return hash_tuple(key)
[ "validate", "and", "return", "the", "hash", "for", "the", "provided", "key" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1234-L1267
[ "def", "_hashed_indexing_key", "(", "self", ",", "key", ")", ":", "from", "pandas", ".", "core", ".", "util", ".", "hashing", "import", "hash_tuples", ",", "hash_tuple", "if", "not", "isinstance", "(", "key", ",", "tuple", ")", ":", "return", "hash_tuples", "(", "key", ")", "if", "not", "len", "(", "key", ")", "==", "self", ".", "nlevels", ":", "raise", "KeyError", "def", "f", "(", "k", ",", "stringify", ")", ":", "if", "stringify", "and", "not", "isinstance", "(", "k", ",", "str", ")", ":", "k", "=", "str", "(", "k", ")", "return", "k", "key", "=", "tuple", "(", "f", "(", "k", ",", "stringify", ")", "for", "k", ",", "stringify", "in", "zip", "(", "key", ",", "self", ".", "_have_mixed_levels", ")", ")", "return", "hash_tuple", "(", "key", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._get_level_values
Return vector of label values for requested level, equal to the length of the index **this is an internal method** Parameters ---------- level : int level unique : bool, default False if True, drop duplicated values Returns ------- values : ndarray
pandas/core/indexes/multi.py
def _get_level_values(self, level, unique=False): """ Return vector of label values for requested level, equal to the length of the index **this is an internal method** Parameters ---------- level : int level unique : bool, default False if True, drop duplicated values Returns ------- values : ndarray """ values = self.levels[level] level_codes = self.codes[level] if unique: level_codes = algos.unique(level_codes) filled = algos.take_1d(values._values, level_codes, fill_value=values._na_value) values = values._shallow_copy(filled) return values
def _get_level_values(self, level, unique=False): """ Return vector of label values for requested level, equal to the length of the index **this is an internal method** Parameters ---------- level : int level unique : bool, default False if True, drop duplicated values Returns ------- values : ndarray """ values = self.levels[level] level_codes = self.codes[level] if unique: level_codes = algos.unique(level_codes) filled = algos.take_1d(values._values, level_codes, fill_value=values._na_value) values = values._shallow_copy(filled) return values
[ "Return", "vector", "of", "label", "values", "for", "requested", "level", "equal", "to", "the", "length", "of", "the", "index" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1357-L1382
[ "def", "_get_level_values", "(", "self", ",", "level", ",", "unique", "=", "False", ")", ":", "values", "=", "self", ".", "levels", "[", "level", "]", "level_codes", "=", "self", ".", "codes", "[", "level", "]", "if", "unique", ":", "level_codes", "=", "algos", ".", "unique", "(", "level_codes", ")", "filled", "=", "algos", ".", "take_1d", "(", "values", ".", "_values", ",", "level_codes", ",", "fill_value", "=", "values", ".", "_na_value", ")", "values", "=", "values", ".", "_shallow_copy", "(", "filled", ")", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.get_level_values
Return vector of label values for requested level, equal to the length of the index. Parameters ---------- level : int or str ``level`` is either the integer position of the level in the MultiIndex, or the name of the level. Returns ------- values : Index Values is a level of this MultiIndex converted to a single :class:`Index` (or subclass thereof). Examples --------- Create a MultiIndex: >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def'))) >>> mi.names = ['level_1', 'level_2'] Get level values by supplying level as either integer or name: >>> mi.get_level_values(0) Index(['a', 'b', 'c'], dtype='object', name='level_1') >>> mi.get_level_values('level_2') Index(['d', 'e', 'f'], dtype='object', name='level_2')
pandas/core/indexes/multi.py
def get_level_values(self, level): """ Return vector of label values for requested level, equal to the length of the index. Parameters ---------- level : int or str ``level`` is either the integer position of the level in the MultiIndex, or the name of the level. Returns ------- values : Index Values is a level of this MultiIndex converted to a single :class:`Index` (or subclass thereof). Examples --------- Create a MultiIndex: >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def'))) >>> mi.names = ['level_1', 'level_2'] Get level values by supplying level as either integer or name: >>> mi.get_level_values(0) Index(['a', 'b', 'c'], dtype='object', name='level_1') >>> mi.get_level_values('level_2') Index(['d', 'e', 'f'], dtype='object', name='level_2') """ level = self._get_level_number(level) values = self._get_level_values(level) return values
def get_level_values(self, level): """ Return vector of label values for requested level, equal to the length of the index. Parameters ---------- level : int or str ``level`` is either the integer position of the level in the MultiIndex, or the name of the level. Returns ------- values : Index Values is a level of this MultiIndex converted to a single :class:`Index` (or subclass thereof). Examples --------- Create a MultiIndex: >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def'))) >>> mi.names = ['level_1', 'level_2'] Get level values by supplying level as either integer or name: >>> mi.get_level_values(0) Index(['a', 'b', 'c'], dtype='object', name='level_1') >>> mi.get_level_values('level_2') Index(['d', 'e', 'f'], dtype='object', name='level_2') """ level = self._get_level_number(level) values = self._get_level_values(level) return values
[ "Return", "vector", "of", "label", "values", "for", "requested", "level", "equal", "to", "the", "length", "of", "the", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1384-L1418
[ "def", "get_level_values", "(", "self", ",", "level", ")", ":", "level", "=", "self", ".", "_get_level_number", "(", "level", ")", "values", "=", "self", ".", "_get_level_values", "(", "level", ")", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.to_frame
Create a DataFrame with the levels of the MultiIndex as columns. Column ordering is determined by the DataFrame constructor with data as a dict. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original MultiIndex. name : list / sequence of strings, optional The passed names should substitute index level names. Returns ------- DataFrame : a DataFrame containing the original MultiIndex data. See Also -------- DataFrame
pandas/core/indexes/multi.py
def to_frame(self, index=True, name=None): """ Create a DataFrame with the levels of the MultiIndex as columns. Column ordering is determined by the DataFrame constructor with data as a dict. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original MultiIndex. name : list / sequence of strings, optional The passed names should substitute index level names. Returns ------- DataFrame : a DataFrame containing the original MultiIndex data. See Also -------- DataFrame """ from pandas import DataFrame if name is not None: if not is_list_like(name): raise TypeError("'name' must be a list / sequence " "of column names.") if len(name) != len(self.levels): raise ValueError("'name' should have same length as " "number of levels on index.") idx_names = name else: idx_names = self.names # Guarantee resulting column order result = DataFrame( OrderedDict([ ((level if lvlname is None else lvlname), self._get_level_values(level)) for lvlname, level in zip(idx_names, range(len(self.levels))) ]), copy=False ) if index: result.index = self return result
def to_frame(self, index=True, name=None): """ Create a DataFrame with the levels of the MultiIndex as columns. Column ordering is determined by the DataFrame constructor with data as a dict. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original MultiIndex. name : list / sequence of strings, optional The passed names should substitute index level names. Returns ------- DataFrame : a DataFrame containing the original MultiIndex data. See Also -------- DataFrame """ from pandas import DataFrame if name is not None: if not is_list_like(name): raise TypeError("'name' must be a list / sequence " "of column names.") if len(name) != len(self.levels): raise ValueError("'name' should have same length as " "number of levels on index.") idx_names = name else: idx_names = self.names # Guarantee resulting column order result = DataFrame( OrderedDict([ ((level if lvlname is None else lvlname), self._get_level_values(level)) for lvlname, level in zip(idx_names, range(len(self.levels))) ]), copy=False ) if index: result.index = self return result
[ "Create", "a", "DataFrame", "with", "the", "levels", "of", "the", "MultiIndex", "as", "columns", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1433-L1484
[ "def", "to_frame", "(", "self", ",", "index", "=", "True", ",", "name", "=", "None", ")", ":", "from", "pandas", "import", "DataFrame", "if", "name", "is", "not", "None", ":", "if", "not", "is_list_like", "(", "name", ")", ":", "raise", "TypeError", "(", "\"'name' must be a list / sequence \"", "\"of column names.\"", ")", "if", "len", "(", "name", ")", "!=", "len", "(", "self", ".", "levels", ")", ":", "raise", "ValueError", "(", "\"'name' should have same length as \"", "\"number of levels on index.\"", ")", "idx_names", "=", "name", "else", ":", "idx_names", "=", "self", ".", "names", "# Guarantee resulting column order", "result", "=", "DataFrame", "(", "OrderedDict", "(", "[", "(", "(", "level", "if", "lvlname", "is", "None", "else", "lvlname", ")", ",", "self", ".", "_get_level_values", "(", "level", ")", ")", "for", "lvlname", ",", "level", "in", "zip", "(", "idx_names", ",", "range", "(", "len", "(", "self", ".", "levels", ")", ")", ")", "]", ")", ",", "copy", "=", "False", ")", "if", "index", ":", "result", ".", "index", "=", "self", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.to_hierarchical
Return a MultiIndex reshaped to conform to the shapes given by n_repeat and n_shuffle. .. deprecated:: 0.24.0 Useful to replicate and rearrange a MultiIndex for combination with another Index with n_repeat items. Parameters ---------- n_repeat : int Number of times to repeat the labels on self n_shuffle : int Controls the reordering of the labels. If the result is going to be an inner level in a MultiIndex, n_shuffle will need to be greater than one. The size of each label must divisible by n_shuffle. Returns ------- MultiIndex Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]) >>> idx.to_hierarchical(3) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
pandas/core/indexes/multi.py
def to_hierarchical(self, n_repeat, n_shuffle=1): """ Return a MultiIndex reshaped to conform to the shapes given by n_repeat and n_shuffle. .. deprecated:: 0.24.0 Useful to replicate and rearrange a MultiIndex for combination with another Index with n_repeat items. Parameters ---------- n_repeat : int Number of times to repeat the labels on self n_shuffle : int Controls the reordering of the labels. If the result is going to be an inner level in a MultiIndex, n_shuffle will need to be greater than one. The size of each label must divisible by n_shuffle. Returns ------- MultiIndex Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]) >>> idx.to_hierarchical(3) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) """ levels = self.levels codes = [np.repeat(level_codes, n_repeat) for level_codes in self.codes] # Assumes that each level_codes is divisible by n_shuffle codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes] names = self.names warnings.warn("Method .to_hierarchical is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) return MultiIndex(levels=levels, codes=codes, names=names)
def to_hierarchical(self, n_repeat, n_shuffle=1): """ Return a MultiIndex reshaped to conform to the shapes given by n_repeat and n_shuffle. .. deprecated:: 0.24.0 Useful to replicate and rearrange a MultiIndex for combination with another Index with n_repeat items. Parameters ---------- n_repeat : int Number of times to repeat the labels on self n_shuffle : int Controls the reordering of the labels. If the result is going to be an inner level in a MultiIndex, n_shuffle will need to be greater than one. The size of each label must divisible by n_shuffle. Returns ------- MultiIndex Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]) >>> idx.to_hierarchical(3) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) """ levels = self.levels codes = [np.repeat(level_codes, n_repeat) for level_codes in self.codes] # Assumes that each level_codes is divisible by n_shuffle codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes] names = self.names warnings.warn("Method .to_hierarchical is deprecated and will " "be removed in a future version", FutureWarning, stacklevel=2) return MultiIndex(levels=levels, codes=codes, names=names)
[ "Return", "a", "MultiIndex", "reshaped", "to", "conform", "to", "the", "shapes", "given", "by", "n_repeat", "and", "n_shuffle", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1486-L1528
[ "def", "to_hierarchical", "(", "self", ",", "n_repeat", ",", "n_shuffle", "=", "1", ")", ":", "levels", "=", "self", ".", "levels", "codes", "=", "[", "np", ".", "repeat", "(", "level_codes", ",", "n_repeat", ")", "for", "level_codes", "in", "self", ".", "codes", "]", "# Assumes that each level_codes is divisible by n_shuffle", "codes", "=", "[", "x", ".", "reshape", "(", "n_shuffle", ",", "-", "1", ")", ".", "ravel", "(", "order", "=", "'F'", ")", "for", "x", "in", "codes", "]", "names", "=", "self", ".", "names", "warnings", ".", "warn", "(", "\"Method .to_hierarchical is deprecated and will \"", "\"be removed in a future version\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "MultiIndex", "(", "levels", "=", "levels", ",", "codes", "=", "codes", ",", "names", "=", "names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._sort_levels_monotonic
.. versionadded:: 0.20.0 This is an *internal* function. Create a new MultiIndex from the current to monotonically sorted items IN the levels. This does not actually make the entire MultiIndex monotonic, JUST the levels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i.sort_monotonic() MultiIndex(levels=[['a', 'b'], ['aa', 'bb']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]])
pandas/core/indexes/multi.py
def _sort_levels_monotonic(self): """ .. versionadded:: 0.20.0 This is an *internal* function. Create a new MultiIndex from the current to monotonically sorted items IN the levels. This does not actually make the entire MultiIndex monotonic, JUST the levels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i.sort_monotonic() MultiIndex(levels=[['a', 'b'], ['aa', 'bb']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]]) """ if self.is_lexsorted() and self.is_monotonic: return self new_levels = [] new_codes = [] for lev, level_codes in zip(self.levels, self.codes): if not lev.is_monotonic: try: # indexer to reorder the levels indexer = lev.argsort() except TypeError: pass else: lev = lev.take(indexer) # indexer to reorder the level codes indexer = ensure_int64(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) level_codes = algos.take_1d(ri, level_codes) new_levels.append(lev) new_codes.append(level_codes) return MultiIndex(new_levels, new_codes, names=self.names, sortorder=self.sortorder, verify_integrity=False)
def _sort_levels_monotonic(self): """ .. versionadded:: 0.20.0 This is an *internal* function. Create a new MultiIndex from the current to monotonically sorted items IN the levels. This does not actually make the entire MultiIndex monotonic, JUST the levels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i.sort_monotonic() MultiIndex(levels=[['a', 'b'], ['aa', 'bb']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]]) """ if self.is_lexsorted() and self.is_monotonic: return self new_levels = [] new_codes = [] for lev, level_codes in zip(self.levels, self.codes): if not lev.is_monotonic: try: # indexer to reorder the levels indexer = lev.argsort() except TypeError: pass else: lev = lev.take(indexer) # indexer to reorder the level codes indexer = ensure_int64(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) level_codes = algos.take_1d(ri, level_codes) new_levels.append(lev) new_codes.append(level_codes) return MultiIndex(new_levels, new_codes, names=self.names, sortorder=self.sortorder, verify_integrity=False)
[ "..", "versionadded", "::", "0", ".", "20", ".", "0" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1583-L1643
[ "def", "_sort_levels_monotonic", "(", "self", ")", ":", "if", "self", ".", "is_lexsorted", "(", ")", "and", "self", ".", "is_monotonic", ":", "return", "self", "new_levels", "=", "[", "]", "new_codes", "=", "[", "]", "for", "lev", ",", "level_codes", "in", "zip", "(", "self", ".", "levels", ",", "self", ".", "codes", ")", ":", "if", "not", "lev", ".", "is_monotonic", ":", "try", ":", "# indexer to reorder the levels", "indexer", "=", "lev", ".", "argsort", "(", ")", "except", "TypeError", ":", "pass", "else", ":", "lev", "=", "lev", ".", "take", "(", "indexer", ")", "# indexer to reorder the level codes", "indexer", "=", "ensure_int64", "(", "indexer", ")", "ri", "=", "lib", ".", "get_reverse_indexer", "(", "indexer", ",", "len", "(", "indexer", ")", ")", "level_codes", "=", "algos", ".", "take_1d", "(", "ri", ",", "level_codes", ")", "new_levels", ".", "append", "(", "lev", ")", "new_codes", ".", "append", "(", "level_codes", ")", "return", "MultiIndex", "(", "new_levels", ",", "new_codes", ",", "names", "=", "self", ".", "names", ",", "sortorder", "=", "self", ".", "sortorder", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.remove_unused_levels
Create a new MultiIndex from the current that removes unused levels, meaning that they are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. .. versionadded:: 0.20.0 Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex.from_product([range(2), list('ab')]) MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i[2:] MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[1, 1], [0, 1]]) The 0 from the first level is not represented and can be removed >>> i[2:].remove_unused_levels() MultiIndex(levels=[[1], ['a', 'b']], codes=[[0, 0], [0, 1]])
pandas/core/indexes/multi.py
def remove_unused_levels(self): """ Create a new MultiIndex from the current that removes unused levels, meaning that they are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. .. versionadded:: 0.20.0 Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex.from_product([range(2), list('ab')]) MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i[2:] MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[1, 1], [0, 1]]) The 0 from the first level is not represented and can be removed >>> i[2:].remove_unused_levels() MultiIndex(levels=[[1], ['a', 'b']], codes=[[0, 0], [0, 1]]) """ new_levels = [] new_codes = [] changed = False for lev, level_codes in zip(self.levels, self.codes): # Since few levels are typically unused, bincount() is more # efficient than unique() - however it only accepts positive values # (and drops order): uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1 has_na = int(len(uniques) and (uniques[0] == -1)) if len(uniques) != len(lev) + has_na: # We have unused levels changed = True # Recalculate uniques, now preserving order. # Can easily be cythonized by exploiting the already existing # "uniques" and stop parsing "level_codes" when all items # are found: uniques = algos.unique(level_codes) if has_na: na_idx = np.where(uniques == -1)[0] # Just ensure that -1 is in first position: uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] # codes get mapped from uniques to 0:len(uniques) # -1 (if present) is mapped to last position code_mapping = np.zeros(len(lev) + has_na) # ... 
and reassigned value -1: code_mapping[uniques] = np.arange(len(uniques)) - has_na level_codes = code_mapping[level_codes] # new levels are simple lev = lev.take(uniques[has_na:]) new_levels.append(lev) new_codes.append(level_codes) result = self._shallow_copy() if changed: result._reset_identity() result._set_levels(new_levels, validate=False) result._set_codes(new_codes, validate=False) return result
def remove_unused_levels(self): """ Create a new MultiIndex from the current that removes unused levels, meaning that they are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. .. versionadded:: 0.20.0 Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex.from_product([range(2), list('ab')]) MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i[2:] MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[1, 1], [0, 1]]) The 0 from the first level is not represented and can be removed >>> i[2:].remove_unused_levels() MultiIndex(levels=[[1], ['a', 'b']], codes=[[0, 0], [0, 1]]) """ new_levels = [] new_codes = [] changed = False for lev, level_codes in zip(self.levels, self.codes): # Since few levels are typically unused, bincount() is more # efficient than unique() - however it only accepts positive values # (and drops order): uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1 has_na = int(len(uniques) and (uniques[0] == -1)) if len(uniques) != len(lev) + has_na: # We have unused levels changed = True # Recalculate uniques, now preserving order. # Can easily be cythonized by exploiting the already existing # "uniques" and stop parsing "level_codes" when all items # are found: uniques = algos.unique(level_codes) if has_na: na_idx = np.where(uniques == -1)[0] # Just ensure that -1 is in first position: uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] # codes get mapped from uniques to 0:len(uniques) # -1 (if present) is mapped to last position code_mapping = np.zeros(len(lev) + has_na) # ... 
and reassigned value -1: code_mapping[uniques] = np.arange(len(uniques)) - has_na level_codes = code_mapping[level_codes] # new levels are simple lev = lev.take(uniques[has_na:]) new_levels.append(lev) new_codes.append(level_codes) result = self._shallow_copy() if changed: result._reset_identity() result._set_levels(new_levels, validate=False) result._set_codes(new_codes, validate=False) return result
[ "Create", "a", "new", "MultiIndex", "from", "the", "current", "that", "removes", "unused", "levels", "meaning", "that", "they", "are", "not", "expressed", "in", "the", "labels", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1645-L1725
[ "def", "remove_unused_levels", "(", "self", ")", ":", "new_levels", "=", "[", "]", "new_codes", "=", "[", "]", "changed", "=", "False", "for", "lev", ",", "level_codes", "in", "zip", "(", "self", ".", "levels", ",", "self", ".", "codes", ")", ":", "# Since few levels are typically unused, bincount() is more", "# efficient than unique() - however it only accepts positive values", "# (and drops order):", "uniques", "=", "np", ".", "where", "(", "np", ".", "bincount", "(", "level_codes", "+", "1", ")", ">", "0", ")", "[", "0", "]", "-", "1", "has_na", "=", "int", "(", "len", "(", "uniques", ")", "and", "(", "uniques", "[", "0", "]", "==", "-", "1", ")", ")", "if", "len", "(", "uniques", ")", "!=", "len", "(", "lev", ")", "+", "has_na", ":", "# We have unused levels", "changed", "=", "True", "# Recalculate uniques, now preserving order.", "# Can easily be cythonized by exploiting the already existing", "# \"uniques\" and stop parsing \"level_codes\" when all items", "# are found:", "uniques", "=", "algos", ".", "unique", "(", "level_codes", ")", "if", "has_na", ":", "na_idx", "=", "np", ".", "where", "(", "uniques", "==", "-", "1", ")", "[", "0", "]", "# Just ensure that -1 is in first position:", "uniques", "[", "[", "0", ",", "na_idx", "[", "0", "]", "]", "]", "=", "uniques", "[", "[", "na_idx", "[", "0", "]", ",", "0", "]", "]", "# codes get mapped from uniques to 0:len(uniques)", "# -1 (if present) is mapped to last position", "code_mapping", "=", "np", ".", "zeros", "(", "len", "(", "lev", ")", "+", "has_na", ")", "# ... 
and reassigned value -1:", "code_mapping", "[", "uniques", "]", "=", "np", ".", "arange", "(", "len", "(", "uniques", ")", ")", "-", "has_na", "level_codes", "=", "code_mapping", "[", "level_codes", "]", "# new levels are simple", "lev", "=", "lev", ".", "take", "(", "uniques", "[", "has_na", ":", "]", ")", "new_levels", ".", "append", "(", "lev", ")", "new_codes", ".", "append", "(", "level_codes", ")", "result", "=", "self", ".", "_shallow_copy", "(", ")", "if", "changed", ":", "result", ".", "_reset_identity", "(", ")", "result", ".", "_set_levels", "(", "new_levels", ",", "validate", "=", "False", ")", "result", ".", "_set_codes", "(", "new_codes", ",", "validate", "=", "False", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._assert_take_fillable
Internal method to handle NA filling of take
pandas/core/indexes/multi.py
def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=None): """ Internal method to handle NA filling of take """ # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: if (indices < -1).any(): msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) taken = [lab.take(indices) for lab in self.codes] mask = indices == -1 if mask.any(): masked = [] for new_label in taken: label_values = new_label.values() label_values[mask] = na_value masked.append(np.asarray(label_values)) taken = masked else: taken = [lab.take(indices) for lab in self.codes] return taken
def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=None): """ Internal method to handle NA filling of take """ # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: if (indices < -1).any(): msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) taken = [lab.take(indices) for lab in self.codes] mask = indices == -1 if mask.any(): masked = [] for new_label in taken: label_values = new_label.values() label_values[mask] = na_value masked.append(np.asarray(label_values)) taken = masked else: taken = [lab.take(indices) for lab in self.codes] return taken
[ "Internal", "method", "to", "handle", "NA", "filling", "of", "take" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1806-L1826
[ "def", "_assert_take_fillable", "(", "self", ",", "values", ",", "indices", ",", "allow_fill", "=", "True", ",", "fill_value", "=", "None", ",", "na_value", "=", "None", ")", ":", "# only fill if we are passing a non-None fill_value", "if", "allow_fill", "and", "fill_value", "is", "not", "None", ":", "if", "(", "indices", "<", "-", "1", ")", ".", "any", "(", ")", ":", "msg", "=", "(", "'When allow_fill=True and fill_value is not None, '", "'all indices must be >= -1'", ")", "raise", "ValueError", "(", "msg", ")", "taken", "=", "[", "lab", ".", "take", "(", "indices", ")", "for", "lab", "in", "self", ".", "codes", "]", "mask", "=", "indices", "==", "-", "1", "if", "mask", ".", "any", "(", ")", ":", "masked", "=", "[", "]", "for", "new_label", "in", "taken", ":", "label_values", "=", "new_label", ".", "values", "(", ")", "label_values", "[", "mask", "]", "=", "na_value", "masked", ".", "append", "(", "np", ".", "asarray", "(", "label_values", ")", ")", "taken", "=", "masked", "else", ":", "taken", "=", "[", "lab", ".", "take", "(", "indices", ")", "for", "lab", "in", "self", ".", "codes", "]", "return", "taken" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.append
Append a collection of Index options together Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index
pandas/core/indexes/multi.py
def append(self, other): """ Append a collection of Index options together Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index """ if not isinstance(other, (list, tuple)): other = [other] if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other): arrays = [] for i in range(self.nlevels): label = self._get_level_values(i) appended = [o._get_level_values(i) for o in other] arrays.append(label.append(appended)) return MultiIndex.from_arrays(arrays, names=self.names) to_concat = (self.values, ) + tuple(k._values for k in other) new_tuples = np.concatenate(to_concat) # if all(isinstance(x, MultiIndex) for x in other): try: return MultiIndex.from_tuples(new_tuples, names=self.names) except (TypeError, IndexError): return Index(new_tuples)
def append(self, other): """ Append a collection of Index options together Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index """ if not isinstance(other, (list, tuple)): other = [other] if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels) for o in other): arrays = [] for i in range(self.nlevels): label = self._get_level_values(i) appended = [o._get_level_values(i) for o in other] arrays.append(label.append(appended)) return MultiIndex.from_arrays(arrays, names=self.names) to_concat = (self.values, ) + tuple(k._values for k in other) new_tuples = np.concatenate(to_concat) # if all(isinstance(x, MultiIndex) for x in other): try: return MultiIndex.from_tuples(new_tuples, names=self.names) except (TypeError, IndexError): return Index(new_tuples)
[ "Append", "a", "collection", "of", "Index", "options", "together" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1828-L1859
[ "def", "append", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "(", "list", ",", "tuple", ")", ")", ":", "other", "=", "[", "other", "]", "if", "all", "(", "(", "isinstance", "(", "o", ",", "MultiIndex", ")", "and", "o", ".", "nlevels", ">=", "self", ".", "nlevels", ")", "for", "o", "in", "other", ")", ":", "arrays", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "nlevels", ")", ":", "label", "=", "self", ".", "_get_level_values", "(", "i", ")", "appended", "=", "[", "o", ".", "_get_level_values", "(", "i", ")", "for", "o", "in", "other", "]", "arrays", ".", "append", "(", "label", ".", "append", "(", "appended", ")", ")", "return", "MultiIndex", ".", "from_arrays", "(", "arrays", ",", "names", "=", "self", ".", "names", ")", "to_concat", "=", "(", "self", ".", "values", ",", ")", "+", "tuple", "(", "k", ".", "_values", "for", "k", "in", "other", ")", "new_tuples", "=", "np", ".", "concatenate", "(", "to_concat", ")", "# if all(isinstance(x, MultiIndex) for x in other):", "try", ":", "return", "MultiIndex", ".", "from_tuples", "(", "new_tuples", ",", "names", "=", "self", ".", "names", ")", "except", "(", "TypeError", ",", "IndexError", ")", ":", "return", "Index", "(", "new_tuples", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.drop
Make new MultiIndex with passed list of codes deleted Parameters ---------- codes : array-like Must be a list of tuples level : int or level name, default None Returns ------- dropped : MultiIndex
pandas/core/indexes/multi.py
def drop(self, codes, level=None, errors='raise'): """ Make new MultiIndex with passed list of codes deleted Parameters ---------- codes : array-like Must be a list of tuples level : int or level name, default None Returns ------- dropped : MultiIndex """ if level is not None: return self._drop_from_level(codes, level) try: if not isinstance(codes, (np.ndarray, Index)): codes = com.index_labels_to_array(codes) indexer = self.get_indexer(codes) mask = indexer == -1 if mask.any(): if errors != 'ignore': raise ValueError('codes %s not contained in axis' % codes[mask]) except Exception: pass inds = [] for level_codes in codes: try: loc = self.get_loc(level_codes) # get_loc returns either an integer, a slice, or a boolean # mask if isinstance(loc, int): inds.append(loc) elif isinstance(loc, slice): inds.extend(lrange(loc.start, loc.stop)) elif com.is_bool_indexer(loc): if self.lexsort_depth == 0: warnings.warn('dropping on a non-lexsorted multi-index' ' without a level parameter may impact ' 'performance.', PerformanceWarning, stacklevel=3) loc = loc.nonzero()[0] inds.extend(loc) else: msg = 'unsupported indexer of type {}'.format(type(loc)) raise AssertionError(msg) except KeyError: if errors != 'ignore': raise return self.delete(inds)
def drop(self, codes, level=None, errors='raise'): """ Make new MultiIndex with passed list of codes deleted Parameters ---------- codes : array-like Must be a list of tuples level : int or level name, default None Returns ------- dropped : MultiIndex """ if level is not None: return self._drop_from_level(codes, level) try: if not isinstance(codes, (np.ndarray, Index)): codes = com.index_labels_to_array(codes) indexer = self.get_indexer(codes) mask = indexer == -1 if mask.any(): if errors != 'ignore': raise ValueError('codes %s not contained in axis' % codes[mask]) except Exception: pass inds = [] for level_codes in codes: try: loc = self.get_loc(level_codes) # get_loc returns either an integer, a slice, or a boolean # mask if isinstance(loc, int): inds.append(loc) elif isinstance(loc, slice): inds.extend(lrange(loc.start, loc.stop)) elif com.is_bool_indexer(loc): if self.lexsort_depth == 0: warnings.warn('dropping on a non-lexsorted multi-index' ' without a level parameter may impact ' 'performance.', PerformanceWarning, stacklevel=3) loc = loc.nonzero()[0] inds.extend(loc) else: msg = 'unsupported indexer of type {}'.format(type(loc)) raise AssertionError(msg) except KeyError: if errors != 'ignore': raise return self.delete(inds)
[ "Make", "new", "MultiIndex", "with", "passed", "list", "of", "codes", "deleted" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1878-L1933
[ "def", "drop", "(", "self", ",", "codes", ",", "level", "=", "None", ",", "errors", "=", "'raise'", ")", ":", "if", "level", "is", "not", "None", ":", "return", "self", ".", "_drop_from_level", "(", "codes", ",", "level", ")", "try", ":", "if", "not", "isinstance", "(", "codes", ",", "(", "np", ".", "ndarray", ",", "Index", ")", ")", ":", "codes", "=", "com", ".", "index_labels_to_array", "(", "codes", ")", "indexer", "=", "self", ".", "get_indexer", "(", "codes", ")", "mask", "=", "indexer", "==", "-", "1", "if", "mask", ".", "any", "(", ")", ":", "if", "errors", "!=", "'ignore'", ":", "raise", "ValueError", "(", "'codes %s not contained in axis'", "%", "codes", "[", "mask", "]", ")", "except", "Exception", ":", "pass", "inds", "=", "[", "]", "for", "level_codes", "in", "codes", ":", "try", ":", "loc", "=", "self", ".", "get_loc", "(", "level_codes", ")", "# get_loc returns either an integer, a slice, or a boolean", "# mask", "if", "isinstance", "(", "loc", ",", "int", ")", ":", "inds", ".", "append", "(", "loc", ")", "elif", "isinstance", "(", "loc", ",", "slice", ")", ":", "inds", ".", "extend", "(", "lrange", "(", "loc", ".", "start", ",", "loc", ".", "stop", ")", ")", "elif", "com", ".", "is_bool_indexer", "(", "loc", ")", ":", "if", "self", ".", "lexsort_depth", "==", "0", ":", "warnings", ".", "warn", "(", "'dropping on a non-lexsorted multi-index'", "' without a level parameter may impact '", "'performance.'", ",", "PerformanceWarning", ",", "stacklevel", "=", "3", ")", "loc", "=", "loc", ".", "nonzero", "(", ")", "[", "0", "]", "inds", ".", "extend", "(", "loc", ")", "else", ":", "msg", "=", "'unsupported indexer of type {}'", ".", "format", "(", "type", "(", "loc", ")", ")", "raise", "AssertionError", "(", "msg", ")", "except", "KeyError", ":", "if", "errors", "!=", "'ignore'", ":", "raise", "return", "self", ".", "delete", "(", "inds", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.swaplevel
Swap level i with level j. Calling this method does not change the ordering of the values. Parameters ---------- i : int, str, default -2 First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str, default -1 Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A new MultiIndex. .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. See Also -------- Series.swaplevel : Swap levels i and j in a MultiIndex. Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a particular axis. Examples -------- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi.swaplevel(0, 1) MultiIndex(levels=[['bb', 'aa'], ['a', 'b']], codes=[[0, 1, 0, 1], [0, 0, 1, 1]])
pandas/core/indexes/multi.py
def swaplevel(self, i=-2, j=-1): """ Swap level i with level j. Calling this method does not change the ordering of the values. Parameters ---------- i : int, str, default -2 First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str, default -1 Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A new MultiIndex. .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. See Also -------- Series.swaplevel : Swap levels i and j in a MultiIndex. Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a particular axis. Examples -------- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi.swaplevel(0, 1) MultiIndex(levels=[['bb', 'aa'], ['a', 'b']], codes=[[0, 1, 0, 1], [0, 0, 1, 1]]) """ new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) i = self._get_level_number(i) j = self._get_level_number(j) new_levels[i], new_levels[j] = new_levels[j], new_levels[i] new_codes[i], new_codes[j] = new_codes[j], new_codes[i] new_names[i], new_names[j] = new_names[j], new_names[i] return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)
def swaplevel(self, i=-2, j=-1): """ Swap level i with level j. Calling this method does not change the ordering of the values. Parameters ---------- i : int, str, default -2 First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str, default -1 Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A new MultiIndex. .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. See Also -------- Series.swaplevel : Swap levels i and j in a MultiIndex. Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a particular axis. Examples -------- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi.swaplevel(0, 1) MultiIndex(levels=[['bb', 'aa'], ['a', 'b']], codes=[[0, 1, 0, 1], [0, 0, 1, 1]]) """ new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) i = self._get_level_number(i) j = self._get_level_number(j) new_levels[i], new_levels[j] = new_levels[j], new_levels[i] new_codes[i], new_codes[j] = new_codes[j], new_codes[i] new_names[i], new_names[j] = new_names[j], new_names[i] return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)
[ "Swap", "level", "i", "with", "level", "j", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L1945-L1999
[ "def", "swaplevel", "(", "self", ",", "i", "=", "-", "2", ",", "j", "=", "-", "1", ")", ":", "new_levels", "=", "list", "(", "self", ".", "levels", ")", "new_codes", "=", "list", "(", "self", ".", "codes", ")", "new_names", "=", "list", "(", "self", ".", "names", ")", "i", "=", "self", ".", "_get_level_number", "(", "i", ")", "j", "=", "self", ".", "_get_level_number", "(", "j", ")", "new_levels", "[", "i", "]", ",", "new_levels", "[", "j", "]", "=", "new_levels", "[", "j", "]", ",", "new_levels", "[", "i", "]", "new_codes", "[", "i", "]", ",", "new_codes", "[", "j", "]", "=", "new_codes", "[", "j", "]", ",", "new_codes", "[", "i", "]", "new_names", "[", "i", "]", ",", "new_names", "[", "j", "]", "=", "new_names", "[", "j", "]", ",", "new_names", "[", "i", "]", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "new_names", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.reorder_levels
Rearrange levels using input order. May not drop or duplicate levels Parameters ----------
pandas/core/indexes/multi.py
def reorder_levels(self, order): """ Rearrange levels using input order. May not drop or duplicate levels Parameters ---------- """ order = [self._get_level_number(i) for i in order] if len(order) != self.nlevels: raise AssertionError('Length of order must be same as ' 'number of levels (%d), got %d' % (self.nlevels, len(order))) new_levels = [self.levels[i] for i in order] new_codes = [self.codes[i] for i in order] new_names = [self.names[i] for i in order] return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)
def reorder_levels(self, order): """ Rearrange levels using input order. May not drop or duplicate levels Parameters ---------- """ order = [self._get_level_number(i) for i in order] if len(order) != self.nlevels: raise AssertionError('Length of order must be same as ' 'number of levels (%d), got %d' % (self.nlevels, len(order))) new_levels = [self.levels[i] for i in order] new_codes = [self.codes[i] for i in order] new_names = [self.names[i] for i in order] return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False)
[ "Rearrange", "levels", "using", "input", "order", ".", "May", "not", "drop", "or", "duplicate", "levels" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2001-L2018
[ "def", "reorder_levels", "(", "self", ",", "order", ")", ":", "order", "=", "[", "self", ".", "_get_level_number", "(", "i", ")", "for", "i", "in", "order", "]", "if", "len", "(", "order", ")", "!=", "self", ".", "nlevels", ":", "raise", "AssertionError", "(", "'Length of order must be same as '", "'number of levels (%d), got %d'", "%", "(", "self", ".", "nlevels", ",", "len", "(", "order", ")", ")", ")", "new_levels", "=", "[", "self", ".", "levels", "[", "i", "]", "for", "i", "in", "order", "]", "new_codes", "=", "[", "self", ".", "codes", "[", "i", "]", "for", "i", "in", "order", "]", "new_names", "=", "[", "self", ".", "names", "[", "i", "]", "for", "i", "in", "order", "]", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "new_names", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._get_codes_for_sorting
we categorizing our codes by using the available categories (all, not just observed) excluding any missing ones (-1); this is in preparation for sorting, where we need to disambiguate that -1 is not a valid valid
pandas/core/indexes/multi.py
def _get_codes_for_sorting(self): """ we categorizing our codes by using the available categories (all, not just observed) excluding any missing ones (-1); this is in preparation for sorting, where we need to disambiguate that -1 is not a valid valid """ from pandas.core.arrays import Categorical def cats(level_codes): return np.arange(np.array(level_codes).max() + 1 if len(level_codes) else 0, dtype=level_codes.dtype) return [Categorical.from_codes(level_codes, cats(level_codes), ordered=True) for level_codes in self.codes]
def _get_codes_for_sorting(self): """ we categorizing our codes by using the available categories (all, not just observed) excluding any missing ones (-1); this is in preparation for sorting, where we need to disambiguate that -1 is not a valid valid """ from pandas.core.arrays import Categorical def cats(level_codes): return np.arange(np.array(level_codes).max() + 1 if len(level_codes) else 0, dtype=level_codes.dtype) return [Categorical.from_codes(level_codes, cats(level_codes), ordered=True) for level_codes in self.codes]
[ "we", "categorizing", "our", "codes", "by", "using", "the", "available", "categories", "(", "all", "not", "just", "observed", ")", "excluding", "any", "missing", "ones", "(", "-", "1", ")", ";", "this", "is", "in", "preparation", "for", "sorting", "where", "we", "need", "to", "disambiguate", "that", "-", "1", "is", "not", "a", "valid", "valid" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2023-L2040
[ "def", "_get_codes_for_sorting", "(", "self", ")", ":", "from", "pandas", ".", "core", ".", "arrays", "import", "Categorical", "def", "cats", "(", "level_codes", ")", ":", "return", "np", ".", "arange", "(", "np", ".", "array", "(", "level_codes", ")", ".", "max", "(", ")", "+", "1", "if", "len", "(", "level_codes", ")", "else", "0", ",", "dtype", "=", "level_codes", ".", "dtype", ")", "return", "[", "Categorical", ".", "from_codes", "(", "level_codes", ",", "cats", "(", "level_codes", ")", ",", "ordered", "=", "True", ")", "for", "level_codes", "in", "self", ".", "codes", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.sortlevel
Sort MultiIndex at the requested level. The result will respect the original ordering of the associated factor at that level. Parameters ---------- level : list-like, int or str, default 0 If a string is given, must be a name of the level If list-like must be names or ints of levels. ascending : boolean, default True False to sort in descending order Can also be a list to specify a directed ordering sort_remaining : sort by the remaining levels after level Returns ------- sorted_index : pd.MultiIndex Resulting index. indexer : np.ndarray Indices of output values in original index.
pandas/core/indexes/multi.py
def sortlevel(self, level=0, ascending=True, sort_remaining=True): """ Sort MultiIndex at the requested level. The result will respect the original ordering of the associated factor at that level. Parameters ---------- level : list-like, int or str, default 0 If a string is given, must be a name of the level If list-like must be names or ints of levels. ascending : boolean, default True False to sort in descending order Can also be a list to specify a directed ordering sort_remaining : sort by the remaining levels after level Returns ------- sorted_index : pd.MultiIndex Resulting index. indexer : np.ndarray Indices of output values in original index. """ from pandas.core.sorting import indexer_from_factorized if isinstance(level, (str, int)): level = [level] level = [self._get_level_number(lev) for lev in level] sortorder = None # we have a directed ordering via ascending if isinstance(ascending, list): if not len(level) == len(ascending): raise ValueError("level must have same length as ascending") from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer([self.codes[lev] for lev in level], orders=ascending) # level ordering else: codes = list(self.codes) shape = list(self.levshape) # partition codes and shape primary = tuple(codes[lev] for lev in level) primshp = tuple(shape[lev] for lev in level) # Reverse sorted to retain the order of # smaller indices that needs to be removed for lev in sorted(level, reverse=True): codes.pop(lev) shape.pop(lev) if sort_remaining: primary += primary + tuple(codes) primshp += primshp + tuple(shape) else: sortorder = level[0] indexer = indexer_from_factorized(primary, primshp, compress=False) if not ascending: indexer = indexer[::-1] indexer = ensure_platform_int(indexer) new_codes = [level_codes.take(indexer) for level_codes in self.codes] new_index = MultiIndex(codes=new_codes, levels=self.levels, names=self.names, sortorder=sortorder, verify_integrity=False) return new_index, indexer
def sortlevel(self, level=0, ascending=True, sort_remaining=True): """ Sort MultiIndex at the requested level. The result will respect the original ordering of the associated factor at that level. Parameters ---------- level : list-like, int or str, default 0 If a string is given, must be a name of the level If list-like must be names or ints of levels. ascending : boolean, default True False to sort in descending order Can also be a list to specify a directed ordering sort_remaining : sort by the remaining levels after level Returns ------- sorted_index : pd.MultiIndex Resulting index. indexer : np.ndarray Indices of output values in original index. """ from pandas.core.sorting import indexer_from_factorized if isinstance(level, (str, int)): level = [level] level = [self._get_level_number(lev) for lev in level] sortorder = None # we have a directed ordering via ascending if isinstance(ascending, list): if not len(level) == len(ascending): raise ValueError("level must have same length as ascending") from pandas.core.sorting import lexsort_indexer indexer = lexsort_indexer([self.codes[lev] for lev in level], orders=ascending) # level ordering else: codes = list(self.codes) shape = list(self.levshape) # partition codes and shape primary = tuple(codes[lev] for lev in level) primshp = tuple(shape[lev] for lev in level) # Reverse sorted to retain the order of # smaller indices that needs to be removed for lev in sorted(level, reverse=True): codes.pop(lev) shape.pop(lev) if sort_remaining: primary += primary + tuple(codes) primshp += primshp + tuple(shape) else: sortorder = level[0] indexer = indexer_from_factorized(primary, primshp, compress=False) if not ascending: indexer = indexer[::-1] indexer = ensure_platform_int(indexer) new_codes = [level_codes.take(indexer) for level_codes in self.codes] new_index = MultiIndex(codes=new_codes, levels=self.levels, names=self.names, sortorder=sortorder, verify_integrity=False) return new_index, indexer
[ "Sort", "MultiIndex", "at", "the", "requested", "level", ".", "The", "result", "will", "respect", "the", "original", "ordering", "of", "the", "associated", "factor", "at", "that", "level", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2042-L2115
[ "def", "sortlevel", "(", "self", ",", "level", "=", "0", ",", "ascending", "=", "True", ",", "sort_remaining", "=", "True", ")", ":", "from", "pandas", ".", "core", ".", "sorting", "import", "indexer_from_factorized", "if", "isinstance", "(", "level", ",", "(", "str", ",", "int", ")", ")", ":", "level", "=", "[", "level", "]", "level", "=", "[", "self", ".", "_get_level_number", "(", "lev", ")", "for", "lev", "in", "level", "]", "sortorder", "=", "None", "# we have a directed ordering via ascending", "if", "isinstance", "(", "ascending", ",", "list", ")", ":", "if", "not", "len", "(", "level", ")", "==", "len", "(", "ascending", ")", ":", "raise", "ValueError", "(", "\"level must have same length as ascending\"", ")", "from", "pandas", ".", "core", ".", "sorting", "import", "lexsort_indexer", "indexer", "=", "lexsort_indexer", "(", "[", "self", ".", "codes", "[", "lev", "]", "for", "lev", "in", "level", "]", ",", "orders", "=", "ascending", ")", "# level ordering", "else", ":", "codes", "=", "list", "(", "self", ".", "codes", ")", "shape", "=", "list", "(", "self", ".", "levshape", ")", "# partition codes and shape", "primary", "=", "tuple", "(", "codes", "[", "lev", "]", "for", "lev", "in", "level", ")", "primshp", "=", "tuple", "(", "shape", "[", "lev", "]", "for", "lev", "in", "level", ")", "# Reverse sorted to retain the order of", "# smaller indices that needs to be removed", "for", "lev", "in", "sorted", "(", "level", ",", "reverse", "=", "True", ")", ":", "codes", ".", "pop", "(", "lev", ")", "shape", ".", "pop", "(", "lev", ")", "if", "sort_remaining", ":", "primary", "+=", "primary", "+", "tuple", "(", "codes", ")", "primshp", "+=", "primshp", "+", "tuple", "(", "shape", ")", "else", ":", "sortorder", "=", "level", "[", "0", "]", "indexer", "=", "indexer_from_factorized", "(", "primary", ",", "primshp", ",", "compress", "=", "False", ")", "if", "not", "ascending", ":", "indexer", "=", "indexer", "[", ":", ":", "-", "1", "]", "indexer", "=", 
"ensure_platform_int", "(", "indexer", ")", "new_codes", "=", "[", "level_codes", ".", "take", "(", "indexer", ")", "for", "level_codes", "in", "self", ".", "codes", "]", "new_index", "=", "MultiIndex", "(", "codes", "=", "new_codes", ",", "levels", "=", "self", ".", "levels", ",", "names", "=", "self", ".", "names", ",", "sortorder", "=", "sortorder", ",", "verify_integrity", "=", "False", ")", "return", "new_index", ",", "indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex._convert_listlike_indexer
Parameters ---------- keyarr : list-like Indexer to convert. Returns ------- tuple (indexer, keyarr) indexer is an ndarray or None if cannot convert keyarr are tuple-safe keys
pandas/core/indexes/multi.py
def _convert_listlike_indexer(self, keyarr, kind=None): """ Parameters ---------- keyarr : list-like Indexer to convert. Returns ------- tuple (indexer, keyarr) indexer is an ndarray or None if cannot convert keyarr are tuple-safe keys """ indexer, keyarr = super()._convert_listlike_indexer(keyarr, kind=kind) # are we indexing a specific level if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple): level = 0 _, indexer = self.reindex(keyarr, level=level) # take all if indexer is None: indexer = np.arange(len(self)) check = self.levels[0].get_indexer(keyarr) mask = check == -1 if mask.any(): raise KeyError('%s not in index' % keyarr[mask]) return indexer, keyarr
def _convert_listlike_indexer(self, keyarr, kind=None): """ Parameters ---------- keyarr : list-like Indexer to convert. Returns ------- tuple (indexer, keyarr) indexer is an ndarray or None if cannot convert keyarr are tuple-safe keys """ indexer, keyarr = super()._convert_listlike_indexer(keyarr, kind=kind) # are we indexing a specific level if indexer is None and len(keyarr) and not isinstance(keyarr[0], tuple): level = 0 _, indexer = self.reindex(keyarr, level=level) # take all if indexer is None: indexer = np.arange(len(self)) check = self.levels[0].get_indexer(keyarr) mask = check == -1 if mask.any(): raise KeyError('%s not in index' % keyarr[mask]) return indexer, keyarr
[ "Parameters", "----------", "keyarr", ":", "list", "-", "like", "Indexer", "to", "convert", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2117-L2147
[ "def", "_convert_listlike_indexer", "(", "self", ",", "keyarr", ",", "kind", "=", "None", ")", ":", "indexer", ",", "keyarr", "=", "super", "(", ")", ".", "_convert_listlike_indexer", "(", "keyarr", ",", "kind", "=", "kind", ")", "# are we indexing a specific level", "if", "indexer", "is", "None", "and", "len", "(", "keyarr", ")", "and", "not", "isinstance", "(", "keyarr", "[", "0", "]", ",", "tuple", ")", ":", "level", "=", "0", "_", ",", "indexer", "=", "self", ".", "reindex", "(", "keyarr", ",", "level", "=", "level", ")", "# take all", "if", "indexer", "is", "None", ":", "indexer", "=", "np", ".", "arange", "(", "len", "(", "self", ")", ")", "check", "=", "self", ".", "levels", "[", "0", "]", ".", "get_indexer", "(", "keyarr", ")", "mask", "=", "check", "==", "-", "1", "if", "mask", ".", "any", "(", ")", ":", "raise", "KeyError", "(", "'%s not in index'", "%", "keyarr", "[", "mask", "]", ")", "return", "indexer", ",", "keyarr" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.reindex
Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.MultiIndex Resulting index indexer : np.ndarray or None Indices of output values in original index.
pandas/core/indexes/multi.py
def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.MultiIndex Resulting index indexer : np.ndarray or None Indices of output values in original index. """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, 'names') if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') # GH7774: preserve dtype/tz if target is empty and not an Index. # target may be an iterator target = ibase._ensure_has_len(target) if len(target) == 0 and not isinstance(target, Index): idx = self.levels[level] attrs = idx._get_attributes_dict() attrs.pop('freq', None) # don't preserve freq target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs) else: target = ensure_index(target) target, indexer, _ = self._join_level(target, level, how='right', return_indexers=True, keep_order=False) else: target = ensure_index(target) if self.equals(target): indexer = None else: if self.is_unique: indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance) else: raise ValueError("cannot handle a non-unique multi-index!") if not isinstance(target, MultiIndex): if indexer is None: target = self elif (indexer >= 0).all(): target = self.take(indexer) else: # hopefully? target = MultiIndex.from_tuples(target) if (preserve_names and target.nlevels == self.nlevels and target.names != self.names): target = target.copy(deep=False) target.names = self.names return target, indexer
def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ Create index with target's values (move/add/delete values as necessary) Returns ------- new_index : pd.MultiIndex Resulting index indexer : np.ndarray or None Indices of output values in original index. """ # GH6552: preserve names when reindexing to non-named target # (i.e. neither Index nor Series). preserve_names = not hasattr(target, 'names') if level is not None: if method is not None: raise TypeError('Fill method not supported if level passed') # GH7774: preserve dtype/tz if target is empty and not an Index. # target may be an iterator target = ibase._ensure_has_len(target) if len(target) == 0 and not isinstance(target, Index): idx = self.levels[level] attrs = idx._get_attributes_dict() attrs.pop('freq', None) # don't preserve freq target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs) else: target = ensure_index(target) target, indexer, _ = self._join_level(target, level, how='right', return_indexers=True, keep_order=False) else: target = ensure_index(target) if self.equals(target): indexer = None else: if self.is_unique: indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance) else: raise ValueError("cannot handle a non-unique multi-index!") if not isinstance(target, MultiIndex): if indexer is None: target = self elif (indexer >= 0).all(): target = self.take(indexer) else: # hopefully? target = MultiIndex.from_tuples(target) if (preserve_names and target.nlevels == self.nlevels and target.names != self.names): target = target.copy(deep=False) target.names = self.names return target, indexer
[ "Create", "index", "with", "target", "s", "values", "(", "move", "/", "add", "/", "delete", "values", "as", "necessary", ")" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2191-L2252
[ "def", "reindex", "(", "self", ",", "target", ",", "method", "=", "None", ",", "level", "=", "None", ",", "limit", "=", "None", ",", "tolerance", "=", "None", ")", ":", "# GH6552: preserve names when reindexing to non-named target", "# (i.e. neither Index nor Series).", "preserve_names", "=", "not", "hasattr", "(", "target", ",", "'names'", ")", "if", "level", "is", "not", "None", ":", "if", "method", "is", "not", "None", ":", "raise", "TypeError", "(", "'Fill method not supported if level passed'", ")", "# GH7774: preserve dtype/tz if target is empty and not an Index.", "# target may be an iterator", "target", "=", "ibase", ".", "_ensure_has_len", "(", "target", ")", "if", "len", "(", "target", ")", "==", "0", "and", "not", "isinstance", "(", "target", ",", "Index", ")", ":", "idx", "=", "self", ".", "levels", "[", "level", "]", "attrs", "=", "idx", ".", "_get_attributes_dict", "(", ")", "attrs", ".", "pop", "(", "'freq'", ",", "None", ")", "# don't preserve freq", "target", "=", "type", "(", "idx", ")", ".", "_simple_new", "(", "np", ".", "empty", "(", "0", ",", "dtype", "=", "idx", ".", "dtype", ")", ",", "*", "*", "attrs", ")", "else", ":", "target", "=", "ensure_index", "(", "target", ")", "target", ",", "indexer", ",", "_", "=", "self", ".", "_join_level", "(", "target", ",", "level", ",", "how", "=", "'right'", ",", "return_indexers", "=", "True", ",", "keep_order", "=", "False", ")", "else", ":", "target", "=", "ensure_index", "(", "target", ")", "if", "self", ".", "equals", "(", "target", ")", ":", "indexer", "=", "None", "else", ":", "if", "self", ".", "is_unique", ":", "indexer", "=", "self", ".", "get_indexer", "(", "target", ",", "method", "=", "method", ",", "limit", "=", "limit", ",", "tolerance", "=", "tolerance", ")", "else", ":", "raise", "ValueError", "(", "\"cannot handle a non-unique multi-index!\"", ")", "if", "not", "isinstance", "(", "target", ",", "MultiIndex", ")", ":", "if", "indexer", "is", "None", ":", "target", "=", "self", "elif", 
"(", "indexer", ">=", "0", ")", ".", "all", "(", ")", ":", "target", "=", "self", ".", "take", "(", "indexer", ")", "else", ":", "# hopefully?", "target", "=", "MultiIndex", ".", "from_tuples", "(", "target", ")", "if", "(", "preserve_names", "and", "target", ".", "nlevels", "==", "self", ".", "nlevels", "and", "target", ".", "names", "!=", "self", ".", "names", ")", ":", "target", "=", "target", ".", "copy", "(", "deep", "=", "False", ")", "target", ".", "names", "=", "self", ".", "names", "return", "target", ",", "indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.slice_locs
For an ordered MultiIndex, compute the slice locations for input labels. The input labels can be tuples representing partial levels, e.g. for a MultiIndex with 3 levels, you can pass a single value (corresponding to the first level), or a 1-, 2-, or 3-tuple. Parameters ---------- start : label or tuple, default None If None, defaults to the beginning end : label or tuple If None, defaults to the end step : int or None Slice step kind : string, optional, defaults None Returns ------- (start, end) : (int, int) Notes ----- This method only works if the MultiIndex is properly lexsorted. So, if only the first 2 levels of a 3-level MultiIndex are lexsorted, you can only pass two levels to ``.slice_locs``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')], ... names=['A', 'B']) Get the slice locations from the beginning of 'b' in the first level until the end of the multiindex: >>> mi.slice_locs(start='b') (1, 4) Like above, but stop at the end of 'b' in the first level and 'f' in the second level: >>> mi.slice_locs(start='b', end=('b', 'f')) (1, 3) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such.
pandas/core/indexes/multi.py
def slice_locs(self, start=None, end=None, step=None, kind=None): """ For an ordered MultiIndex, compute the slice locations for input labels. The input labels can be tuples representing partial levels, e.g. for a MultiIndex with 3 levels, you can pass a single value (corresponding to the first level), or a 1-, 2-, or 3-tuple. Parameters ---------- start : label or tuple, default None If None, defaults to the beginning end : label or tuple If None, defaults to the end step : int or None Slice step kind : string, optional, defaults None Returns ------- (start, end) : (int, int) Notes ----- This method only works if the MultiIndex is properly lexsorted. So, if only the first 2 levels of a 3-level MultiIndex are lexsorted, you can only pass two levels to ``.slice_locs``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')], ... names=['A', 'B']) Get the slice locations from the beginning of 'b' in the first level until the end of the multiindex: >>> mi.slice_locs(start='b') (1, 4) Like above, but stop at the end of 'b' in the first level and 'f' in the second level: >>> mi.slice_locs(start='b', end=('b', 'f')) (1, 3) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ # This function adds nothing to its parent implementation (the magic # happens in get_slice_bound method), but it adds meaningful doc. return super().slice_locs(start, end, step, kind=kind)
def slice_locs(self, start=None, end=None, step=None, kind=None): """ For an ordered MultiIndex, compute the slice locations for input labels. The input labels can be tuples representing partial levels, e.g. for a MultiIndex with 3 levels, you can pass a single value (corresponding to the first level), or a 1-, 2-, or 3-tuple. Parameters ---------- start : label or tuple, default None If None, defaults to the beginning end : label or tuple If None, defaults to the end step : int or None Slice step kind : string, optional, defaults None Returns ------- (start, end) : (int, int) Notes ----- This method only works if the MultiIndex is properly lexsorted. So, if only the first 2 levels of a 3-level MultiIndex are lexsorted, you can only pass two levels to ``.slice_locs``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')], ... names=['A', 'B']) Get the slice locations from the beginning of 'b' in the first level until the end of the multiindex: >>> mi.slice_locs(start='b') (1, 4) Like above, but stop at the end of 'b' in the first level and 'f' in the second level: >>> mi.slice_locs(start='b', end=('b', 'f')) (1, 3) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ # This function adds nothing to its parent implementation (the magic # happens in get_slice_bound method), but it adds meaningful doc. return super().slice_locs(start, end, step, kind=kind)
[ "For", "an", "ordered", "MultiIndex", "compute", "the", "slice", "locations", "for", "input", "labels", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2260-L2314
[ "def", "slice_locs", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "step", "=", "None", ",", "kind", "=", "None", ")", ":", "# This function adds nothing to its parent implementation (the magic", "# happens in get_slice_bound method), but it adds meaningful doc.", "return", "super", "(", ")", ".", "slice_locs", "(", "start", ",", "end", ",", "step", ",", "kind", "=", "kind", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.get_loc
Get location for a label or a tuple of labels as an integer, slice or boolean mask. Parameters ---------- key : label or tuple of labels (one for each level) method : None Returns ------- loc : int, slice object or boolean mask If the key is past the lexsort depth, the return may be a boolean mask array, otherwise it is always a slice or int. Examples --------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_loc('b') slice(1, 3, None) >>> mi.get_loc(('b', 'e')) 1 Notes ------ The key cannot be a slice, list of same-level labels, a boolean mask, or a sequence of such. If you want to use those, use :meth:`MultiIndex.get_locs` instead. See Also -------- Index.get_loc : The get_loc method for (single-level) index. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such.
pandas/core/indexes/multi.py
def get_loc(self, key, method=None): """ Get location for a label or a tuple of labels as an integer, slice or boolean mask. Parameters ---------- key : label or tuple of labels (one for each level) method : None Returns ------- loc : int, slice object or boolean mask If the key is past the lexsort depth, the return may be a boolean mask array, otherwise it is always a slice or int. Examples --------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_loc('b') slice(1, 3, None) >>> mi.get_loc(('b', 'e')) 1 Notes ------ The key cannot be a slice, list of same-level labels, a boolean mask, or a sequence of such. If you want to use those, use :meth:`MultiIndex.get_locs` instead. See Also -------- Index.get_loc : The get_loc method for (single-level) index. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ if method is not None: raise NotImplementedError('only the default get_loc method is ' 'currently supported for MultiIndex') def _maybe_to_slice(loc): """convert integer indexer to boolean mask or slice if possible""" if not isinstance(loc, np.ndarray) or loc.dtype != 'int64': return loc loc = lib.maybe_indices_to_slice(loc, len(self)) if isinstance(loc, slice): return loc mask = np.empty(len(self), dtype='bool') mask.fill(False) mask[loc] = True return mask if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) keylen = len(key) if self.nlevels < keylen: raise KeyError('Key length ({0}) exceeds index depth ({1})' ''.format(keylen, self.nlevels)) if keylen == self.nlevels and self.is_unique: return self._engine.get_loc(key) # -- partial selection or non-unique index # break the key into 2 parts based on the lexsort_depth of the index; # the first part returns a continuous slice of the index; the 2nd part # needs linear search within the slice i = self.lexsort_depth lead_key, 
follow_key = key[:i], key[i:] start, stop = (self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))) if start == stop: raise KeyError(key) if not follow_key: return slice(start, stop) warnings.warn('indexing past lexsort depth may impact performance.', PerformanceWarning, stacklevel=10) loc = np.arange(start, stop, dtype='int64') for i, k in enumerate(follow_key, len(lead_key)): mask = self.codes[i][loc] == self.levels[i].get_loc(k) if not mask.all(): loc = loc[mask] if not len(loc): raise KeyError(key) return (_maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop))
def get_loc(self, key, method=None): """ Get location for a label or a tuple of labels as an integer, slice or boolean mask. Parameters ---------- key : label or tuple of labels (one for each level) method : None Returns ------- loc : int, slice object or boolean mask If the key is past the lexsort depth, the return may be a boolean mask array, otherwise it is always a slice or int. Examples --------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_loc('b') slice(1, 3, None) >>> mi.get_loc(('b', 'e')) 1 Notes ------ The key cannot be a slice, list of same-level labels, a boolean mask, or a sequence of such. If you want to use those, use :meth:`MultiIndex.get_locs` instead. See Also -------- Index.get_loc : The get_loc method for (single-level) index. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """ if method is not None: raise NotImplementedError('only the default get_loc method is ' 'currently supported for MultiIndex') def _maybe_to_slice(loc): """convert integer indexer to boolean mask or slice if possible""" if not isinstance(loc, np.ndarray) or loc.dtype != 'int64': return loc loc = lib.maybe_indices_to_slice(loc, len(self)) if isinstance(loc, slice): return loc mask = np.empty(len(self), dtype='bool') mask.fill(False) mask[loc] = True return mask if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) keylen = len(key) if self.nlevels < keylen: raise KeyError('Key length ({0}) exceeds index depth ({1})' ''.format(keylen, self.nlevels)) if keylen == self.nlevels and self.is_unique: return self._engine.get_loc(key) # -- partial selection or non-unique index # break the key into 2 parts based on the lexsort_depth of the index; # the first part returns a continuous slice of the index; the 2nd part # needs linear search within the slice i = self.lexsort_depth lead_key, 
follow_key = key[:i], key[i:] start, stop = (self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))) if start == stop: raise KeyError(key) if not follow_key: return slice(start, stop) warnings.warn('indexing past lexsort depth may impact performance.', PerformanceWarning, stacklevel=10) loc = np.arange(start, stop, dtype='int64') for i, k in enumerate(follow_key, len(lead_key)): mask = self.codes[i][loc] == self.levels[i].get_loc(k) if not mask.all(): loc = loc[mask] if not len(loc): raise KeyError(key) return (_maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop))
[ "Get", "location", "for", "a", "label", "or", "a", "tuple", "of", "labels", "as", "an", "integer", "slice", "or", "boolean", "mask", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2347-L2445
[ "def", "get_loc", "(", "self", ",", "key", ",", "method", "=", "None", ")", ":", "if", "method", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "'only the default get_loc method is '", "'currently supported for MultiIndex'", ")", "def", "_maybe_to_slice", "(", "loc", ")", ":", "\"\"\"convert integer indexer to boolean mask or slice if possible\"\"\"", "if", "not", "isinstance", "(", "loc", ",", "np", ".", "ndarray", ")", "or", "loc", ".", "dtype", "!=", "'int64'", ":", "return", "loc", "loc", "=", "lib", ".", "maybe_indices_to_slice", "(", "loc", ",", "len", "(", "self", ")", ")", "if", "isinstance", "(", "loc", ",", "slice", ")", ":", "return", "loc", "mask", "=", "np", ".", "empty", "(", "len", "(", "self", ")", ",", "dtype", "=", "'bool'", ")", "mask", ".", "fill", "(", "False", ")", "mask", "[", "loc", "]", "=", "True", "return", "mask", "if", "not", "isinstance", "(", "key", ",", "tuple", ")", ":", "loc", "=", "self", ".", "_get_level_indexer", "(", "key", ",", "level", "=", "0", ")", "return", "_maybe_to_slice", "(", "loc", ")", "keylen", "=", "len", "(", "key", ")", "if", "self", ".", "nlevels", "<", "keylen", ":", "raise", "KeyError", "(", "'Key length ({0}) exceeds index depth ({1})'", "''", ".", "format", "(", "keylen", ",", "self", ".", "nlevels", ")", ")", "if", "keylen", "==", "self", ".", "nlevels", "and", "self", ".", "is_unique", ":", "return", "self", ".", "_engine", ".", "get_loc", "(", "key", ")", "# -- partial selection or non-unique index", "# break the key into 2 parts based on the lexsort_depth of the index;", "# the first part returns a continuous slice of the index; the 2nd part", "# needs linear search within the slice", "i", "=", "self", ".", "lexsort_depth", "lead_key", ",", "follow_key", "=", "key", "[", ":", "i", "]", ",", "key", "[", "i", ":", "]", "start", ",", "stop", "=", "(", "self", ".", "slice_locs", "(", "lead_key", ",", "lead_key", ")", "if", "lead_key", "else", "(", "0", ",", "len", "(", "self", ")", ")", 
")", "if", "start", "==", "stop", ":", "raise", "KeyError", "(", "key", ")", "if", "not", "follow_key", ":", "return", "slice", "(", "start", ",", "stop", ")", "warnings", ".", "warn", "(", "'indexing past lexsort depth may impact performance.'", ",", "PerformanceWarning", ",", "stacklevel", "=", "10", ")", "loc", "=", "np", ".", "arange", "(", "start", ",", "stop", ",", "dtype", "=", "'int64'", ")", "for", "i", ",", "k", "in", "enumerate", "(", "follow_key", ",", "len", "(", "lead_key", ")", ")", ":", "mask", "=", "self", ".", "codes", "[", "i", "]", "[", "loc", "]", "==", "self", ".", "levels", "[", "i", "]", ".", "get_loc", "(", "k", ")", "if", "not", "mask", ".", "all", "(", ")", ":", "loc", "=", "loc", "[", "mask", "]", "if", "not", "len", "(", "loc", ")", ":", "raise", "KeyError", "(", "key", ")", "return", "(", "_maybe_to_slice", "(", "loc", ")", "if", "len", "(", "loc", ")", "!=", "stop", "-", "start", "else", "slice", "(", "start", ",", "stop", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.get_loc_level
Get both the location for the requested label(s) and the resulting sliced index. Parameters ---------- key : label or sequence of labels level : int/level name or list thereof, optional drop_level : bool, default True if ``False``, the resulting index will not drop any level. Returns ------- loc : A 2-tuple where the elements are: Element 0: int, slice object or boolean array Element 1: The resulting sliced multiindex/index. If the key contains all levels, this will be ``None``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')], ... names=['A', 'B']) >>> mi.get_loc_level('b') (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B')) >>> mi.get_loc_level('e', level='B') (array([False, True, False], dtype=bool), Index(['b'], dtype='object', name='A')) >>> mi.get_loc_level(['b', 'e']) (1, None) See Also --------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such.
pandas/core/indexes/multi.py
def get_loc_level(self, key, level=0, drop_level=True): """ Get both the location for the requested label(s) and the resulting sliced index. Parameters ---------- key : label or sequence of labels level : int/level name or list thereof, optional drop_level : bool, default True if ``False``, the resulting index will not drop any level. Returns ------- loc : A 2-tuple where the elements are: Element 0: int, slice object or boolean array Element 1: The resulting sliced multiindex/index. If the key contains all levels, this will be ``None``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')], ... names=['A', 'B']) >>> mi.get_loc_level('b') (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B')) >>> mi.get_loc_level('e', level='B') (array([False, True, False], dtype=bool), Index(['b'], dtype='object', name='A')) >>> mi.get_loc_level(['b', 'e']) (1, None) See Also --------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. 
""" def maybe_droplevels(indexer, levels, drop_level): if not drop_level: return self[indexer] # kludgearound orig_index = new_index = self[indexer] levels = [self._get_level_number(i) for i in levels] for i in sorted(levels, reverse=True): try: new_index = new_index.droplevel(i) except ValueError: # no dropping here return orig_index return new_index if isinstance(level, (tuple, list)): if len(key) != len(level): raise AssertionError('Key for location must have same ' 'length as number of levels') result = None for lev, k in zip(level, key): loc, new_index = self.get_loc_level(k, level=lev) if isinstance(loc, slice): mask = np.zeros(len(self), dtype=bool) mask[loc] = True loc = mask result = loc if result is None else result & loc return result, maybe_droplevels(result, level, drop_level) level = self._get_level_number(level) # kludge for #1796 if isinstance(key, list): key = tuple(key) if isinstance(key, tuple) and level == 0: try: if key in self.levels[0]: indexer = self._get_level_indexer(key, level=level) new_index = maybe_droplevels(indexer, [0], drop_level) return indexer, new_index except TypeError: pass if not any(isinstance(k, slice) for k in key): # partial selection # optionally get indexer to avoid re-calculation def partial_selection(key, indexer=None): if indexer is None: indexer = self.get_loc(key) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return indexer, maybe_droplevels(indexer, ilevels, drop_level) if len(key) == self.nlevels and self.is_unique: # Complete key in unique index -> standard get_loc return (self._engine.get_loc(key), None) else: return partial_selection(key) else: indexer = None for i, k in enumerate(key): if not isinstance(k, slice): k = self._get_level_indexer(k, level=i) if isinstance(k, slice): # everything if k.start == 0 and k.stop == len(self): k = slice(None, None) else: k_index = k if isinstance(k, slice): if k == slice(None, None): continue else: raise TypeError(key) if indexer is None: indexer = 
k_index else: # pragma: no cover indexer &= k_index if indexer is None: indexer = slice(None, None) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return indexer, maybe_droplevels(indexer, ilevels, drop_level) else: indexer = self._get_level_indexer(key, level=level) return indexer, maybe_droplevels(indexer, [level], drop_level)
def get_loc_level(self, key, level=0, drop_level=True): """ Get both the location for the requested label(s) and the resulting sliced index. Parameters ---------- key : label or sequence of labels level : int/level name or list thereof, optional drop_level : bool, default True if ``False``, the resulting index will not drop any level. Returns ------- loc : A 2-tuple where the elements are: Element 0: int, slice object or boolean array Element 1: The resulting sliced multiindex/index. If the key contains all levels, this will be ``None``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')], ... names=['A', 'B']) >>> mi.get_loc_level('b') (slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B')) >>> mi.get_loc_level('e', level='B') (array([False, True, False], dtype=bool), Index(['b'], dtype='object', name='A')) >>> mi.get_loc_level(['b', 'e']) (1, None) See Also --------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. 
""" def maybe_droplevels(indexer, levels, drop_level): if not drop_level: return self[indexer] # kludgearound orig_index = new_index = self[indexer] levels = [self._get_level_number(i) for i in levels] for i in sorted(levels, reverse=True): try: new_index = new_index.droplevel(i) except ValueError: # no dropping here return orig_index return new_index if isinstance(level, (tuple, list)): if len(key) != len(level): raise AssertionError('Key for location must have same ' 'length as number of levels') result = None for lev, k in zip(level, key): loc, new_index = self.get_loc_level(k, level=lev) if isinstance(loc, slice): mask = np.zeros(len(self), dtype=bool) mask[loc] = True loc = mask result = loc if result is None else result & loc return result, maybe_droplevels(result, level, drop_level) level = self._get_level_number(level) # kludge for #1796 if isinstance(key, list): key = tuple(key) if isinstance(key, tuple) and level == 0: try: if key in self.levels[0]: indexer = self._get_level_indexer(key, level=level) new_index = maybe_droplevels(indexer, [0], drop_level) return indexer, new_index except TypeError: pass if not any(isinstance(k, slice) for k in key): # partial selection # optionally get indexer to avoid re-calculation def partial_selection(key, indexer=None): if indexer is None: indexer = self.get_loc(key) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return indexer, maybe_droplevels(indexer, ilevels, drop_level) if len(key) == self.nlevels and self.is_unique: # Complete key in unique index -> standard get_loc return (self._engine.get_loc(key), None) else: return partial_selection(key) else: indexer = None for i, k in enumerate(key): if not isinstance(k, slice): k = self._get_level_indexer(k, level=i) if isinstance(k, slice): # everything if k.start == 0 and k.stop == len(self): k = slice(None, None) else: k_index = k if isinstance(k, slice): if k == slice(None, None): continue else: raise TypeError(key) if indexer is None: indexer = 
k_index else: # pragma: no cover indexer &= k_index if indexer is None: indexer = slice(None, None) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return indexer, maybe_droplevels(indexer, ilevels, drop_level) else: indexer = self._get_level_indexer(key, level=level) return indexer, maybe_droplevels(indexer, [level], drop_level)
[ "Get", "both", "the", "location", "for", "the", "requested", "label", "(", "s", ")", "and", "the", "resulting", "sliced", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2447-L2581
[ "def", "get_loc_level", "(", "self", ",", "key", ",", "level", "=", "0", ",", "drop_level", "=", "True", ")", ":", "def", "maybe_droplevels", "(", "indexer", ",", "levels", ",", "drop_level", ")", ":", "if", "not", "drop_level", ":", "return", "self", "[", "indexer", "]", "# kludgearound", "orig_index", "=", "new_index", "=", "self", "[", "indexer", "]", "levels", "=", "[", "self", ".", "_get_level_number", "(", "i", ")", "for", "i", "in", "levels", "]", "for", "i", "in", "sorted", "(", "levels", ",", "reverse", "=", "True", ")", ":", "try", ":", "new_index", "=", "new_index", ".", "droplevel", "(", "i", ")", "except", "ValueError", ":", "# no dropping here", "return", "orig_index", "return", "new_index", "if", "isinstance", "(", "level", ",", "(", "tuple", ",", "list", ")", ")", ":", "if", "len", "(", "key", ")", "!=", "len", "(", "level", ")", ":", "raise", "AssertionError", "(", "'Key for location must have same '", "'length as number of levels'", ")", "result", "=", "None", "for", "lev", ",", "k", "in", "zip", "(", "level", ",", "key", ")", ":", "loc", ",", "new_index", "=", "self", ".", "get_loc_level", "(", "k", ",", "level", "=", "lev", ")", "if", "isinstance", "(", "loc", ",", "slice", ")", ":", "mask", "=", "np", ".", "zeros", "(", "len", "(", "self", ")", ",", "dtype", "=", "bool", ")", "mask", "[", "loc", "]", "=", "True", "loc", "=", "mask", "result", "=", "loc", "if", "result", "is", "None", "else", "result", "&", "loc", "return", "result", ",", "maybe_droplevels", "(", "result", ",", "level", ",", "drop_level", ")", "level", "=", "self", ".", "_get_level_number", "(", "level", ")", "# kludge for #1796", "if", "isinstance", "(", "key", ",", "list", ")", ":", "key", "=", "tuple", "(", "key", ")", "if", "isinstance", "(", "key", ",", "tuple", ")", "and", "level", "==", "0", ":", "try", ":", "if", "key", "in", "self", ".", "levels", "[", "0", "]", ":", "indexer", "=", "self", ".", "_get_level_indexer", "(", "key", ",", "level", "=", "level", ")", 
"new_index", "=", "maybe_droplevels", "(", "indexer", ",", "[", "0", "]", ",", "drop_level", ")", "return", "indexer", ",", "new_index", "except", "TypeError", ":", "pass", "if", "not", "any", "(", "isinstance", "(", "k", ",", "slice", ")", "for", "k", "in", "key", ")", ":", "# partial selection", "# optionally get indexer to avoid re-calculation", "def", "partial_selection", "(", "key", ",", "indexer", "=", "None", ")", ":", "if", "indexer", "is", "None", ":", "indexer", "=", "self", ".", "get_loc", "(", "key", ")", "ilevels", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "key", ")", ")", "if", "key", "[", "i", "]", "!=", "slice", "(", "None", ",", "None", ")", "]", "return", "indexer", ",", "maybe_droplevels", "(", "indexer", ",", "ilevels", ",", "drop_level", ")", "if", "len", "(", "key", ")", "==", "self", ".", "nlevels", "and", "self", ".", "is_unique", ":", "# Complete key in unique index -> standard get_loc", "return", "(", "self", ".", "_engine", ".", "get_loc", "(", "key", ")", ",", "None", ")", "else", ":", "return", "partial_selection", "(", "key", ")", "else", ":", "indexer", "=", "None", "for", "i", ",", "k", "in", "enumerate", "(", "key", ")", ":", "if", "not", "isinstance", "(", "k", ",", "slice", ")", ":", "k", "=", "self", ".", "_get_level_indexer", "(", "k", ",", "level", "=", "i", ")", "if", "isinstance", "(", "k", ",", "slice", ")", ":", "# everything", "if", "k", ".", "start", "==", "0", "and", "k", ".", "stop", "==", "len", "(", "self", ")", ":", "k", "=", "slice", "(", "None", ",", "None", ")", "else", ":", "k_index", "=", "k", "if", "isinstance", "(", "k", ",", "slice", ")", ":", "if", "k", "==", "slice", "(", "None", ",", "None", ")", ":", "continue", "else", ":", "raise", "TypeError", "(", "key", ")", "if", "indexer", "is", "None", ":", "indexer", "=", "k_index", "else", ":", "# pragma: no cover", "indexer", "&=", "k_index", "if", "indexer", "is", "None", ":", "indexer", "=", "slice", "(", "None", ",", "None", ")", "ilevels", 
"=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "key", ")", ")", "if", "key", "[", "i", "]", "!=", "slice", "(", "None", ",", "None", ")", "]", "return", "indexer", ",", "maybe_droplevels", "(", "indexer", ",", "ilevels", ",", "drop_level", ")", "else", ":", "indexer", "=", "self", ".", "_get_level_indexer", "(", "key", ",", "level", "=", "level", ")", "return", "indexer", ",", "maybe_droplevels", "(", "indexer", ",", "[", "level", "]", ",", "drop_level", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.get_locs
Get location for a given label/slice/list/mask or a sequence of such as an array of integers. Parameters ---------- seq : label/slice/list/mask or a sequence of such You should use one of the above for each level. If a level should not be used, set it to ``slice(None)``. Returns ------- locs : array of integers suitable for passing to iloc Examples --------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_locs('b') array([1, 2], dtype=int64) >>> mi.get_locs([slice(None), ['e', 'f']]) array([1, 2], dtype=int64) >>> mi.get_locs([[True, False, True], slice('e', 'f')]) array([2], dtype=int64) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s).
pandas/core/indexes/multi.py
def get_locs(self, seq): """ Get location for a given label/slice/list/mask or a sequence of such as an array of integers. Parameters ---------- seq : label/slice/list/mask or a sequence of such You should use one of the above for each level. If a level should not be used, set it to ``slice(None)``. Returns ------- locs : array of integers suitable for passing to iloc Examples --------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_locs('b') array([1, 2], dtype=int64) >>> mi.get_locs([slice(None), ['e', 'f']]) array([1, 2], dtype=int64) >>> mi.get_locs([[True, False, True], slice('e', 'f')]) array([2], dtype=int64) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). """ from .numeric import Int64Index # must be lexsorted to at least as many levels true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] if true_slices and true_slices[-1] >= self.lexsort_depth: raise UnsortedIndexError('MultiIndex slicing requires the index ' 'to be lexsorted: slicing on levels {0}, ' 'lexsort depth {1}' .format(true_slices, self.lexsort_depth)) # indexer # this is the list of all values that we want to select n = len(self) indexer = None def _convert_to_indexer(r): # return an indexer if isinstance(r, slice): m = np.zeros(n, dtype=bool) m[r] = True r = m.nonzero()[0] elif com.is_bool_indexer(r): if len(r) != n: raise ValueError("cannot index with a boolean indexer " "that is not the same length as the " "index") r = r.nonzero()[0] return Int64Index(r) def _update_indexer(idxr, indexer=indexer): if indexer is None: indexer = Index(np.arange(n)) if idxr is None: return indexer return indexer & idxr for i, k in enumerate(seq): if com.is_bool_indexer(k): # a boolean indexer, must be the same length! 
k = np.asarray(k) indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer) elif is_list_like(k): # a collection of labels to include from this level (these # are or'd) indexers = None for x in k: try: idxrs = _convert_to_indexer( self._get_level_indexer(x, level=i, indexer=indexer)) indexers = (idxrs if indexers is None else indexers | idxrs) except KeyError: # ignore not founds continue if indexers is not None: indexer = _update_indexer(indexers, indexer=indexer) else: # no matches we are done return Int64Index([])._ndarray_values elif com.is_null_slice(k): # empty slice indexer = _update_indexer(None, indexer=indexer) elif isinstance(k, slice): # a slice, include BOTH of the labels indexer = _update_indexer(_convert_to_indexer( self._get_level_indexer(k, level=i, indexer=indexer)), indexer=indexer) else: # a single label indexer = _update_indexer(_convert_to_indexer( self.get_loc_level(k, level=i, drop_level=False)[0]), indexer=indexer) # empty indexer if indexer is None: return Int64Index([])._ndarray_values return indexer._ndarray_values
def get_locs(self, seq): """ Get location for a given label/slice/list/mask or a sequence of such as an array of integers. Parameters ---------- seq : label/slice/list/mask or a sequence of such You should use one of the above for each level. If a level should not be used, set it to ``slice(None)``. Returns ------- locs : array of integers suitable for passing to iloc Examples --------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_locs('b') array([1, 2], dtype=int64) >>> mi.get_locs([slice(None), ['e', 'f']]) array([1, 2], dtype=int64) >>> mi.get_locs([[True, False, True], slice('e', 'f')]) array([2], dtype=int64) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). """ from .numeric import Int64Index # must be lexsorted to at least as many levels true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] if true_slices and true_slices[-1] >= self.lexsort_depth: raise UnsortedIndexError('MultiIndex slicing requires the index ' 'to be lexsorted: slicing on levels {0}, ' 'lexsort depth {1}' .format(true_slices, self.lexsort_depth)) # indexer # this is the list of all values that we want to select n = len(self) indexer = None def _convert_to_indexer(r): # return an indexer if isinstance(r, slice): m = np.zeros(n, dtype=bool) m[r] = True r = m.nonzero()[0] elif com.is_bool_indexer(r): if len(r) != n: raise ValueError("cannot index with a boolean indexer " "that is not the same length as the " "index") r = r.nonzero()[0] return Int64Index(r) def _update_indexer(idxr, indexer=indexer): if indexer is None: indexer = Index(np.arange(n)) if idxr is None: return indexer return indexer & idxr for i, k in enumerate(seq): if com.is_bool_indexer(k): # a boolean indexer, must be the same length! 
k = np.asarray(k) indexer = _update_indexer(_convert_to_indexer(k), indexer=indexer) elif is_list_like(k): # a collection of labels to include from this level (these # are or'd) indexers = None for x in k: try: idxrs = _convert_to_indexer( self._get_level_indexer(x, level=i, indexer=indexer)) indexers = (idxrs if indexers is None else indexers | idxrs) except KeyError: # ignore not founds continue if indexers is not None: indexer = _update_indexer(indexers, indexer=indexer) else: # no matches we are done return Int64Index([])._ndarray_values elif com.is_null_slice(k): # empty slice indexer = _update_indexer(None, indexer=indexer) elif isinstance(k, slice): # a slice, include BOTH of the labels indexer = _update_indexer(_convert_to_indexer( self._get_level_indexer(k, level=i, indexer=indexer)), indexer=indexer) else: # a single label indexer = _update_indexer(_convert_to_indexer( self.get_loc_level(k, level=i, drop_level=False)[0]), indexer=indexer) # empty indexer if indexer is None: return Int64Index([])._ndarray_values return indexer._ndarray_values
[ "Get", "location", "for", "a", "given", "label", "/", "slice", "/", "list", "/", "mask", "or", "a", "sequence", "of", "such", "as", "an", "array", "of", "integers", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2678-L2796
[ "def", "get_locs", "(", "self", ",", "seq", ")", ":", "from", ".", "numeric", "import", "Int64Index", "# must be lexsorted to at least as many levels", "true_slices", "=", "[", "i", "for", "(", "i", ",", "s", ")", "in", "enumerate", "(", "com", ".", "is_true_slices", "(", "seq", ")", ")", "if", "s", "]", "if", "true_slices", "and", "true_slices", "[", "-", "1", "]", ">=", "self", ".", "lexsort_depth", ":", "raise", "UnsortedIndexError", "(", "'MultiIndex slicing requires the index '", "'to be lexsorted: slicing on levels {0}, '", "'lexsort depth {1}'", ".", "format", "(", "true_slices", ",", "self", ".", "lexsort_depth", ")", ")", "# indexer", "# this is the list of all values that we want to select", "n", "=", "len", "(", "self", ")", "indexer", "=", "None", "def", "_convert_to_indexer", "(", "r", ")", ":", "# return an indexer", "if", "isinstance", "(", "r", ",", "slice", ")", ":", "m", "=", "np", ".", "zeros", "(", "n", ",", "dtype", "=", "bool", ")", "m", "[", "r", "]", "=", "True", "r", "=", "m", ".", "nonzero", "(", ")", "[", "0", "]", "elif", "com", ".", "is_bool_indexer", "(", "r", ")", ":", "if", "len", "(", "r", ")", "!=", "n", ":", "raise", "ValueError", "(", "\"cannot index with a boolean indexer \"", "\"that is not the same length as the \"", "\"index\"", ")", "r", "=", "r", ".", "nonzero", "(", ")", "[", "0", "]", "return", "Int64Index", "(", "r", ")", "def", "_update_indexer", "(", "idxr", ",", "indexer", "=", "indexer", ")", ":", "if", "indexer", "is", "None", ":", "indexer", "=", "Index", "(", "np", ".", "arange", "(", "n", ")", ")", "if", "idxr", "is", "None", ":", "return", "indexer", "return", "indexer", "&", "idxr", "for", "i", ",", "k", "in", "enumerate", "(", "seq", ")", ":", "if", "com", ".", "is_bool_indexer", "(", "k", ")", ":", "# a boolean indexer, must be the same length!", "k", "=", "np", ".", "asarray", "(", "k", ")", "indexer", "=", "_update_indexer", "(", "_convert_to_indexer", "(", "k", ")", ",", "indexer", "=", "indexer", ")", 
"elif", "is_list_like", "(", "k", ")", ":", "# a collection of labels to include from this level (these", "# are or'd)", "indexers", "=", "None", "for", "x", "in", "k", ":", "try", ":", "idxrs", "=", "_convert_to_indexer", "(", "self", ".", "_get_level_indexer", "(", "x", ",", "level", "=", "i", ",", "indexer", "=", "indexer", ")", ")", "indexers", "=", "(", "idxrs", "if", "indexers", "is", "None", "else", "indexers", "|", "idxrs", ")", "except", "KeyError", ":", "# ignore not founds", "continue", "if", "indexers", "is", "not", "None", ":", "indexer", "=", "_update_indexer", "(", "indexers", ",", "indexer", "=", "indexer", ")", "else", ":", "# no matches we are done", "return", "Int64Index", "(", "[", "]", ")", ".", "_ndarray_values", "elif", "com", ".", "is_null_slice", "(", "k", ")", ":", "# empty slice", "indexer", "=", "_update_indexer", "(", "None", ",", "indexer", "=", "indexer", ")", "elif", "isinstance", "(", "k", ",", "slice", ")", ":", "# a slice, include BOTH of the labels", "indexer", "=", "_update_indexer", "(", "_convert_to_indexer", "(", "self", ".", "_get_level_indexer", "(", "k", ",", "level", "=", "i", ",", "indexer", "=", "indexer", ")", ")", ",", "indexer", "=", "indexer", ")", "else", ":", "# a single label", "indexer", "=", "_update_indexer", "(", "_convert_to_indexer", "(", "self", ".", "get_loc_level", "(", "k", ",", "level", "=", "i", ",", "drop_level", "=", "False", ")", "[", "0", "]", ")", ",", "indexer", "=", "indexer", ")", "# empty indexer", "if", "indexer", "is", "None", ":", "return", "Int64Index", "(", "[", "]", ")", ".", "_ndarray_values", "return", "indexer", ".", "_ndarray_values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.truncate
Slice index between two labels / tuples, return new MultiIndex Parameters ---------- before : label or tuple, can be partial. Default None None defaults to start after : label or tuple, can be partial. Default None None defaults to end Returns ------- truncated : MultiIndex
pandas/core/indexes/multi.py
def truncate(self, before=None, after=None): """ Slice index between two labels / tuples, return new MultiIndex Parameters ---------- before : label or tuple, can be partial. Default None None defaults to start after : label or tuple, can be partial. Default None None defaults to end Returns ------- truncated : MultiIndex """ if after and before and after < before: raise ValueError('after < before') i, j = self.levels[0].slice_locs(before, after) left, right = self.slice_locs(before, after) new_levels = list(self.levels) new_levels[0] = new_levels[0][i:j] new_codes = [level_codes[left:right] for level_codes in self.codes] new_codes[0] = new_codes[0] - i return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False)
def truncate(self, before=None, after=None): """ Slice index between two labels / tuples, return new MultiIndex Parameters ---------- before : label or tuple, can be partial. Default None None defaults to start after : label or tuple, can be partial. Default None None defaults to end Returns ------- truncated : MultiIndex """ if after and before and after < before: raise ValueError('after < before') i, j = self.levels[0].slice_locs(before, after) left, right = self.slice_locs(before, after) new_levels = list(self.levels) new_levels[0] = new_levels[0][i:j] new_codes = [level_codes[left:right] for level_codes in self.codes] new_codes[0] = new_codes[0] - i return MultiIndex(levels=new_levels, codes=new_codes, verify_integrity=False)
[ "Slice", "index", "between", "two", "labels", "/", "tuples", "return", "new", "MultiIndex" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2798-L2826
[ "def", "truncate", "(", "self", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "if", "after", "and", "before", "and", "after", "<", "before", ":", "raise", "ValueError", "(", "'after < before'", ")", "i", ",", "j", "=", "self", ".", "levels", "[", "0", "]", ".", "slice_locs", "(", "before", ",", "after", ")", "left", ",", "right", "=", "self", ".", "slice_locs", "(", "before", ",", "after", ")", "new_levels", "=", "list", "(", "self", ".", "levels", ")", "new_levels", "[", "0", "]", "=", "new_levels", "[", "0", "]", "[", "i", ":", "j", "]", "new_codes", "=", "[", "level_codes", "[", "left", ":", "right", "]", "for", "level_codes", "in", "self", ".", "codes", "]", "new_codes", "[", "0", "]", "=", "new_codes", "[", "0", "]", "-", "i", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.equals
Determines if two MultiIndex objects have the same labeling information (the levels themselves do not necessarily have to be the same) See Also -------- equal_levels
pandas/core/indexes/multi.py
def equals(self, other): """ Determines if two MultiIndex objects have the same labeling information (the levels themselves do not necessarily have to be the same) See Also -------- equal_levels """ if self.is_(other): return True if not isinstance(other, Index): return False if not isinstance(other, MultiIndex): other_vals = com.values_from_object(ensure_index(other)) return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: return False if len(self) != len(other): return False for i in range(self.nlevels): self_codes = self.codes[i] self_codes = self_codes[self_codes != -1] self_values = algos.take_nd(np.asarray(self.levels[i]._values), self_codes, allow_fill=False) other_codes = other.codes[i] other_codes = other_codes[other_codes != -1] other_values = algos.take_nd( np.asarray(other.levels[i]._values), other_codes, allow_fill=False) # since we use NaT both datetime64 and timedelta64 # we can have a situation where a level is typed say # timedelta64 in self (IOW it has other values than NaT) # but types datetime64 in other (where its all NaT) # but these are equivalent if len(self_values) == 0 and len(other_values) == 0: continue if not array_equivalent(self_values, other_values): return False return True
def equals(self, other): """ Determines if two MultiIndex objects have the same labeling information (the levels themselves do not necessarily have to be the same) See Also -------- equal_levels """ if self.is_(other): return True if not isinstance(other, Index): return False if not isinstance(other, MultiIndex): other_vals = com.values_from_object(ensure_index(other)) return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: return False if len(self) != len(other): return False for i in range(self.nlevels): self_codes = self.codes[i] self_codes = self_codes[self_codes != -1] self_values = algos.take_nd(np.asarray(self.levels[i]._values), self_codes, allow_fill=False) other_codes = other.codes[i] other_codes = other_codes[other_codes != -1] other_values = algos.take_nd( np.asarray(other.levels[i]._values), other_codes, allow_fill=False) # since we use NaT both datetime64 and timedelta64 # we can have a situation where a level is typed say # timedelta64 in self (IOW it has other values than NaT) # but types datetime64 in other (where its all NaT) # but these are equivalent if len(self_values) == 0 and len(other_values) == 0: continue if not array_equivalent(self_values, other_values): return False return True
[ "Determines", "if", "two", "MultiIndex", "objects", "have", "the", "same", "labeling", "information", "(", "the", "levels", "themselves", "do", "not", "necessarily", "have", "to", "be", "the", "same", ")" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2828-L2876
[ "def", "equals", "(", "self", ",", "other", ")", ":", "if", "self", ".", "is_", "(", "other", ")", ":", "return", "True", "if", "not", "isinstance", "(", "other", ",", "Index", ")", ":", "return", "False", "if", "not", "isinstance", "(", "other", ",", "MultiIndex", ")", ":", "other_vals", "=", "com", ".", "values_from_object", "(", "ensure_index", "(", "other", ")", ")", "return", "array_equivalent", "(", "self", ".", "_ndarray_values", ",", "other_vals", ")", "if", "self", ".", "nlevels", "!=", "other", ".", "nlevels", ":", "return", "False", "if", "len", "(", "self", ")", "!=", "len", "(", "other", ")", ":", "return", "False", "for", "i", "in", "range", "(", "self", ".", "nlevels", ")", ":", "self_codes", "=", "self", ".", "codes", "[", "i", "]", "self_codes", "=", "self_codes", "[", "self_codes", "!=", "-", "1", "]", "self_values", "=", "algos", ".", "take_nd", "(", "np", ".", "asarray", "(", "self", ".", "levels", "[", "i", "]", ".", "_values", ")", ",", "self_codes", ",", "allow_fill", "=", "False", ")", "other_codes", "=", "other", ".", "codes", "[", "i", "]", "other_codes", "=", "other_codes", "[", "other_codes", "!=", "-", "1", "]", "other_values", "=", "algos", ".", "take_nd", "(", "np", ".", "asarray", "(", "other", ".", "levels", "[", "i", "]", ".", "_values", ")", ",", "other_codes", ",", "allow_fill", "=", "False", ")", "# since we use NaT both datetime64 and timedelta64", "# we can have a situation where a level is typed say", "# timedelta64 in self (IOW it has other values than NaT)", "# but types datetime64 in other (where its all NaT)", "# but these are equivalent", "if", "len", "(", "self_values", ")", "==", "0", "and", "len", "(", "other_values", ")", "==", "0", ":", "continue", "if", "not", "array_equivalent", "(", "self_values", ",", "other_values", ")", ":", "return", "False", "return", "True" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.equal_levels
Return True if the levels of both MultiIndex objects are the same
pandas/core/indexes/multi.py
def equal_levels(self, other): """ Return True if the levels of both MultiIndex objects are the same """ if self.nlevels != other.nlevels: return False for i in range(self.nlevels): if not self.levels[i].equals(other.levels[i]): return False return True
def equal_levels(self, other): """ Return True if the levels of both MultiIndex objects are the same """ if self.nlevels != other.nlevels: return False for i in range(self.nlevels): if not self.levels[i].equals(other.levels[i]): return False return True
[ "Return", "True", "if", "the", "levels", "of", "both", "MultiIndex", "objects", "are", "the", "same" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2878-L2889
[ "def", "equal_levels", "(", "self", ",", "other", ")", ":", "if", "self", ".", "nlevels", "!=", "other", ".", "nlevels", ":", "return", "False", "for", "i", "in", "range", "(", "self", ".", "nlevels", ")", ":", "if", "not", "self", ".", "levels", "[", "i", "]", ".", "equals", "(", "other", ".", "levels", "[", "i", "]", ")", ":", "return", "False", "return", "True" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.union
Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- Index >>> index.union(index2)
pandas/core/indexes/multi.py
def union(self, other, sort=None): """ Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- Index >>> index.union(index2) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self # TODO: Index.union returns other when `len(self)` is 0. uniq_tuples = lib.fast_unique_multiple([self._ndarray_values, other._ndarray_values], sort=sort) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
def union(self, other, sort=None): """ Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- Index >>> index.union(index2) """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self # TODO: Index.union returns other when `len(self)` is 0. uniq_tuples = lib.fast_unique_multiple([self._ndarray_values, other._ndarray_values], sort=sort) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
[ "Form", "the", "union", "of", "two", "MultiIndex", "objects" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2891-L2937
[ "def", "union", "(", "self", ",", "other", ",", "sort", "=", "None", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", ",", "result_names", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "if", "len", "(", "other", ")", "==", "0", "or", "self", ".", "equals", "(", "other", ")", ":", "return", "self", "# TODO: Index.union returns other when `len(self)` is 0.", "uniq_tuples", "=", "lib", ".", "fast_unique_multiple", "(", "[", "self", ".", "_ndarray_values", ",", "other", ".", "_ndarray_values", "]", ",", "sort", "=", "sort", ")", "return", "MultiIndex", ".", "from_arrays", "(", "lzip", "(", "*", "uniq_tuples", ")", ",", "sortorder", "=", "0", ",", "names", "=", "result_names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.intersection
Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match behaviour from before 0.24.0 Returns ------- Index
pandas/core/indexes/multi.py
def intersection(self, other, sort=False): """ Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match behaviour from before 0.24.0 Returns ------- Index """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if self.equals(other): return self self_tuples = self._ndarray_values other_tuples = other._ndarray_values uniq_tuples = set(self_tuples) & set(other_tuples) if sort is None: uniq_tuples = sorted(uniq_tuples) if len(uniq_tuples) == 0: return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
def intersection(self, other, sort=False): """ Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match behaviour from before 0.24.0 Returns ------- Index """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if self.equals(other): return self self_tuples = self._ndarray_values other_tuples = other._ndarray_values uniq_tuples = set(self_tuples) & set(other_tuples) if sort is None: uniq_tuples = sorted(uniq_tuples) if len(uniq_tuples) == 0: return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
[ "Form", "the", "intersection", "of", "two", "MultiIndex", "objects", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2939-L2980
[ "def", "intersection", "(", "self", ",", "other", ",", "sort", "=", "False", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", ",", "result_names", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "if", "self", ".", "equals", "(", "other", ")", ":", "return", "self", "self_tuples", "=", "self", ".", "_ndarray_values", "other_tuples", "=", "other", ".", "_ndarray_values", "uniq_tuples", "=", "set", "(", "self_tuples", ")", "&", "set", "(", "other_tuples", ")", "if", "sort", "is", "None", ":", "uniq_tuples", "=", "sorted", "(", "uniq_tuples", ")", "if", "len", "(", "uniq_tuples", ")", "==", "0", ":", "return", "MultiIndex", "(", "levels", "=", "self", ".", "levels", ",", "codes", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "names", "=", "result_names", ",", "verify_integrity", "=", "False", ")", "else", ":", "return", "MultiIndex", ".", "from_arrays", "(", "lzip", "(", "*", "uniq_tuples", ")", ",", "sortorder", "=", "0", ",", "names", "=", "result_names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.difference
Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- diff : MultiIndex
pandas/core/indexes/multi.py
def difference(self, other, sort=None): """ Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- diff : MultiIndex """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0: return self if self.equals(other): return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) difference = this.values.take(label_diff) if sort is None: difference = sorted(difference) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
def difference(self, other, sort=None): """ Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- diff : MultiIndex """ self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0: return self if self.equals(other): return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) difference = this.values.take(label_diff) if sort is None: difference = sorted(difference) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
[ "Compute", "set", "difference", "of", "two", "MultiIndex", "objects" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L2982-L3032
[ "def", "difference", "(", "self", ",", "other", ",", "sort", "=", "None", ")", ":", "self", ".", "_validate_sort_keyword", "(", "sort", ")", "self", ".", "_assert_can_do_setop", "(", "other", ")", "other", ",", "result_names", "=", "self", ".", "_convert_can_do_setop", "(", "other", ")", "if", "len", "(", "other", ")", "==", "0", ":", "return", "self", "if", "self", ".", "equals", "(", "other", ")", ":", "return", "MultiIndex", "(", "levels", "=", "self", ".", "levels", ",", "codes", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "names", "=", "result_names", ",", "verify_integrity", "=", "False", ")", "this", "=", "self", ".", "_get_unique_index", "(", ")", "indexer", "=", "this", ".", "get_indexer", "(", "other", ")", "indexer", "=", "indexer", ".", "take", "(", "(", "indexer", "!=", "-", "1", ")", ".", "nonzero", "(", ")", "[", "0", "]", ")", "label_diff", "=", "np", ".", "setdiff1d", "(", "np", ".", "arange", "(", "this", ".", "size", ")", ",", "indexer", ",", "assume_unique", "=", "True", ")", "difference", "=", "this", ".", "values", ".", "take", "(", "label_diff", ")", "if", "sort", "is", "None", ":", "difference", "=", "sorted", "(", "difference", ")", "if", "len", "(", "difference", ")", "==", "0", ":", "return", "MultiIndex", "(", "levels", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "codes", "=", "[", "[", "]", "]", "*", "self", ".", "nlevels", ",", "names", "=", "result_names", ",", "verify_integrity", "=", "False", ")", "else", ":", "return", "MultiIndex", ".", "from_tuples", "(", "difference", ",", "sortorder", "=", "0", ",", "names", "=", "result_names", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.insert
Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index
pandas/core/indexes/multi.py
def insert(self, loc, item): """ Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index """ # Pad the key with empty strings if lower levels of the key # aren't specified: if not isinstance(item, tuple): item = (item, ) + ('', ) * (self.nlevels - 1) elif len(item) != self.nlevels: raise ValueError('Item must have length equal to number of ' 'levels.') new_levels = [] new_codes = [] for k, level, level_codes in zip(item, self.levels, self.codes): if k not in level: # have to insert into level # must insert at end otherwise you have to recompute all the # other codes lev_loc = len(level) level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) new_levels.append(level) new_codes.append(np.insert( ensure_int64(level_codes), loc, lev_loc)) return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False)
def insert(self, loc, item): """ Make new MultiIndex inserting new item at location Parameters ---------- loc : int item : tuple Must be same length as number of levels in the MultiIndex Returns ------- new_index : Index """ # Pad the key with empty strings if lower levels of the key # aren't specified: if not isinstance(item, tuple): item = (item, ) + ('', ) * (self.nlevels - 1) elif len(item) != self.nlevels: raise ValueError('Item must have length equal to number of ' 'levels.') new_levels = [] new_codes = [] for k, level, level_codes in zip(item, self.levels, self.codes): if k not in level: # have to insert into level # must insert at end otherwise you have to recompute all the # other codes lev_loc = len(level) level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) new_levels.append(level) new_codes.append(np.insert( ensure_int64(level_codes), loc, lev_loc)) return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False)
[ "Make", "new", "MultiIndex", "inserting", "new", "item", "at", "location" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L3066-L3105
[ "def", "insert", "(", "self", ",", "loc", ",", "item", ")", ":", "# Pad the key with empty strings if lower levels of the key", "# aren't specified:", "if", "not", "isinstance", "(", "item", ",", "tuple", ")", ":", "item", "=", "(", "item", ",", ")", "+", "(", "''", ",", ")", "*", "(", "self", ".", "nlevels", "-", "1", ")", "elif", "len", "(", "item", ")", "!=", "self", ".", "nlevels", ":", "raise", "ValueError", "(", "'Item must have length equal to number of '", "'levels.'", ")", "new_levels", "=", "[", "]", "new_codes", "=", "[", "]", "for", "k", ",", "level", ",", "level_codes", "in", "zip", "(", "item", ",", "self", ".", "levels", ",", "self", ".", "codes", ")", ":", "if", "k", "not", "in", "level", ":", "# have to insert into level", "# must insert at end otherwise you have to recompute all the", "# other codes", "lev_loc", "=", "len", "(", "level", ")", "level", "=", "level", ".", "insert", "(", "lev_loc", ",", "k", ")", "else", ":", "lev_loc", "=", "level", ".", "get_loc", "(", "k", ")", "new_levels", ".", "append", "(", "level", ")", "new_codes", ".", "append", "(", "np", ".", "insert", "(", "ensure_int64", "(", "level_codes", ")", ",", "loc", ",", "lev_loc", ")", ")", "return", "MultiIndex", "(", "levels", "=", "new_levels", ",", "codes", "=", "new_codes", ",", "names", "=", "self", ".", "names", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
MultiIndex.delete
Make new index with passed location deleted Returns ------- new_index : MultiIndex
pandas/core/indexes/multi.py
def delete(self, loc): """ Make new index with passed location deleted Returns ------- new_index : MultiIndex """ new_codes = [np.delete(level_codes, loc) for level_codes in self.codes] return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False)
def delete(self, loc): """ Make new index with passed location deleted Returns ------- new_index : MultiIndex """ new_codes = [np.delete(level_codes, loc) for level_codes in self.codes] return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False)
[ "Make", "new", "index", "with", "passed", "location", "deleted" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/multi.py#L3107-L3117
[ "def", "delete", "(", "self", ",", "loc", ")", ":", "new_codes", "=", "[", "np", ".", "delete", "(", "level_codes", ",", "loc", ")", "for", "level_codes", "in", "self", ".", "codes", "]", "return", "MultiIndex", "(", "levels", "=", "self", ".", "levels", ",", "codes", "=", "new_codes", ",", "names", "=", "self", ".", "names", ",", "verify_integrity", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_ensure_data
routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string)
pandas/core/algorithms.py
def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """ # we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif 
is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """ # we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif 
is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
[ "routine", "to", "ensure", "that", "our", "data", "is", "of", "the", "correct", "input", "dtype", "for", "lower", "-", "level", "routines" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L36-L127
[ "def", "_ensure_data", "(", "values", ",", "dtype", "=", "None", ")", ":", "# we check some simple dtypes first", "try", ":", "if", "is_object_dtype", "(", "dtype", ")", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "if", "is_bool_dtype", "(", "values", ")", "or", "is_bool_dtype", "(", "dtype", ")", ":", "# we are actually coercing to uint64", "# until our algos support uint8 directly (see TODO)", "return", "np", ".", "asarray", "(", "values", ")", ".", "astype", "(", "'uint64'", ")", ",", "'bool'", ",", "'uint64'", "elif", "is_signed_integer_dtype", "(", "values", ")", "or", "is_signed_integer_dtype", "(", "dtype", ")", ":", "return", "ensure_int64", "(", "values", ")", ",", "'int64'", ",", "'int64'", "elif", "(", "is_unsigned_integer_dtype", "(", "values", ")", "or", "is_unsigned_integer_dtype", "(", "dtype", ")", ")", ":", "return", "ensure_uint64", "(", "values", ")", ",", "'uint64'", ",", "'uint64'", "elif", "is_float_dtype", "(", "values", ")", "or", "is_float_dtype", "(", "dtype", ")", ":", "return", "ensure_float64", "(", "values", ")", ",", "'float64'", ",", "'float64'", "elif", "is_object_dtype", "(", "values", ")", "and", "dtype", "is", "None", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "elif", "is_complex_dtype", "(", "values", ")", "or", "is_complex_dtype", "(", "dtype", ")", ":", "# ignore the fact that we are casting to float", "# which discards complex parts", "with", "catch_warnings", "(", ")", ":", "simplefilter", "(", "\"ignore\"", ",", "np", ".", "ComplexWarning", ")", "values", "=", "ensure_float64", "(", "values", ")", "return", "values", ",", "'float64'", ",", "'float64'", "except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "# if we are trying to coerce to a dtype", "# and it is incompat this will fall thru to here", "return", "ensure_object", "(", "values", ")", ",", 
"'object'", ",", "'object'", "# datetimelike", "if", "(", "needs_i8_conversion", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", "or", "is_datetime64_any_dtype", "(", "dtype", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ")", ":", "if", "is_period_dtype", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "PeriodIndex", "values", "=", "PeriodIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "elif", "is_timedelta64_dtype", "(", "values", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "TimedeltaIndex", "values", "=", "TimedeltaIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "else", ":", "# Datetime", "from", "pandas", "import", "DatetimeIndex", "values", "=", "DatetimeIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "return", "values", ".", "asi8", ",", "dtype", ",", "'int64'", "elif", "(", "is_categorical_dtype", "(", "values", ")", "and", "(", "is_categorical_dtype", "(", "dtype", ")", "or", "dtype", "is", "None", ")", ")", ":", "values", "=", "getattr", "(", "values", ",", "'values'", ",", "values", ")", "values", "=", "values", ".", "codes", "dtype", "=", "'category'", "# we are actually coercing to int64", "# until our algos support int* directly (not all do)", "values", "=", "ensure_int64", "(", "values", ")", "return", "values", ",", "dtype", ",", "'int64'", "# we have failed, return object", "values", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", "np", ".", "object", ")", "return", "ensure_object", "(", "values", ")", ",", "'object'", ",", "'object'" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_reconstruct_data
reverse of _ensure_data Parameters ---------- values : ndarray dtype : pandas_dtype original : ndarray-like Returns ------- Index for extension types, otherwise ndarray casted to dtype
pandas/core/algorithms.py
def _reconstruct_data(values, dtype, original): """ reverse of _ensure_data Parameters ---------- values : ndarray dtype : pandas_dtype original : ndarray-like Returns ------- Index for extension types, otherwise ndarray casted to dtype """ from pandas import Index if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype): values = Index(original)._shallow_copy(values, name=None) elif is_bool_dtype(dtype): values = values.astype(dtype) # we only support object dtypes bool Index if isinstance(original, Index): values = values.astype(object) elif dtype is not None: values = values.astype(dtype) return values
def _reconstruct_data(values, dtype, original): """ reverse of _ensure_data Parameters ---------- values : ndarray dtype : pandas_dtype original : ndarray-like Returns ------- Index for extension types, otherwise ndarray casted to dtype """ from pandas import Index if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype): values = Index(original)._shallow_copy(values, name=None) elif is_bool_dtype(dtype): values = values.astype(dtype) # we only support object dtypes bool Index if isinstance(original, Index): values = values.astype(object) elif dtype is not None: values = values.astype(dtype) return values
[ "reverse", "of", "_ensure_data" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L130-L158
[ "def", "_reconstruct_data", "(", "values", ",", "dtype", ",", "original", ")", ":", "from", "pandas", "import", "Index", "if", "is_extension_array_dtype", "(", "dtype", ")", ":", "values", "=", "dtype", ".", "construct_array_type", "(", ")", ".", "_from_sequence", "(", "values", ")", "elif", "is_datetime64tz_dtype", "(", "dtype", ")", "or", "is_period_dtype", "(", "dtype", ")", ":", "values", "=", "Index", "(", "original", ")", ".", "_shallow_copy", "(", "values", ",", "name", "=", "None", ")", "elif", "is_bool_dtype", "(", "dtype", ")", ":", "values", "=", "values", ".", "astype", "(", "dtype", ")", "# we only support object dtypes bool Index", "if", "isinstance", "(", "original", ",", "Index", ")", ":", "values", "=", "values", ".", "astype", "(", "object", ")", "elif", "dtype", "is", "not", "None", ":", "values", "=", "values", ".", "astype", "(", "dtype", ")", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_ensure_arraylike
ensure that we are arraylike if not already
pandas/core/algorithms.py
def _ensure_arraylike(values): """ ensure that we are arraylike if not already """ if not is_array_like(values): inferred = lib.infer_dtype(values, skipna=False) if inferred in ['mixed', 'string', 'unicode']: if isinstance(values, tuple): values = list(values) values = construct_1d_object_array_from_listlike(values) else: values = np.asarray(values) return values
def _ensure_arraylike(values): """ ensure that we are arraylike if not already """ if not is_array_like(values): inferred = lib.infer_dtype(values, skipna=False) if inferred in ['mixed', 'string', 'unicode']: if isinstance(values, tuple): values = list(values) values = construct_1d_object_array_from_listlike(values) else: values = np.asarray(values) return values
[ "ensure", "that", "we", "are", "arraylike", "if", "not", "already" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L161-L173
[ "def", "_ensure_arraylike", "(", "values", ")", ":", "if", "not", "is_array_like", "(", "values", ")", ":", "inferred", "=", "lib", ".", "infer_dtype", "(", "values", ",", "skipna", "=", "False", ")", "if", "inferred", "in", "[", "'mixed'", ",", "'string'", ",", "'unicode'", "]", ":", "if", "isinstance", "(", "values", ",", "tuple", ")", ":", "values", "=", "list", "(", "values", ")", "values", "=", "construct_1d_object_array_from_listlike", "(", "values", ")", "else", ":", "values", "=", "np", ".", "asarray", "(", "values", ")", "return", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_get_hashtable_algo
Parameters ---------- values : arraylike Returns ------- tuples(hashtable class, vector class, values, dtype, ndtype)
pandas/core/algorithms.py
def _get_hashtable_algo(values): """ Parameters ---------- values : arraylike Returns ------- tuples(hashtable class, vector class, values, dtype, ndtype) """ values, dtype, ndtype = _ensure_data(values) if ndtype == 'object': # it's cheaper to use a String Hash Table than Object; we infer # including nulls because that is the only difference between # StringHashTable and ObjectHashtable if lib.infer_dtype(values, skipna=False) in ['string']: ndtype = 'string' else: ndtype = 'object' htable, table = _hashtables[ndtype] return (htable, table, values, dtype, ndtype)
def _get_hashtable_algo(values):
    """
    Select the hash table / vector classes appropriate for *values*.

    Parameters
    ----------
    values : arraylike

    Returns
    -------
    tuples(hashtable class, vector class, values, dtype, ndtype)
    """
    values, dtype, ndtype = _ensure_data(values)

    # A StringHashTable is cheaper than an ObjectHashtable; it is usable
    # whenever every element is a string (nulls are the only thing that
    # would force the object table), so infer including nulls.
    if ndtype == 'object' and lib.infer_dtype(values, skipna=False) == 'string':
        ndtype = 'string'

    htable, table = _hashtables[ndtype]
    return htable, table, values, dtype, ndtype
[ "Parameters", "----------", "values", ":", "arraylike" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L185-L212
[ "def", "_get_hashtable_algo", "(", "values", ")", ":", "values", ",", "dtype", ",", "ndtype", "=", "_ensure_data", "(", "values", ")", "if", "ndtype", "==", "'object'", ":", "# it's cheaper to use a String Hash Table than Object; we infer", "# including nulls because that is the only difference between", "# StringHashTable and ObjectHashtable", "if", "lib", ".", "infer_dtype", "(", "values", ",", "skipna", "=", "False", ")", "in", "[", "'string'", "]", ":", "ndtype", "=", "'string'", "else", ":", "ndtype", "=", "'object'", "htable", ",", "table", "=", "_hashtables", "[", "ndtype", "]", "return", "(", "htable", ",", "table", ",", "values", ",", "dtype", ",", "ndtype", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
match
Compute locations of to_match into values Parameters ---------- to_match : array-like values to find positions of values : array-like Unique set of values na_sentinel : int, default -1 Value to mark "not found" Examples -------- Returns ------- match : ndarray of integers
pandas/core/algorithms.py
def match(to_match, values, na_sentinel=-1): """ Compute locations of to_match into values Parameters ---------- to_match : array-like values to find positions of values : array-like Unique set of values na_sentinel : int, default -1 Value to mark "not found" Examples -------- Returns ------- match : ndarray of integers """ values = com.asarray_tuplesafe(values) htable, _, values, dtype, ndtype = _get_hashtable_algo(values) to_match, _, _ = _ensure_data(to_match, dtype) table = htable(min(len(to_match), 1000000)) table.map_locations(values) result = table.lookup(to_match) if na_sentinel != -1: # replace but return a numpy array # use a Series because it handles dtype conversions properly from pandas import Series result = Series(result.ravel()).replace(-1, na_sentinel) result = result.values.reshape(result.shape) return result
def match(to_match, values, na_sentinel=-1):
    """
    Compute the position of each element of *to_match* within *values*.

    Parameters
    ----------
    to_match : array-like
        values to find positions of
    values : array-like
        Unique set of values
    na_sentinel : int, default -1
        Value to mark "not found"

    Returns
    -------
    match : ndarray of integers
    """
    values = com.asarray_tuplesafe(values)
    htable, _, values, dtype, ndtype = _get_hashtable_algo(values)
    to_match, _, _ = _ensure_data(to_match, dtype)

    # Size the table by the query length, capped to bound the allocation.
    table = htable(min(len(to_match), 1000000))
    table.map_locations(values)
    locs = table.lookup(to_match)

    if na_sentinel == -1:
        return locs

    # The hash table reports misses as -1; remap through a Series so any
    # dtype promotion required by the sentinel is handled for us, then
    # return a plain ndarray.
    from pandas import Series
    remapped = Series(locs.ravel()).replace(-1, na_sentinel)
    return remapped.values.reshape(remapped.shape)
[ "Compute", "locations", "of", "to_match", "into", "values" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L238-L273
[ "def", "match", "(", "to_match", ",", "values", ",", "na_sentinel", "=", "-", "1", ")", ":", "values", "=", "com", ".", "asarray_tuplesafe", "(", "values", ")", "htable", ",", "_", ",", "values", ",", "dtype", ",", "ndtype", "=", "_get_hashtable_algo", "(", "values", ")", "to_match", ",", "_", ",", "_", "=", "_ensure_data", "(", "to_match", ",", "dtype", ")", "table", "=", "htable", "(", "min", "(", "len", "(", "to_match", ")", ",", "1000000", ")", ")", "table", ".", "map_locations", "(", "values", ")", "result", "=", "table", ".", "lookup", "(", "to_match", ")", "if", "na_sentinel", "!=", "-", "1", ":", "# replace but return a numpy array", "# use a Series because it handles dtype conversions properly", "from", "pandas", "import", "Series", "result", "=", "Series", "(", "result", ".", "ravel", "(", ")", ")", ".", "replace", "(", "-", "1", ",", "na_sentinel", ")", "result", "=", "result", ".", "values", ".", "reshape", "(", "result", ".", "shape", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
unique
Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
pandas/core/algorithms.py
def unique(values): """ Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... 
ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """ values = _ensure_arraylike(values) if is_extension_array_dtype(values): # Dispatch to extension dtype's unique. return values.unique() original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) table = htable(len(values)) uniques = table.unique(values) uniques = _reconstruct_data(uniques, dtype, original) return uniques
def unique(values):
    """
    Hash table-based unique. Uniques are returned in order
    of appearance. This does NOT sort.

    Significantly faster than numpy.unique. Includes NA values.

    Parameters
    ----------
    values : 1d array-like

    Returns
    -------
    numpy.ndarray or ExtensionArray

        The return can be:

        * Index : when the input is an Index
        * Categorical : when the input is a Categorical dtype
        * ndarray : when the input is a Series/ndarray

        Return numpy.ndarray or ExtensionArray.

    See Also
    --------
    Index.unique
    Series.unique

    Examples
    --------
    >>> pd.unique(pd.Series([2, 1, 3, 3]))
    array([2, 1, 3])

    >>> pd.unique(list('baabc'))
    array(['b', 'a', 'c'], dtype=object)

    An unordered Categorical will return categories in the
    order of appearance.

    >>> pd.unique(pd.Series(pd.Categorical(list('baabc'))))
    [b, a, c]
    Categories (3, object): [b, a, c]

    An array of tuples

    >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')])
    array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object)
    """
    values = _ensure_arraylike(values)

    if is_extension_array_dtype(values):
        # Extension arrays (Categorical, tz-aware datetimes, ...) know how
        # to dedupe themselves; dispatch instead of round-tripping.
        return values.unique()

    original = values
    htable, _, values, dtype, ndtype = _get_hashtable_algo(values)

    table = htable(len(values))
    result = table.unique(values)
    return _reconstruct_data(result, dtype, original)
[ "Hash", "table", "-", "based", "unique", ".", "Uniques", "are", "returned", "in", "order", "of", "appearance", ".", "This", "does", "NOT", "sort", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L276-L367
[ "def", "unique", "(", "values", ")", ":", "values", "=", "_ensure_arraylike", "(", "values", ")", "if", "is_extension_array_dtype", "(", "values", ")", ":", "# Dispatch to extension dtype's unique.", "return", "values", ".", "unique", "(", ")", "original", "=", "values", "htable", ",", "_", ",", "values", ",", "dtype", ",", "ndtype", "=", "_get_hashtable_algo", "(", "values", ")", "table", "=", "htable", "(", "len", "(", "values", ")", ")", "uniques", "=", "table", ".", "unique", "(", "values", ")", "uniques", "=", "_reconstruct_data", "(", "uniques", ",", "dtype", ",", "original", ")", "return", "uniques" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
isin
Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps
pandas/core/algorithms.py
def isin(comps, values): """ Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps """ if not is_list_like(comps): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{comps_type}]" .format(comps_type=type(comps).__name__)) if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) if is_categorical_dtype(comps): # TODO(extension) # handle categoricals return comps._values.isin(values) comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) # faster for larger cases to use np.in1d f = lambda x, y: htable.ismember_object(x, values) # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1000000 and not is_object_dtype(comps): f = lambda x, y: np.in1d(x, y) elif is_integer_dtype(comps): try: values = values.astype('int64', copy=False) comps = comps.astype('int64', copy=False) f = lambda x, y: htable.ismember_int64(x, y) except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) elif is_float_dtype(comps): try: values = values.astype('float64', copy=False) comps = comps.astype('float64', copy=False) f = lambda x, y: htable.ismember_float64(x, y) except (TypeError, ValueError): values = values.astype(object) comps = comps.astype(object) return f(comps, values)
def isin(comps, values):
    """
    Compute the isin boolean array

    Parameters
    ----------
    comps : array-like
        Values whose membership is tested.
    values : array-like
        Collection to test membership against.

    Returns
    -------
    boolean array same length as comps
    """
    if not is_list_like(comps):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a [{comps_type}]"
                        .format(comps_type=type(comps).__name__))
    if not is_list_like(values):
        raise TypeError("only list-like objects are allowed to be passed"
                        " to isin(), you passed a [{values_type}]"
                        .format(values_type=type(values).__name__))

    # Materialize arbitrary iterables into a 1-D object array so the
    # dtype-coercion below has something ndarray-like to work with.
    if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)):
        values = construct_1d_object_array_from_listlike(list(values))

    if is_categorical_dtype(comps):
        # TODO(extension)
        # handle categoricals
        return comps._values.isin(values)

    comps = com.values_from_object(comps)

    # Coerce both sides to a common computation dtype (values follows comps).
    comps, dtype, _ = _ensure_data(comps)
    values, _, _ = _ensure_data(values, dtype=dtype)

    # Default membership test. Note: the lambda ignores its second argument
    # and closes over `values`, so later reassignments of `values` in the
    # branches below are still picked up at call time.
    # faster for larger cases to use np.in1d
    f = lambda x, y: htable.ismember_object(x, values)

    # GH16012
    # Ensure np.in1d doesn't get object types or it *may* throw an exception
    if len(comps) > 1000000 and not is_object_dtype(comps):
        f = lambda x, y: np.in1d(x, y)
    elif is_integer_dtype(comps):
        try:
            values = values.astype('int64', copy=False)
            comps = comps.astype('int64', copy=False)
            f = lambda x, y: htable.ismember_int64(x, y)
        except (TypeError, ValueError, OverflowError):
            # e.g. values too large for int64 -- fall back to the object path
            values = values.astype(object)
            comps = comps.astype(object)
    elif is_float_dtype(comps):
        try:
            values = values.astype('float64', copy=False)
            comps = comps.astype('float64', copy=False)
            f = lambda x, y: htable.ismember_float64(x, y)
        except (TypeError, ValueError):
            values = values.astype(object)
            comps = comps.astype(object)

    return f(comps, values)
[ "Compute", "the", "isin", "boolean", "array" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L373-L434
[ "def", "isin", "(", "comps", ",", "values", ")", ":", "if", "not", "is_list_like", "(", "comps", ")", ":", "raise", "TypeError", "(", "\"only list-like objects are allowed to be passed\"", "\" to isin(), you passed a [{comps_type}]\"", ".", "format", "(", "comps_type", "=", "type", "(", "comps", ")", ".", "__name__", ")", ")", "if", "not", "is_list_like", "(", "values", ")", ":", "raise", "TypeError", "(", "\"only list-like objects are allowed to be passed\"", "\" to isin(), you passed a [{values_type}]\"", ".", "format", "(", "values_type", "=", "type", "(", "values", ")", ".", "__name__", ")", ")", "if", "not", "isinstance", "(", "values", ",", "(", "ABCIndex", ",", "ABCSeries", ",", "np", ".", "ndarray", ")", ")", ":", "values", "=", "construct_1d_object_array_from_listlike", "(", "list", "(", "values", ")", ")", "if", "is_categorical_dtype", "(", "comps", ")", ":", "# TODO(extension)", "# handle categoricals", "return", "comps", ".", "_values", ".", "isin", "(", "values", ")", "comps", "=", "com", ".", "values_from_object", "(", "comps", ")", "comps", ",", "dtype", ",", "_", "=", "_ensure_data", "(", "comps", ")", "values", ",", "_", ",", "_", "=", "_ensure_data", "(", "values", ",", "dtype", "=", "dtype", ")", "# faster for larger cases to use np.in1d", "f", "=", "lambda", "x", ",", "y", ":", "htable", ".", "ismember_object", "(", "x", ",", "values", ")", "# GH16012", "# Ensure np.in1d doesn't get object types or it *may* throw an exception", "if", "len", "(", "comps", ")", ">", "1000000", "and", "not", "is_object_dtype", "(", "comps", ")", ":", "f", "=", "lambda", "x", ",", "y", ":", "np", ".", "in1d", "(", "x", ",", "y", ")", "elif", "is_integer_dtype", "(", "comps", ")", ":", "try", ":", "values", "=", "values", ".", "astype", "(", "'int64'", ",", "copy", "=", "False", ")", "comps", "=", "comps", ".", "astype", "(", "'int64'", ",", "copy", "=", "False", ")", "f", "=", "lambda", "x", ",", "y", ":", "htable", ".", "ismember_int64", "(", "x", ",", "y", ")", 
"except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "values", "=", "values", ".", "astype", "(", "object", ")", "comps", "=", "comps", ".", "astype", "(", "object", ")", "elif", "is_float_dtype", "(", "comps", ")", ":", "try", ":", "values", "=", "values", ".", "astype", "(", "'float64'", ",", "copy", "=", "False", ")", "comps", "=", "comps", ".", "astype", "(", "'float64'", ",", "copy", "=", "False", ")", "f", "=", "lambda", "x", ",", "y", ":", "htable", ".", "ismember_float64", "(", "x", ",", "y", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "values", "=", "values", ".", "astype", "(", "object", ")", "comps", "=", "comps", ".", "astype", "(", "object", ")", "return", "f", "(", "comps", ",", "values", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_factorize_array
Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray na_sentinel : int, default -1 size_hint : int, optional Passsed through to the hashtable's 'get_labels' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). Returns ------- labels, uniques : ndarray
pandas/core/algorithms.py
def _factorize_array(values, na_sentinel=-1, size_hint=None, na_value=None): """Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray na_sentinel : int, default -1 size_hint : int, optional Passsed through to the hashtable's 'get_labels' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). Returns ------- labels, uniques : ndarray """ (hash_klass, _), values = _get_data_algo(values, _hashtables) table = hash_klass(size_hint or len(values)) uniques, labels = table.factorize(values, na_sentinel=na_sentinel, na_value=na_value) labels = ensure_platform_int(labels) return labels, uniques
def _factorize_array(values, na_sentinel=-1, size_hint=None, na_value=None):
    """Factorize an array-like to labels and uniques.

    This doesn't do any coercion of types or unboxing before factorization.

    Parameters
    ----------
    values : ndarray
    na_sentinel : int, default -1
        Label emitted for missing entries.
    size_hint : int, optional
        Passsed through to the hashtable's 'get_labels' method
    na_value : object, optional
        A value in `values` to consider missing. Note: only use this
        parameter when you know that you don't have any values pandas would
        consider missing in the array (NaN for float data, iNaT for
        datetimes, etc.).

    Returns
    -------
    labels, uniques : ndarray
    """
    (hash_klass, _), values = _get_data_algo(values, _hashtables)

    # Pre-size the table when a hint is given; otherwise size by the input.
    table = hash_klass(size_hint or len(values))
    uniques, labels = table.factorize(values,
                                      na_sentinel=na_sentinel,
                                      na_value=na_value)

    return ensure_platform_int(labels), uniques
[ "Factorize", "an", "array", "-", "like", "to", "labels", "and", "uniques", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L437-L466
[ "def", "_factorize_array", "(", "values", ",", "na_sentinel", "=", "-", "1", ",", "size_hint", "=", "None", ",", "na_value", "=", "None", ")", ":", "(", "hash_klass", ",", "_", ")", ",", "values", "=", "_get_data_algo", "(", "values", ",", "_hashtables", ")", "table", "=", "hash_klass", "(", "size_hint", "or", "len", "(", "values", ")", ")", "uniques", ",", "labels", "=", "table", ".", "factorize", "(", "values", ",", "na_sentinel", "=", "na_sentinel", ",", "na_value", "=", "na_value", ")", "labels", "=", "ensure_platform_int", "(", "labels", ")", "return", "labels", ",", "uniques" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
value_counts
Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order normalize: boolean, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN Returns ------- value_counts : Series
pandas/core/algorithms.py
def value_counts(values, sort=True, ascending=False, normalize=False, bins=None, dropna=True): """ Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order normalize: boolean, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN Returns ------- value_counts : Series """ from pandas.core.series import Series, Index name = getattr(values, 'name', None) if bins is not None: try: from pandas.core.reshape.tile import cut values = Series(values) ii = cut(values, bins, include_lowest=True) except TypeError: raise TypeError("bins argument only works with numeric data.") # count, remove nulls (from the index), and but the bins result = ii.value_counts(dropna=dropna) result = result[result.index.notna()] result.index = result.index.astype('interval') result = result.sort_index() # if we are dropna and we have NO values if dropna and (result.values == 0).all(): result = result.iloc[0:0] # normalizing is by len of all (regardless of dropna) counts = np.array([len(ii)]) else: if is_extension_array_dtype(values) or is_sparse(values): # handle Categorical and sparse, result = Series(values)._values.value_counts(dropna=dropna) result.name = name counts = result.values else: keys, counts = _value_counts_arraylike(values, dropna) if not isinstance(keys, Index): keys = Index(keys) result = Series(counts, index=keys, name=name) if sort: result = result.sort_values(ascending=ascending) if normalize: result = result / float(counts.sum()) return result
def value_counts(values, sort=True, ascending=False, normalize=False, bins=None, dropna=True): """ Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order normalize: boolean, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN Returns ------- value_counts : Series """ from pandas.core.series import Series, Index name = getattr(values, 'name', None) if bins is not None: try: from pandas.core.reshape.tile import cut values = Series(values) ii = cut(values, bins, include_lowest=True) except TypeError: raise TypeError("bins argument only works with numeric data.") # count, remove nulls (from the index), and but the bins result = ii.value_counts(dropna=dropna) result = result[result.index.notna()] result.index = result.index.astype('interval') result = result.sort_index() # if we are dropna and we have NO values if dropna and (result.values == 0).all(): result = result.iloc[0:0] # normalizing is by len of all (regardless of dropna) counts = np.array([len(ii)]) else: if is_extension_array_dtype(values) or is_sparse(values): # handle Categorical and sparse, result = Series(values)._values.value_counts(dropna=dropna) result.name = name counts = result.values else: keys, counts = _value_counts_arraylike(values, dropna) if not isinstance(keys, Index): keys = Index(keys) result = Series(counts, index=keys, name=name) if sort: result = result.sort_values(ascending=ascending) if normalize: result = result / float(counts.sum()) return result
[ "Compute", "a", "histogram", "of", "the", "counts", "of", "non", "-", "null", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L649-L720
[ "def", "value_counts", "(", "values", ",", "sort", "=", "True", ",", "ascending", "=", "False", ",", "normalize", "=", "False", ",", "bins", "=", "None", ",", "dropna", "=", "True", ")", ":", "from", "pandas", ".", "core", ".", "series", "import", "Series", ",", "Index", "name", "=", "getattr", "(", "values", ",", "'name'", ",", "None", ")", "if", "bins", "is", "not", "None", ":", "try", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "tile", "import", "cut", "values", "=", "Series", "(", "values", ")", "ii", "=", "cut", "(", "values", ",", "bins", ",", "include_lowest", "=", "True", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"bins argument only works with numeric data.\"", ")", "# count, remove nulls (from the index), and but the bins", "result", "=", "ii", ".", "value_counts", "(", "dropna", "=", "dropna", ")", "result", "=", "result", "[", "result", ".", "index", ".", "notna", "(", ")", "]", "result", ".", "index", "=", "result", ".", "index", ".", "astype", "(", "'interval'", ")", "result", "=", "result", ".", "sort_index", "(", ")", "# if we are dropna and we have NO values", "if", "dropna", "and", "(", "result", ".", "values", "==", "0", ")", ".", "all", "(", ")", ":", "result", "=", "result", ".", "iloc", "[", "0", ":", "0", "]", "# normalizing is by len of all (regardless of dropna)", "counts", "=", "np", ".", "array", "(", "[", "len", "(", "ii", ")", "]", ")", "else", ":", "if", "is_extension_array_dtype", "(", "values", ")", "or", "is_sparse", "(", "values", ")", ":", "# handle Categorical and sparse,", "result", "=", "Series", "(", "values", ")", ".", "_values", ".", "value_counts", "(", "dropna", "=", "dropna", ")", "result", ".", "name", "=", "name", "counts", "=", "result", ".", "values", "else", ":", "keys", ",", "counts", "=", "_value_counts_arraylike", "(", "values", ",", "dropna", ")", "if", "not", "isinstance", "(", "keys", ",", "Index", ")", ":", "keys", "=", "Index", "(", "keys", ")", "result", "=", "Series", "(", 
"counts", ",", "index", "=", "keys", ",", "name", "=", "name", ")", "if", "sort", ":", "result", "=", "result", ".", "sort_values", "(", "ascending", "=", "ascending", ")", "if", "normalize", ":", "result", "=", "result", "/", "float", "(", "counts", ".", "sum", "(", ")", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_value_counts_arraylike
Parameters ---------- values : arraylike dropna : boolean Returns ------- (uniques, counts)
pandas/core/algorithms.py
def _value_counts_arraylike(values, dropna): """ Parameters ---------- values : arraylike dropna : boolean Returns ------- (uniques, counts) """ values = _ensure_arraylike(values) original = values values, dtype, ndtype = _ensure_data(values) if needs_i8_conversion(dtype): # i8 keys, counts = htable.value_count_int64(values, dropna) if dropna: msk = keys != iNaT keys, counts = keys[msk], counts[msk] else: # ndarray like # TODO: handle uint8 f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype)) keys, counts = f(values, dropna) mask = isna(values) if not dropna and mask.any(): if not isna(keys).any(): keys = np.insert(keys, 0, np.NaN) counts = np.insert(counts, 0, mask.sum()) keys = _reconstruct_data(keys, original.dtype, original) return keys, counts
def _value_counts_arraylike(values, dropna): """ Parameters ---------- values : arraylike dropna : boolean Returns ------- (uniques, counts) """ values = _ensure_arraylike(values) original = values values, dtype, ndtype = _ensure_data(values) if needs_i8_conversion(dtype): # i8 keys, counts = htable.value_count_int64(values, dropna) if dropna: msk = keys != iNaT keys, counts = keys[msk], counts[msk] else: # ndarray like # TODO: handle uint8 f = getattr(htable, "value_count_{dtype}".format(dtype=ndtype)) keys, counts = f(values, dropna) mask = isna(values) if not dropna and mask.any(): if not isna(keys).any(): keys = np.insert(keys, 0, np.NaN) counts = np.insert(counts, 0, mask.sum()) keys = _reconstruct_data(keys, original.dtype, original) return keys, counts
[ "Parameters", "----------", "values", ":", "arraylike", "dropna", ":", "boolean" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L723-L763
[ "def", "_value_counts_arraylike", "(", "values", ",", "dropna", ")", ":", "values", "=", "_ensure_arraylike", "(", "values", ")", "original", "=", "values", "values", ",", "dtype", ",", "ndtype", "=", "_ensure_data", "(", "values", ")", "if", "needs_i8_conversion", "(", "dtype", ")", ":", "# i8", "keys", ",", "counts", "=", "htable", ".", "value_count_int64", "(", "values", ",", "dropna", ")", "if", "dropna", ":", "msk", "=", "keys", "!=", "iNaT", "keys", ",", "counts", "=", "keys", "[", "msk", "]", ",", "counts", "[", "msk", "]", "else", ":", "# ndarray like", "# TODO: handle uint8", "f", "=", "getattr", "(", "htable", ",", "\"value_count_{dtype}\"", ".", "format", "(", "dtype", "=", "ndtype", ")", ")", "keys", ",", "counts", "=", "f", "(", "values", ",", "dropna", ")", "mask", "=", "isna", "(", "values", ")", "if", "not", "dropna", "and", "mask", ".", "any", "(", ")", ":", "if", "not", "isna", "(", "keys", ")", ".", "any", "(", ")", ":", "keys", "=", "np", ".", "insert", "(", "keys", ",", "0", ",", "np", ".", "NaN", ")", "counts", "=", "np", ".", "insert", "(", "counts", ",", "0", ",", "mask", ".", "sum", "(", ")", ")", "keys", "=", "_reconstruct_data", "(", "keys", ",", "original", ".", "dtype", ",", "original", ")", "return", "keys", ",", "counts" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
duplicated
Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray
pandas/core/algorithms.py
def duplicated(values, keep='first'): """ Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray """ values, dtype, ndtype = _ensure_data(values) f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype)) return f(values, keep=keep)
def duplicated(values, keep='first'): """ Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray """ values, dtype, ndtype = _ensure_data(values) f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype)) return f(values, keep=keep)
[ "Return", "boolean", "ndarray", "denoting", "duplicate", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L766-L790
[ "def", "duplicated", "(", "values", ",", "keep", "=", "'first'", ")", ":", "values", ",", "dtype", ",", "ndtype", "=", "_ensure_data", "(", "values", ")", "f", "=", "getattr", "(", "htable", ",", "\"duplicated_{dtype}\"", ".", "format", "(", "dtype", "=", "ndtype", ")", ")", "return", "f", "(", "values", ",", "keep", "=", "keep", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
mode
Returns the mode(s) of an array. Parameters ---------- values : array-like Array over which to check for duplicate values. dropna : boolean, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- mode : Series
pandas/core/algorithms.py
def mode(values, dropna=True): """ Returns the mode(s) of an array. Parameters ---------- values : array-like Array over which to check for duplicate values. dropna : boolean, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- mode : Series """ from pandas import Series values = _ensure_arraylike(values) original = values # categorical is a fast-path if is_categorical_dtype(values): if isinstance(values, Series): return Series(values.values.mode(dropna=dropna), name=values.name) return values.mode(dropna=dropna) if dropna and is_datetimelike(values): mask = values.isnull() values = values[~mask] values, dtype, ndtype = _ensure_data(values) f = getattr(htable, "mode_{dtype}".format(dtype=ndtype)) result = f(values, dropna=dropna) try: result = np.sort(result) except TypeError as e: warn("Unable to sort modes: {error}".format(error=e)) result = _reconstruct_data(result, original.dtype, original) return Series(result)
def mode(values, dropna=True): """ Returns the mode(s) of an array. Parameters ---------- values : array-like Array over which to check for duplicate values. dropna : boolean, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- mode : Series """ from pandas import Series values = _ensure_arraylike(values) original = values # categorical is a fast-path if is_categorical_dtype(values): if isinstance(values, Series): return Series(values.values.mode(dropna=dropna), name=values.name) return values.mode(dropna=dropna) if dropna and is_datetimelike(values): mask = values.isnull() values = values[~mask] values, dtype, ndtype = _ensure_data(values) f = getattr(htable, "mode_{dtype}".format(dtype=ndtype)) result = f(values, dropna=dropna) try: result = np.sort(result) except TypeError as e: warn("Unable to sort modes: {error}".format(error=e)) result = _reconstruct_data(result, original.dtype, original) return Series(result)
[ "Returns", "the", "mode", "(", "s", ")", "of", "an", "array", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L793-L835
[ "def", "mode", "(", "values", ",", "dropna", "=", "True", ")", ":", "from", "pandas", "import", "Series", "values", "=", "_ensure_arraylike", "(", "values", ")", "original", "=", "values", "# categorical is a fast-path", "if", "is_categorical_dtype", "(", "values", ")", ":", "if", "isinstance", "(", "values", ",", "Series", ")", ":", "return", "Series", "(", "values", ".", "values", ".", "mode", "(", "dropna", "=", "dropna", ")", ",", "name", "=", "values", ".", "name", ")", "return", "values", ".", "mode", "(", "dropna", "=", "dropna", ")", "if", "dropna", "and", "is_datetimelike", "(", "values", ")", ":", "mask", "=", "values", ".", "isnull", "(", ")", "values", "=", "values", "[", "~", "mask", "]", "values", ",", "dtype", ",", "ndtype", "=", "_ensure_data", "(", "values", ")", "f", "=", "getattr", "(", "htable", ",", "\"mode_{dtype}\"", ".", "format", "(", "dtype", "=", "ndtype", ")", ")", "result", "=", "f", "(", "values", ",", "dropna", "=", "dropna", ")", "try", ":", "result", "=", "np", ".", "sort", "(", "result", ")", "except", "TypeError", "as", "e", ":", "warn", "(", "\"Unable to sort modes: {error}\"", ".", "format", "(", "error", "=", "e", ")", ")", "result", "=", "_reconstruct_data", "(", "result", ",", "original", ".", "dtype", ",", "original", ")", "return", "Series", "(", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
rank
Rank the values along a given axis. Parameters ---------- values : array-like Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which tiebreaks are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they there are ranked at the top ascending : boolean, default True Whether or not the elements should be ranked in ascending order. pct : boolean, default False Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1).
pandas/core/algorithms.py
def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): """ Rank the values along a given axis. Parameters ---------- values : array-like Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which tiebreaks are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they there are ranked at the top ascending : boolean, default True Whether or not the elements should be ranked in ascending order. pct : boolean, default False Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). """ if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) elif values.ndim == 2: f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) else: raise TypeError("Array with ndim > 2 are not supported.") return ranks
def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): """ Rank the values along a given axis. Parameters ---------- values : array-like Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which tiebreaks are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they there are ranked at the top ascending : boolean, default True Whether or not the elements should be ranked in ascending order. pct : boolean, default False Whether or not to the display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). """ if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) elif values.ndim == 2: f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) else: raise TypeError("Array with ndim > 2 are not supported.") return ranks
[ "Rank", "the", "values", "along", "a", "given", "axis", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L838-L874
[ "def", "rank", "(", "values", ",", "axis", "=", "0", ",", "method", "=", "'average'", ",", "na_option", "=", "'keep'", ",", "ascending", "=", "True", ",", "pct", "=", "False", ")", ":", "if", "values", ".", "ndim", "==", "1", ":", "f", ",", "values", "=", "_get_data_algo", "(", "values", ",", "_rank1d_functions", ")", "ranks", "=", "f", "(", "values", ",", "ties_method", "=", "method", ",", "ascending", "=", "ascending", ",", "na_option", "=", "na_option", ",", "pct", "=", "pct", ")", "elif", "values", ".", "ndim", "==", "2", ":", "f", ",", "values", "=", "_get_data_algo", "(", "values", ",", "_rank2d_functions", ")", "ranks", "=", "f", "(", "values", ",", "axis", "=", "axis", ",", "ties_method", "=", "method", ",", "ascending", "=", "ascending", ",", "na_option", "=", "na_option", ",", "pct", "=", "pct", ")", "else", ":", "raise", "TypeError", "(", "\"Array with ndim > 2 are not supported.\"", ")", "return", "ranks" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
checked_add_with_arr
Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : array addend. b : array or scalar addend. arr_mask : boolean array or None array indicating which elements to exclude from checking b_mask : boolean array or boolean or None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value.
pandas/core/algorithms.py
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None): """ Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : array addend. b : array or scalar addend. arr_mask : boolean array or None array indicating which elements to exclude from checking b_mask : boolean array or boolean or None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value. """ # For performance reasons, we broadcast 'b' to the new array 'b2' # so that it has the same size as 'arr'. b2 = np.broadcast_to(b, arr.shape) if b_mask is not None: # We do the same broadcasting for b_mask as well. b2_mask = np.broadcast_to(b_mask, arr.shape) else: b2_mask = None # For elements that are NaN, regardless of their value, we should # ignore whether they overflow or not when doing the checked add. if arr_mask is not None and b2_mask is not None: not_nan = np.logical_not(arr_mask | b2_mask) elif arr_mask is not None: not_nan = np.logical_not(arr_mask) elif b_mask is not None: not_nan = np.logical_not(b2_mask) else: not_nan = np.empty(arr.shape, dtype=bool) not_nan.fill(True) # gh-14324: For each element in 'arr' and its corresponding element # in 'b2', we check the sign of the element in 'b2'. If it is positive, # we then check whether its sum with the element in 'arr' exceeds # np.iinfo(np.int64).max. If so, we have an overflow error. If it # it is negative, we then check whether its sum with the element in # 'arr' exceeds np.iinfo(np.int64).min. 
If so, we have an overflow # error as well. mask1 = b2 > 0 mask2 = b2 < 0 if not mask1.any(): to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any() elif not mask2.any(): to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any() else: to_raise = (((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or ((np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]).any()) if to_raise: raise OverflowError("Overflow in int64 addition") return arr + b
def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None): """ Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : array addend. b : array or scalar addend. arr_mask : boolean array or None array indicating which elements to exclude from checking b_mask : boolean array or boolean or None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value. """ # For performance reasons, we broadcast 'b' to the new array 'b2' # so that it has the same size as 'arr'. b2 = np.broadcast_to(b, arr.shape) if b_mask is not None: # We do the same broadcasting for b_mask as well. b2_mask = np.broadcast_to(b_mask, arr.shape) else: b2_mask = None # For elements that are NaN, regardless of their value, we should # ignore whether they overflow or not when doing the checked add. if arr_mask is not None and b2_mask is not None: not_nan = np.logical_not(arr_mask | b2_mask) elif arr_mask is not None: not_nan = np.logical_not(arr_mask) elif b_mask is not None: not_nan = np.logical_not(b2_mask) else: not_nan = np.empty(arr.shape, dtype=bool) not_nan.fill(True) # gh-14324: For each element in 'arr' and its corresponding element # in 'b2', we check the sign of the element in 'b2'. If it is positive, # we then check whether its sum with the element in 'arr' exceeds # np.iinfo(np.int64).max. If so, we have an overflow error. If it # it is negative, we then check whether its sum with the element in # 'arr' exceeds np.iinfo(np.int64).min. 
If so, we have an overflow # error as well. mask1 = b2 > 0 mask2 = b2 < 0 if not mask1.any(): to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any() elif not mask2.any(): to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any() else: to_raise = (((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or ((np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]).any()) if to_raise: raise OverflowError("Overflow in int64 addition") return arr + b
[ "Perform", "array", "addition", "that", "checks", "for", "underflow", "and", "overflow", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L877-L948
[ "def", "checked_add_with_arr", "(", "arr", ",", "b", ",", "arr_mask", "=", "None", ",", "b_mask", "=", "None", ")", ":", "# For performance reasons, we broadcast 'b' to the new array 'b2'", "# so that it has the same size as 'arr'.", "b2", "=", "np", ".", "broadcast_to", "(", "b", ",", "arr", ".", "shape", ")", "if", "b_mask", "is", "not", "None", ":", "# We do the same broadcasting for b_mask as well.", "b2_mask", "=", "np", ".", "broadcast_to", "(", "b_mask", ",", "arr", ".", "shape", ")", "else", ":", "b2_mask", "=", "None", "# For elements that are NaN, regardless of their value, we should", "# ignore whether they overflow or not when doing the checked add.", "if", "arr_mask", "is", "not", "None", "and", "b2_mask", "is", "not", "None", ":", "not_nan", "=", "np", ".", "logical_not", "(", "arr_mask", "|", "b2_mask", ")", "elif", "arr_mask", "is", "not", "None", ":", "not_nan", "=", "np", ".", "logical_not", "(", "arr_mask", ")", "elif", "b_mask", "is", "not", "None", ":", "not_nan", "=", "np", ".", "logical_not", "(", "b2_mask", ")", "else", ":", "not_nan", "=", "np", ".", "empty", "(", "arr", ".", "shape", ",", "dtype", "=", "bool", ")", "not_nan", ".", "fill", "(", "True", ")", "# gh-14324: For each element in 'arr' and its corresponding element", "# in 'b2', we check the sign of the element in 'b2'. If it is positive,", "# we then check whether its sum with the element in 'arr' exceeds", "# np.iinfo(np.int64).max. If so, we have an overflow error. If it", "# it is negative, we then check whether its sum with the element in", "# 'arr' exceeds np.iinfo(np.int64).min. 
If so, we have an overflow", "# error as well.", "mask1", "=", "b2", ">", "0", "mask2", "=", "b2", "<", "0", "if", "not", "mask1", ".", "any", "(", ")", ":", "to_raise", "=", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "min", "-", "b2", ">", "arr", ")", "&", "not_nan", ")", ".", "any", "(", ")", "elif", "not", "mask2", ".", "any", "(", ")", ":", "to_raise", "=", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "max", "-", "b2", "<", "arr", ")", "&", "not_nan", ")", ".", "any", "(", ")", "else", ":", "to_raise", "=", "(", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "max", "-", "b2", "[", "mask1", "]", "<", "arr", "[", "mask1", "]", ")", "&", "not_nan", "[", "mask1", "]", ")", ".", "any", "(", ")", "or", "(", "(", "np", ".", "iinfo", "(", "np", ".", "int64", ")", ".", "min", "-", "b2", "[", "mask2", "]", ">", "arr", "[", "mask2", "]", ")", "&", "not_nan", "[", "mask2", "]", ")", ".", "any", "(", ")", ")", "if", "to_raise", ":", "raise", "OverflowError", "(", "\"Overflow in int64 addition\"", ")", "return", "arr", "+", "b" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
quantile
Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only, if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. -lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5
pandas/core/algorithms.py
def quantile(x, q, interpolation_method='fraction'): """ Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only, if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. -lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """ x = np.asarray(x) mask = isna(x) x = x[~mask] values = np.sort(x) def _interpolate(a, b, fraction): """Returns the point at the given fraction between a and b, where 'fraction' must be between 0 and 1. """ return a + (b - a) * fraction def _get_score(at): if len(values) == 0: return np.nan idx = at * (len(values) - 1) if idx % 1 == 0: score = values[int(idx)] else: if interpolation_method == 'fraction': score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) elif interpolation_method == 'lower': score = values[np.floor(idx)] elif interpolation_method == 'higher': score = values[np.ceil(idx)] else: raise ValueError("interpolation_method can only be 'fraction' " ", 'lower' or 'higher'") return score if is_scalar(q): return _get_score(q) else: q = np.asarray(q, np.float64) return algos.arrmap_float64(q, _get_score)
def quantile(x, q, interpolation_method='fraction'): """ Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only, if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. -lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """ x = np.asarray(x) mask = isna(x) x = x[~mask] values = np.sort(x) def _interpolate(a, b, fraction): """Returns the point at the given fraction between a and b, where 'fraction' must be between 0 and 1. """ return a + (b - a) * fraction def _get_score(at): if len(values) == 0: return np.nan idx = at * (len(values) - 1) if idx % 1 == 0: score = values[int(idx)] else: if interpolation_method == 'fraction': score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) elif interpolation_method == 'lower': score = values[np.floor(idx)] elif interpolation_method == 'higher': score = values[np.ceil(idx)] else: raise ValueError("interpolation_method can only be 'fraction' " ", 'lower' or 'higher'") return score if is_scalar(q): return _get_score(q) else: q = np.asarray(q, np.float64) return algos.arrmap_float64(q, _get_score)
[ "Compute", "sample", "quantile", "or", "quantiles", "of", "the", "input", "array", ".", "For", "example", "q", "=", "0", ".", "5", "computes", "the", "median", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L966-L1043
[ "def", "quantile", "(", "x", ",", "q", ",", "interpolation_method", "=", "'fraction'", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "mask", "=", "isna", "(", "x", ")", "x", "=", "x", "[", "~", "mask", "]", "values", "=", "np", ".", "sort", "(", "x", ")", "def", "_interpolate", "(", "a", ",", "b", ",", "fraction", ")", ":", "\"\"\"Returns the point at the given fraction between a and b, where\n 'fraction' must be between 0 and 1.\n \"\"\"", "return", "a", "+", "(", "b", "-", "a", ")", "*", "fraction", "def", "_get_score", "(", "at", ")", ":", "if", "len", "(", "values", ")", "==", "0", ":", "return", "np", ".", "nan", "idx", "=", "at", "*", "(", "len", "(", "values", ")", "-", "1", ")", "if", "idx", "%", "1", "==", "0", ":", "score", "=", "values", "[", "int", "(", "idx", ")", "]", "else", ":", "if", "interpolation_method", "==", "'fraction'", ":", "score", "=", "_interpolate", "(", "values", "[", "int", "(", "idx", ")", "]", ",", "values", "[", "int", "(", "idx", ")", "+", "1", "]", ",", "idx", "%", "1", ")", "elif", "interpolation_method", "==", "'lower'", ":", "score", "=", "values", "[", "np", ".", "floor", "(", "idx", ")", "]", "elif", "interpolation_method", "==", "'higher'", ":", "score", "=", "values", "[", "np", ".", "ceil", "(", "idx", ")", "]", "else", ":", "raise", "ValueError", "(", "\"interpolation_method can only be 'fraction' \"", "\", 'lower' or 'higher'\"", ")", "return", "score", "if", "is_scalar", "(", "q", ")", ":", "return", "_get_score", "(", "q", ")", "else", ":", "q", "=", "np", ".", "asarray", "(", "q", ",", "np", ".", "float64", ")", "return", "algos", ".", "arrmap_float64", "(", "q", ",", "_get_score", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
take
Take elements from an array. .. versionadded:: 0.23.0 Parameters ---------- arr : sequence Non array-likes (sequences without a dtype) are coerced to an ndarray. indices : sequence of integers Indices to be taken. axis : int, default 0 The axis over which to select values. allow_fill : bool, default False How to handle negative values in `indices`. * False: negative values in `indices` indicate positional indices from the right (the default). This is similar to :func:`numpy.take`. * True: negative values in `indices` indicate missing values. These values are set to `fill_value`. Any other other negative values raise a ``ValueError``. fill_value : any, optional Fill value to use for NA-indices when `allow_fill` is True. This may be ``None``, in which case the default NA value for the type (``self.dtype.na_value``) is used. For multi-dimensional `arr`, each *element* is filled with `fill_value`. Returns ------- ndarray or ExtensionArray Same type as the input. Raises ------ IndexError When `indices` is out of bounds for the array. ValueError When the indexer contains negative values other than ``-1`` and `allow_fill` is True. Notes ----- When `allow_fill` is False, `indices` may be whatever dimensionality is accepted by NumPy for `arr`. When `allow_fill` is True, `indices` should be 1-D. See Also -------- numpy.take Examples -------- >>> from pandas.api.extensions import take With the default ``allow_fill=False``, negative numbers indicate positional indices from the right. >>> take(np.array([10, 20, 30]), [0, 0, -1]) array([10, 10, 30]) Setting ``allow_fill=True`` will place `fill_value` in those positions. >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True) array([10., 10., nan]) >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True, ... fill_value=-10) array([ 10, 10, -10])
pandas/core/algorithms.py
def take(arr, indices, axis=0, allow_fill=False, fill_value=None): """ Take elements from an array. .. versionadded:: 0.23.0 Parameters ---------- arr : sequence Non array-likes (sequences without a dtype) are coerced to an ndarray. indices : sequence of integers Indices to be taken. axis : int, default 0 The axis over which to select values. allow_fill : bool, default False How to handle negative values in `indices`. * False: negative values in `indices` indicate positional indices from the right (the default). This is similar to :func:`numpy.take`. * True: negative values in `indices` indicate missing values. These values are set to `fill_value`. Any other other negative values raise a ``ValueError``. fill_value : any, optional Fill value to use for NA-indices when `allow_fill` is True. This may be ``None``, in which case the default NA value for the type (``self.dtype.na_value``) is used. For multi-dimensional `arr`, each *element* is filled with `fill_value`. Returns ------- ndarray or ExtensionArray Same type as the input. Raises ------ IndexError When `indices` is out of bounds for the array. ValueError When the indexer contains negative values other than ``-1`` and `allow_fill` is True. Notes ----- When `allow_fill` is False, `indices` may be whatever dimensionality is accepted by NumPy for `arr`. When `allow_fill` is True, `indices` should be 1-D. See Also -------- numpy.take Examples -------- >>> from pandas.api.extensions import take With the default ``allow_fill=False``, negative numbers indicate positional indices from the right. >>> take(np.array([10, 20, 30]), [0, 0, -1]) array([10, 10, 30]) Setting ``allow_fill=True`` will place `fill_value` in those positions. >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True) array([10., 10., nan]) >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True, ... 
fill_value=-10) array([ 10, 10, -10]) """ from pandas.core.indexing import validate_indices if not is_array_like(arr): arr = np.asarray(arr) indices = np.asarray(indices, dtype=np.intp) if allow_fill: # Pandas style, -1 means NA validate_indices(indices, len(arr)) result = take_1d(arr, indices, axis=axis, allow_fill=True, fill_value=fill_value) else: # NumPy style result = arr.take(indices, axis=axis) return result
def take(arr, indices, axis=0, allow_fill=False, fill_value=None): """ Take elements from an array. .. versionadded:: 0.23.0 Parameters ---------- arr : sequence Non array-likes (sequences without a dtype) are coerced to an ndarray. indices : sequence of integers Indices to be taken. axis : int, default 0 The axis over which to select values. allow_fill : bool, default False How to handle negative values in `indices`. * False: negative values in `indices` indicate positional indices from the right (the default). This is similar to :func:`numpy.take`. * True: negative values in `indices` indicate missing values. These values are set to `fill_value`. Any other other negative values raise a ``ValueError``. fill_value : any, optional Fill value to use for NA-indices when `allow_fill` is True. This may be ``None``, in which case the default NA value for the type (``self.dtype.na_value``) is used. For multi-dimensional `arr`, each *element* is filled with `fill_value`. Returns ------- ndarray or ExtensionArray Same type as the input. Raises ------ IndexError When `indices` is out of bounds for the array. ValueError When the indexer contains negative values other than ``-1`` and `allow_fill` is True. Notes ----- When `allow_fill` is False, `indices` may be whatever dimensionality is accepted by NumPy for `arr`. When `allow_fill` is True, `indices` should be 1-D. See Also -------- numpy.take Examples -------- >>> from pandas.api.extensions import take With the default ``allow_fill=False``, negative numbers indicate positional indices from the right. >>> take(np.array([10, 20, 30]), [0, 0, -1]) array([10, 10, 30]) Setting ``allow_fill=True`` will place `fill_value` in those positions. >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True) array([10., 10., nan]) >>> take(np.array([10, 20, 30]), [0, 0, -1], allow_fill=True, ... 
fill_value=-10) array([ 10, 10, -10]) """ from pandas.core.indexing import validate_indices if not is_array_like(arr): arr = np.asarray(arr) indices = np.asarray(indices, dtype=np.intp) if allow_fill: # Pandas style, -1 means NA validate_indices(indices, len(arr)) result = take_1d(arr, indices, axis=axis, allow_fill=True, fill_value=fill_value) else: # NumPy style result = arr.take(indices, axis=axis) return result
[ "Take", "elements", "from", "an", "array", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L1457-L1548
[ "def", "take", "(", "arr", ",", "indices", ",", "axis", "=", "0", ",", "allow_fill", "=", "False", ",", "fill_value", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "indexing", "import", "validate_indices", "if", "not", "is_array_like", "(", "arr", ")", ":", "arr", "=", "np", ".", "asarray", "(", "arr", ")", "indices", "=", "np", ".", "asarray", "(", "indices", ",", "dtype", "=", "np", ".", "intp", ")", "if", "allow_fill", ":", "# Pandas style, -1 means NA", "validate_indices", "(", "indices", ",", "len", "(", "arr", ")", ")", "result", "=", "take_1d", "(", "arr", ",", "indices", ",", "axis", "=", "axis", ",", "allow_fill", "=", "True", ",", "fill_value", "=", "fill_value", ")", "else", ":", "# NumPy style", "result", "=", "arr", ".", "take", "(", "indices", ",", "axis", "=", "axis", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
take_nd
Specialized Cython take which sets NaN values in one pass This dispatches to ``take`` defined on ExtensionArrays. It does not currently dispatch to ``SparseArray.take`` for sparse ``arr``. Parameters ---------- arr : array-like Input array. indexer : ndarray 1-D array of indices to take, subarrays corresponding to -1 value indices are filed with fill_value axis : int, default 0 Axis to take from out : ndarray or None, default None Optional output array, must be appropriate type to hold input and fill_value together, if indexer has any -1 value entries; call _maybe_promote to determine this type for any fill_value fill_value : any, default np.nan Fill value to replace -1 values with mask_info : tuple of (ndarray, boolean) If provided, value should correspond to: (indexer != -1, (indexer != -1).any()) If not provided, it will be computed internally if necessary allow_fill : boolean, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. Returns ------- subarray : array-like May be the same type as the input, or cast to an ndarray.
pandas/core/algorithms.py
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): """ Specialized Cython take which sets NaN values in one pass This dispatches to ``take`` defined on ExtensionArrays. It does not currently dispatch to ``SparseArray.take`` for sparse ``arr``. Parameters ---------- arr : array-like Input array. indexer : ndarray 1-D array of indices to take, subarrays corresponding to -1 value indices are filed with fill_value axis : int, default 0 Axis to take from out : ndarray or None, default None Optional output array, must be appropriate type to hold input and fill_value together, if indexer has any -1 value entries; call _maybe_promote to determine this type for any fill_value fill_value : any, default np.nan Fill value to replace -1 values with mask_info : tuple of (ndarray, boolean) If provided, value should correspond to: (indexer != -1, (indexer != -1).any()) If not provided, it will be computed internally if necessary allow_fill : boolean, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. Returns ------- subarray : array-like May be the same type as the input, or cast to an ndarray. 
""" # TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs # dispatch to internal type takes if is_extension_array_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) elif is_datetime64tz_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) elif is_interval_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) if is_sparse(arr): arr = arr.get_values() elif isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr.values arr = np.asarray(arr) if indexer is None: indexer = np.arange(arr.shape[axis], dtype=np.int64) dtype, fill_value = arr.dtype, arr.dtype.type() else: indexer = ensure_int64(indexer, copy=False) if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False else: # check for promotion based on types only (do this first because # it's faster than computing a mask) dtype, fill_value = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype and (out is None or out.dtype != dtype): # check if promotion is actually required based on indexer if mask_info is not None: mask, needs_masking = mask_info else: mask = indexer == -1 needs_masking = mask.any() mask_info = mask, needs_masking if needs_masking: if out is not None and out.dtype != dtype: raise TypeError('Incompatible type for fill_value') else: # if not, then depromote, set fill_value to dummy # (it won't be used but we don't want the cython code # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() flip_order = False if arr.ndim == 2: if arr.flags.f_contiguous: flip_order = True if flip_order: arr = arr.T axis = arr.ndim - axis - 1 if out is not None: out = out.T # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value if out is None: out_shape = list(arr.shape) out_shape[axis] = len(indexer) out_shape = tuple(out_shape) if arr.flags.f_contiguous and axis == arr.ndim - 1: # minor tweak that can 
make an order-of-magnitude difference # for dataframes initialized directly from 2-d ndarrays # (s.t. df.values is c-contiguous and df._data.blocks[0] is its # f-contiguous transpose) out = np.empty(out_shape, dtype=dtype, order='F') else: out = np.empty(out_shape, dtype=dtype) func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info) func(arr, indexer, out, fill_value) if flip_order: out = out.T return out
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): """ Specialized Cython take which sets NaN values in one pass This dispatches to ``take`` defined on ExtensionArrays. It does not currently dispatch to ``SparseArray.take`` for sparse ``arr``. Parameters ---------- arr : array-like Input array. indexer : ndarray 1-D array of indices to take, subarrays corresponding to -1 value indices are filed with fill_value axis : int, default 0 Axis to take from out : ndarray or None, default None Optional output array, must be appropriate type to hold input and fill_value together, if indexer has any -1 value entries; call _maybe_promote to determine this type for any fill_value fill_value : any, default np.nan Fill value to replace -1 values with mask_info : tuple of (ndarray, boolean) If provided, value should correspond to: (indexer != -1, (indexer != -1).any()) If not provided, it will be computed internally if necessary allow_fill : boolean, default True If False, indexer is assumed to contain no -1 values so no filling will be done. This short-circuits computation of a mask. Result is undefined if allow_fill == False and -1 is present in indexer. Returns ------- subarray : array-like May be the same type as the input, or cast to an ndarray. 
""" # TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs # dispatch to internal type takes if is_extension_array_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) elif is_datetime64tz_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) elif is_interval_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) if is_sparse(arr): arr = arr.get_values() elif isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr.values arr = np.asarray(arr) if indexer is None: indexer = np.arange(arr.shape[axis], dtype=np.int64) dtype, fill_value = arr.dtype, arr.dtype.type() else: indexer = ensure_int64(indexer, copy=False) if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False else: # check for promotion based on types only (do this first because # it's faster than computing a mask) dtype, fill_value = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype and (out is None or out.dtype != dtype): # check if promotion is actually required based on indexer if mask_info is not None: mask, needs_masking = mask_info else: mask = indexer == -1 needs_masking = mask.any() mask_info = mask, needs_masking if needs_masking: if out is not None and out.dtype != dtype: raise TypeError('Incompatible type for fill_value') else: # if not, then depromote, set fill_value to dummy # (it won't be used but we don't want the cython code # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() flip_order = False if arr.ndim == 2: if arr.flags.f_contiguous: flip_order = True if flip_order: arr = arr.T axis = arr.ndim - axis - 1 if out is not None: out = out.T # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value if out is None: out_shape = list(arr.shape) out_shape[axis] = len(indexer) out_shape = tuple(out_shape) if arr.flags.f_contiguous and axis == arr.ndim - 1: # minor tweak that can 
make an order-of-magnitude difference # for dataframes initialized directly from 2-d ndarrays # (s.t. df.values is c-contiguous and df._data.blocks[0] is its # f-contiguous transpose) out = np.empty(out_shape, dtype=dtype, order='F') else: out = np.empty(out_shape, dtype=dtype) func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info) func(arr, indexer, out, fill_value) if flip_order: out = out.T return out
[ "Specialized", "Cython", "take", "which", "sets", "NaN", "values", "in", "one", "pass" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L1551-L1666
[ "def", "take_nd", "(", "arr", ",", "indexer", ",", "axis", "=", "0", ",", "out", "=", "None", ",", "fill_value", "=", "np", ".", "nan", ",", "mask_info", "=", "None", ",", "allow_fill", "=", "True", ")", ":", "# TODO(EA): Remove these if / elifs as datetimeTZ, interval, become EAs", "# dispatch to internal type takes", "if", "is_extension_array_dtype", "(", "arr", ")", ":", "return", "arr", ".", "take", "(", "indexer", ",", "fill_value", "=", "fill_value", ",", "allow_fill", "=", "allow_fill", ")", "elif", "is_datetime64tz_dtype", "(", "arr", ")", ":", "return", "arr", ".", "take", "(", "indexer", ",", "fill_value", "=", "fill_value", ",", "allow_fill", "=", "allow_fill", ")", "elif", "is_interval_dtype", "(", "arr", ")", ":", "return", "arr", ".", "take", "(", "indexer", ",", "fill_value", "=", "fill_value", ",", "allow_fill", "=", "allow_fill", ")", "if", "is_sparse", "(", "arr", ")", ":", "arr", "=", "arr", ".", "get_values", "(", ")", "elif", "isinstance", "(", "arr", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "arr", "=", "arr", ".", "values", "arr", "=", "np", ".", "asarray", "(", "arr", ")", "if", "indexer", "is", "None", ":", "indexer", "=", "np", ".", "arange", "(", "arr", ".", "shape", "[", "axis", "]", ",", "dtype", "=", "np", ".", "int64", ")", "dtype", ",", "fill_value", "=", "arr", ".", "dtype", ",", "arr", ".", "dtype", ".", "type", "(", ")", "else", ":", "indexer", "=", "ensure_int64", "(", "indexer", ",", "copy", "=", "False", ")", "if", "not", "allow_fill", ":", "dtype", ",", "fill_value", "=", "arr", ".", "dtype", ",", "arr", ".", "dtype", ".", "type", "(", ")", "mask_info", "=", "None", ",", "False", "else", ":", "# check for promotion based on types only (do this first because", "# it's faster than computing a mask)", "dtype", ",", "fill_value", "=", "maybe_promote", "(", "arr", ".", "dtype", ",", "fill_value", ")", "if", "dtype", "!=", "arr", ".", "dtype", "and", "(", "out", "is", "None", "or", "out", ".", "dtype", "!=", 
"dtype", ")", ":", "# check if promotion is actually required based on indexer", "if", "mask_info", "is", "not", "None", ":", "mask", ",", "needs_masking", "=", "mask_info", "else", ":", "mask", "=", "indexer", "==", "-", "1", "needs_masking", "=", "mask", ".", "any", "(", ")", "mask_info", "=", "mask", ",", "needs_masking", "if", "needs_masking", ":", "if", "out", "is", "not", "None", "and", "out", ".", "dtype", "!=", "dtype", ":", "raise", "TypeError", "(", "'Incompatible type for fill_value'", ")", "else", ":", "# if not, then depromote, set fill_value to dummy", "# (it won't be used but we don't want the cython code", "# to crash when trying to cast it to dtype)", "dtype", ",", "fill_value", "=", "arr", ".", "dtype", ",", "arr", ".", "dtype", ".", "type", "(", ")", "flip_order", "=", "False", "if", "arr", ".", "ndim", "==", "2", ":", "if", "arr", ".", "flags", ".", "f_contiguous", ":", "flip_order", "=", "True", "if", "flip_order", ":", "arr", "=", "arr", ".", "T", "axis", "=", "arr", ".", "ndim", "-", "axis", "-", "1", "if", "out", "is", "not", "None", ":", "out", "=", "out", ".", "T", "# at this point, it's guaranteed that dtype can hold both the arr values", "# and the fill_value", "if", "out", "is", "None", ":", "out_shape", "=", "list", "(", "arr", ".", "shape", ")", "out_shape", "[", "axis", "]", "=", "len", "(", "indexer", ")", "out_shape", "=", "tuple", "(", "out_shape", ")", "if", "arr", ".", "flags", ".", "f_contiguous", "and", "axis", "==", "arr", ".", "ndim", "-", "1", ":", "# minor tweak that can make an order-of-magnitude difference", "# for dataframes initialized directly from 2-d ndarrays", "# (s.t. 
df.values is c-contiguous and df._data.blocks[0] is its", "# f-contiguous transpose)", "out", "=", "np", ".", "empty", "(", "out_shape", ",", "dtype", "=", "dtype", ",", "order", "=", "'F'", ")", "else", ":", "out", "=", "np", ".", "empty", "(", "out_shape", ",", "dtype", "=", "dtype", ")", "func", "=", "_get_take_nd_function", "(", "arr", ".", "ndim", ",", "arr", ".", "dtype", ",", "out", ".", "dtype", ",", "axis", "=", "axis", ",", "mask_info", "=", "mask_info", ")", "func", "(", "arr", ",", "indexer", ",", "out", ",", "fill_value", ")", "if", "flip_order", ":", "out", "=", "out", ".", "T", "return", "out" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
take_2d_multi
Specialized Cython take which sets NaN values in one pass
pandas/core/algorithms.py
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): """ Specialized Cython take which sets NaN values in one pass """ if indexer is None or (indexer[0] is None and indexer[1] is None): row_idx = np.arange(arr.shape[0], dtype=np.int64) col_idx = np.arange(arr.shape[1], dtype=np.int64) indexer = row_idx, col_idx dtype, fill_value = arr.dtype, arr.dtype.type() else: row_idx, col_idx = indexer if row_idx is None: row_idx = np.arange(arr.shape[0], dtype=np.int64) else: row_idx = ensure_int64(row_idx) if col_idx is None: col_idx = np.arange(arr.shape[1], dtype=np.int64) else: col_idx = ensure_int64(col_idx) indexer = row_idx, col_idx if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False else: # check for promotion based on types only (do this first because # it's faster than computing a mask) dtype, fill_value = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype and (out is None or out.dtype != dtype): # check if promotion is actually required based on indexer if mask_info is not None: (row_mask, col_mask), (row_needs, col_needs) = mask_info else: row_mask = row_idx == -1 col_mask = col_idx == -1 row_needs = row_mask.any() col_needs = col_mask.any() mask_info = (row_mask, col_mask), (row_needs, col_needs) if row_needs or col_needs: if out is not None and out.dtype != dtype: raise TypeError('Incompatible type for fill_value') else: # if not, then depromote, set fill_value to dummy # (it won't be used but we don't want the cython code # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value if out is None: out_shape = len(row_idx), len(col_idx) out = np.empty(out_shape, dtype=dtype) func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) if func is None and arr.dtype != out.dtype: func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), 
None) if func is not None: func = _convert_wrapper(func, out.dtype) if func is None: def func(arr, indexer, out, fill_value=np.nan): _take_2d_multi_object(arr, indexer, out, fill_value=fill_value, mask_info=mask_info) func(arr, indexer, out=out, fill_value=fill_value) return out
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, allow_fill=True): """ Specialized Cython take which sets NaN values in one pass """ if indexer is None or (indexer[0] is None and indexer[1] is None): row_idx = np.arange(arr.shape[0], dtype=np.int64) col_idx = np.arange(arr.shape[1], dtype=np.int64) indexer = row_idx, col_idx dtype, fill_value = arr.dtype, arr.dtype.type() else: row_idx, col_idx = indexer if row_idx is None: row_idx = np.arange(arr.shape[0], dtype=np.int64) else: row_idx = ensure_int64(row_idx) if col_idx is None: col_idx = np.arange(arr.shape[1], dtype=np.int64) else: col_idx = ensure_int64(col_idx) indexer = row_idx, col_idx if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False else: # check for promotion based on types only (do this first because # it's faster than computing a mask) dtype, fill_value = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype and (out is None or out.dtype != dtype): # check if promotion is actually required based on indexer if mask_info is not None: (row_mask, col_mask), (row_needs, col_needs) = mask_info else: row_mask = row_idx == -1 col_mask = col_idx == -1 row_needs = row_mask.any() col_needs = col_mask.any() mask_info = (row_mask, col_mask), (row_needs, col_needs) if row_needs or col_needs: if out is not None and out.dtype != dtype: raise TypeError('Incompatible type for fill_value') else: # if not, then depromote, set fill_value to dummy # (it won't be used but we don't want the cython code # to crash when trying to cast it to dtype) dtype, fill_value = arr.dtype, arr.dtype.type() # at this point, it's guaranteed that dtype can hold both the arr values # and the fill_value if out is None: out_shape = len(row_idx), len(col_idx) out = np.empty(out_shape, dtype=dtype) func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) if func is None and arr.dtype != out.dtype: func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), 
None) if func is not None: func = _convert_wrapper(func, out.dtype) if func is None: def func(arr, indexer, out, fill_value=np.nan): _take_2d_multi_object(arr, indexer, out, fill_value=fill_value, mask_info=mask_info) func(arr, indexer, out=out, fill_value=fill_value) return out
[ "Specialized", "Cython", "take", "which", "sets", "NaN", "values", "in", "one", "pass" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L1672-L1737
[ "def", "take_2d_multi", "(", "arr", ",", "indexer", ",", "out", "=", "None", ",", "fill_value", "=", "np", ".", "nan", ",", "mask_info", "=", "None", ",", "allow_fill", "=", "True", ")", ":", "if", "indexer", "is", "None", "or", "(", "indexer", "[", "0", "]", "is", "None", "and", "indexer", "[", "1", "]", "is", "None", ")", ":", "row_idx", "=", "np", ".", "arange", "(", "arr", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "int64", ")", "col_idx", "=", "np", ".", "arange", "(", "arr", ".", "shape", "[", "1", "]", ",", "dtype", "=", "np", ".", "int64", ")", "indexer", "=", "row_idx", ",", "col_idx", "dtype", ",", "fill_value", "=", "arr", ".", "dtype", ",", "arr", ".", "dtype", ".", "type", "(", ")", "else", ":", "row_idx", ",", "col_idx", "=", "indexer", "if", "row_idx", "is", "None", ":", "row_idx", "=", "np", ".", "arange", "(", "arr", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "int64", ")", "else", ":", "row_idx", "=", "ensure_int64", "(", "row_idx", ")", "if", "col_idx", "is", "None", ":", "col_idx", "=", "np", ".", "arange", "(", "arr", ".", "shape", "[", "1", "]", ",", "dtype", "=", "np", ".", "int64", ")", "else", ":", "col_idx", "=", "ensure_int64", "(", "col_idx", ")", "indexer", "=", "row_idx", ",", "col_idx", "if", "not", "allow_fill", ":", "dtype", ",", "fill_value", "=", "arr", ".", "dtype", ",", "arr", ".", "dtype", ".", "type", "(", ")", "mask_info", "=", "None", ",", "False", "else", ":", "# check for promotion based on types only (do this first because", "# it's faster than computing a mask)", "dtype", ",", "fill_value", "=", "maybe_promote", "(", "arr", ".", "dtype", ",", "fill_value", ")", "if", "dtype", "!=", "arr", ".", "dtype", "and", "(", "out", "is", "None", "or", "out", ".", "dtype", "!=", "dtype", ")", ":", "# check if promotion is actually required based on indexer", "if", "mask_info", "is", "not", "None", ":", "(", "row_mask", ",", "col_mask", ")", ",", "(", "row_needs", ",", "col_needs", ")", "=", 
"mask_info", "else", ":", "row_mask", "=", "row_idx", "==", "-", "1", "col_mask", "=", "col_idx", "==", "-", "1", "row_needs", "=", "row_mask", ".", "any", "(", ")", "col_needs", "=", "col_mask", ".", "any", "(", ")", "mask_info", "=", "(", "row_mask", ",", "col_mask", ")", ",", "(", "row_needs", ",", "col_needs", ")", "if", "row_needs", "or", "col_needs", ":", "if", "out", "is", "not", "None", "and", "out", ".", "dtype", "!=", "dtype", ":", "raise", "TypeError", "(", "'Incompatible type for fill_value'", ")", "else", ":", "# if not, then depromote, set fill_value to dummy", "# (it won't be used but we don't want the cython code", "# to crash when trying to cast it to dtype)", "dtype", ",", "fill_value", "=", "arr", ".", "dtype", ",", "arr", ".", "dtype", ".", "type", "(", ")", "# at this point, it's guaranteed that dtype can hold both the arr values", "# and the fill_value", "if", "out", "is", "None", ":", "out_shape", "=", "len", "(", "row_idx", ")", ",", "len", "(", "col_idx", ")", "out", "=", "np", ".", "empty", "(", "out_shape", ",", "dtype", "=", "dtype", ")", "func", "=", "_take_2d_multi_dict", ".", "get", "(", "(", "arr", ".", "dtype", ".", "name", ",", "out", ".", "dtype", ".", "name", ")", ",", "None", ")", "if", "func", "is", "None", "and", "arr", ".", "dtype", "!=", "out", ".", "dtype", ":", "func", "=", "_take_2d_multi_dict", ".", "get", "(", "(", "out", ".", "dtype", ".", "name", ",", "out", ".", "dtype", ".", "name", ")", ",", "None", ")", "if", "func", "is", "not", "None", ":", "func", "=", "_convert_wrapper", "(", "func", ",", "out", ".", "dtype", ")", "if", "func", "is", "None", ":", "def", "func", "(", "arr", ",", "indexer", ",", "out", ",", "fill_value", "=", "np", ".", "nan", ")", ":", "_take_2d_multi_object", "(", "arr", ",", "indexer", ",", "out", ",", "fill_value", "=", "fill_value", ",", "mask_info", "=", "mask_info", ")", "func", "(", "arr", ",", "indexer", ",", "out", "=", "out", ",", "fill_value", "=", "fill_value", ")", "return", "out" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
searchsorted
Find indices where elements should be inserted to maintain order. .. versionadded:: 0.25.0 Find the indices into a sorted array `arr` (a) such that, if the corresponding elements in `value` were inserted before the indices, the order of `arr` would be preserved. Assuming that `arr` is sorted: ====== ================================ `side` returned index `i` satisfies ====== ================================ left ``arr[i-1] < value <= self[i]`` right ``arr[i-1] <= value < self[i]`` ====== ================================ Parameters ---------- arr: array-like Input array. If `sorter` is None, then it must be sorted in ascending order, otherwise `sorter` must be an array of indices that sort it. value : array_like Values to insert into `arr`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `self`). sorter : 1-D array_like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints Array of insertion points with the same shape as `value`. See Also -------- numpy.searchsorted : Similar method from NumPy.
pandas/core/algorithms.py
def searchsorted(arr, value, side="left", sorter=None): """ Find indices where elements should be inserted to maintain order. .. versionadded:: 0.25.0 Find the indices into a sorted array `arr` (a) such that, if the corresponding elements in `value` were inserted before the indices, the order of `arr` would be preserved. Assuming that `arr` is sorted: ====== ================================ `side` returned index `i` satisfies ====== ================================ left ``arr[i-1] < value <= self[i]`` right ``arr[i-1] <= value < self[i]`` ====== ================================ Parameters ---------- arr: array-like Input array. If `sorter` is None, then it must be sorted in ascending order, otherwise `sorter` must be an array of indices that sort it. value : array_like Values to insert into `arr`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `self`). sorter : 1-D array_like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints Array of insertion points with the same shape as `value`. See Also -------- numpy.searchsorted : Similar method from NumPy. """ if sorter is not None: sorter = ensure_platform_int(sorter) if isinstance(arr, np.ndarray) and is_integer_dtype(arr) and ( is_integer(value) or is_integer_dtype(value)): from .arrays.array_ import array # if `arr` and `value` have different dtypes, `arr` would be # recast by numpy, causing a slow search. # Before searching below, we therefore try to give `value` the # same dtype as `arr`, while guarding against integer overflows. 
iinfo = np.iinfo(arr.dtype.type) value_arr = np.array([value]) if is_scalar(value) else np.array(value) if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all(): # value within bounds, so no overflow, so can convert value dtype # to dtype of arr dtype = arr.dtype else: dtype = value_arr.dtype if is_scalar(value): value = dtype.type(value) else: value = array(value, dtype=dtype) elif not (is_object_dtype(arr) or is_numeric_dtype(arr) or is_categorical_dtype(arr)): from pandas.core.series import Series # E.g. if `arr` is an array with dtype='datetime64[ns]' # and `value` is a pd.Timestamp, we may need to convert value value_ser = Series(value)._values value = value_ser[0] if is_scalar(value) else value_ser result = arr.searchsorted(value, side=side, sorter=sorter) return result
def searchsorted(arr, value, side="left", sorter=None): """ Find indices where elements should be inserted to maintain order. .. versionadded:: 0.25.0 Find the indices into a sorted array `arr` (a) such that, if the corresponding elements in `value` were inserted before the indices, the order of `arr` would be preserved. Assuming that `arr` is sorted: ====== ================================ `side` returned index `i` satisfies ====== ================================ left ``arr[i-1] < value <= self[i]`` right ``arr[i-1] <= value < self[i]`` ====== ================================ Parameters ---------- arr: array-like Input array. If `sorter` is None, then it must be sorted in ascending order, otherwise `sorter` must be an array of indices that sort it. value : array_like Values to insert into `arr`. side : {'left', 'right'}, optional If 'left', the index of the first suitable location found is given. If 'right', return the last such index. If there is no suitable index, return either 0 or N (where N is the length of `self`). sorter : 1-D array_like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints Array of insertion points with the same shape as `value`. See Also -------- numpy.searchsorted : Similar method from NumPy. """ if sorter is not None: sorter = ensure_platform_int(sorter) if isinstance(arr, np.ndarray) and is_integer_dtype(arr) and ( is_integer(value) or is_integer_dtype(value)): from .arrays.array_ import array # if `arr` and `value` have different dtypes, `arr` would be # recast by numpy, causing a slow search. # Before searching below, we therefore try to give `value` the # same dtype as `arr`, while guarding against integer overflows. 
iinfo = np.iinfo(arr.dtype.type) value_arr = np.array([value]) if is_scalar(value) else np.array(value) if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all(): # value within bounds, so no overflow, so can convert value dtype # to dtype of arr dtype = arr.dtype else: dtype = value_arr.dtype if is_scalar(value): value = dtype.type(value) else: value = array(value, dtype=dtype) elif not (is_object_dtype(arr) or is_numeric_dtype(arr) or is_categorical_dtype(arr)): from pandas.core.series import Series # E.g. if `arr` is an array with dtype='datetime64[ns]' # and `value` is a pd.Timestamp, we may need to convert value value_ser = Series(value)._values value = value_ser[0] if is_scalar(value) else value_ser result = arr.searchsorted(value, side=side, sorter=sorter) return result
[ "Find", "indices", "where", "elements", "should", "be", "inserted", "to", "maintain", "order", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L1744-L1820
[ "def", "searchsorted", "(", "arr", ",", "value", ",", "side", "=", "\"left\"", ",", "sorter", "=", "None", ")", ":", "if", "sorter", "is", "not", "None", ":", "sorter", "=", "ensure_platform_int", "(", "sorter", ")", "if", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", "and", "is_integer_dtype", "(", "arr", ")", "and", "(", "is_integer", "(", "value", ")", "or", "is_integer_dtype", "(", "value", ")", ")", ":", "from", ".", "arrays", ".", "array_", "import", "array", "# if `arr` and `value` have different dtypes, `arr` would be", "# recast by numpy, causing a slow search.", "# Before searching below, we therefore try to give `value` the", "# same dtype as `arr`, while guarding against integer overflows.", "iinfo", "=", "np", ".", "iinfo", "(", "arr", ".", "dtype", ".", "type", ")", "value_arr", "=", "np", ".", "array", "(", "[", "value", "]", ")", "if", "is_scalar", "(", "value", ")", "else", "np", ".", "array", "(", "value", ")", "if", "(", "value_arr", ">=", "iinfo", ".", "min", ")", ".", "all", "(", ")", "and", "(", "value_arr", "<=", "iinfo", ".", "max", ")", ".", "all", "(", ")", ":", "# value within bounds, so no overflow, so can convert value dtype", "# to dtype of arr", "dtype", "=", "arr", ".", "dtype", "else", ":", "dtype", "=", "value_arr", ".", "dtype", "if", "is_scalar", "(", "value", ")", ":", "value", "=", "dtype", ".", "type", "(", "value", ")", "else", ":", "value", "=", "array", "(", "value", ",", "dtype", "=", "dtype", ")", "elif", "not", "(", "is_object_dtype", "(", "arr", ")", "or", "is_numeric_dtype", "(", "arr", ")", "or", "is_categorical_dtype", "(", "arr", ")", ")", ":", "from", "pandas", ".", "core", ".", "series", "import", "Series", "# E.g. 
if `arr` is an array with dtype='datetime64[ns]'", "# and `value` is a pd.Timestamp, we may need to convert value", "value_ser", "=", "Series", "(", "value", ")", ".", "_values", "value", "=", "value_ser", "[", "0", "]", "if", "is_scalar", "(", "value", ")", "else", "value_ser", "result", "=", "arr", ".", "searchsorted", "(", "value", ",", "side", "=", "side", ",", "sorter", "=", "sorter", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
diff
difference of n between self, analogous to s-s.shift(n) Parameters ---------- arr : ndarray n : int number of periods axis : int axis to shift on Returns ------- shifted
pandas/core/algorithms.py
def diff(arr, n, axis=0): """ difference of n between self, analogous to s-s.shift(n) Parameters ---------- arr : ndarray n : int number of periods axis : int axis to shift on Returns ------- shifted """ n = int(n) na = np.nan dtype = arr.dtype is_timedelta = False if needs_i8_conversion(arr): dtype = np.float64 arr = arr.view('i8') na = iNaT is_timedelta = True elif is_bool_dtype(dtype): dtype = np.object_ elif is_integer_dtype(dtype): dtype = np.float64 dtype = np.dtype(dtype) out_arr = np.empty(arr.shape, dtype=dtype) na_indexer = [slice(None)] * arr.ndim na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None) out_arr[tuple(na_indexer)] = na if arr.ndim == 2 and arr.dtype.name in _diff_special: f = _diff_special[arr.dtype.name] f(arr, out_arr, n, axis) else: res_indexer = [slice(None)] * arr.ndim res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n) res_indexer = tuple(res_indexer) lag_indexer = [slice(None)] * arr.ndim lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None) lag_indexer = tuple(lag_indexer) # need to make sure that we account for na for datelike/timedelta # we don't actually want to subtract these i8 numbers if is_timedelta: res = arr[res_indexer] lag = arr[lag_indexer] mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na) if mask.any(): res = res.copy() res[mask] = 0 lag = lag.copy() lag[mask] = 0 result = res - lag result[mask] = na out_arr[res_indexer] = result else: out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer] if is_timedelta: from pandas import TimedeltaIndex out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape( out_arr.shape).astype('timedelta64[ns]') return out_arr
def diff(arr, n, axis=0): """ difference of n between self, analogous to s-s.shift(n) Parameters ---------- arr : ndarray n : int number of periods axis : int axis to shift on Returns ------- shifted """ n = int(n) na = np.nan dtype = arr.dtype is_timedelta = False if needs_i8_conversion(arr): dtype = np.float64 arr = arr.view('i8') na = iNaT is_timedelta = True elif is_bool_dtype(dtype): dtype = np.object_ elif is_integer_dtype(dtype): dtype = np.float64 dtype = np.dtype(dtype) out_arr = np.empty(arr.shape, dtype=dtype) na_indexer = [slice(None)] * arr.ndim na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None) out_arr[tuple(na_indexer)] = na if arr.ndim == 2 and arr.dtype.name in _diff_special: f = _diff_special[arr.dtype.name] f(arr, out_arr, n, axis) else: res_indexer = [slice(None)] * arr.ndim res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n) res_indexer = tuple(res_indexer) lag_indexer = [slice(None)] * arr.ndim lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None) lag_indexer = tuple(lag_indexer) # need to make sure that we account for na for datelike/timedelta # we don't actually want to subtract these i8 numbers if is_timedelta: res = arr[res_indexer] lag = arr[lag_indexer] mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na) if mask.any(): res = res.copy() res[mask] = 0 lag = lag.copy() lag[mask] = 0 result = res - lag result[mask] = na out_arr[res_indexer] = result else: out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer] if is_timedelta: from pandas import TimedeltaIndex out_arr = TimedeltaIndex(out_arr.ravel().astype('int64')).asi8.reshape( out_arr.shape).astype('timedelta64[ns]') return out_arr
[ "difference", "of", "n", "between", "self", "analogous", "to", "s", "-", "s", ".", "shift", "(", "n", ")" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/algorithms.py#L1837-L1916
[ "def", "diff", "(", "arr", ",", "n", ",", "axis", "=", "0", ")", ":", "n", "=", "int", "(", "n", ")", "na", "=", "np", ".", "nan", "dtype", "=", "arr", ".", "dtype", "is_timedelta", "=", "False", "if", "needs_i8_conversion", "(", "arr", ")", ":", "dtype", "=", "np", ".", "float64", "arr", "=", "arr", ".", "view", "(", "'i8'", ")", "na", "=", "iNaT", "is_timedelta", "=", "True", "elif", "is_bool_dtype", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "object_", "elif", "is_integer_dtype", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "float64", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "out_arr", "=", "np", ".", "empty", "(", "arr", ".", "shape", ",", "dtype", "=", "dtype", ")", "na_indexer", "=", "[", "slice", "(", "None", ")", "]", "*", "arr", ".", "ndim", "na_indexer", "[", "axis", "]", "=", "slice", "(", "None", ",", "n", ")", "if", "n", ">=", "0", "else", "slice", "(", "n", ",", "None", ")", "out_arr", "[", "tuple", "(", "na_indexer", ")", "]", "=", "na", "if", "arr", ".", "ndim", "==", "2", "and", "arr", ".", "dtype", ".", "name", "in", "_diff_special", ":", "f", "=", "_diff_special", "[", "arr", ".", "dtype", ".", "name", "]", "f", "(", "arr", ",", "out_arr", ",", "n", ",", "axis", ")", "else", ":", "res_indexer", "=", "[", "slice", "(", "None", ")", "]", "*", "arr", ".", "ndim", "res_indexer", "[", "axis", "]", "=", "slice", "(", "n", ",", "None", ")", "if", "n", ">=", "0", "else", "slice", "(", "None", ",", "n", ")", "res_indexer", "=", "tuple", "(", "res_indexer", ")", "lag_indexer", "=", "[", "slice", "(", "None", ")", "]", "*", "arr", ".", "ndim", "lag_indexer", "[", "axis", "]", "=", "slice", "(", "None", ",", "-", "n", ")", "if", "n", ">", "0", "else", "slice", "(", "-", "n", ",", "None", ")", "lag_indexer", "=", "tuple", "(", "lag_indexer", ")", "# need to make sure that we account for na for datelike/timedelta", "# we don't actually want to subtract these i8 numbers", "if", "is_timedelta", ":", "res", "=", "arr", "[", "res_indexer", 
"]", "lag", "=", "arr", "[", "lag_indexer", "]", "mask", "=", "(", "arr", "[", "res_indexer", "]", "==", "na", ")", "|", "(", "arr", "[", "lag_indexer", "]", "==", "na", ")", "if", "mask", ".", "any", "(", ")", ":", "res", "=", "res", ".", "copy", "(", ")", "res", "[", "mask", "]", "=", "0", "lag", "=", "lag", ".", "copy", "(", ")", "lag", "[", "mask", "]", "=", "0", "result", "=", "res", "-", "lag", "result", "[", "mask", "]", "=", "na", "out_arr", "[", "res_indexer", "]", "=", "result", "else", ":", "out_arr", "[", "res_indexer", "]", "=", "arr", "[", "res_indexer", "]", "-", "arr", "[", "lag_indexer", "]", "if", "is_timedelta", ":", "from", "pandas", "import", "TimedeltaIndex", "out_arr", "=", "TimedeltaIndex", "(", "out_arr", ".", "ravel", "(", ")", ".", "astype", "(", "'int64'", ")", ")", ".", "asi8", ".", "reshape", "(", "out_arr", ".", "shape", ")", ".", "astype", "(", "'timedelta64[ns]'", ")", "return", "out_arr" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_to_ijv
For arbitrary (MultiIndexed) SparseSeries return (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo constructor.
pandas/core/sparse/scipy_sparse.py
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ For arbitrary (MultiIndexed) SparseSeries return (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo constructor. """ # index and column levels must be a partition of the index _check_is_partition([row_levels, column_levels], range(ss.index.nlevels)) # from the SparseSeries: get the labels and data for non-null entries values = ss._data.internal_values()._valid_sp_values nonnull_labels = ss.dropna() def get_indexers(levels): """ Return sparse coords and dense labels for subset levels """ # TODO: how to do this better? cleanly slice nonnull_labels given the # coord values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index] if len(levels) == 1: values_ilabels = [x[0] for x in values_ilabels] # # performance issues with groupby ################################### # TODO: these two lines can rejplace the code below but # groupby is too slow (in some cases at least) # labels_to_i = ss.groupby(level=levels, sort=sort_labels).first() # labels_to_i[:] = np.arange(labels_to_i.shape[0]) def _get_label_to_i_dict(labels, sort_labels=False): """ Return OrderedDict of unique labels to number. Optionally sort by label. 
""" labels = Index(lmap(tuple, labels)).unique().tolist() # squish if sort_labels: labels = sorted(list(labels)) d = OrderedDict((k, i) for i, k in enumerate(labels)) return (d) def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): ilabels = list(zip(*[index._get_level_values(i) for i in subset])) labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels) labels_to_i = Series(labels_to_i) if len(subset) > 1: labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index) labels_to_i.index.names = [index.names[i] for i in subset] else: labels_to_i.index = Index(x[0] for x in labels_to_i.index) labels_to_i.index.name = index.names[subset[0]] labels_to_i.name = 'value' return (labels_to_i) labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels, sort_labels=sort_labels) # ##################################################################### # ##################################################################### i_coord = labels_to_i[values_ilabels].tolist() i_labels = labels_to_i.index.tolist() return i_coord, i_labels i_coord, i_labels = get_indexers(row_levels) j_coord, j_labels = get_indexers(column_levels) return values, i_coord, j_coord, i_labels, j_labels
def _to_ijv(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ For arbitrary (MultiIndexed) SparseSeries return (v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo constructor. """ # index and column levels must be a partition of the index _check_is_partition([row_levels, column_levels], range(ss.index.nlevels)) # from the SparseSeries: get the labels and data for non-null entries values = ss._data.internal_values()._valid_sp_values nonnull_labels = ss.dropna() def get_indexers(levels): """ Return sparse coords and dense labels for subset levels """ # TODO: how to do this better? cleanly slice nonnull_labels given the # coord values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index] if len(levels) == 1: values_ilabels = [x[0] for x in values_ilabels] # # performance issues with groupby ################################### # TODO: these two lines can rejplace the code below but # groupby is too slow (in some cases at least) # labels_to_i = ss.groupby(level=levels, sort=sort_labels).first() # labels_to_i[:] = np.arange(labels_to_i.shape[0]) def _get_label_to_i_dict(labels, sort_labels=False): """ Return OrderedDict of unique labels to number. Optionally sort by label. 
""" labels = Index(lmap(tuple, labels)).unique().tolist() # squish if sort_labels: labels = sorted(list(labels)) d = OrderedDict((k, i) for i, k in enumerate(labels)) return (d) def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): ilabels = list(zip(*[index._get_level_values(i) for i in subset])) labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels) labels_to_i = Series(labels_to_i) if len(subset) > 1: labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index) labels_to_i.index.names = [index.names[i] for i in subset] else: labels_to_i.index = Index(x[0] for x in labels_to_i.index) labels_to_i.index.name = index.names[subset[0]] labels_to_i.name = 'value' return (labels_to_i) labels_to_i = _get_index_subset_to_coord_dict(ss.index, levels, sort_labels=sort_labels) # ##################################################################### # ##################################################################### i_coord = labels_to_i[values_ilabels].tolist() i_labels = labels_to_i.index.tolist() return i_coord, i_labels i_coord, i_labels = get_indexers(row_levels) j_coord, j_labels = get_indexers(column_levels) return values, i_coord, j_coord, i_labels, j_labels
[ "For", "arbitrary", "(", "MultiIndexed", ")", "SparseSeries", "return", "(", "v", "i", "j", "ilabels", "jlabels", ")", "where", "(", "v", "(", "i", "j", "))", "is", "suitable", "for", "passing", "to", "scipy", ".", "sparse", ".", "coo", "constructor", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/scipy_sparse.py#L24-L90
[ "def", "_to_ijv", "(", "ss", ",", "row_levels", "=", "(", "0", ",", ")", ",", "column_levels", "=", "(", "1", ",", ")", ",", "sort_labels", "=", "False", ")", ":", "# index and column levels must be a partition of the index", "_check_is_partition", "(", "[", "row_levels", ",", "column_levels", "]", ",", "range", "(", "ss", ".", "index", ".", "nlevels", ")", ")", "# from the SparseSeries: get the labels and data for non-null entries", "values", "=", "ss", ".", "_data", ".", "internal_values", "(", ")", ".", "_valid_sp_values", "nonnull_labels", "=", "ss", ".", "dropna", "(", ")", "def", "get_indexers", "(", "levels", ")", ":", "\"\"\" Return sparse coords and dense labels for subset levels \"\"\"", "# TODO: how to do this better? cleanly slice nonnull_labels given the", "# coord", "values_ilabels", "=", "[", "tuple", "(", "x", "[", "i", "]", "for", "i", "in", "levels", ")", "for", "x", "in", "nonnull_labels", ".", "index", "]", "if", "len", "(", "levels", ")", "==", "1", ":", "values_ilabels", "=", "[", "x", "[", "0", "]", "for", "x", "in", "values_ilabels", "]", "# # performance issues with groupby ###################################", "# TODO: these two lines can rejplace the code below but", "# groupby is too slow (in some cases at least)", "# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()", "# labels_to_i[:] = np.arange(labels_to_i.shape[0])", "def", "_get_label_to_i_dict", "(", "labels", ",", "sort_labels", "=", "False", ")", ":", "\"\"\" Return OrderedDict of unique labels to number.\n Optionally sort by label.\n \"\"\"", "labels", "=", "Index", "(", "lmap", "(", "tuple", ",", "labels", ")", ")", ".", "unique", "(", ")", ".", "tolist", "(", ")", "# squish", "if", "sort_labels", ":", "labels", "=", "sorted", "(", "list", "(", "labels", ")", ")", "d", "=", "OrderedDict", "(", "(", "k", ",", "i", ")", "for", "i", ",", "k", "in", "enumerate", "(", "labels", ")", ")", "return", "(", "d", ")", "def", "_get_index_subset_to_coord_dict", "(", 
"index", ",", "subset", ",", "sort_labels", "=", "False", ")", ":", "ilabels", "=", "list", "(", "zip", "(", "*", "[", "index", ".", "_get_level_values", "(", "i", ")", "for", "i", "in", "subset", "]", ")", ")", "labels_to_i", "=", "_get_label_to_i_dict", "(", "ilabels", ",", "sort_labels", "=", "sort_labels", ")", "labels_to_i", "=", "Series", "(", "labels_to_i", ")", "if", "len", "(", "subset", ")", ">", "1", ":", "labels_to_i", ".", "index", "=", "MultiIndex", ".", "from_tuples", "(", "labels_to_i", ".", "index", ")", "labels_to_i", ".", "index", ".", "names", "=", "[", "index", ".", "names", "[", "i", "]", "for", "i", "in", "subset", "]", "else", ":", "labels_to_i", ".", "index", "=", "Index", "(", "x", "[", "0", "]", "for", "x", "in", "labels_to_i", ".", "index", ")", "labels_to_i", ".", "index", ".", "name", "=", "index", ".", "names", "[", "subset", "[", "0", "]", "]", "labels_to_i", ".", "name", "=", "'value'", "return", "(", "labels_to_i", ")", "labels_to_i", "=", "_get_index_subset_to_coord_dict", "(", "ss", ".", "index", ",", "levels", ",", "sort_labels", "=", "sort_labels", ")", "# #####################################################################", "# #####################################################################", "i_coord", "=", "labels_to_i", "[", "values_ilabels", "]", ".", "tolist", "(", ")", "i_labels", "=", "labels_to_i", ".", "index", ".", "tolist", "(", ")", "return", "i_coord", ",", "i_labels", "i_coord", ",", "i_labels", "=", "get_indexers", "(", "row_levels", ")", "j_coord", ",", "j_labels", "=", "get_indexers", "(", "column_levels", ")", "return", "values", ",", "i_coord", ",", "j_coord", ",", "i_labels", ",", "j_labels" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_sparse_series_to_coo
Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels.
pandas/core/sparse/scipy_sparse.py
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. """ import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels > 2') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo ' 'transformation.') # to keep things simple, only rely on integer indexing (not labels) row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels) sparse_matrix = scipy.sparse.coo_matrix( (v, (i, j)), shape=(len(rows), len(columns))) return sparse_matrix, rows, columns
def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. """ import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels > 2') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo ' 'transformation.') # to keep things simple, only rely on integer indexing (not labels) row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels) sparse_matrix = scipy.sparse.coo_matrix( (v, (i, j)), shape=(len(rows), len(columns))) return sparse_matrix, rows, columns
[ "Convert", "a", "SparseSeries", "to", "a", "scipy", ".", "sparse", ".", "coo_matrix", "using", "index", "levels", "row_levels", "column_levels", "as", "the", "row", "and", "column", "labels", "respectively", ".", "Returns", "the", "sparse_matrix", "row", "and", "column", "labels", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/scipy_sparse.py#L93-L118
[ "def", "_sparse_series_to_coo", "(", "ss", ",", "row_levels", "=", "(", "0", ",", ")", ",", "column_levels", "=", "(", "1", ",", ")", ",", "sort_labels", "=", "False", ")", ":", "import", "scipy", ".", "sparse", "if", "ss", ".", "index", ".", "nlevels", "<", "2", ":", "raise", "ValueError", "(", "'to_coo requires MultiIndex with nlevels > 2'", ")", "if", "not", "ss", ".", "index", ".", "is_unique", ":", "raise", "ValueError", "(", "'Duplicate index entries are not allowed in to_coo '", "'transformation.'", ")", "# to keep things simple, only rely on integer indexing (not labels)", "row_levels", "=", "[", "ss", ".", "index", ".", "_get_level_number", "(", "x", ")", "for", "x", "in", "row_levels", "]", "column_levels", "=", "[", "ss", ".", "index", ".", "_get_level_number", "(", "x", ")", "for", "x", "in", "column_levels", "]", "v", ",", "i", ",", "j", ",", "rows", ",", "columns", "=", "_to_ijv", "(", "ss", ",", "row_levels", "=", "row_levels", ",", "column_levels", "=", "column_levels", ",", "sort_labels", "=", "sort_labels", ")", "sparse_matrix", "=", "scipy", ".", "sparse", ".", "coo_matrix", "(", "(", "v", ",", "(", "i", ",", "j", ")", ")", ",", "shape", "=", "(", "len", "(", "rows", ")", ",", "len", "(", "columns", ")", ")", ")", "return", "sparse_matrix", ",", "rows", ",", "columns" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_coo_to_sparse_series
Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor.
pandas/core/sparse/scipy_sparse.py
def _coo_to_sparse_series(A, dense_index=False): """ Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor. """ s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) s = s.sort_index() s = s.to_sparse() # TODO: specify kind? if dense_index: # is there a better constructor method to use here? i = range(A.shape[0]) j = range(A.shape[1]) ind = MultiIndex.from_product([i, j]) s = s.reindex(ind) return s
def _coo_to_sparse_series(A, dense_index=False): """ Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor. """ s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) s = s.sort_index() s = s.to_sparse() # TODO: specify kind? if dense_index: # is there a better constructor method to use here? i = range(A.shape[0]) j = range(A.shape[1]) ind = MultiIndex.from_product([i, j]) s = s.reindex(ind) return s
[ "Convert", "a", "scipy", ".", "sparse", ".", "coo_matrix", "to", "a", "SparseSeries", ".", "Use", "the", "defaults", "given", "in", "the", "SparseSeries", "constructor", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/scipy_sparse.py#L121-L135
[ "def", "_coo_to_sparse_series", "(", "A", ",", "dense_index", "=", "False", ")", ":", "s", "=", "Series", "(", "A", ".", "data", ",", "MultiIndex", ".", "from_arrays", "(", "(", "A", ".", "row", ",", "A", ".", "col", ")", ")", ")", "s", "=", "s", ".", "sort_index", "(", ")", "s", "=", "s", ".", "to_sparse", "(", ")", "# TODO: specify kind?", "if", "dense_index", ":", "# is there a better constructor method to use here?", "i", "=", "range", "(", "A", ".", "shape", "[", "0", "]", ")", "j", "=", "range", "(", "A", ".", "shape", "[", "1", "]", ")", "ind", "=", "MultiIndex", ".", "from_product", "(", "[", "i", ",", "j", "]", ")", "s", "=", "s", ".", "reindex", "(", "ind", ")", "return", "s" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_to_M8
Timestamp-like => dt64
pandas/core/arrays/datetimes.py
def _to_M8(key, tz=None): """ Timestamp-like => dt64 """ if not isinstance(key, Timestamp): # this also converts strings key = Timestamp(key) if key.tzinfo is not None and tz is not None: # Don't tz_localize(None) if key is already tz-aware key = key.tz_convert(tz) else: key = key.tz_localize(tz) return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)
def _to_M8(key, tz=None): """ Timestamp-like => dt64 """ if not isinstance(key, Timestamp): # this also converts strings key = Timestamp(key) if key.tzinfo is not None and tz is not None: # Don't tz_localize(None) if key is already tz-aware key = key.tz_convert(tz) else: key = key.tz_localize(tz) return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE)
[ "Timestamp", "-", "like", "=", ">", "dt64" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L72-L85
[ "def", "_to_M8", "(", "key", ",", "tz", "=", "None", ")", ":", "if", "not", "isinstance", "(", "key", ",", "Timestamp", ")", ":", "# this also converts strings", "key", "=", "Timestamp", "(", "key", ")", "if", "key", ".", "tzinfo", "is", "not", "None", "and", "tz", "is", "not", "None", ":", "# Don't tz_localize(None) if key is already tz-aware", "key", "=", "key", ".", "tz_convert", "(", "tz", ")", "else", ":", "key", "=", "key", ".", "tz_localize", "(", "tz", ")", "return", "np", ".", "int64", "(", "conversion", ".", "pydt_to_i8", "(", "key", ")", ")", ".", "view", "(", "_NS_DTYPE", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_dt_array_cmp
Wrap comparison operations to convert datetime-like to datetime64
pandas/core/arrays/datetimes.py
def _dt_array_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented other = lib.item_from_zerodim(other) if isinstance(other, (datetime, np.datetime64, str)): if isinstance(other, (datetime, np.datetime64)): # GH#18435 strings get a pass from tzawareness compat self._assert_tzawareness_compat(other) try: other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp return ops.invalid_comparison(self, other, op) result = op(self.asi8, other.view('i8')) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: if isinstance(other, list): try: other = type(self)._from_sequence(other) except ValueError: other = np.array(other, dtype=np.object_) elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArray)): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. return ops.invalid_comparison(self, other, op) if is_object_dtype(other): # We have to use _comp_method_OBJECT_ARRAY instead of numpy # comparison otherwise it would fail to raise when # comparing tz-aware and tz-naive with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.astype(object), other) o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. is_timedelta64_dtype(other) return ops.invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): other = other.array if (is_datetime64_dtype(other) and not is_datetime64_ns_dtype(other) or not hasattr(other, 'asi8')): # e.g. 
other.dtype == 'datetime64[s]' # or an object-dtype ndarray other = type(self)._from_sequence(other) result = op(self.view('i8'), other.view('i8')) o_mask = other._isnan result = com.values_from_object(result) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
def _dt_array_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented other = lib.item_from_zerodim(other) if isinstance(other, (datetime, np.datetime64, str)): if isinstance(other, (datetime, np.datetime64)): # GH#18435 strings get a pass from tzawareness compat self._assert_tzawareness_compat(other) try: other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp return ops.invalid_comparison(self, other, op) result = op(self.asi8, other.view('i8')) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: if isinstance(other, list): try: other = type(self)._from_sequence(other) except ValueError: other = np.array(other, dtype=np.object_) elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArray)): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. return ops.invalid_comparison(self, other, op) if is_object_dtype(other): # We have to use _comp_method_OBJECT_ARRAY instead of numpy # comparison otherwise it would fail to raise when # comparing tz-aware and tz-naive with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.astype(object), other) o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. is_timedelta64_dtype(other) return ops.invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): other = other.array if (is_datetime64_dtype(other) and not is_datetime64_ns_dtype(other) or not hasattr(other, 'asi8')): # e.g. 
other.dtype == 'datetime64[s]' # or an object-dtype ndarray other = type(self)._from_sequence(other) result = op(self.view('i8'), other.view('i8')) o_mask = other._isnan result = com.values_from_object(result) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
[ "Wrap", "comparison", "operations", "to", "convert", "datetime", "-", "like", "to", "datetime64" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L126-L207
[ "def", "_dt_array_cmp", "(", "cls", ",", "op", ")", ":", "opname", "=", "'__{name}__'", ".", "format", "(", "name", "=", "op", ".", "__name__", ")", "nat_result", "=", "opname", "==", "'__ne__'", "def", "wrapper", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "(", "ABCDataFrame", ",", "ABCSeries", ",", "ABCIndexClass", ")", ")", ":", "return", "NotImplemented", "other", "=", "lib", ".", "item_from_zerodim", "(", "other", ")", "if", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ",", "str", ")", ")", ":", "if", "isinstance", "(", "other", ",", "(", "datetime", ",", "np", ".", "datetime64", ")", ")", ":", "# GH#18435 strings get a pass from tzawareness compat", "self", ".", "_assert_tzawareness_compat", "(", "other", ")", "try", ":", "other", "=", "_to_M8", "(", "other", ",", "tz", "=", "self", ".", "tz", ")", "except", "ValueError", ":", "# string that cannot be parsed to Timestamp", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "result", "=", "op", "(", "self", ".", "asi8", ",", "other", ".", "view", "(", "'i8'", ")", ")", "if", "isna", "(", "other", ")", ":", "result", ".", "fill", "(", "nat_result", ")", "elif", "lib", ".", "is_scalar", "(", "other", ")", "or", "np", ".", "ndim", "(", "other", ")", "==", "0", ":", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "elif", "len", "(", "other", ")", "!=", "len", "(", "self", ")", ":", "raise", "ValueError", "(", "\"Lengths must match\"", ")", "else", ":", "if", "isinstance", "(", "other", ",", "list", ")", ":", "try", ":", "other", "=", "type", "(", "self", ")", ".", "_from_sequence", "(", "other", ")", "except", "ValueError", ":", "other", "=", "np", ".", "array", "(", "other", ",", "dtype", "=", "np", ".", "object_", ")", "elif", "not", "isinstance", "(", "other", ",", "(", "np", ".", "ndarray", ",", "ABCIndexClass", ",", "ABCSeries", ",", "DatetimeArray", ")", ")", ":", "# 
Following Timestamp convention, __eq__ is all-False", "# and __ne__ is all True, others raise TypeError.", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "if", "is_object_dtype", "(", "other", ")", ":", "# We have to use _comp_method_OBJECT_ARRAY instead of numpy", "# comparison otherwise it would fail to raise when", "# comparing tz-aware and tz-naive", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "result", "=", "ops", ".", "_comp_method_OBJECT_ARRAY", "(", "op", ",", "self", ".", "astype", "(", "object", ")", ",", "other", ")", "o_mask", "=", "isna", "(", "other", ")", "elif", "not", "(", "is_datetime64_dtype", "(", "other", ")", "or", "is_datetime64tz_dtype", "(", "other", ")", ")", ":", "# e.g. is_timedelta64_dtype(other)", "return", "ops", ".", "invalid_comparison", "(", "self", ",", "other", ",", "op", ")", "else", ":", "self", ".", "_assert_tzawareness_compat", "(", "other", ")", "if", "isinstance", "(", "other", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "other", "=", "other", ".", "array", "if", "(", "is_datetime64_dtype", "(", "other", ")", "and", "not", "is_datetime64_ns_dtype", "(", "other", ")", "or", "not", "hasattr", "(", "other", ",", "'asi8'", ")", ")", ":", "# e.g. other.dtype == 'datetime64[s]'", "# or an object-dtype ndarray", "other", "=", "type", "(", "self", ")", ".", "_from_sequence", "(", "other", ")", "result", "=", "op", "(", "self", ".", "view", "(", "'i8'", ")", ",", "other", ".", "view", "(", "'i8'", ")", ")", "o_mask", "=", "other", ".", "_isnan", "result", "=", "com", ".", "values_from_object", "(", "result", ")", "if", "o_mask", ".", "any", "(", ")", ":", "result", "[", "o_mask", "]", "=", "nat_result", "if", "self", ".", "_hasnans", ":", "result", "[", "self", ".", "_isnan", "]", "=", "nat_result", "return", "result", "return", "compat", ".", "set_function_name", "(", "wrapper", ",", "opname", ",", "cls", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
sequence_to_dt64ns
Parameters ---------- data : list-like dtype : dtype, str, or None, default None copy : bool, default False tz : tzinfo, str, or None, default None dayfirst : bool, default False yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' See pandas._libs.tslibs.conversion.tz_localize_to_utc int_as_wall_time : bool, default False Whether to treat ints as wall time in specified timezone, or as nanosecond-precision UNIX epoch (wall time in UTC). This is used in DatetimeIndex.__init__ to deprecate the wall-time behaviour. ..versionadded:: 0.24.0 Returns ------- result : numpy.ndarray The sequence converted to a numpy array with dtype ``datetime64[ns]``. tz : tzinfo or None Either the user-provided tzinfo or one inferred from the data. inferred_freq : Tick or None The inferred frequency of the sequence. Raises ------ TypeError : PeriodDType data is passed
pandas/core/arrays/datetimes.py
def sequence_to_dt64ns(data, dtype=None, copy=False, tz=None, dayfirst=False, yearfirst=False, ambiguous='raise', int_as_wall_time=False): """ Parameters ---------- data : list-like dtype : dtype, str, or None, default None copy : bool, default False tz : tzinfo, str, or None, default None dayfirst : bool, default False yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' See pandas._libs.tslibs.conversion.tz_localize_to_utc int_as_wall_time : bool, default False Whether to treat ints as wall time in specified timezone, or as nanosecond-precision UNIX epoch (wall time in UTC). This is used in DatetimeIndex.__init__ to deprecate the wall-time behaviour. ..versionadded:: 0.24.0 Returns ------- result : numpy.ndarray The sequence converted to a numpy array with dtype ``datetime64[ns]``. tz : tzinfo or None Either the user-provided tzinfo or one inferred from the data. inferred_freq : Tick or None The inferred frequency of the sequence. Raises ------ TypeError : PeriodDType data is passed """ inferred_freq = None dtype = _validate_dt64_dtype(dtype) if not hasattr(data, "dtype"): # e.g. list, tuple if np.ndim(data) == 0: # i.e. generator data = list(data) data = np.asarray(data) copy = False elif isinstance(data, ABCSeries): data = data._values if isinstance(data, ABCPandasArray): data = data.to_numpy() if hasattr(data, "freq"): # i.e. 
DatetimeArray/Index inferred_freq = data.freq # if dtype has an embedded tz, capture it tz = validate_tz_from_dtype(dtype, tz) if isinstance(data, ABCIndexClass): data = data._data # By this point we are assured to have either a numpy array or Index data, copy = maybe_convert_dtype(data, copy) if is_object_dtype(data) or is_string_dtype(data): # TODO: We do not have tests specific to string-dtypes, # also complex or categorical or other extension copy = False if lib.infer_dtype(data, skipna=False) == 'integer': data = data.astype(np.int64) else: # data comes back here as either i8 to denote UTC timestamps # or M8[ns] to denote wall times data, inferred_tz = objects_to_datetime64ns( data, dayfirst=dayfirst, yearfirst=yearfirst) tz = maybe_infer_tz(tz, inferred_tz) # When a sequence of timestamp objects is passed, we always # want to treat the (now i8-valued) data as UTC timestamps, # not wall times. int_as_wall_time = False # `data` may have originally been a Categorical[datetime64[ns, tz]], # so we need to handle these types. 
if is_datetime64tz_dtype(data): # DatetimeArray -> ndarray tz = maybe_infer_tz(tz, data.tz) result = data._data elif is_datetime64_dtype(data): # tz-naive DatetimeArray or ndarray[datetime64] data = getattr(data, "_data", data) if data.dtype != _NS_DTYPE: data = conversion.ensure_datetime64ns(data) if tz is not None: # Convert tz-naive to UTC tz = timezones.maybe_get_tz(tz) data = conversion.tz_localize_to_utc(data.view('i8'), tz, ambiguous=ambiguous) data = data.view(_NS_DTYPE) assert data.dtype == _NS_DTYPE, data.dtype result = data else: # must be integer dtype otherwise # assume this data are epoch timestamps if tz: tz = timezones.maybe_get_tz(tz) if data.dtype != _INT64_DTYPE: data = data.astype(np.int64, copy=False) if int_as_wall_time and tz is not None and not timezones.is_utc(tz): warnings.warn(_i8_message, FutureWarning, stacklevel=4) data = conversion.tz_localize_to_utc(data.view('i8'), tz, ambiguous=ambiguous) data = data.view(_NS_DTYPE) result = data.view(_NS_DTYPE) if copy: # TODO: should this be deepcopy? result = result.copy() assert isinstance(result, np.ndarray), type(result) assert result.dtype == 'M8[ns]', result.dtype # We have to call this again after possibly inferring a tz above validate_tz_from_dtype(dtype, tz) return result, tz, inferred_freq
def sequence_to_dt64ns(data, dtype=None, copy=False, tz=None, dayfirst=False, yearfirst=False, ambiguous='raise', int_as_wall_time=False): """ Parameters ---------- data : list-like dtype : dtype, str, or None, default None copy : bool, default False tz : tzinfo, str, or None, default None dayfirst : bool, default False yearfirst : bool, default False ambiguous : str, bool, or arraylike, default 'raise' See pandas._libs.tslibs.conversion.tz_localize_to_utc int_as_wall_time : bool, default False Whether to treat ints as wall time in specified timezone, or as nanosecond-precision UNIX epoch (wall time in UTC). This is used in DatetimeIndex.__init__ to deprecate the wall-time behaviour. ..versionadded:: 0.24.0 Returns ------- result : numpy.ndarray The sequence converted to a numpy array with dtype ``datetime64[ns]``. tz : tzinfo or None Either the user-provided tzinfo or one inferred from the data. inferred_freq : Tick or None The inferred frequency of the sequence. Raises ------ TypeError : PeriodDType data is passed """ inferred_freq = None dtype = _validate_dt64_dtype(dtype) if not hasattr(data, "dtype"): # e.g. list, tuple if np.ndim(data) == 0: # i.e. generator data = list(data) data = np.asarray(data) copy = False elif isinstance(data, ABCSeries): data = data._values if isinstance(data, ABCPandasArray): data = data.to_numpy() if hasattr(data, "freq"): # i.e. 
DatetimeArray/Index inferred_freq = data.freq # if dtype has an embedded tz, capture it tz = validate_tz_from_dtype(dtype, tz) if isinstance(data, ABCIndexClass): data = data._data # By this point we are assured to have either a numpy array or Index data, copy = maybe_convert_dtype(data, copy) if is_object_dtype(data) or is_string_dtype(data): # TODO: We do not have tests specific to string-dtypes, # also complex or categorical or other extension copy = False if lib.infer_dtype(data, skipna=False) == 'integer': data = data.astype(np.int64) else: # data comes back here as either i8 to denote UTC timestamps # or M8[ns] to denote wall times data, inferred_tz = objects_to_datetime64ns( data, dayfirst=dayfirst, yearfirst=yearfirst) tz = maybe_infer_tz(tz, inferred_tz) # When a sequence of timestamp objects is passed, we always # want to treat the (now i8-valued) data as UTC timestamps, # not wall times. int_as_wall_time = False # `data` may have originally been a Categorical[datetime64[ns, tz]], # so we need to handle these types. 
if is_datetime64tz_dtype(data): # DatetimeArray -> ndarray tz = maybe_infer_tz(tz, data.tz) result = data._data elif is_datetime64_dtype(data): # tz-naive DatetimeArray or ndarray[datetime64] data = getattr(data, "_data", data) if data.dtype != _NS_DTYPE: data = conversion.ensure_datetime64ns(data) if tz is not None: # Convert tz-naive to UTC tz = timezones.maybe_get_tz(tz) data = conversion.tz_localize_to_utc(data.view('i8'), tz, ambiguous=ambiguous) data = data.view(_NS_DTYPE) assert data.dtype == _NS_DTYPE, data.dtype result = data else: # must be integer dtype otherwise # assume this data are epoch timestamps if tz: tz = timezones.maybe_get_tz(tz) if data.dtype != _INT64_DTYPE: data = data.astype(np.int64, copy=False) if int_as_wall_time and tz is not None and not timezones.is_utc(tz): warnings.warn(_i8_message, FutureWarning, stacklevel=4) data = conversion.tz_localize_to_utc(data.view('i8'), tz, ambiguous=ambiguous) data = data.view(_NS_DTYPE) result = data.view(_NS_DTYPE) if copy: # TODO: should this be deepcopy? result = result.copy() assert isinstance(result, np.ndarray), type(result) assert result.dtype == 'M8[ns]', result.dtype # We have to call this again after possibly inferring a tz above validate_tz_from_dtype(dtype, tz) return result, tz, inferred_freq
[ "Parameters", "----------", "data", ":", "list", "-", "like", "dtype", ":", "dtype", "str", "or", "None", "default", "None", "copy", ":", "bool", "default", "False", "tz", ":", "tzinfo", "str", "or", "None", "default", "None", "dayfirst", ":", "bool", "default", "False", "yearfirst", ":", "bool", "default", "False", "ambiguous", ":", "str", "bool", "or", "arraylike", "default", "raise", "See", "pandas", ".", "_libs", ".", "tslibs", ".", "conversion", ".", "tz_localize_to_utc", "int_as_wall_time", ":", "bool", "default", "False", "Whether", "to", "treat", "ints", "as", "wall", "time", "in", "specified", "timezone", "or", "as", "nanosecond", "-", "precision", "UNIX", "epoch", "(", "wall", "time", "in", "UTC", ")", ".", "This", "is", "used", "in", "DatetimeIndex", ".", "__init__", "to", "deprecate", "the", "wall", "-", "time", "behaviour", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1669-L1800
[ "def", "sequence_to_dt64ns", "(", "data", ",", "dtype", "=", "None", ",", "copy", "=", "False", ",", "tz", "=", "None", ",", "dayfirst", "=", "False", ",", "yearfirst", "=", "False", ",", "ambiguous", "=", "'raise'", ",", "int_as_wall_time", "=", "False", ")", ":", "inferred_freq", "=", "None", "dtype", "=", "_validate_dt64_dtype", "(", "dtype", ")", "if", "not", "hasattr", "(", "data", ",", "\"dtype\"", ")", ":", "# e.g. list, tuple", "if", "np", ".", "ndim", "(", "data", ")", "==", "0", ":", "# i.e. generator", "data", "=", "list", "(", "data", ")", "data", "=", "np", ".", "asarray", "(", "data", ")", "copy", "=", "False", "elif", "isinstance", "(", "data", ",", "ABCSeries", ")", ":", "data", "=", "data", ".", "_values", "if", "isinstance", "(", "data", ",", "ABCPandasArray", ")", ":", "data", "=", "data", ".", "to_numpy", "(", ")", "if", "hasattr", "(", "data", ",", "\"freq\"", ")", ":", "# i.e. DatetimeArray/Index", "inferred_freq", "=", "data", ".", "freq", "# if dtype has an embedded tz, capture it", "tz", "=", "validate_tz_from_dtype", "(", "dtype", ",", "tz", ")", "if", "isinstance", "(", "data", ",", "ABCIndexClass", ")", ":", "data", "=", "data", ".", "_data", "# By this point we are assured to have either a numpy array or Index", "data", ",", "copy", "=", "maybe_convert_dtype", "(", "data", ",", "copy", ")", "if", "is_object_dtype", "(", "data", ")", "or", "is_string_dtype", "(", "data", ")", ":", "# TODO: We do not have tests specific to string-dtypes,", "# also complex or categorical or other extension", "copy", "=", "False", "if", "lib", ".", "infer_dtype", "(", "data", ",", "skipna", "=", "False", ")", "==", "'integer'", ":", "data", "=", "data", ".", "astype", "(", "np", ".", "int64", ")", "else", ":", "# data comes back here as either i8 to denote UTC timestamps", "# or M8[ns] to denote wall times", "data", ",", "inferred_tz", "=", "objects_to_datetime64ns", "(", "data", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ")", 
"tz", "=", "maybe_infer_tz", "(", "tz", ",", "inferred_tz", ")", "# When a sequence of timestamp objects is passed, we always", "# want to treat the (now i8-valued) data as UTC timestamps,", "# not wall times.", "int_as_wall_time", "=", "False", "# `data` may have originally been a Categorical[datetime64[ns, tz]],", "# so we need to handle these types.", "if", "is_datetime64tz_dtype", "(", "data", ")", ":", "# DatetimeArray -> ndarray", "tz", "=", "maybe_infer_tz", "(", "tz", ",", "data", ".", "tz", ")", "result", "=", "data", ".", "_data", "elif", "is_datetime64_dtype", "(", "data", ")", ":", "# tz-naive DatetimeArray or ndarray[datetime64]", "data", "=", "getattr", "(", "data", ",", "\"_data\"", ",", "data", ")", "if", "data", ".", "dtype", "!=", "_NS_DTYPE", ":", "data", "=", "conversion", ".", "ensure_datetime64ns", "(", "data", ")", "if", "tz", "is", "not", "None", ":", "# Convert tz-naive to UTC", "tz", "=", "timezones", ".", "maybe_get_tz", "(", "tz", ")", "data", "=", "conversion", ".", "tz_localize_to_utc", "(", "data", ".", "view", "(", "'i8'", ")", ",", "tz", ",", "ambiguous", "=", "ambiguous", ")", "data", "=", "data", ".", "view", "(", "_NS_DTYPE", ")", "assert", "data", ".", "dtype", "==", "_NS_DTYPE", ",", "data", ".", "dtype", "result", "=", "data", "else", ":", "# must be integer dtype otherwise", "# assume this data are epoch timestamps", "if", "tz", ":", "tz", "=", "timezones", ".", "maybe_get_tz", "(", "tz", ")", "if", "data", ".", "dtype", "!=", "_INT64_DTYPE", ":", "data", "=", "data", ".", "astype", "(", "np", ".", "int64", ",", "copy", "=", "False", ")", "if", "int_as_wall_time", "and", "tz", "is", "not", "None", "and", "not", "timezones", ".", "is_utc", "(", "tz", ")", ":", "warnings", ".", "warn", "(", "_i8_message", ",", "FutureWarning", ",", "stacklevel", "=", "4", ")", "data", "=", "conversion", ".", "tz_localize_to_utc", "(", "data", ".", "view", "(", "'i8'", ")", ",", "tz", ",", "ambiguous", "=", "ambiguous", ")", "data", "=", "data", 
".", "view", "(", "_NS_DTYPE", ")", "result", "=", "data", ".", "view", "(", "_NS_DTYPE", ")", "if", "copy", ":", "# TODO: should this be deepcopy?", "result", "=", "result", ".", "copy", "(", ")", "assert", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ",", "type", "(", "result", ")", "assert", "result", ".", "dtype", "==", "'M8[ns]'", ",", "result", ".", "dtype", "# We have to call this again after possibly inferring a tz above", "validate_tz_from_dtype", "(", "dtype", ",", "tz", ")", "return", "result", ",", "tz", ",", "inferred_freq" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
objects_to_datetime64ns
Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert timezone-aware timestamps to UTC errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes
pandas/core/arrays/datetimes.py
def objects_to_datetime64ns(data, dayfirst, yearfirst, utc=False, errors="raise", require_iso8601=False, allow_object=False): """ Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert timezone-aware timestamps to UTC errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes """ assert errors in ["raise", "ignore", "coerce"] # if str-dtype, convert data = np.array(data, copy=False, dtype=np.object_) try: result, tz_parsed = tslib.array_to_datetime( data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601 ) except ValueError as e: try: values, tz_parsed = conversion.datetime_to_datetime64(data) # If tzaware, these values represent unix timestamps, so we # return them as i8 to distinguish from wall times return values.view('i8'), tz_parsed except (ValueError, TypeError): raise e if tz_parsed is not None: # We can take a shortcut since the datetime64 numpy array # is in UTC # Return i8 values to denote unix timestamps return result.view('i8'), tz_parsed elif is_datetime64_dtype(result): # returning M8[ns] denotes wall-times; since tz is None # the distinction is a thin one return result, tz_parsed elif is_object_dtype(result): # GH#23675 when called via `pd.to_datetime`, returning an object-dtype # array is allowed. 
When called via `pd.DatetimeIndex`, we can # only accept datetime64 dtype, so raise TypeError if object-dtype # is returned, as that indicates the values can be recognized as # datetimes but they have conflicting timezones/awareness if allow_object: return result, tz_parsed raise TypeError(result) else: # pragma: no cover # GH#23675 this TypeError should never be hit, whereas the TypeError # in the object-dtype branch above is reachable. raise TypeError(result)
def objects_to_datetime64ns(data, dayfirst, yearfirst, utc=False, errors="raise", require_iso8601=False, allow_object=False): """ Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert timezone-aware timestamps to UTC errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes """ assert errors in ["raise", "ignore", "coerce"] # if str-dtype, convert data = np.array(data, copy=False, dtype=np.object_) try: result, tz_parsed = tslib.array_to_datetime( data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601 ) except ValueError as e: try: values, tz_parsed = conversion.datetime_to_datetime64(data) # If tzaware, these values represent unix timestamps, so we # return them as i8 to distinguish from wall times return values.view('i8'), tz_parsed except (ValueError, TypeError): raise e if tz_parsed is not None: # We can take a shortcut since the datetime64 numpy array # is in UTC # Return i8 values to denote unix timestamps return result.view('i8'), tz_parsed elif is_datetime64_dtype(result): # returning M8[ns] denotes wall-times; since tz is None # the distinction is a thin one return result, tz_parsed elif is_object_dtype(result): # GH#23675 when called via `pd.to_datetime`, returning an object-dtype # array is allowed. 
When called via `pd.DatetimeIndex`, we can # only accept datetime64 dtype, so raise TypeError if object-dtype # is returned, as that indicates the values can be recognized as # datetimes but they have conflicting timezones/awareness if allow_object: return result, tz_parsed raise TypeError(result) else: # pragma: no cover # GH#23675 this TypeError should never be hit, whereas the TypeError # in the object-dtype branch above is reachable. raise TypeError(result)
[ "Convert", "data", "to", "array", "of", "timestamps", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1803-L1877
[ "def", "objects_to_datetime64ns", "(", "data", ",", "dayfirst", ",", "yearfirst", ",", "utc", "=", "False", ",", "errors", "=", "\"raise\"", ",", "require_iso8601", "=", "False", ",", "allow_object", "=", "False", ")", ":", "assert", "errors", "in", "[", "\"raise\"", ",", "\"ignore\"", ",", "\"coerce\"", "]", "# if str-dtype, convert", "data", "=", "np", ".", "array", "(", "data", ",", "copy", "=", "False", ",", "dtype", "=", "np", ".", "object_", ")", "try", ":", "result", ",", "tz_parsed", "=", "tslib", ".", "array_to_datetime", "(", "data", ",", "errors", "=", "errors", ",", "utc", "=", "utc", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ",", "require_iso8601", "=", "require_iso8601", ")", "except", "ValueError", "as", "e", ":", "try", ":", "values", ",", "tz_parsed", "=", "conversion", ".", "datetime_to_datetime64", "(", "data", ")", "# If tzaware, these values represent unix timestamps, so we", "# return them as i8 to distinguish from wall times", "return", "values", ".", "view", "(", "'i8'", ")", ",", "tz_parsed", "except", "(", "ValueError", ",", "TypeError", ")", ":", "raise", "e", "if", "tz_parsed", "is", "not", "None", ":", "# We can take a shortcut since the datetime64 numpy array", "# is in UTC", "# Return i8 values to denote unix timestamps", "return", "result", ".", "view", "(", "'i8'", ")", ",", "tz_parsed", "elif", "is_datetime64_dtype", "(", "result", ")", ":", "# returning M8[ns] denotes wall-times; since tz is None", "# the distinction is a thin one", "return", "result", ",", "tz_parsed", "elif", "is_object_dtype", "(", "result", ")", ":", "# GH#23675 when called via `pd.to_datetime`, returning an object-dtype", "# array is allowed. 
When called via `pd.DatetimeIndex`, we can", "# only accept datetime64 dtype, so raise TypeError if object-dtype", "# is returned, as that indicates the values can be recognized as", "# datetimes but they have conflicting timezones/awareness", "if", "allow_object", ":", "return", "result", ",", "tz_parsed", "raise", "TypeError", "(", "result", ")", "else", ":", "# pragma: no cover", "# GH#23675 this TypeError should never be hit, whereas the TypeError", "# in the object-dtype branch above is reachable.", "raise", "TypeError", "(", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
maybe_convert_dtype
Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed
pandas/core/arrays/datetimes.py
def maybe_convert_dtype(data, copy): """ Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed """ if is_float_dtype(data): # Note: we must cast to datetime64[ns] here in order to treat these # as wall-times instead of UTC timestamps. data = data.astype(_NS_DTYPE) copy = False # TODO: deprecate this behavior to instead treat symmetrically # with integer dtypes. See discussion in GH#23675 elif is_timedelta64_dtype(data): warnings.warn("Passing timedelta64-dtype data is deprecated, will " "raise a TypeError in a future version", FutureWarning, stacklevel=5) data = data.view(_NS_DTYPE) elif is_period_dtype(data): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError("Passing PeriodDtype data is invalid. " "Use `data.to_timestamp()` instead") elif is_categorical_dtype(data): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. the categories are timedelta64s data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_type(data) and not is_datetime64tz_dtype(data): # Includes categorical # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False return data, copy
def maybe_convert_dtype(data, copy): """ Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed """ if is_float_dtype(data): # Note: we must cast to datetime64[ns] here in order to treat these # as wall-times instead of UTC timestamps. data = data.astype(_NS_DTYPE) copy = False # TODO: deprecate this behavior to instead treat symmetrically # with integer dtypes. See discussion in GH#23675 elif is_timedelta64_dtype(data): warnings.warn("Passing timedelta64-dtype data is deprecated, will " "raise a TypeError in a future version", FutureWarning, stacklevel=5) data = data.view(_NS_DTYPE) elif is_period_dtype(data): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError("Passing PeriodDtype data is invalid. " "Use `data.to_timestamp()` instead") elif is_categorical_dtype(data): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. the categories are timedelta64s data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_type(data) and not is_datetime64tz_dtype(data): # Includes categorical # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False return data, copy
[ "Convert", "data", "based", "on", "dtype", "conventions", "issuing", "deprecation", "warnings", "or", "errors", "where", "appropriate", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1880-L1932
[ "def", "maybe_convert_dtype", "(", "data", ",", "copy", ")", ":", "if", "is_float_dtype", "(", "data", ")", ":", "# Note: we must cast to datetime64[ns] here in order to treat these", "# as wall-times instead of UTC timestamps.", "data", "=", "data", ".", "astype", "(", "_NS_DTYPE", ")", "copy", "=", "False", "# TODO: deprecate this behavior to instead treat symmetrically", "# with integer dtypes. See discussion in GH#23675", "elif", "is_timedelta64_dtype", "(", "data", ")", ":", "warnings", ".", "warn", "(", "\"Passing timedelta64-dtype data is deprecated, will \"", "\"raise a TypeError in a future version\"", ",", "FutureWarning", ",", "stacklevel", "=", "5", ")", "data", "=", "data", ".", "view", "(", "_NS_DTYPE", ")", "elif", "is_period_dtype", "(", "data", ")", ":", "# Note: without explicitly raising here, PeriodIndex", "# test_setops.test_join_does_not_recur fails", "raise", "TypeError", "(", "\"Passing PeriodDtype data is invalid. \"", "\"Use `data.to_timestamp()` instead\"", ")", "elif", "is_categorical_dtype", "(", "data", ")", ":", "# GH#18664 preserve tz in going DTI->Categorical->DTI", "# TODO: cases where we need to do another pass through this func,", "# e.g. the categories are timedelta64s", "data", "=", "data", ".", "categories", ".", "take", "(", "data", ".", "codes", ",", "fill_value", "=", "NaT", ")", ".", "_values", "copy", "=", "False", "elif", "is_extension_type", "(", "data", ")", "and", "not", "is_datetime64tz_dtype", "(", "data", ")", ":", "# Includes categorical", "# TODO: We have no tests for these", "data", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "np", ".", "object_", ")", "copy", "=", "False", "return", "data", ",", "copy" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
maybe_infer_tz
If a timezone is inferred from data, check that it is compatible with the user-provided timezone, if any. Parameters ---------- tz : tzinfo or None inferred_tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if both timezones are present but do not match
pandas/core/arrays/datetimes.py
def maybe_infer_tz(tz, inferred_tz): """ If a timezone is inferred from data, check that it is compatible with the user-provided timezone, if any. Parameters ---------- tz : tzinfo or None inferred_tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if both timezones are present but do not match """ if tz is None: tz = inferred_tz elif inferred_tz is None: pass elif not timezones.tz_compare(tz, inferred_tz): raise TypeError('data is already tz-aware {inferred_tz}, unable to ' 'set specified tz: {tz}' .format(inferred_tz=inferred_tz, tz=tz)) return tz
def maybe_infer_tz(tz, inferred_tz): """ If a timezone is inferred from data, check that it is compatible with the user-provided timezone, if any. Parameters ---------- tz : tzinfo or None inferred_tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if both timezones are present but do not match """ if tz is None: tz = inferred_tz elif inferred_tz is None: pass elif not timezones.tz_compare(tz, inferred_tz): raise TypeError('data is already tz-aware {inferred_tz}, unable to ' 'set specified tz: {tz}' .format(inferred_tz=inferred_tz, tz=tz)) return tz
[ "If", "a", "timezone", "is", "inferred", "from", "data", "check", "that", "it", "is", "compatible", "with", "the", "user", "-", "provided", "timezone", "if", "any", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1938-L1964
[ "def", "maybe_infer_tz", "(", "tz", ",", "inferred_tz", ")", ":", "if", "tz", "is", "None", ":", "tz", "=", "inferred_tz", "elif", "inferred_tz", "is", "None", ":", "pass", "elif", "not", "timezones", ".", "tz_compare", "(", "tz", ",", "inferred_tz", ")", ":", "raise", "TypeError", "(", "'data is already tz-aware {inferred_tz}, unable to '", "'set specified tz: {tz}'", ".", "format", "(", "inferred_tz", "=", "inferred_tz", ",", "tz", "=", "tz", ")", ")", "return", "tz" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_validate_dt64_dtype
Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through
pandas/core/arrays/datetimes.py
def _validate_dt64_dtype(dtype): """ Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, np.dtype("M8")): # no precision, warn dtype = _NS_DTYPE msg = textwrap.dedent("""\ Passing in 'datetime64' dtype with no precision is deprecated and will raise in a future version. Please pass in 'datetime64[ns]' instead.""") warnings.warn(msg, FutureWarning, stacklevel=5) if ((isinstance(dtype, np.dtype) and dtype != _NS_DTYPE) or not isinstance(dtype, (np.dtype, DatetimeTZDtype))): raise ValueError("Unexpected value for 'dtype': '{dtype}'. " "Must be 'datetime64[ns]' or DatetimeTZDtype'." .format(dtype=dtype)) return dtype
def _validate_dt64_dtype(dtype): """ Check that a dtype, if passed, represents either a numpy datetime64[ns] dtype or a pandas DatetimeTZDtype. Parameters ---------- dtype : object Returns ------- dtype : None, numpy.dtype, or DatetimeTZDtype Raises ------ ValueError : invalid dtype Notes ----- Unlike validate_tz_from_dtype, this does _not_ allow non-existent tz errors to go through """ if dtype is not None: dtype = pandas_dtype(dtype) if is_dtype_equal(dtype, np.dtype("M8")): # no precision, warn dtype = _NS_DTYPE msg = textwrap.dedent("""\ Passing in 'datetime64' dtype with no precision is deprecated and will raise in a future version. Please pass in 'datetime64[ns]' instead.""") warnings.warn(msg, FutureWarning, stacklevel=5) if ((isinstance(dtype, np.dtype) and dtype != _NS_DTYPE) or not isinstance(dtype, (np.dtype, DatetimeTZDtype))): raise ValueError("Unexpected value for 'dtype': '{dtype}'. " "Must be 'datetime64[ns]' or DatetimeTZDtype'." .format(dtype=dtype)) return dtype
[ "Check", "that", "a", "dtype", "if", "passed", "represents", "either", "a", "numpy", "datetime64", "[", "ns", "]", "dtype", "or", "a", "pandas", "DatetimeTZDtype", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1967-L2005
[ "def", "_validate_dt64_dtype", "(", "dtype", ")", ":", "if", "dtype", "is", "not", "None", ":", "dtype", "=", "pandas_dtype", "(", "dtype", ")", "if", "is_dtype_equal", "(", "dtype", ",", "np", ".", "dtype", "(", "\"M8\"", ")", ")", ":", "# no precision, warn", "dtype", "=", "_NS_DTYPE", "msg", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n Passing in 'datetime64' dtype with no precision is deprecated\n and will raise in a future version. Please pass in\n 'datetime64[ns]' instead.\"\"\"", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "5", ")", "if", "(", "(", "isinstance", "(", "dtype", ",", "np", ".", "dtype", ")", "and", "dtype", "!=", "_NS_DTYPE", ")", "or", "not", "isinstance", "(", "dtype", ",", "(", "np", ".", "dtype", ",", "DatetimeTZDtype", ")", ")", ")", ":", "raise", "ValueError", "(", "\"Unexpected value for 'dtype': '{dtype}'. \"", "\"Must be 'datetime64[ns]' or DatetimeTZDtype'.\"", ".", "format", "(", "dtype", "=", "dtype", ")", ")", "return", "dtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
validate_tz_from_dtype
If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given tz. Parameters ---------- dtype : dtype, str tz : None, tzinfo Returns ------- tz : consensus tzinfo Raises ------ ValueError : on tzinfo mismatch
pandas/core/arrays/datetimes.py
def validate_tz_from_dtype(dtype, tz): """ If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given tz. Parameters ---------- dtype : dtype, str tz : None, tzinfo Returns ------- tz : consensus tzinfo Raises ------ ValueError : on tzinfo mismatch """ if dtype is not None: if isinstance(dtype, str): try: dtype = DatetimeTZDtype.construct_from_string(dtype) except TypeError: # Things like `datetime64[ns]`, which is OK for the # constructors, but also nonsense, which should be validated # but not by us. We *do* allow non-existent tz errors to # go through pass dtz = getattr(dtype, 'tz', None) if dtz is not None: if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a dtype" " with a tz") tz = dtz if tz is not None and is_datetime64_dtype(dtype): # We also need to check for the case where the user passed a # tz-naive dtype (i.e. datetime64[ns]) if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a " "timezone-naive dtype (i.e. datetime64[ns])") return tz
def validate_tz_from_dtype(dtype, tz): """ If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given tz. Parameters ---------- dtype : dtype, str tz : None, tzinfo Returns ------- tz : consensus tzinfo Raises ------ ValueError : on tzinfo mismatch """ if dtype is not None: if isinstance(dtype, str): try: dtype = DatetimeTZDtype.construct_from_string(dtype) except TypeError: # Things like `datetime64[ns]`, which is OK for the # constructors, but also nonsense, which should be validated # but not by us. We *do* allow non-existent tz errors to # go through pass dtz = getattr(dtype, 'tz', None) if dtz is not None: if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a dtype" " with a tz") tz = dtz if tz is not None and is_datetime64_dtype(dtype): # We also need to check for the case where the user passed a # tz-naive dtype (i.e. datetime64[ns]) if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a " "timezone-naive dtype (i.e. datetime64[ns])") return tz
[ "If", "the", "given", "dtype", "is", "a", "DatetimeTZDtype", "extract", "the", "implied", "tzinfo", "object", "from", "it", "and", "check", "that", "it", "does", "not", "conflict", "with", "the", "given", "tz", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2008-L2051
[ "def", "validate_tz_from_dtype", "(", "dtype", ",", "tz", ")", ":", "if", "dtype", "is", "not", "None", ":", "if", "isinstance", "(", "dtype", ",", "str", ")", ":", "try", ":", "dtype", "=", "DatetimeTZDtype", ".", "construct_from_string", "(", "dtype", ")", "except", "TypeError", ":", "# Things like `datetime64[ns]`, which is OK for the", "# constructors, but also nonsense, which should be validated", "# but not by us. We *do* allow non-existent tz errors to", "# go through", "pass", "dtz", "=", "getattr", "(", "dtype", ",", "'tz'", ",", "None", ")", "if", "dtz", "is", "not", "None", ":", "if", "tz", "is", "not", "None", "and", "not", "timezones", ".", "tz_compare", "(", "tz", ",", "dtz", ")", ":", "raise", "ValueError", "(", "\"cannot supply both a tz and a dtype\"", "\" with a tz\"", ")", "tz", "=", "dtz", "if", "tz", "is", "not", "None", "and", "is_datetime64_dtype", "(", "dtype", ")", ":", "# We also need to check for the case where the user passed a", "# tz-naive dtype (i.e. datetime64[ns])", "if", "tz", "is", "not", "None", "and", "not", "timezones", ".", "tz_compare", "(", "tz", ",", "dtz", ")", ":", "raise", "ValueError", "(", "\"cannot supply both a tz and a \"", "\"timezone-naive dtype (i.e. datetime64[ns])\"", ")", "return", "tz" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_infer_tz_from_endpoints
If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one of these inputs provides a timezone, require that they all agree. Parameters ---------- start : Timestamp end : Timestamp tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if start and end timezones do not agree
pandas/core/arrays/datetimes.py
def _infer_tz_from_endpoints(start, end, tz): """ If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one of these inputs provides a timezone, require that they all agree. Parameters ---------- start : Timestamp end : Timestamp tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if start and end timezones do not agree """ try: inferred_tz = timezones.infer_tzinfo(start, end) except Exception: raise TypeError('Start and end cannot both be tz-aware with ' 'different timezones') inferred_tz = timezones.maybe_get_tz(inferred_tz) tz = timezones.maybe_get_tz(tz) if tz is not None and inferred_tz is not None: if not timezones.tz_compare(inferred_tz, tz): raise AssertionError("Inferred time zone not equal to passed " "time zone") elif inferred_tz is not None: tz = inferred_tz return tz
def _infer_tz_from_endpoints(start, end, tz): """ If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one of these inputs provides a timezone, require that they all agree. Parameters ---------- start : Timestamp end : Timestamp tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if start and end timezones do not agree """ try: inferred_tz = timezones.infer_tzinfo(start, end) except Exception: raise TypeError('Start and end cannot both be tz-aware with ' 'different timezones') inferred_tz = timezones.maybe_get_tz(inferred_tz) tz = timezones.maybe_get_tz(tz) if tz is not None and inferred_tz is not None: if not timezones.tz_compare(inferred_tz, tz): raise AssertionError("Inferred time zone not equal to passed " "time zone") elif inferred_tz is not None: tz = inferred_tz return tz
[ "If", "a", "timezone", "is", "not", "explicitly", "given", "via", "tz", "see", "if", "one", "can", "be", "inferred", "from", "the", "start", "and", "end", "endpoints", ".", "If", "more", "than", "one", "of", "these", "inputs", "provides", "a", "timezone", "require", "that", "they", "all", "agree", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2054-L2091
[ "def", "_infer_tz_from_endpoints", "(", "start", ",", "end", ",", "tz", ")", ":", "try", ":", "inferred_tz", "=", "timezones", ".", "infer_tzinfo", "(", "start", ",", "end", ")", "except", "Exception", ":", "raise", "TypeError", "(", "'Start and end cannot both be tz-aware with '", "'different timezones'", ")", "inferred_tz", "=", "timezones", ".", "maybe_get_tz", "(", "inferred_tz", ")", "tz", "=", "timezones", ".", "maybe_get_tz", "(", "tz", ")", "if", "tz", "is", "not", "None", "and", "inferred_tz", "is", "not", "None", ":", "if", "not", "timezones", ".", "tz_compare", "(", "inferred_tz", ",", "tz", ")", ":", "raise", "AssertionError", "(", "\"Inferred time zone not equal to passed \"", "\"time zone\"", ")", "elif", "inferred_tz", "is", "not", "None", ":", "tz", "=", "inferred_tz", "return", "tz" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_maybe_localize_point
Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None Returns ------- ts : Timestamp
pandas/core/arrays/datetimes.py
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz): """ Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None Returns ------- ts : Timestamp """ # Make sure start and end are timezone localized if: # 1) freq = a Timedelta-like frequency (Tick) # 2) freq = None i.e. generating a linspaced range if isinstance(freq, Tick) or freq is None: localize_args = {'tz': tz, 'ambiguous': False} else: localize_args = {'tz': None} if is_none is None and is_not_none is not None: ts = ts.tz_localize(**localize_args) return ts
def _maybe_localize_point(ts, is_none, is_not_none, freq, tz): """ Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None Returns ------- ts : Timestamp """ # Make sure start and end are timezone localized if: # 1) freq = a Timedelta-like frequency (Tick) # 2) freq = None i.e. generating a linspaced range if isinstance(freq, Tick) or freq is None: localize_args = {'tz': tz, 'ambiguous': False} else: localize_args = {'tz': None} if is_none is None and is_not_none is not None: ts = ts.tz_localize(**localize_args) return ts
[ "Localize", "a", "start", "or", "end", "Timestamp", "to", "the", "timezone", "of", "the", "corresponding", "start", "or", "end", "Timestamp" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L2114-L2140
[ "def", "_maybe_localize_point", "(", "ts", ",", "is_none", ",", "is_not_none", ",", "freq", ",", "tz", ")", ":", "# Make sure start and end are timezone localized if:", "# 1) freq = a Timedelta-like frequency (Tick)", "# 2) freq = None i.e. generating a linspaced range", "if", "isinstance", "(", "freq", ",", "Tick", ")", "or", "freq", "is", "None", ":", "localize_args", "=", "{", "'tz'", ":", "tz", ",", "'ambiguous'", ":", "False", "}", "else", ":", "localize_args", "=", "{", "'tz'", ":", "None", "}", "if", "is_none", "is", "None", "and", "is_not_none", "is", "not", "None", ":", "ts", "=", "ts", ".", "tz_localize", "(", "*", "*", "localize_args", ")", "return", "ts" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray._sub_datetime_arraylike
subtract DatetimeArray/Index or ndarray[datetime64]
pandas/core/arrays/datetimes.py
def _sub_datetime_arraylike(self, other): """subtract DatetimeArray/Index or ndarray[datetime64]""" if len(self) != len(other): raise ValueError("cannot add indices of unequal length") if isinstance(other, np.ndarray): assert is_datetime64_dtype(other) other = type(self)(other) if not self._has_same_tz(other): # require tz compat raise TypeError("{cls} subtraction must have the same " "timezones or no timezones" .format(cls=type(self).__name__)) self_i8 = self.asi8 other_i8 = other.asi8 arr_mask = self._isnan | other._isnan new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask) if self._hasnans or other._hasnans: new_values[arr_mask] = iNaT return new_values.view('timedelta64[ns]')
def _sub_datetime_arraylike(self, other): """subtract DatetimeArray/Index or ndarray[datetime64]""" if len(self) != len(other): raise ValueError("cannot add indices of unequal length") if isinstance(other, np.ndarray): assert is_datetime64_dtype(other) other = type(self)(other) if not self._has_same_tz(other): # require tz compat raise TypeError("{cls} subtraction must have the same " "timezones or no timezones" .format(cls=type(self).__name__)) self_i8 = self.asi8 other_i8 = other.asi8 arr_mask = self._isnan | other._isnan new_values = checked_add_with_arr(self_i8, -other_i8, arr_mask=arr_mask) if self._hasnans or other._hasnans: new_values[arr_mask] = iNaT return new_values.view('timedelta64[ns]')
[ "subtract", "DatetimeArray", "/", "Index", "or", "ndarray", "[", "datetime64", "]" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L698-L720
[ "def", "_sub_datetime_arraylike", "(", "self", ",", "other", ")", ":", "if", "len", "(", "self", ")", "!=", "len", "(", "other", ")", ":", "raise", "ValueError", "(", "\"cannot add indices of unequal length\"", ")", "if", "isinstance", "(", "other", ",", "np", ".", "ndarray", ")", ":", "assert", "is_datetime64_dtype", "(", "other", ")", "other", "=", "type", "(", "self", ")", "(", "other", ")", "if", "not", "self", ".", "_has_same_tz", "(", "other", ")", ":", "# require tz compat", "raise", "TypeError", "(", "\"{cls} subtraction must have the same \"", "\"timezones or no timezones\"", ".", "format", "(", "cls", "=", "type", "(", "self", ")", ".", "__name__", ")", ")", "self_i8", "=", "self", ".", "asi8", "other_i8", "=", "other", ".", "asi8", "arr_mask", "=", "self", ".", "_isnan", "|", "other", ".", "_isnan", "new_values", "=", "checked_add_with_arr", "(", "self_i8", ",", "-", "other_i8", ",", "arr_mask", "=", "arr_mask", ")", "if", "self", ".", "_hasnans", "or", "other", ".", "_hasnans", ":", "new_values", "[", "arr_mask", "]", "=", "iNaT", "return", "new_values", ".", "view", "(", "'timedelta64[ns]'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray._add_delta
Add a timedelta-like, Tick, or TimedeltaIndex-like object to self, yielding a new DatetimeArray Parameters ---------- other : {timedelta, np.timedelta64, Tick, TimedeltaIndex, ndarray[timedelta64]} Returns ------- result : DatetimeArray
pandas/core/arrays/datetimes.py
def _add_delta(self, delta): """ Add a timedelta-like, Tick, or TimedeltaIndex-like object to self, yielding a new DatetimeArray Parameters ---------- other : {timedelta, np.timedelta64, Tick, TimedeltaIndex, ndarray[timedelta64]} Returns ------- result : DatetimeArray """ new_values = super()._add_delta(delta) return type(self)._from_sequence(new_values, tz=self.tz, freq='infer')
def _add_delta(self, delta): """ Add a timedelta-like, Tick, or TimedeltaIndex-like object to self, yielding a new DatetimeArray Parameters ---------- other : {timedelta, np.timedelta64, Tick, TimedeltaIndex, ndarray[timedelta64]} Returns ------- result : DatetimeArray """ new_values = super()._add_delta(delta) return type(self)._from_sequence(new_values, tz=self.tz, freq='infer')
[ "Add", "a", "timedelta", "-", "like", "Tick", "or", "TimedeltaIndex", "-", "like", "object", "to", "self", "yielding", "a", "new", "DatetimeArray" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L759-L774
[ "def", "_add_delta", "(", "self", ",", "delta", ")", ":", "new_values", "=", "super", "(", ")", ".", "_add_delta", "(", "delta", ")", "return", "type", "(", "self", ")", ".", "_from_sequence", "(", "new_values", ",", "tz", "=", "self", ".", "tz", ",", "freq", "=", "'infer'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.tz_convert
Convert tz-aware Datetime Array/Index from one time zone to another. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- Array or Index Raises ------ TypeError If Datetime Array/Index is tz-naive. See Also -------- DatetimeIndex.tz : A timezone that has a variable offset from UTC. DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.date_range(start='2014-08-01 09:00', ... freq='H', periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert('US/Central') DatetimeIndex(['2014-08-01 02:00:00-05:00', '2014-08-01 03:00:00-05:00', '2014-08-01 04:00:00-05:00'], dtype='datetime64[ns, US/Central]', freq='H') With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H', ... periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[ns]', freq='H')
pandas/core/arrays/datetimes.py
def tz_convert(self, tz): """ Convert tz-aware Datetime Array/Index from one time zone to another. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- Array or Index Raises ------ TypeError If Datetime Array/Index is tz-naive. See Also -------- DatetimeIndex.tz : A timezone that has a variable offset from UTC. DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.date_range(start='2014-08-01 09:00', ... freq='H', periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert('US/Central') DatetimeIndex(['2014-08-01 02:00:00-05:00', '2014-08-01 03:00:00-05:00', '2014-08-01 04:00:00-05:00'], dtype='datetime64[ns, US/Central]', freq='H') With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H', ... 
periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[ns]', freq='H') """ tz = timezones.maybe_get_tz(tz) if self.tz is None: # tz naive, use tz_localize raise TypeError('Cannot convert tz-naive timestamps, use ' 'tz_localize to localize') # No conversion since timestamps are all UTC to begin with dtype = tz_to_dtype(tz) return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)
def tz_convert(self, tz): """ Convert tz-aware Datetime Array/Index from one time zone to another. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone for time. Corresponding timestamps would be converted to this time zone of the Datetime Array/Index. A `tz` of None will convert to UTC and remove the timezone information. Returns ------- Array or Index Raises ------ TypeError If Datetime Array/Index is tz-naive. See Also -------- DatetimeIndex.tz : A timezone that has a variable offset from UTC. DatetimeIndex.tz_localize : Localize tz-naive DatetimeIndex to a given time zone, or remove timezone from a tz-aware DatetimeIndex. Examples -------- With the `tz` parameter, we can change the DatetimeIndex to other time zones: >>> dti = pd.date_range(start='2014-08-01 09:00', ... freq='H', periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert('US/Central') DatetimeIndex(['2014-08-01 02:00:00-05:00', '2014-08-01 03:00:00-05:00', '2014-08-01 04:00:00-05:00'], dtype='datetime64[ns, US/Central]', freq='H') With the ``tz=None``, we can remove the timezone (after converting to UTC if necessary): >>> dti = pd.date_range(start='2014-08-01 09:00', freq='H', ... 
periods=3, tz='Europe/Berlin') >>> dti DatetimeIndex(['2014-08-01 09:00:00+02:00', '2014-08-01 10:00:00+02:00', '2014-08-01 11:00:00+02:00'], dtype='datetime64[ns, Europe/Berlin]', freq='H') >>> dti.tz_convert(None) DatetimeIndex(['2014-08-01 07:00:00', '2014-08-01 08:00:00', '2014-08-01 09:00:00'], dtype='datetime64[ns]', freq='H') """ tz = timezones.maybe_get_tz(tz) if self.tz is None: # tz naive, use tz_localize raise TypeError('Cannot convert tz-naive timestamps, use ' 'tz_localize to localize') # No conversion since timestamps are all UTC to begin with dtype = tz_to_dtype(tz) return self._simple_new(self.asi8, dtype=dtype, freq=self.freq)
[ "Convert", "tz", "-", "aware", "Datetime", "Array", "/", "Index", "from", "one", "time", "zone", "to", "another", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L788-L861
[ "def", "tz_convert", "(", "self", ",", "tz", ")", ":", "tz", "=", "timezones", ".", "maybe_get_tz", "(", "tz", ")", "if", "self", ".", "tz", "is", "None", ":", "# tz naive, use tz_localize", "raise", "TypeError", "(", "'Cannot convert tz-naive timestamps, use '", "'tz_localize to localize'", ")", "# No conversion since timestamps are all UTC to begin with", "dtype", "=", "tz_to_dtype", "(", "tz", ")", "return", "self", ".", "_simple_new", "(", "self", ".", "asi8", ",", "dtype", "=", "dtype", ",", "freq", "=", "self", ".", "freq", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.tz_localize
Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index. This method takes a time zone (tz) naive Datetime Array/Index object and makes this time zone aware. It does not move the time to another time zone. Time zone localization helps to switch from time zone aware to time zone unaware objects. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone to convert timestamps to. Passing ``None`` will remove the time zone information preserving local time. ambiguous : 'infer', 'NaT', bool array, default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False signifies a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \ default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 errors : {'raise', 'coerce'}, default None - 'raise' will raise a NonExistentTimeError if a timestamp is not valid in the specified time zone (e.g. due to a transition from or to DST time). 
Use ``nonexistent='raise'`` instead. - 'coerce' will return NaT if the timestamp can not be converted to the specified time zone. Use ``nonexistent='NaT'`` instead. .. deprecated:: 0.24.0 Returns ------- Same type as self Array/Index converted to the specified time zone. Raises ------ TypeError If the Datetime Array/Index is tz-aware and tz is not None. See Also -------- DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from one time zone to another. Examples -------- >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3) >>> tz_naive DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq='D') Localize DatetimeIndex in US/Eastern time zone: >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern') >>> tz_aware DatetimeIndex(['2018-03-01 09:00:00-05:00', '2018-03-02 09:00:00-05:00', '2018-03-03 09:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq='D') With the ``tz=None``, we can remove the time zone information while keeping the local time (not converted to UTC): >>> tz_aware.tz_localize(None) DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq='D') Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.dt.tz_localize('CET', ambiguous='infer') 0 2018-10-28 01:30:00+02:00 1 2018-10-28 02:00:00+02:00 2 2018-10-28 02:30:00+02:00 3 2018-10-28 02:00:00+01:00 4 2018-10-28 02:30:00+01:00 5 2018-10-28 03:00:00+01:00 6 2018-10-28 03:30:00+01:00 dtype: datetime64[ns, CET] In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00', ... 
'2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False])) 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, Europe/Warsaw] If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 0 2015-03-29 01:59:59.999999999+01:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 0 2015-03-29 03:30:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw']
pandas/core/arrays/datetimes.py
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', errors=None): """ Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index. This method takes a time zone (tz) naive Datetime Array/Index object and makes this time zone aware. It does not move the time to another time zone. Time zone localization helps to switch from time zone aware to time zone unaware objects. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone to convert timestamps to. Passing ``None`` will remove the time zone information preserving local time. ambiguous : 'infer', 'NaT', bool array, default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False signifies a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \ default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. 
versionadded:: 0.24.0 errors : {'raise', 'coerce'}, default None - 'raise' will raise a NonExistentTimeError if a timestamp is not valid in the specified time zone (e.g. due to a transition from or to DST time). Use ``nonexistent='raise'`` instead. - 'coerce' will return NaT if the timestamp can not be converted to the specified time zone. Use ``nonexistent='NaT'`` instead. .. deprecated:: 0.24.0 Returns ------- Same type as self Array/Index converted to the specified time zone. Raises ------ TypeError If the Datetime Array/Index is tz-aware and tz is not None. See Also -------- DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from one time zone to another. Examples -------- >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3) >>> tz_naive DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq='D') Localize DatetimeIndex in US/Eastern time zone: >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern') >>> tz_aware DatetimeIndex(['2018-03-01 09:00:00-05:00', '2018-03-02 09:00:00-05:00', '2018-03-03 09:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq='D') With the ``tz=None``, we can remove the time zone information while keeping the local time (not converted to UTC): >>> tz_aware.tz_localize(None) DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq='D') Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... 
'2018-10-28 03:30:00'])) >>> s.dt.tz_localize('CET', ambiguous='infer') 0 2018-10-28 01:30:00+02:00 1 2018-10-28 02:00:00+02:00 2 2018-10-28 02:30:00+02:00 3 2018-10-28 02:00:00+01:00 4 2018-10-28 02:30:00+01:00 5 2018-10-28 03:00:00+01:00 6 2018-10-28 03:30:00+01:00 dtype: datetime64[ns, CET] In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False])) 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, Europe/Warsaw] If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 0 2015-03-29 01:59:59.999999999+01:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 0 2015-03-29 03:30:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] """ if errors is not None: warnings.warn("The errors argument is deprecated and will be " "removed in a future release. 
Use " "nonexistent='NaT' or nonexistent='raise' " "instead.", FutureWarning) if errors == 'coerce': nonexistent = 'NaT' elif errors == 'raise': nonexistent = 'raise' else: raise ValueError("The errors argument must be either 'coerce' " "or 'raise'.") nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta): raise ValueError("The nonexistent argument must be one of 'raise'," " 'NaT', 'shift_forward', 'shift_backward' or" " a timedelta object") if self.tz is not None: if tz is None: new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC, self.tz) else: raise TypeError("Already tz-aware, use tz_convert to convert.") else: tz = timezones.maybe_get_tz(tz) # Convert to UTC new_dates = conversion.tz_localize_to_utc( self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent, ) new_dates = new_dates.view(_NS_DTYPE) dtype = tz_to_dtype(tz) return self._simple_new(new_dates, dtype=dtype, freq=self.freq)
def tz_localize(self, tz, ambiguous='raise', nonexistent='raise', errors=None): """ Localize tz-naive Datetime Array/Index to tz-aware Datetime Array/Index. This method takes a time zone (tz) naive Datetime Array/Index object and makes this time zone aware. It does not move the time to another time zone. Time zone localization helps to switch from time zone aware to time zone unaware objects. Parameters ---------- tz : str, pytz.timezone, dateutil.tz.tzfile or None Time zone to convert timestamps to. Passing ``None`` will remove the time zone information preserving local time. ambiguous : 'infer', 'NaT', bool array, default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False signifies a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : 'shift_forward', 'shift_backward, 'NaT', timedelta, \ default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. 
versionadded:: 0.24.0 errors : {'raise', 'coerce'}, default None - 'raise' will raise a NonExistentTimeError if a timestamp is not valid in the specified time zone (e.g. due to a transition from or to DST time). Use ``nonexistent='raise'`` instead. - 'coerce' will return NaT if the timestamp can not be converted to the specified time zone. Use ``nonexistent='NaT'`` instead. .. deprecated:: 0.24.0 Returns ------- Same type as self Array/Index converted to the specified time zone. Raises ------ TypeError If the Datetime Array/Index is tz-aware and tz is not None. See Also -------- DatetimeIndex.tz_convert : Convert tz-aware DatetimeIndex from one time zone to another. Examples -------- >>> tz_naive = pd.date_range('2018-03-01 09:00', periods=3) >>> tz_naive DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq='D') Localize DatetimeIndex in US/Eastern time zone: >>> tz_aware = tz_naive.tz_localize(tz='US/Eastern') >>> tz_aware DatetimeIndex(['2018-03-01 09:00:00-05:00', '2018-03-02 09:00:00-05:00', '2018-03-03 09:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq='D') With the ``tz=None``, we can remove the time zone information while keeping the local time (not converted to UTC): >>> tz_aware.tz_localize(None) DatetimeIndex(['2018-03-01 09:00:00', '2018-03-02 09:00:00', '2018-03-03 09:00:00'], dtype='datetime64[ns]', freq='D') Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... 
'2018-10-28 03:30:00'])) >>> s.dt.tz_localize('CET', ambiguous='infer') 0 2018-10-28 01:30:00+02:00 1 2018-10-28 02:00:00+02:00 2 2018-10-28 02:30:00+02:00 3 2018-10-28 02:00:00+01:00 4 2018-10-28 02:30:00+01:00 5 2018-10-28 03:00:00+01:00 6 2018-10-28 03:30:00+01:00 dtype: datetime64[ns, CET] In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.to_datetime(pd.Series(['2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.dt.tz_localize('CET', ambiguous=np.array([True, True, False])) 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, Europe/Warsaw] If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.to_datetime(pd.Series(['2015-03-29 02:30:00', ... '2015-03-29 03:30:00'])) >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 0 2015-03-29 03:00:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 0 2015-03-29 01:59:59.999999999+01:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] >>> s.dt.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 0 2015-03-29 03:30:00+02:00 1 2015-03-29 03:30:00+02:00 dtype: datetime64[ns, 'Europe/Warsaw'] """ if errors is not None: warnings.warn("The errors argument is deprecated and will be " "removed in a future release. 
Use " "nonexistent='NaT' or nonexistent='raise' " "instead.", FutureWarning) if errors == 'coerce': nonexistent = 'NaT' elif errors == 'raise': nonexistent = 'raise' else: raise ValueError("The errors argument must be either 'coerce' " "or 'raise'.") nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta): raise ValueError("The nonexistent argument must be one of 'raise'," " 'NaT', 'shift_forward', 'shift_backward' or" " a timedelta object") if self.tz is not None: if tz is None: new_dates = tzconversion.tz_convert(self.asi8, timezones.UTC, self.tz) else: raise TypeError("Already tz-aware, use tz_convert to convert.") else: tz = timezones.maybe_get_tz(tz) # Convert to UTC new_dates = conversion.tz_localize_to_utc( self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent, ) new_dates = new_dates.view(_NS_DTYPE) dtype = tz_to_dtype(tz) return self._simple_new(new_dates, dtype=dtype, freq=self.freq)
[ "Localize", "tz", "-", "naive", "Datetime", "Array", "/", "Index", "to", "tz", "-", "aware", "Datetime", "Array", "/", "Index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L863-L1047
[ "def", "tz_localize", "(", "self", ",", "tz", ",", "ambiguous", "=", "'raise'", ",", "nonexistent", "=", "'raise'", ",", "errors", "=", "None", ")", ":", "if", "errors", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"The errors argument is deprecated and will be \"", "\"removed in a future release. Use \"", "\"nonexistent='NaT' or nonexistent='raise' \"", "\"instead.\"", ",", "FutureWarning", ")", "if", "errors", "==", "'coerce'", ":", "nonexistent", "=", "'NaT'", "elif", "errors", "==", "'raise'", ":", "nonexistent", "=", "'raise'", "else", ":", "raise", "ValueError", "(", "\"The errors argument must be either 'coerce' \"", "\"or 'raise'.\"", ")", "nonexistent_options", "=", "(", "'raise'", ",", "'NaT'", ",", "'shift_forward'", ",", "'shift_backward'", ")", "if", "nonexistent", "not", "in", "nonexistent_options", "and", "not", "isinstance", "(", "nonexistent", ",", "timedelta", ")", ":", "raise", "ValueError", "(", "\"The nonexistent argument must be one of 'raise',\"", "\" 'NaT', 'shift_forward', 'shift_backward' or\"", "\" a timedelta object\"", ")", "if", "self", ".", "tz", "is", "not", "None", ":", "if", "tz", "is", "None", ":", "new_dates", "=", "tzconversion", ".", "tz_convert", "(", "self", ".", "asi8", ",", "timezones", ".", "UTC", ",", "self", ".", "tz", ")", "else", ":", "raise", "TypeError", "(", "\"Already tz-aware, use tz_convert to convert.\"", ")", "else", ":", "tz", "=", "timezones", ".", "maybe_get_tz", "(", "tz", ")", "# Convert to UTC", "new_dates", "=", "conversion", ".", "tz_localize_to_utc", "(", "self", ".", "asi8", ",", "tz", ",", "ambiguous", "=", "ambiguous", ",", "nonexistent", "=", "nonexistent", ",", ")", "new_dates", "=", "new_dates", ".", "view", "(", "_NS_DTYPE", ")", "dtype", "=", "tz_to_dtype", "(", "tz", ")", "return", "self", ".", "_simple_new", "(", "new_dates", ",", "dtype", "=", "dtype", ",", "freq", "=", "self", ".", "freq", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.normalize
Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None)
pandas/core/arrays/datetimes.py
def normalize(self): """ Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ if self.tz is None or timezones.is_utc(self.tz): not_null = ~self.isna() DAY_NS = ccalendar.DAY_SECONDS * 1000000000 new_values = self.asi8.copy() adjustment = (new_values[not_null] % DAY_NS) new_values[not_null] = new_values[not_null] - adjustment else: new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) return type(self)._from_sequence(new_values, freq='infer').tz_localize(self.tz)
def normalize(self): """ Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """ if self.tz is None or timezones.is_utc(self.tz): not_null = ~self.isna() DAY_NS = ccalendar.DAY_SECONDS * 1000000000 new_values = self.asi8.copy() adjustment = (new_values[not_null] % DAY_NS) new_values[not_null] = new_values[not_null] - adjustment else: new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) return type(self)._from_sequence(new_values, freq='infer').tz_localize(self.tz)
[ "Convert", "times", "to", "midnight", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1063-L1110
[ "def", "normalize", "(", "self", ")", ":", "if", "self", ".", "tz", "is", "None", "or", "timezones", ".", "is_utc", "(", "self", ".", "tz", ")", ":", "not_null", "=", "~", "self", ".", "isna", "(", ")", "DAY_NS", "=", "ccalendar", ".", "DAY_SECONDS", "*", "1000000000", "new_values", "=", "self", ".", "asi8", ".", "copy", "(", ")", "adjustment", "=", "(", "new_values", "[", "not_null", "]", "%", "DAY_NS", ")", "new_values", "[", "not_null", "]", "=", "new_values", "[", "not_null", "]", "-", "adjustment", "else", ":", "new_values", "=", "conversion", ".", "normalize_i8_timestamps", "(", "self", ".", "asi8", ",", "self", ".", "tz", ")", "return", "type", "(", "self", ")", ".", "_from_sequence", "(", "new_values", ",", "freq", "=", "'infer'", ")", ".", "tz_localize", "(", "self", ".", "tz", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.to_period
Cast to PeriodArray/Index at a particular frequency. Converts DatetimeArray/Index to PeriodArray/Index. Parameters ---------- freq : str or Offset, optional One of pandas' :ref:`offset strings <timeseries.offset_aliases>` or an Offset object. Will be inferred by default. Returns ------- PeriodArray/Index Raises ------ ValueError When converting a DatetimeArray/Index with non-regular values, so that a frequency cannot be inferred. See Also -------- PeriodIndex: Immutable ndarray holding ordinal values. DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. Examples -------- >>> df = pd.DataFrame({"y": [1, 2, 3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], dtype='period[M]', freq='M') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], dtype='period[D]', freq='D')
pandas/core/arrays/datetimes.py
def to_period(self, freq=None): """ Cast to PeriodArray/Index at a particular frequency. Converts DatetimeArray/Index to PeriodArray/Index. Parameters ---------- freq : str or Offset, optional One of pandas' :ref:`offset strings <timeseries.offset_aliases>` or an Offset object. Will be inferred by default. Returns ------- PeriodArray/Index Raises ------ ValueError When converting a DatetimeArray/Index with non-regular values, so that a frequency cannot be inferred. See Also -------- PeriodIndex: Immutable ndarray holding ordinal values. DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. Examples -------- >>> df = pd.DataFrame({"y": [1, 2, 3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], dtype='period[M]', freq='M') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], dtype='period[D]', freq='D') """ from pandas.core.arrays import PeriodArray if self.tz is not None: warnings.warn("Converting to PeriodArray/Index representation " "will drop timezone information.", UserWarning) if freq is None: freq = self.freqstr or self.inferred_freq if freq is None: raise ValueError("You must pass a freq argument as " "current index has none.") freq = get_period_alias(freq) return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)
def to_period(self, freq=None): """ Cast to PeriodArray/Index at a particular frequency. Converts DatetimeArray/Index to PeriodArray/Index. Parameters ---------- freq : str or Offset, optional One of pandas' :ref:`offset strings <timeseries.offset_aliases>` or an Offset object. Will be inferred by default. Returns ------- PeriodArray/Index Raises ------ ValueError When converting a DatetimeArray/Index with non-regular values, so that a frequency cannot be inferred. See Also -------- PeriodIndex: Immutable ndarray holding ordinal values. DatetimeIndex.to_pydatetime: Return DatetimeIndex as object. Examples -------- >>> df = pd.DataFrame({"y": [1, 2, 3]}, ... index=pd.to_datetime(["2000-03-31 00:00:00", ... "2000-05-31 00:00:00", ... "2000-08-31 00:00:00"])) >>> df.index.to_period("M") PeriodIndex(['2000-03', '2000-05', '2000-08'], dtype='period[M]', freq='M') Infer the daily frequency >>> idx = pd.date_range("2017-01-01", periods=2) >>> idx.to_period() PeriodIndex(['2017-01-01', '2017-01-02'], dtype='period[D]', freq='D') """ from pandas.core.arrays import PeriodArray if self.tz is not None: warnings.warn("Converting to PeriodArray/Index representation " "will drop timezone information.", UserWarning) if freq is None: freq = self.freqstr or self.inferred_freq if freq is None: raise ValueError("You must pass a freq argument as " "current index has none.") freq = get_period_alias(freq) return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)
[ "Cast", "to", "PeriodArray", "/", "Index", "at", "a", "particular", "frequency", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1112-L1171
[ "def", "to_period", "(", "self", ",", "freq", "=", "None", ")", ":", "from", "pandas", ".", "core", ".", "arrays", "import", "PeriodArray", "if", "self", ".", "tz", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"Converting to PeriodArray/Index representation \"", "\"will drop timezone information.\"", ",", "UserWarning", ")", "if", "freq", "is", "None", ":", "freq", "=", "self", ".", "freqstr", "or", "self", ".", "inferred_freq", "if", "freq", "is", "None", ":", "raise", "ValueError", "(", "\"You must pass a freq argument as \"", "\"current index has none.\"", ")", "freq", "=", "get_period_alias", "(", "freq", ")", "return", "PeriodArray", ".", "_from_datetime64", "(", "self", ".", "_data", ",", "freq", ",", "tz", "=", "self", ".", "tz", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.to_perioddelta
Calculate TimedeltaArray of difference between index values and index converted to PeriodArray at specified freq. Used for vectorized offsets Parameters ---------- freq : Period frequency Returns ------- TimedeltaArray/Index
pandas/core/arrays/datetimes.py
def to_perioddelta(self, freq): """ Calculate TimedeltaArray of difference between index values and index converted to PeriodArray at specified freq. Used for vectorized offsets Parameters ---------- freq : Period frequency Returns ------- TimedeltaArray/Index """ # TODO: consider privatizing (discussion in GH#23113) from pandas.core.arrays.timedeltas import TimedeltaArray i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8 m8delta = i8delta.view('m8[ns]') return TimedeltaArray(m8delta)
def to_perioddelta(self, freq): """ Calculate TimedeltaArray of difference between index values and index converted to PeriodArray at specified freq. Used for vectorized offsets Parameters ---------- freq : Period frequency Returns ------- TimedeltaArray/Index """ # TODO: consider privatizing (discussion in GH#23113) from pandas.core.arrays.timedeltas import TimedeltaArray i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8 m8delta = i8delta.view('m8[ns]') return TimedeltaArray(m8delta)
[ "Calculate", "TimedeltaArray", "of", "difference", "between", "index", "values", "and", "index", "converted", "to", "PeriodArray", "at", "specified", "freq", ".", "Used", "for", "vectorized", "offsets" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1173-L1191
[ "def", "to_perioddelta", "(", "self", ",", "freq", ")", ":", "# TODO: consider privatizing (discussion in GH#23113)", "from", "pandas", ".", "core", ".", "arrays", ".", "timedeltas", "import", "TimedeltaArray", "i8delta", "=", "self", ".", "asi8", "-", "self", ".", "to_period", "(", "freq", ")", ".", "to_timestamp", "(", ")", ".", "asi8", "m8delta", "=", "i8delta", ".", "view", "(", "'m8[ns]'", ")", "return", "TimedeltaArray", "(", "m8delta", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.month_name
Return the month names of the DateTimeIndex with specified locale. .. versionadded:: 0.23.0 Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale. Returns ------- Index Index of month names. Examples -------- >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name() Index(['January', 'February', 'March'], dtype='object')
pandas/core/arrays/datetimes.py
def month_name(self, locale=None): """ Return the month names of the DateTimeIndex with specified locale. .. versionadded:: 0.23.0 Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale. Returns ------- Index Index of month names. Examples -------- >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name() Index(['January', 'February', 'March'], dtype='object') """ if self.tz is not None and not timezones.is_utc(self.tz): values = self._local_timestamps() else: values = self.asi8 result = fields.get_date_name_field(values, 'month_name', locale=locale) result = self._maybe_mask_results(result, fill_value=None) return result
def month_name(self, locale=None): """ Return the month names of the DateTimeIndex with specified locale. .. versionadded:: 0.23.0 Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale. Returns ------- Index Index of month names. Examples -------- >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name() Index(['January', 'February', 'March'], dtype='object') """ if self.tz is not None and not timezones.is_utc(self.tz): values = self._local_timestamps() else: values = self.asi8 result = fields.get_date_name_field(values, 'month_name', locale=locale) result = self._maybe_mask_results(result, fill_value=None) return result
[ "Return", "the", "month", "names", "of", "the", "DateTimeIndex", "with", "specified", "locale", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1196-L1230
[ "def", "month_name", "(", "self", ",", "locale", "=", "None", ")", ":", "if", "self", ".", "tz", "is", "not", "None", "and", "not", "timezones", ".", "is_utc", "(", "self", ".", "tz", ")", ":", "values", "=", "self", ".", "_local_timestamps", "(", ")", "else", ":", "values", "=", "self", ".", "asi8", "result", "=", "fields", ".", "get_date_name_field", "(", "values", ",", "'month_name'", ",", "locale", "=", "locale", ")", "result", "=", "self", ".", "_maybe_mask_results", "(", "result", ",", "fill_value", "=", "None", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.time
Returns numpy array of datetime.time. The time part of the Timestamps.
pandas/core/arrays/datetimes.py
def time(self): """ Returns numpy array of datetime.time. The time part of the Timestamps. """ # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC if self.tz is not None and not timezones.is_utc(self.tz): timestamps = self._local_timestamps() else: timestamps = self.asi8 return tslib.ints_to_pydatetime(timestamps, box="time")
def time(self): """ Returns numpy array of datetime.time. The time part of the Timestamps. """ # If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC if self.tz is not None and not timezones.is_utc(self.tz): timestamps = self._local_timestamps() else: timestamps = self.asi8 return tslib.ints_to_pydatetime(timestamps, box="time")
[ "Returns", "numpy", "array", "of", "datetime", ".", "time", ".", "The", "time", "part", "of", "the", "Timestamps", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1269-L1281
[ "def", "time", "(", "self", ")", ":", "# If the Timestamps have a timezone that is not UTC,", "# convert them into their i8 representation while", "# keeping their timezone and not using UTC", "if", "self", ".", "tz", "is", "not", "None", "and", "not", "timezones", ".", "is_utc", "(", "self", ".", "tz", ")", ":", "timestamps", "=", "self", ".", "_local_timestamps", "(", ")", "else", ":", "timestamps", "=", "self", ".", "asi8", "return", "tslib", ".", "ints_to_pydatetime", "(", "timestamps", ",", "box", "=", "\"time\"", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeArray.to_julian_date
Convert Datetime Array to float64 ndarray of Julian Dates. 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day
pandas/core/arrays/datetimes.py
def to_julian_date(self): """ Convert Datetime Array to float64 ndarray of Julian Dates. 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day """ # http://mysite.verizon.net/aesir_research/date/jdalg2.htm year = np.asarray(self.year) month = np.asarray(self.month) day = np.asarray(self.day) testarr = month < 3 year[testarr] -= 1 month[testarr] += 12 return (day + np.fix((153 * month - 457) / 5) + 365 * year + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400) + 1721118.5 + (self.hour + self.minute / 60.0 + self.second / 3600.0 + self.microsecond / 3600.0 / 1e+6 + self.nanosecond / 3600.0 / 1e+9 ) / 24.0)
def to_julian_date(self): """ Convert Datetime Array to float64 ndarray of Julian Dates. 0 Julian date is noon January 1, 4713 BC. http://en.wikipedia.org/wiki/Julian_day """ # http://mysite.verizon.net/aesir_research/date/jdalg2.htm year = np.asarray(self.year) month = np.asarray(self.month) day = np.asarray(self.day) testarr = month < 3 year[testarr] -= 1 month[testarr] += 12 return (day + np.fix((153 * month - 457) / 5) + 365 * year + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400) + 1721118.5 + (self.hour + self.minute / 60.0 + self.second / 3600.0 + self.microsecond / 3600.0 / 1e+6 + self.nanosecond / 3600.0 / 1e+9 ) / 24.0)
[ "Convert", "Datetime", "Array", "to", "float64", "ndarray", "of", "Julian", "Dates", ".", "0", "Julian", "date", "is", "noon", "January", "1", "4713", "BC", ".", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Julian_day" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1634-L1660
[ "def", "to_julian_date", "(", "self", ")", ":", "# http://mysite.verizon.net/aesir_research/date/jdalg2.htm", "year", "=", "np", ".", "asarray", "(", "self", ".", "year", ")", "month", "=", "np", ".", "asarray", "(", "self", ".", "month", ")", "day", "=", "np", ".", "asarray", "(", "self", ".", "day", ")", "testarr", "=", "month", "<", "3", "year", "[", "testarr", "]", "-=", "1", "month", "[", "testarr", "]", "+=", "12", "return", "(", "day", "+", "np", ".", "fix", "(", "(", "153", "*", "month", "-", "457", ")", "/", "5", ")", "+", "365", "*", "year", "+", "np", ".", "floor", "(", "year", "/", "4", ")", "-", "np", ".", "floor", "(", "year", "/", "100", ")", "+", "np", ".", "floor", "(", "year", "/", "400", ")", "+", "1721118.5", "+", "(", "self", ".", "hour", "+", "self", ".", "minute", "/", "60.0", "+", "self", ".", "second", "/", "3600.0", "+", "self", ".", "microsecond", "/", "3600.0", "/", "1e+6", "+", "self", ".", "nanosecond", "/", "3600.0", "/", "1e+9", ")", "/", "24.0", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
get_api_items
Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located.
scripts/validate_docstrings.py
def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """ current_module = 'pandas' previous_line = current_section = current_subsection = '' position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set('-'): current_section = previous_line continue if set(line) == set('~'): current_subsection = previous_line continue if line.startswith('.. currentmodule::'): current_module = line.replace('.. currentmodule::', '').strip() continue if line == '.. autosummary::': position = 'autosummary' continue if position == 'autosummary': if line == '': position = 'items' continue if position == 'items': if line == '': position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split('.'): func = getattr(func, part) yield ('.'.join([current_module, item]), func, current_section, current_subsection) previous_line = line
def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """ current_module = 'pandas' previous_line = current_section = current_subsection = '' position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set('-'): current_section = previous_line continue if set(line) == set('~'): current_subsection = previous_line continue if line.startswith('.. currentmodule::'): current_module = line.replace('.. currentmodule::', '').strip() continue if line == '.. autosummary::': position = 'autosummary' continue if position == 'autosummary': if line == '': position = 'items' continue if position == 'items': if line == '': position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split('.'): func = getattr(func, part) yield ('.'.join([current_module, item]), func, current_section, current_subsection) previous_line = line
[ "Yield", "information", "about", "all", "public", "API", "items", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L158-L223
[ "def", "get_api_items", "(", "api_doc_fd", ")", ":", "current_module", "=", "'pandas'", "previous_line", "=", "current_section", "=", "current_subsection", "=", "''", "position", "=", "None", "for", "line", "in", "api_doc_fd", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "len", "(", "line", ")", "==", "len", "(", "previous_line", ")", ":", "if", "set", "(", "line", ")", "==", "set", "(", "'-'", ")", ":", "current_section", "=", "previous_line", "continue", "if", "set", "(", "line", ")", "==", "set", "(", "'~'", ")", ":", "current_subsection", "=", "previous_line", "continue", "if", "line", ".", "startswith", "(", "'.. currentmodule::'", ")", ":", "current_module", "=", "line", ".", "replace", "(", "'.. currentmodule::'", ",", "''", ")", ".", "strip", "(", ")", "continue", "if", "line", "==", "'.. autosummary::'", ":", "position", "=", "'autosummary'", "continue", "if", "position", "==", "'autosummary'", ":", "if", "line", "==", "''", ":", "position", "=", "'items'", "continue", "if", "position", "==", "'items'", ":", "if", "line", "==", "''", ":", "position", "=", "None", "continue", "item", "=", "line", ".", "strip", "(", ")", "func", "=", "importlib", ".", "import_module", "(", "current_module", ")", "for", "part", "in", "item", ".", "split", "(", "'.'", ")", ":", "func", "=", "getattr", "(", "func", ",", "part", ")", "yield", "(", "'.'", ".", "join", "(", "[", "current_module", ",", "item", "]", ")", ",", "func", ",", "current_section", ",", "current_subsection", ")", "previous_line", "=", "line" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
get_validation_data
Validate the docstring. Parameters ---------- doc : Docstring A Docstring object with the given function name. Returns ------- tuple errors : list of tuple Errors occurred during validation. warnings : list of tuple Warnings occurred during validation. examples_errs : str Examples usage displayed along the error, otherwise empty string. Notes ----- The errors codes are defined as: - First two characters: Section where the error happens: * GL: Global (no section, like section ordering errors) * SS: Short summary * ES: Extended summary * PR: Parameters * RT: Returns * YD: Yields * RS: Raises * WN: Warns * SA: See Also * NT: Notes * RF: References * EX: Examples - Last two characters: Numeric error code inside the section For example, EX02 is the second codified error in the Examples section (which in this case is assigned to examples that do not pass the tests). The error codes, their corresponding error messages, and the details on how they are validated, are not documented more than in the source code of this function.
scripts/validate_docstrings.py
def get_validation_data(doc): """ Validate the docstring. Parameters ---------- doc : Docstring A Docstring object with the given function name. Returns ------- tuple errors : list of tuple Errors occurred during validation. warnings : list of tuple Warnings occurred during validation. examples_errs : str Examples usage displayed along the error, otherwise empty string. Notes ----- The errors codes are defined as: - First two characters: Section where the error happens: * GL: Global (no section, like section ordering errors) * SS: Short summary * ES: Extended summary * PR: Parameters * RT: Returns * YD: Yields * RS: Raises * WN: Warns * SA: See Also * NT: Notes * RF: References * EX: Examples - Last two characters: Numeric error code inside the section For example, EX02 is the second codified error in the Examples section (which in this case is assigned to examples that do not pass the tests). The error codes, their corresponding error messages, and the details on how they are validated, are not documented more than in the source code of this function. 
""" errs = [] wrns = [] if not doc.raw_doc: errs.append(error('GL08')) return errs, wrns, '' if doc.start_blank_lines != 1: errs.append(error('GL01')) if doc.end_blank_lines != 1: errs.append(error('GL02')) if doc.double_blank_lines: errs.append(error('GL03')) mentioned_errs = doc.mentioned_private_classes if mentioned_errs: errs.append(error('GL04', mentioned_private_classes=', '.join(mentioned_errs))) for line in doc.raw_doc.splitlines(): if re.match("^ *\t", line): errs.append(error('GL05', line_with_tabs=line.lstrip())) unexpected_sections = [section for section in doc.section_titles if section not in ALLOWED_SECTIONS] for section in unexpected_sections: errs.append(error('GL06', section=section, allowed_sections=', '.join(ALLOWED_SECTIONS))) correct_order = [section for section in ALLOWED_SECTIONS if section in doc.section_titles] if correct_order != doc.section_titles: errs.append(error('GL07', correct_sections=', '.join(correct_order))) if (doc.deprecated_with_directive and not doc.extended_summary.startswith('.. 
deprecated:: ')): errs.append(error('GL09')) if not doc.summary: errs.append(error('SS01')) else: if not doc.summary[0].isupper(): errs.append(error('SS02')) if doc.summary[-1] != '.': errs.append(error('SS03')) if doc.summary != doc.summary.lstrip(): errs.append(error('SS04')) elif (doc.is_function_or_method and doc.summary.split(' ')[0][-1] == 's'): errs.append(error('SS05')) if doc.num_summary_lines > 1: errs.append(error('SS06')) if not doc.extended_summary: wrns.append(('ES01', 'No extended summary found')) # PR01: Parameters not documented # PR02: Unknown parameters # PR03: Wrong parameters order errs += doc.parameter_mismatches for param in doc.doc_parameters: if not param.startswith("*"): # Check can ignore var / kwargs if not doc.parameter_type(param): if ':' in param: errs.append(error('PR10', param_name=param.split(':')[0])) else: errs.append(error('PR04', param_name=param)) else: if doc.parameter_type(param)[-1] == '.': errs.append(error('PR05', param_name=param)) common_type_errors = [('integer', 'int'), ('boolean', 'bool'), ('string', 'str')] for wrong_type, right_type in common_type_errors: if wrong_type in doc.parameter_type(param): errs.append(error('PR06', param_name=param, right_type=right_type, wrong_type=wrong_type)) if not doc.parameter_desc(param): errs.append(error('PR07', param_name=param)) else: if not doc.parameter_desc(param)[0].isupper(): errs.append(error('PR08', param_name=param)) if doc.parameter_desc(param)[-1] != '.': errs.append(error('PR09', param_name=param)) if doc.is_function_or_method: if not doc.returns: if doc.method_returns_something: errs.append(error('RT01')) else: if len(doc.returns) == 1 and doc.returns[0].name: errs.append(error('RT02')) for name_or_type, type_, desc in doc.returns: if not desc: errs.append(error('RT03')) else: desc = ' '.join(desc) if not desc[0].isupper(): errs.append(error('RT04')) if not desc.endswith('.'): errs.append(error('RT05')) if not doc.yields and 'yield' in doc.method_source: 
errs.append(error('YD01')) if not doc.see_also: wrns.append(error('SA01')) else: for rel_name, rel_desc in doc.see_also.items(): if rel_desc: if not rel_desc.endswith('.'): errs.append(error('SA02', reference_name=rel_name)) if not rel_desc[0].isupper(): errs.append(error('SA03', reference_name=rel_name)) else: errs.append(error('SA04', reference_name=rel_name)) if rel_name.startswith('pandas.'): errs.append(error('SA05', reference_name=rel_name, right_reference=rel_name[len('pandas.'):])) examples_errs = '' if not doc.examples: wrns.append(error('EX01')) else: examples_errs = doc.examples_errors if examples_errs: errs.append(error('EX02', doctest_log=examples_errs)) for err in doc.validate_pep8(): errs.append(error('EX03', error_code=err.error_code, error_message=err.message, times_happening=' ({} times)'.format(err.count) if err.count > 1 else '')) examples_source_code = ''.join(doc.examples_source_code) for wrong_import in ('numpy', 'pandas'): if 'import {}'.format(wrong_import) in examples_source_code: errs.append(error('EX04', imported_library=wrong_import)) return errs, wrns, examples_errs
def get_validation_data(doc): """ Validate the docstring. Parameters ---------- doc : Docstring A Docstring object with the given function name. Returns ------- tuple errors : list of tuple Errors occurred during validation. warnings : list of tuple Warnings occurred during validation. examples_errs : str Examples usage displayed along the error, otherwise empty string. Notes ----- The errors codes are defined as: - First two characters: Section where the error happens: * GL: Global (no section, like section ordering errors) * SS: Short summary * ES: Extended summary * PR: Parameters * RT: Returns * YD: Yields * RS: Raises * WN: Warns * SA: See Also * NT: Notes * RF: References * EX: Examples - Last two characters: Numeric error code inside the section For example, EX02 is the second codified error in the Examples section (which in this case is assigned to examples that do not pass the tests). The error codes, their corresponding error messages, and the details on how they are validated, are not documented more than in the source code of this function. 
""" errs = [] wrns = [] if not doc.raw_doc: errs.append(error('GL08')) return errs, wrns, '' if doc.start_blank_lines != 1: errs.append(error('GL01')) if doc.end_blank_lines != 1: errs.append(error('GL02')) if doc.double_blank_lines: errs.append(error('GL03')) mentioned_errs = doc.mentioned_private_classes if mentioned_errs: errs.append(error('GL04', mentioned_private_classes=', '.join(mentioned_errs))) for line in doc.raw_doc.splitlines(): if re.match("^ *\t", line): errs.append(error('GL05', line_with_tabs=line.lstrip())) unexpected_sections = [section for section in doc.section_titles if section not in ALLOWED_SECTIONS] for section in unexpected_sections: errs.append(error('GL06', section=section, allowed_sections=', '.join(ALLOWED_SECTIONS))) correct_order = [section for section in ALLOWED_SECTIONS if section in doc.section_titles] if correct_order != doc.section_titles: errs.append(error('GL07', correct_sections=', '.join(correct_order))) if (doc.deprecated_with_directive and not doc.extended_summary.startswith('.. 
deprecated:: ')): errs.append(error('GL09')) if not doc.summary: errs.append(error('SS01')) else: if not doc.summary[0].isupper(): errs.append(error('SS02')) if doc.summary[-1] != '.': errs.append(error('SS03')) if doc.summary != doc.summary.lstrip(): errs.append(error('SS04')) elif (doc.is_function_or_method and doc.summary.split(' ')[0][-1] == 's'): errs.append(error('SS05')) if doc.num_summary_lines > 1: errs.append(error('SS06')) if not doc.extended_summary: wrns.append(('ES01', 'No extended summary found')) # PR01: Parameters not documented # PR02: Unknown parameters # PR03: Wrong parameters order errs += doc.parameter_mismatches for param in doc.doc_parameters: if not param.startswith("*"): # Check can ignore var / kwargs if not doc.parameter_type(param): if ':' in param: errs.append(error('PR10', param_name=param.split(':')[0])) else: errs.append(error('PR04', param_name=param)) else: if doc.parameter_type(param)[-1] == '.': errs.append(error('PR05', param_name=param)) common_type_errors = [('integer', 'int'), ('boolean', 'bool'), ('string', 'str')] for wrong_type, right_type in common_type_errors: if wrong_type in doc.parameter_type(param): errs.append(error('PR06', param_name=param, right_type=right_type, wrong_type=wrong_type)) if not doc.parameter_desc(param): errs.append(error('PR07', param_name=param)) else: if not doc.parameter_desc(param)[0].isupper(): errs.append(error('PR08', param_name=param)) if doc.parameter_desc(param)[-1] != '.': errs.append(error('PR09', param_name=param)) if doc.is_function_or_method: if not doc.returns: if doc.method_returns_something: errs.append(error('RT01')) else: if len(doc.returns) == 1 and doc.returns[0].name: errs.append(error('RT02')) for name_or_type, type_, desc in doc.returns: if not desc: errs.append(error('RT03')) else: desc = ' '.join(desc) if not desc[0].isupper(): errs.append(error('RT04')) if not desc.endswith('.'): errs.append(error('RT05')) if not doc.yields and 'yield' in doc.method_source: 
errs.append(error('YD01')) if not doc.see_also: wrns.append(error('SA01')) else: for rel_name, rel_desc in doc.see_also.items(): if rel_desc: if not rel_desc.endswith('.'): errs.append(error('SA02', reference_name=rel_name)) if not rel_desc[0].isupper(): errs.append(error('SA03', reference_name=rel_name)) else: errs.append(error('SA04', reference_name=rel_name)) if rel_name.startswith('pandas.'): errs.append(error('SA05', reference_name=rel_name, right_reference=rel_name[len('pandas.'):])) examples_errs = '' if not doc.examples: wrns.append(error('EX01')) else: examples_errs = doc.examples_errors if examples_errs: errs.append(error('EX02', doctest_log=examples_errs)) for err in doc.validate_pep8(): errs.append(error('EX03', error_code=err.error_code, error_message=err.message, times_happening=' ({} times)'.format(err.count) if err.count > 1 else '')) examples_source_code = ''.join(doc.examples_source_code) for wrong_import in ('numpy', 'pandas'): if 'import {}'.format(wrong_import) in examples_source_code: errs.append(error('EX04', imported_library=wrong_import)) return errs, wrns, examples_errs
[ "Validate", "the", "docstring", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L599-L785
[ "def", "get_validation_data", "(", "doc", ")", ":", "errs", "=", "[", "]", "wrns", "=", "[", "]", "if", "not", "doc", ".", "raw_doc", ":", "errs", ".", "append", "(", "error", "(", "'GL08'", ")", ")", "return", "errs", ",", "wrns", ",", "''", "if", "doc", ".", "start_blank_lines", "!=", "1", ":", "errs", ".", "append", "(", "error", "(", "'GL01'", ")", ")", "if", "doc", ".", "end_blank_lines", "!=", "1", ":", "errs", ".", "append", "(", "error", "(", "'GL02'", ")", ")", "if", "doc", ".", "double_blank_lines", ":", "errs", ".", "append", "(", "error", "(", "'GL03'", ")", ")", "mentioned_errs", "=", "doc", ".", "mentioned_private_classes", "if", "mentioned_errs", ":", "errs", ".", "append", "(", "error", "(", "'GL04'", ",", "mentioned_private_classes", "=", "', '", ".", "join", "(", "mentioned_errs", ")", ")", ")", "for", "line", "in", "doc", ".", "raw_doc", ".", "splitlines", "(", ")", ":", "if", "re", ".", "match", "(", "\"^ *\\t\"", ",", "line", ")", ":", "errs", ".", "append", "(", "error", "(", "'GL05'", ",", "line_with_tabs", "=", "line", ".", "lstrip", "(", ")", ")", ")", "unexpected_sections", "=", "[", "section", "for", "section", "in", "doc", ".", "section_titles", "if", "section", "not", "in", "ALLOWED_SECTIONS", "]", "for", "section", "in", "unexpected_sections", ":", "errs", ".", "append", "(", "error", "(", "'GL06'", ",", "section", "=", "section", ",", "allowed_sections", "=", "', '", ".", "join", "(", "ALLOWED_SECTIONS", ")", ")", ")", "correct_order", "=", "[", "section", "for", "section", "in", "ALLOWED_SECTIONS", "if", "section", "in", "doc", ".", "section_titles", "]", "if", "correct_order", "!=", "doc", ".", "section_titles", ":", "errs", ".", "append", "(", "error", "(", "'GL07'", ",", "correct_sections", "=", "', '", ".", "join", "(", "correct_order", ")", ")", ")", "if", "(", "doc", ".", "deprecated_with_directive", "and", "not", "doc", ".", "extended_summary", ".", "startswith", "(", "'.. 
deprecated:: '", ")", ")", ":", "errs", ".", "append", "(", "error", "(", "'GL09'", ")", ")", "if", "not", "doc", ".", "summary", ":", "errs", ".", "append", "(", "error", "(", "'SS01'", ")", ")", "else", ":", "if", "not", "doc", ".", "summary", "[", "0", "]", ".", "isupper", "(", ")", ":", "errs", ".", "append", "(", "error", "(", "'SS02'", ")", ")", "if", "doc", ".", "summary", "[", "-", "1", "]", "!=", "'.'", ":", "errs", ".", "append", "(", "error", "(", "'SS03'", ")", ")", "if", "doc", ".", "summary", "!=", "doc", ".", "summary", ".", "lstrip", "(", ")", ":", "errs", ".", "append", "(", "error", "(", "'SS04'", ")", ")", "elif", "(", "doc", ".", "is_function_or_method", "and", "doc", ".", "summary", ".", "split", "(", "' '", ")", "[", "0", "]", "[", "-", "1", "]", "==", "'s'", ")", ":", "errs", ".", "append", "(", "error", "(", "'SS05'", ")", ")", "if", "doc", ".", "num_summary_lines", ">", "1", ":", "errs", ".", "append", "(", "error", "(", "'SS06'", ")", ")", "if", "not", "doc", ".", "extended_summary", ":", "wrns", ".", "append", "(", "(", "'ES01'", ",", "'No extended summary found'", ")", ")", "# PR01: Parameters not documented", "# PR02: Unknown parameters", "# PR03: Wrong parameters order", "errs", "+=", "doc", ".", "parameter_mismatches", "for", "param", "in", "doc", ".", "doc_parameters", ":", "if", "not", "param", ".", "startswith", "(", "\"*\"", ")", ":", "# Check can ignore var / kwargs", "if", "not", "doc", ".", "parameter_type", "(", "param", ")", ":", "if", "':'", "in", "param", ":", "errs", ".", "append", "(", "error", "(", "'PR10'", ",", "param_name", "=", "param", ".", "split", "(", "':'", ")", "[", "0", "]", ")", ")", "else", ":", "errs", ".", "append", "(", "error", "(", "'PR04'", ",", "param_name", "=", "param", ")", ")", "else", ":", "if", "doc", ".", "parameter_type", "(", "param", ")", "[", "-", "1", "]", "==", "'.'", ":", "errs", ".", "append", "(", "error", "(", "'PR05'", ",", "param_name", "=", "param", ")", ")", "common_type_errors", 
"=", "[", "(", "'integer'", ",", "'int'", ")", ",", "(", "'boolean'", ",", "'bool'", ")", ",", "(", "'string'", ",", "'str'", ")", "]", "for", "wrong_type", ",", "right_type", "in", "common_type_errors", ":", "if", "wrong_type", "in", "doc", ".", "parameter_type", "(", "param", ")", ":", "errs", ".", "append", "(", "error", "(", "'PR06'", ",", "param_name", "=", "param", ",", "right_type", "=", "right_type", ",", "wrong_type", "=", "wrong_type", ")", ")", "if", "not", "doc", ".", "parameter_desc", "(", "param", ")", ":", "errs", ".", "append", "(", "error", "(", "'PR07'", ",", "param_name", "=", "param", ")", ")", "else", ":", "if", "not", "doc", ".", "parameter_desc", "(", "param", ")", "[", "0", "]", ".", "isupper", "(", ")", ":", "errs", ".", "append", "(", "error", "(", "'PR08'", ",", "param_name", "=", "param", ")", ")", "if", "doc", ".", "parameter_desc", "(", "param", ")", "[", "-", "1", "]", "!=", "'.'", ":", "errs", ".", "append", "(", "error", "(", "'PR09'", ",", "param_name", "=", "param", ")", ")", "if", "doc", ".", "is_function_or_method", ":", "if", "not", "doc", ".", "returns", ":", "if", "doc", ".", "method_returns_something", ":", "errs", ".", "append", "(", "error", "(", "'RT01'", ")", ")", "else", ":", "if", "len", "(", "doc", ".", "returns", ")", "==", "1", "and", "doc", ".", "returns", "[", "0", "]", ".", "name", ":", "errs", ".", "append", "(", "error", "(", "'RT02'", ")", ")", "for", "name_or_type", ",", "type_", ",", "desc", "in", "doc", ".", "returns", ":", "if", "not", "desc", ":", "errs", ".", "append", "(", "error", "(", "'RT03'", ")", ")", "else", ":", "desc", "=", "' '", ".", "join", "(", "desc", ")", "if", "not", "desc", "[", "0", "]", ".", "isupper", "(", ")", ":", "errs", ".", "append", "(", "error", "(", "'RT04'", ")", ")", "if", "not", "desc", ".", "endswith", "(", "'.'", ")", ":", "errs", ".", "append", "(", "error", "(", "'RT05'", ")", ")", "if", "not", "doc", ".", "yields", "and", "'yield'", "in", "doc", ".", "method_source", 
":", "errs", ".", "append", "(", "error", "(", "'YD01'", ")", ")", "if", "not", "doc", ".", "see_also", ":", "wrns", ".", "append", "(", "error", "(", "'SA01'", ")", ")", "else", ":", "for", "rel_name", ",", "rel_desc", "in", "doc", ".", "see_also", ".", "items", "(", ")", ":", "if", "rel_desc", ":", "if", "not", "rel_desc", ".", "endswith", "(", "'.'", ")", ":", "errs", ".", "append", "(", "error", "(", "'SA02'", ",", "reference_name", "=", "rel_name", ")", ")", "if", "not", "rel_desc", "[", "0", "]", ".", "isupper", "(", ")", ":", "errs", ".", "append", "(", "error", "(", "'SA03'", ",", "reference_name", "=", "rel_name", ")", ")", "else", ":", "errs", ".", "append", "(", "error", "(", "'SA04'", ",", "reference_name", "=", "rel_name", ")", ")", "if", "rel_name", ".", "startswith", "(", "'pandas.'", ")", ":", "errs", ".", "append", "(", "error", "(", "'SA05'", ",", "reference_name", "=", "rel_name", ",", "right_reference", "=", "rel_name", "[", "len", "(", "'pandas.'", ")", ":", "]", ")", ")", "examples_errs", "=", "''", "if", "not", "doc", ".", "examples", ":", "wrns", ".", "append", "(", "error", "(", "'EX01'", ")", ")", "else", ":", "examples_errs", "=", "doc", ".", "examples_errors", "if", "examples_errs", ":", "errs", ".", "append", "(", "error", "(", "'EX02'", ",", "doctest_log", "=", "examples_errs", ")", ")", "for", "err", "in", "doc", ".", "validate_pep8", "(", ")", ":", "errs", ".", "append", "(", "error", "(", "'EX03'", ",", "error_code", "=", "err", ".", "error_code", ",", "error_message", "=", "err", ".", "message", ",", "times_happening", "=", "' ({} times)'", ".", "format", "(", "err", ".", "count", ")", "if", "err", ".", "count", ">", "1", "else", "''", ")", ")", "examples_source_code", "=", "''", ".", "join", "(", "doc", ".", "examples_source_code", ")", "for", "wrong_import", "in", "(", "'numpy'", ",", "'pandas'", ")", ":", "if", "'import {}'", ".", "format", "(", "wrong_import", ")", "in", "examples_source_code", ":", "errs", ".", "append", "(", 
"error", "(", "'EX04'", ",", "imported_library", "=", "wrong_import", ")", ")", "return", "errs", ",", "wrns", ",", "examples_errs" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
validate_one
Validate the docstring for the given func_name Parameters ---------- func_name : function Function whose docstring will be evaluated (e.g. pandas.read_csv). Returns ------- dict A dictionary containing all the information obtained from validating the docstring.
scripts/validate_docstrings.py
def validate_one(func_name): """ Validate the docstring for the given func_name Parameters ---------- func_name : function Function whose docstring will be evaluated (e.g. pandas.read_csv). Returns ------- dict A dictionary containing all the information obtained from validating the docstring. """ doc = Docstring(func_name) errs, wrns, examples_errs = get_validation_data(doc) return {'type': doc.type, 'docstring': doc.clean_doc, 'deprecated': doc.deprecated, 'file': doc.source_file_name, 'file_line': doc.source_file_def_line, 'github_link': doc.github_url, 'errors': errs, 'warnings': wrns, 'examples_errors': examples_errs}
def validate_one(func_name): """ Validate the docstring for the given func_name Parameters ---------- func_name : function Function whose docstring will be evaluated (e.g. pandas.read_csv). Returns ------- dict A dictionary containing all the information obtained from validating the docstring. """ doc = Docstring(func_name) errs, wrns, examples_errs = get_validation_data(doc) return {'type': doc.type, 'docstring': doc.clean_doc, 'deprecated': doc.deprecated, 'file': doc.source_file_name, 'file_line': doc.source_file_def_line, 'github_link': doc.github_url, 'errors': errs, 'warnings': wrns, 'examples_errors': examples_errs}
[ "Validate", "the", "docstring", "for", "the", "given", "func_name" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L788-L813
[ "def", "validate_one", "(", "func_name", ")", ":", "doc", "=", "Docstring", "(", "func_name", ")", "errs", ",", "wrns", ",", "examples_errs", "=", "get_validation_data", "(", "doc", ")", "return", "{", "'type'", ":", "doc", ".", "type", ",", "'docstring'", ":", "doc", ".", "clean_doc", ",", "'deprecated'", ":", "doc", ".", "deprecated", ",", "'file'", ":", "doc", ".", "source_file_name", ",", "'file_line'", ":", "doc", ".", "source_file_def_line", ",", "'github_link'", ":", "doc", ".", "github_url", ",", "'errors'", ":", "errs", ",", "'warnings'", ":", "wrns", ",", "'examples_errors'", ":", "examples_errs", "}" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
validate_all
Execute the validation of all docstrings, and return a dict with the results. Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information.
scripts/validate_docstrings.py
def validate_all(prefix, ignore_deprecated=False): """ Execute the validation of all docstrings, and return a dict with the results. Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information. """ result = {} seen = {} # functions from the API docs api_doc_fnames = os.path.join( BASE_PATH, 'doc', 'source', 'reference', '*.rst') api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) for func_name, func_obj, section, subsection in api_items: if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info shared_code_key = doc_info['file'], doc_info['file_line'] shared_code = seen.get(shared_code_key, '') result[func_name].update({'in_api': True, 'section': section, 'subsection': subsection, 'shared_code_with': shared_code}) seen[shared_code_key] = func_name # functions from introspecting Series, DataFrame and Panel api_item_names = set(list(zip(*api_items))[0]) for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel): for member in inspect.getmembers(class_): func_name = 'pandas.{}.{}'.format(class_.__name__, member[0]) if (not member[0].startswith('_') and func_name not in api_item_names): if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info result[func_name]['in_api'] = False return result
def validate_all(prefix, ignore_deprecated=False): """ Execute the validation of all docstrings, and return a dict with the results. Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information. """ result = {} seen = {} # functions from the API docs api_doc_fnames = os.path.join( BASE_PATH, 'doc', 'source', 'reference', '*.rst') api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) for func_name, func_obj, section, subsection in api_items: if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info shared_code_key = doc_info['file'], doc_info['file_line'] shared_code = seen.get(shared_code_key, '') result[func_name].update({'in_api': True, 'section': section, 'subsection': subsection, 'shared_code_with': shared_code}) seen[shared_code_key] = func_name # functions from introspecting Series, DataFrame and Panel api_item_names = set(list(zip(*api_items))[0]) for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel): for member in inspect.getmembers(class_): func_name = 'pandas.{}.{}'.format(class_.__name__, member[0]) if (not member[0].startswith('_') and func_name not in api_item_names): if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info result[func_name]['in_api'] = False return result
[ "Execute", "the", "validation", "of", "all", "docstrings", "and", "return", "a", "dict", "with", "the", "results", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L816-L877
[ "def", "validate_all", "(", "prefix", ",", "ignore_deprecated", "=", "False", ")", ":", "result", "=", "{", "}", "seen", "=", "{", "}", "# functions from the API docs", "api_doc_fnames", "=", "os", ".", "path", ".", "join", "(", "BASE_PATH", ",", "'doc'", ",", "'source'", ",", "'reference'", ",", "'*.rst'", ")", "api_items", "=", "[", "]", "for", "api_doc_fname", "in", "glob", ".", "glob", "(", "api_doc_fnames", ")", ":", "with", "open", "(", "api_doc_fname", ")", "as", "f", ":", "api_items", "+=", "list", "(", "get_api_items", "(", "f", ")", ")", "for", "func_name", ",", "func_obj", ",", "section", ",", "subsection", "in", "api_items", ":", "if", "prefix", "and", "not", "func_name", ".", "startswith", "(", "prefix", ")", ":", "continue", "doc_info", "=", "validate_one", "(", "func_name", ")", "if", "ignore_deprecated", "and", "doc_info", "[", "'deprecated'", "]", ":", "continue", "result", "[", "func_name", "]", "=", "doc_info", "shared_code_key", "=", "doc_info", "[", "'file'", "]", ",", "doc_info", "[", "'file_line'", "]", "shared_code", "=", "seen", ".", "get", "(", "shared_code_key", ",", "''", ")", "result", "[", "func_name", "]", ".", "update", "(", "{", "'in_api'", ":", "True", ",", "'section'", ":", "section", ",", "'subsection'", ":", "subsection", ",", "'shared_code_with'", ":", "shared_code", "}", ")", "seen", "[", "shared_code_key", "]", "=", "func_name", "# functions from introspecting Series, DataFrame and Panel", "api_item_names", "=", "set", "(", "list", "(", "zip", "(", "*", "api_items", ")", ")", "[", "0", "]", ")", "for", "class_", "in", "(", "pandas", ".", "Series", ",", "pandas", ".", "DataFrame", ",", "pandas", ".", "Panel", ")", ":", "for", "member", "in", "inspect", ".", "getmembers", "(", "class_", ")", ":", "func_name", "=", "'pandas.{}.{}'", ".", "format", "(", "class_", ".", "__name__", ",", "member", "[", "0", "]", ")", "if", "(", "not", "member", "[", "0", "]", ".", "startswith", "(", "'_'", ")", "and", "func_name", "not", "in", 
"api_item_names", ")", ":", "if", "prefix", "and", "not", "func_name", ".", "startswith", "(", "prefix", ")", ":", "continue", "doc_info", "=", "validate_one", "(", "func_name", ")", "if", "ignore_deprecated", "and", "doc_info", "[", "'deprecated'", "]", ":", "continue", "result", "[", "func_name", "]", "=", "doc_info", "result", "[", "func_name", "]", "[", "'in_api'", "]", "=", "False", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Docstring._load_obj
Import Python object from its name as string. Parameters ---------- name : str Object name to import (e.g. pandas.Series.str.upper) Returns ------- object Python object that can be a class, method, function... Examples -------- >>> Docstring._load_obj('pandas.Series') <class 'pandas.core.series.Series'>
scripts/validate_docstrings.py
def _load_obj(name): """ Import Python object from its name as string. Parameters ---------- name : str Object name to import (e.g. pandas.Series.str.upper) Returns ------- object Python object that can be a class, method, function... Examples -------- >>> Docstring._load_obj('pandas.Series') <class 'pandas.core.series.Series'> """ for maxsplit in range(1, name.count('.') + 1): # TODO when py3 only replace by: module, *func_parts = ... func_name_split = name.rsplit('.', maxsplit) module = func_name_split[0] func_parts = func_name_split[1:] try: obj = importlib.import_module(module) except ImportError: pass else: continue if 'obj' not in locals(): raise ImportError('No module can be imported ' 'from "{}"'.format(name)) for part in func_parts: obj = getattr(obj, part) return obj
def _load_obj(name): """ Import Python object from its name as string. Parameters ---------- name : str Object name to import (e.g. pandas.Series.str.upper) Returns ------- object Python object that can be a class, method, function... Examples -------- >>> Docstring._load_obj('pandas.Series') <class 'pandas.core.series.Series'> """ for maxsplit in range(1, name.count('.') + 1): # TODO when py3 only replace by: module, *func_parts = ... func_name_split = name.rsplit('.', maxsplit) module = func_name_split[0] func_parts = func_name_split[1:] try: obj = importlib.import_module(module) except ImportError: pass else: continue if 'obj' not in locals(): raise ImportError('No module can be imported ' 'from "{}"'.format(name)) for part in func_parts: obj = getattr(obj, part) return obj
[ "Import", "Python", "object", "from", "its", "name", "as", "string", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L240-L277
[ "def", "_load_obj", "(", "name", ")", ":", "for", "maxsplit", "in", "range", "(", "1", ",", "name", ".", "count", "(", "'.'", ")", "+", "1", ")", ":", "# TODO when py3 only replace by: module, *func_parts = ...", "func_name_split", "=", "name", ".", "rsplit", "(", "'.'", ",", "maxsplit", ")", "module", "=", "func_name_split", "[", "0", "]", "func_parts", "=", "func_name_split", "[", "1", ":", "]", "try", ":", "obj", "=", "importlib", ".", "import_module", "(", "module", ")", "except", "ImportError", ":", "pass", "else", ":", "continue", "if", "'obj'", "not", "in", "locals", "(", ")", ":", "raise", "ImportError", "(", "'No module can be imported '", "'from \"{}\"'", ".", "format", "(", "name", ")", ")", "for", "part", "in", "func_parts", ":", "obj", "=", "getattr", "(", "obj", ",", "part", ")", "return", "obj" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Docstring._to_original_callable
Find the Python object that contains the source code of the object. This is useful to find the place in the source code (file and line number) where a docstring is defined. It does not currently work for all cases, but it should help find some (properties...).
scripts/validate_docstrings.py
def _to_original_callable(obj): """ Find the Python object that contains the source code of the object. This is useful to find the place in the source code (file and line number) where a docstring is defined. It does not currently work for all cases, but it should help find some (properties...). """ while True: if inspect.isfunction(obj) or inspect.isclass(obj): f = inspect.getfile(obj) if f.startswith('<') and f.endswith('>'): return None return obj if inspect.ismethod(obj): obj = obj.__func__ elif isinstance(obj, functools.partial): obj = obj.func elif isinstance(obj, property): obj = obj.fget else: return None
def _to_original_callable(obj): """ Find the Python object that contains the source code of the object. This is useful to find the place in the source code (file and line number) where a docstring is defined. It does not currently work for all cases, but it should help find some (properties...). """ while True: if inspect.isfunction(obj) or inspect.isclass(obj): f = inspect.getfile(obj) if f.startswith('<') and f.endswith('>'): return None return obj if inspect.ismethod(obj): obj = obj.__func__ elif isinstance(obj, functools.partial): obj = obj.func elif isinstance(obj, property): obj = obj.fget else: return None
[ "Find", "the", "Python", "object", "that", "contains", "the", "source", "code", "of", "the", "object", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L280-L301
[ "def", "_to_original_callable", "(", "obj", ")", ":", "while", "True", ":", "if", "inspect", ".", "isfunction", "(", "obj", ")", "or", "inspect", ".", "isclass", "(", "obj", ")", ":", "f", "=", "inspect", ".", "getfile", "(", "obj", ")", "if", "f", ".", "startswith", "(", "'<'", ")", "and", "f", ".", "endswith", "(", "'>'", ")", ":", "return", "None", "return", "obj", "if", "inspect", ".", "ismethod", "(", "obj", ")", ":", "obj", "=", "obj", ".", "__func__", "elif", "isinstance", "(", "obj", ",", "functools", ".", "partial", ")", ":", "obj", "=", "obj", ".", "func", "elif", "isinstance", "(", "obj", ",", "property", ")", ":", "obj", "=", "obj", ".", "fget", "else", ":", "return", "None" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Docstring.source_file_name
File name where the object is implemented (e.g. pandas/core/frame.py).
scripts/validate_docstrings.py
def source_file_name(self): """ File name where the object is implemented (e.g. pandas/core/frame.py). """ try: fname = inspect.getsourcefile(self.code_obj) except TypeError: # In some cases the object is something complex like a cython # object that can't be easily introspected. An it's better to # return the source code file of the object as None, than crash pass else: if fname: fname = os.path.relpath(fname, BASE_PATH) return fname
def source_file_name(self): """ File name where the object is implemented (e.g. pandas/core/frame.py). """ try: fname = inspect.getsourcefile(self.code_obj) except TypeError: # In some cases the object is something complex like a cython # object that can't be easily introspected. An it's better to # return the source code file of the object as None, than crash pass else: if fname: fname = os.path.relpath(fname, BASE_PATH) return fname
[ "File", "name", "where", "the", "object", "is", "implemented", "(", "e", ".", "g", ".", "pandas", "/", "core", "/", "frame", ".", "py", ")", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L314-L328
[ "def", "source_file_name", "(", "self", ")", ":", "try", ":", "fname", "=", "inspect", ".", "getsourcefile", "(", "self", ".", "code_obj", ")", "except", "TypeError", ":", "# In some cases the object is something complex like a cython", "# object that can't be easily introspected. An it's better to", "# return the source code file of the object as None, than crash", "pass", "else", ":", "if", "fname", ":", "fname", "=", "os", ".", "path", ".", "relpath", "(", "fname", ",", "BASE_PATH", ")", "return", "fname" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037