Dataset columns (dtype, with min/max of the value or of the string length):

| column           | dtype                    | min | max   |
|------------------|--------------------------|-----|-------|
| id               | int32                    | 0   | 252k  |
| repo             | string (lengths)         | 7   | 55    |
| path             | string (lengths)         | 4   | 127   |
| func_name        | string (lengths)         | 1   | 88    |
| original_string  | string (lengths)         | 75  | 19.8k |
| language         | string (1 class)         |     |       |
| code             | string (lengths)         | 75  | 19.8k |
| code_tokens      | list                     |     |       |
| docstring        | string (lengths)         | 3   | 17.3k |
| docstring_tokens | list                     |     |       |
| sha              | string (lengths)         | 40  | 40    |
| url              | string (lengths)         | 87  | 242   |
19,600
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.make_empty
def make_empty(self, axes=None):
    """ return an empty BlockManager with the items axis of len 0 """
    if axes is None:
        axes = [ensure_index([])] + [ensure_index(a)
                                     for a in self.axes[1:]]

    # preserve dtype if possible
    if self.ndim == 1:
        blocks = np.array([], dtype=self.array_dtype)
    else:
        blocks = []
    return self.__class__(blocks, axes)
python
[ "def", "make_empty", "(", "self", ",", "axes", "=", "None", ")", ":", "if", "axes", "is", "None", ":", "axes", "=", "[", "ensure_index", "(", "[", "]", ")", "]", "+", "[", "ensure_index", "(", "a", ")", "for", "a", "in", "self", ".", "axes", "[", "1", ":", "]", "]", "# preserve dtype if possible", "if", "self", ".", "ndim", "==", "1", ":", "blocks", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "self", ".", "array_dtype", ")", "else", ":", "blocks", "=", "[", "]", "return", "self", ".", "__class__", "(", "blocks", ",", "axes", ")" ]
return an empty BlockManager with the items axis of len 0
[ "return", "an", "empty", "BlockManager", "with", "the", "items", "axis", "of", "len", "0" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L120-L131
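A short usage sketch for the record above. It assumes a pandas build from this commit's era, where the private `DataFrame._data` attribute exposes the `BlockManager` (an assumption about internal, since-renamed API):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})

# _data is the BlockManager in this era of pandas internals
# (assumption: private API, renamed to _mgr in later releases).
mgr = df._data
empty = mgr.make_empty()  # items axis emptied, remaining axes preserved
print(empty.shape)        # (0, 2): no columns, original row axis kept
```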
19,601
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.rename_axis
def rename_axis(self, mapper, axis, copy=True, level=None):
    """
    Rename one of axes.

    Parameters
    ----------
    mapper : unary callable
    axis : int
    copy : boolean, default True
    level : int, default None
    """
    obj = self.copy(deep=copy)
    obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
    return obj
python
[ "def", "rename_axis", "(", "self", ",", "mapper", ",", "axis", ",", "copy", "=", "True", ",", "level", "=", "None", ")", ":", "obj", "=", "self", ".", "copy", "(", "deep", "=", "copy", ")", "obj", ".", "set_axis", "(", "axis", ",", "_transform_index", "(", "self", ".", "axes", "[", "axis", "]", ",", "mapper", ",", "level", ")", ")", "return", "obj" ]
Rename one of axes.

Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
level : int, default None
[ "Rename", "one", "of", "axes", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L159-L172
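For orientation, the public path that presumably reaches this method is `DataFrame.rename` with a callable mapper; a minimal sketch using only public API:

```python
import pandas as pd

df = pd.DataFrame({"a": [1], "b": [2]})

# a unary callable mapper, as the docstring above describes
renamed = df.rename(columns=str.upper)
print(list(renamed.columns))  # ['A', 'B']
```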
19,602
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager._get_counts
def _get_counts(self, f):
    """ return a dict of the counts of the function in BlockManager """
    self._consolidate_inplace()
    counts = dict()
    for b in self.blocks:
        v = f(b)
        counts[v] = counts.get(v, 0) + b.shape[0]
    return counts
python
[ "def", "_get_counts", "(", "self", ",", "f", ")", ":", "self", ".", "_consolidate_inplace", "(", ")", "counts", "=", "dict", "(", ")", "for", "b", "in", "self", ".", "blocks", ":", "v", "=", "f", "(", "b", ")", "counts", "[", "v", "]", "=", "counts", ".", "get", "(", "v", ",", "0", ")", "+", "b", ".", "shape", "[", "0", "]", "return", "counts" ]
return a dict of the counts of the function in BlockManager
[ "return", "a", "dict", "of", "the", "counts", "of", "the", "function", "in", "BlockManager" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L210-L217
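A sketch of the kind of caller this serves: `DataFrame.get_dtype_counts` (present in this era of pandas, removed later) passes a per-block function, here effectively `lambda b: b.dtype.name`, and gets back counts weighted by block height:

```python
import pandas as pd

df = pd.DataFrame({"a": [1], "b": [2.0], "c": [3.0]})

# counts of items per dtype, tallied block by block under the hood
print(df.get_dtype_counts().to_dict())  # {'float64': 2, 'int64': 1}
```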
19,603
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.apply
def apply(self, f, axes=None, filter=None, do_integrity_check=False,
          consolidate=True, **kwargs):
    """
    iterate over the blocks, collect and create
    a new block manager

    Parameters
    ----------
    f : the callable or function name to operate on at the block level
    axes : optional (if not supplied, use self.axes)
    filter : list, if supplied, only call the block if the filter is in
             the block
    do_integrity_check : boolean, default False. Do the block manager
        integrity check
    consolidate: boolean, default True. Join together blocks having same
        dtype

    Returns
    -------
    Block Manager (new object)
    """

    result_blocks = []

    # filter kwarg is used in replace-* family of methods
    if filter is not None:
        filter_locs = set(self.items.get_indexer_for(filter))
        if len(filter_locs) == len(self.items):
            # All items are included, as if there were no filtering
            filter = None
        else:
            kwargs['filter'] = filter_locs

    if consolidate:
        self._consolidate_inplace()

    if f == 'where':
        align_copy = True
        if kwargs.get('align', True):
            align_keys = ['other', 'cond']
        else:
            align_keys = ['cond']
    elif f == 'putmask':
        align_copy = False
        if kwargs.get('align', True):
            align_keys = ['new', 'mask']
        else:
            align_keys = ['mask']
    elif f == 'fillna':
        # fillna internally does putmask, maybe it's better to do this
        # at mgr, not block level?
        align_copy = False
        align_keys = ['value']
    else:
        align_keys = []

    # TODO(EA): may interfere with ExtensionBlock.setitem for blocks
    # with a .values attribute.
    aligned_args = {k: kwargs[k]
                    for k in align_keys
                    if hasattr(kwargs[k], 'values') and
                    not isinstance(kwargs[k], ABCExtensionArray)}

    for b in self.blocks:
        if filter is not None:
            if not b.mgr_locs.isin(filter_locs).any():
                result_blocks.append(b)
                continue

        if aligned_args:
            b_items = self.items[b.mgr_locs.indexer]

            for k, obj in aligned_args.items():
                axis = getattr(obj, '_info_axis_number', 0)
                kwargs[k] = obj.reindex(b_items, axis=axis,
                                        copy=align_copy)

        applied = getattr(b, f)(**kwargs)
        result_blocks = _extend_blocks(applied, result_blocks)

    if len(result_blocks) == 0:
        return self.make_empty(axes or self.axes)
    bm = self.__class__(result_blocks, axes or self.axes,
                        do_integrity_check=do_integrity_check)
    bm._consolidate_inplace()
    return bm
python
[ "def", "apply", "(", "self", ",", "f", ",", "axes", "=", "None", ",", "filter", "=", "None", ",", "do_integrity_check", "=", "False", ",", "consolidate", "=", "True", ",", "*", "*", "kwargs", ")", ":", "result_blocks", "=", "[", "]", "# filter kwarg is used in replace-* family of methods", "if", "filter", "is", "not", "None", ":", "filter_locs", "=", "set", "(", "self", ".", "items", ".", "get_indexer_for", "(", "filter", ")", ")", "if", "len", "(", "filter_locs", ")", "==", "len", "(", "self", ".", "items", ")", ":", "# All items are included, as if there were no filtering", "filter", "=", "None", "else", ":", "kwargs", "[", "'filter'", "]", "=", "filter_locs", "if", "consolidate", ":", "self", ".", "_consolidate_inplace", "(", ")", "if", "f", "==", "'where'", ":", "align_copy", "=", "True", "if", "kwargs", ".", "get", "(", "'align'", ",", "True", ")", ":", "align_keys", "=", "[", "'other'", ",", "'cond'", "]", "else", ":", "align_keys", "=", "[", "'cond'", "]", "elif", "f", "==", "'putmask'", ":", "align_copy", "=", "False", "if", "kwargs", ".", "get", "(", "'align'", ",", "True", ")", ":", "align_keys", "=", "[", "'new'", ",", "'mask'", "]", "else", ":", "align_keys", "=", "[", "'mask'", "]", "elif", "f", "==", "'fillna'", ":", "# fillna internally does putmask, maybe it's better to do this", "# at mgr, not block level?", "align_copy", "=", "False", "align_keys", "=", "[", "'value'", "]", "else", ":", "align_keys", "=", "[", "]", "# TODO(EA): may interfere with ExtensionBlock.setitem for blocks", "# with a .values attribute.", "aligned_args", "=", "{", "k", ":", "kwargs", "[", "k", "]", "for", "k", "in", "align_keys", "if", "hasattr", "(", "kwargs", "[", "k", "]", ",", "'values'", ")", "and", "not", "isinstance", "(", "kwargs", "[", "k", "]", ",", "ABCExtensionArray", ")", "}", "for", "b", "in", "self", ".", "blocks", ":", "if", "filter", "is", "not", "None", ":", "if", "not", "b", ".", "mgr_locs", ".", "isin", "(", "filter_locs", ")", ".", "any", "(", ")", ":", "result_blocks", ".", "append", "(", "b", ")", "continue", "if", "aligned_args", ":", "b_items", "=", "self", ".", "items", "[", "b", ".", "mgr_locs", ".", "indexer", "]", "for", "k", ",", "obj", "in", "aligned_args", ".", "items", "(", ")", ":", "axis", "=", "getattr", "(", "obj", ",", "'_info_axis_number'", ",", "0", ")", "kwargs", "[", "k", "]", "=", "obj", ".", "reindex", "(", "b_items", ",", "axis", "=", "axis", ",", "copy", "=", "align_copy", ")", "applied", "=", "getattr", "(", "b", ",", "f", ")", "(", "*", "*", "kwargs", ")", "result_blocks", "=", "_extend_blocks", "(", "applied", ",", "result_blocks", ")", "if", "len", "(", "result_blocks", ")", "==", "0", ":", "return", "self", ".", "make_empty", "(", "axes", "or", "self", ".", "axes", ")", "bm", "=", "self", ".", "__class__", "(", "result_blocks", ",", "axes", "or", "self", ".", "axes", ",", "do_integrity_check", "=", "do_integrity_check", ")", "bm", ".", "_consolidate_inplace", "(", ")", "return", "bm" ]
iterate over the blocks, collect and create
a new block manager

Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
         the block
do_integrity_check : boolean, default False. Do the block manager
    integrity check
consolidate: boolean, default True. Join together blocks having same
    dtype

Returns
-------
Block Manager (new object)
[ "iterate", "over", "the", "blocks", "collect", "and", "create", "a", "new", "block", "manager" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L318-L403
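The heart of `apply` is dispatch by name: `getattr(b, f)(**kwargs)` runs the named operation on each block, and the results are gathered into a new manager. A stripped-down sketch of that pattern with hypothetical `Block`/`Manager` stand-ins (not the pandas classes):

```python
class Block:
    def __init__(self, values):
        self.values = values

    def double(self):
        # a block-level operation; real blocks may return several blocks
        return Block([v * 2 for v in self.values])


class Manager:
    def __init__(self, blocks):
        self.blocks = blocks

    def apply(self, f, **kwargs):
        # same shape as BlockManager.apply: look the operation up by
        # name on each block and rebuild a manager from the results
        return Manager([getattr(b, f)(**kwargs) for b in self.blocks])


m = Manager([Block([1, 2]), Block([3])])
print([b.values for b in m.apply("double").blocks])  # [[2, 4], [6]]
```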
19,604
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.quantile
def quantile(self, axis=0, consolidate=True, transposed=False,
             interpolation='linear', qs=None, numeric_only=None):
    """
    Iterate over blocks applying quantile reduction.
    This routine is intended for reduction type operations and
    will do inference on the generated blocks.

    Parameters
    ----------
    axis: reduction axis, default 0
    consolidate: boolean, default True. Join together blocks having same
        dtype
    transposed: boolean, default False
        we are holding transposed data
    interpolation : type of interpolation, default 'linear'
    qs : a scalar or list of the quantiles to be computed
    numeric_only : ignored

    Returns
    -------
    Block Manager (new object)
    """
    # Series dispatches to DataFrame for quantile, which allows us to
    # simplify some of the code here and in the blocks

    assert self.ndim >= 2

    if consolidate:
        self._consolidate_inplace()

    def get_axe(block, qs, axes):
        from pandas import Float64Index
        if is_list_like(qs):
            ax = Float64Index(qs)
        elif block.ndim == 1:
            ax = Float64Index([qs])
        else:
            ax = axes[0]
        return ax

    axes, blocks = [], []
    for b in self.blocks:
        block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)

        axe = get_axe(b, qs, axes=self.axes)

        axes.append(axe)
        blocks.append(block)

    # note that some DatetimeTZ, Categorical are always ndim==1
    ndim = {b.ndim for b in blocks}
    assert 0 not in ndim, ndim

    if 2 in ndim:

        new_axes = list(self.axes)

        # multiple blocks that are reduced
        if len(blocks) > 1:
            new_axes[1] = axes[0]

            # reset the placement to the original
            for b, sb in zip(blocks, self.blocks):
                b.mgr_locs = sb.mgr_locs

        else:
            new_axes[axis] = Index(np.concatenate(
                [ax.values for ax in axes]))

        if transposed:
            new_axes = new_axes[::-1]
            blocks = [b.make_block(b.values.T,
                                   placement=np.arange(b.shape[1])
                                   ) for b in blocks]

        return self.__class__(blocks, new_axes)

    # single block, i.e. ndim == {1}
    values = _concat._concat_compat([b.values for b in blocks])

    # compute the orderings of our original data
    if len(self.blocks) > 1:

        indexer = np.empty(len(self.axes[0]), dtype=np.intp)
        i = 0
        for b in self.blocks:
            for j in b.mgr_locs:
                indexer[j] = i
                i = i + 1

        values = values.take(indexer)

    return SingleBlockManager(
        [make_block(values,
                    ndim=1,
                    placement=np.arange(len(values)))],
        axes[0])
python
[ "def", "quantile", "(", "self", ",", "axis", "=", "0", ",", "consolidate", "=", "True", ",", "transposed", "=", "False", ",", "interpolation", "=", "'linear'", ",", "qs", "=", "None", ",", "numeric_only", "=", "None", ")", ":", "# Series dispatches to DataFrame for quantile, which allows us to", "# simplify some of the code here and in the blocks", "assert", "self", ".", "ndim", ">=", "2", "if", "consolidate", ":", "self", ".", "_consolidate_inplace", "(", ")", "def", "get_axe", "(", "block", ",", "qs", ",", "axes", ")", ":", "from", "pandas", "import", "Float64Index", "if", "is_list_like", "(", "qs", ")", ":", "ax", "=", "Float64Index", "(", "qs", ")", "elif", "block", ".", "ndim", "==", "1", ":", "ax", "=", "Float64Index", "(", "[", "qs", "]", ")", "else", ":", "ax", "=", "axes", "[", "0", "]", "return", "ax", "axes", ",", "blocks", "=", "[", "]", ",", "[", "]", "for", "b", "in", "self", ".", "blocks", ":", "block", "=", "b", ".", "quantile", "(", "axis", "=", "axis", ",", "qs", "=", "qs", ",", "interpolation", "=", "interpolation", ")", "axe", "=", "get_axe", "(", "b", ",", "qs", ",", "axes", "=", "self", ".", "axes", ")", "axes", ".", "append", "(", "axe", ")", "blocks", ".", "append", "(", "block", ")", "# note that some DatetimeTZ, Categorical are always ndim==1", "ndim", "=", "{", "b", ".", "ndim", "for", "b", "in", "blocks", "}", "assert", "0", "not", "in", "ndim", ",", "ndim", "if", "2", "in", "ndim", ":", "new_axes", "=", "list", "(", "self", ".", "axes", ")", "# multiple blocks that are reduced", "if", "len", "(", "blocks", ")", ">", "1", ":", "new_axes", "[", "1", "]", "=", "axes", "[", "0", "]", "# reset the placement to the original", "for", "b", ",", "sb", "in", "zip", "(", "blocks", ",", "self", ".", "blocks", ")", ":", "b", ".", "mgr_locs", "=", "sb", ".", "mgr_locs", "else", ":", "new_axes", "[", "axis", "]", "=", "Index", "(", "np", ".", "concatenate", "(", "[", "ax", ".", "values", "for", "ax", "in", "axes", "]", ")", ")", "if", "transposed", ":", "new_axes", "=", "new_axes", "[", ":", ":", "-", "1", "]", "blocks", "=", "[", "b", ".", "make_block", "(", "b", ".", "values", ".", "T", ",", "placement", "=", "np", ".", "arange", "(", "b", ".", "shape", "[", "1", "]", ")", ")", "for", "b", "in", "blocks", "]", "return", "self", ".", "__class__", "(", "blocks", ",", "new_axes", ")", "# single block, i.e. ndim == {1}", "values", "=", "_concat", ".", "_concat_compat", "(", "[", "b", ".", "values", "for", "b", "in", "blocks", "]", ")", "# compute the orderings of our original data", "if", "len", "(", "self", ".", "blocks", ")", ">", "1", ":", "indexer", "=", "np", ".", "empty", "(", "len", "(", "self", ".", "axes", "[", "0", "]", ")", ",", "dtype", "=", "np", ".", "intp", ")", "i", "=", "0", "for", "b", "in", "self", ".", "blocks", ":", "for", "j", "in", "b", ".", "mgr_locs", ":", "indexer", "[", "j", "]", "=", "i", "i", "=", "i", "+", "1", "values", "=", "values", ".", "take", "(", "indexer", ")", "return", "SingleBlockManager", "(", "[", "make_block", "(", "values", ",", "ndim", "=", "1", ",", "placement", "=", "np", ".", "arange", "(", "len", "(", "values", ")", ")", ")", "]", ",", "axes", "[", "0", "]", ")" ]
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.

Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
    dtype
transposed: boolean, default False
    we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored

Returns
-------
Block Manager (new object)
[ "Iterate", "over", "blocks", "applying", "quantile", "reduction", ".", "This", "routine", "is", "intended", "for", "reduction", "type", "operations", "and", "will", "do", "inference", "on", "the", "generated", "blocks", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L405-L501
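The public counterpart is `DataFrame.quantile`; with a list-like `qs`, the result is indexed by the quantiles themselves, matching the `Float64Index` built in `get_axe`:

```python
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})

# list-like qs -> the quantiles become the result index
print(df.quantile([0.25, 0.75]))
#          a
# 0.25  1.75
# 0.75  3.25
```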
19,605
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.replace_list
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
    """ do a list replace """

    inplace = validate_bool_kwarg(inplace, 'inplace')

    # figure out our mask a-priori to avoid repeated replacements
    values = self.as_array()

    def comp(s, regex=False):
        """
        Generate a bool array by performing an equality check, or
        performing an element-wise regular expression matching
        """
        if isna(s):
            return isna(values)
        if hasattr(s, 'asm8'):
            return _compare_or_regex_search(maybe_convert_objects(values),
                                            getattr(s, 'asm8'), regex)
        return _compare_or_regex_search(values, s, regex)

    masks = [comp(s, regex) for i, s in enumerate(src_list)]

    result_blocks = []
    src_len = len(src_list) - 1
    for blk in self.blocks:

        # it's possible to get multiple result blocks here
        # replace ALWAYS will return a list
        rb = [blk if inplace else blk.copy()]
        for i, (s, d) in enumerate(zip(src_list, dest_list)):
            new_rb = []
            for b in rb:
                m = masks[i][b.mgr_locs.indexer]
                convert = i == src_len
                result = b._replace_coerce(mask=m, to_replace=s, value=d,
                                           inplace=inplace,
                                           convert=convert, regex=regex)
                if m.any():
                    new_rb = _extend_blocks(result, new_rb)
                else:
                    new_rb.append(b)
            rb = new_rb
        result_blocks.extend(rb)

    bm = self.__class__(result_blocks, self.axes)
    bm._consolidate_inplace()
    return bm
python
[ "def", "replace_list", "(", "self", ",", "src_list", ",", "dest_list", ",", "inplace", "=", "False", ",", "regex", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "# figure out our mask a-priori to avoid repeated replacements", "values", "=", "self", ".", "as_array", "(", ")", "def", "comp", "(", "s", ",", "regex", "=", "False", ")", ":", "\"\"\"\n Generate a bool array by perform an equality check, or perform\n an element-wise regular expression matching\n \"\"\"", "if", "isna", "(", "s", ")", ":", "return", "isna", "(", "values", ")", "if", "hasattr", "(", "s", ",", "'asm8'", ")", ":", "return", "_compare_or_regex_search", "(", "maybe_convert_objects", "(", "values", ")", ",", "getattr", "(", "s", ",", "'asm8'", ")", ",", "regex", ")", "return", "_compare_or_regex_search", "(", "values", ",", "s", ",", "regex", ")", "masks", "=", "[", "comp", "(", "s", ",", "regex", ")", "for", "i", ",", "s", "in", "enumerate", "(", "src_list", ")", "]", "result_blocks", "=", "[", "]", "src_len", "=", "len", "(", "src_list", ")", "-", "1", "for", "blk", "in", "self", ".", "blocks", ":", "# its possible to get multiple result blocks here", "# replace ALWAYS will return a list", "rb", "=", "[", "blk", "if", "inplace", "else", "blk", ".", "copy", "(", ")", "]", "for", "i", ",", "(", "s", ",", "d", ")", "in", "enumerate", "(", "zip", "(", "src_list", ",", "dest_list", ")", ")", ":", "new_rb", "=", "[", "]", "for", "b", "in", "rb", ":", "m", "=", "masks", "[", "i", "]", "[", "b", ".", "mgr_locs", ".", "indexer", "]", "convert", "=", "i", "==", "src_len", "result", "=", "b", ".", "_replace_coerce", "(", "mask", "=", "m", ",", "to_replace", "=", "s", ",", "value", "=", "d", ",", "inplace", "=", "inplace", ",", "convert", "=", "convert", ",", "regex", "=", "regex", ")", "if", "m", ".", "any", "(", ")", ":", "new_rb", "=", "_extend_blocks", "(", "result", ",", "new_rb", ")", "else", ":", "new_rb", ".", "append", "(", "b", ")", "rb", "=", "new_rb", "result_blocks", ".", "extend", "(", "rb", ")", "bm", "=", "self", ".", "__class__", "(", "result_blocks", ",", "self", ".", "axes", ")", "bm", ".", "_consolidate_inplace", "(", ")", "return", "bm" ]
do a list replace
[ "do", "a", "list", "replace" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L539-L585
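A list-to-list `DataFrame.replace` call is presumably what funnels into `replace_list`; note that the masks for every source value are computed once, up front, from `as_array()`:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})

# parallel source/destination lists, as in src_list/dest_list above
print(df.replace([1, 2], [10, 20])["a"].tolist())  # [10, 20, 3]
```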
19,606
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.combine
def combine(self, blocks, copy=True):
    """ return a new manager with the blocks """
    if len(blocks) == 0:
        return self.make_empty()

    # FIXME: optimization potential
    indexer = np.sort(np.concatenate([b.mgr_locs.as_array
                                      for b in blocks]))
    inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])

    new_blocks = []
    for b in blocks:
        b = b.copy(deep=copy)
        b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array,
                                   axis=0, allow_fill=False)
        new_blocks.append(b)

    axes = list(self.axes)
    axes[0] = self.items.take(indexer)

    return self.__class__(new_blocks, axes, do_integrity_check=False)
python
[ "def", "combine", "(", "self", ",", "blocks", ",", "copy", "=", "True", ")", ":", "if", "len", "(", "blocks", ")", "==", "0", ":", "return", "self", ".", "make_empty", "(", ")", "# FIXME: optimization potential", "indexer", "=", "np", ".", "sort", "(", "np", ".", "concatenate", "(", "[", "b", ".", "mgr_locs", ".", "as_array", "for", "b", "in", "blocks", "]", ")", ")", "inv_indexer", "=", "lib", ".", "get_reverse_indexer", "(", "indexer", ",", "self", ".", "shape", "[", "0", "]", ")", "new_blocks", "=", "[", "]", "for", "b", "in", "blocks", ":", "b", "=", "b", ".", "copy", "(", "deep", "=", "copy", ")", "b", ".", "mgr_locs", "=", "algos", ".", "take_1d", "(", "inv_indexer", ",", "b", ".", "mgr_locs", ".", "as_array", ",", "axis", "=", "0", ",", "allow_fill", "=", "False", ")", "new_blocks", ".", "append", "(", "b", ")", "axes", "=", "list", "(", "self", ".", "axes", ")", "axes", "[", "0", "]", "=", "self", ".", "items", ".", "take", "(", "indexer", ")", "return", "self", ".", "__class__", "(", "new_blocks", ",", "axes", ",", "do_integrity_check", "=", "False", ")" ]
return a new manager with the blocks
[ "return", "a", "new", "manager", "with", "the", "blocks" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L658-L678
19,607
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.copy
def copy(self, deep=True):
    """
    Make deep or shallow copy of BlockManager

    Parameters
    ----------
    deep : boolean or string, default True
        If False, return shallow copy (do not copy data)
        If 'all', copy data and a deep copy of the index

    Returns
    -------
    copy : BlockManager
    """
    # this preserves the notion of view copying of axes
    if deep:
        if deep == 'all':
            copy = lambda ax: ax.copy(deep=True)
        else:
            copy = lambda ax: ax.view()
        new_axes = [copy(ax) for ax in self.axes]
    else:
        new_axes = list(self.axes)
    return self.apply('copy', axes=new_axes, deep=deep,
                      do_integrity_check=False)
python
[ "def", "copy", "(", "self", ",", "deep", "=", "True", ")", ":", "# this preserves the notion of view copying of axes", "if", "deep", ":", "if", "deep", "==", "'all'", ":", "copy", "=", "lambda", "ax", ":", "ax", ".", "copy", "(", "deep", "=", "True", ")", "else", ":", "copy", "=", "lambda", "ax", ":", "ax", ".", "view", "(", ")", "new_axes", "=", "[", "copy", "(", "ax", ")", "for", "ax", "in", "self", ".", "axes", "]", "else", ":", "new_axes", "=", "list", "(", "self", ".", "axes", ")", "return", "self", ".", "apply", "(", "'copy'", ",", "axes", "=", "new_axes", ",", "deep", "=", "deep", ",", "do_integrity_check", "=", "False", ")" ]
Make deep or shallow copy of BlockManager

Parameters
----------
deep : boolean or string, default True
    If False, return shallow copy (do not copy data)
    If 'all', copy data and a deep copy of the index

Returns
-------
copy : BlockManager
[ "Make", "deep", "or", "shallow", "copy", "of", "BlockManager" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L706-L730
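A sketch of the deep/shallow distinction through the public `DataFrame.copy`; the shared-data behavior of `deep=False` follows from the blocks not being copied (behavior as of this era of the codebase, before copy-on-write):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
shallow = df.copy(deep=False)  # new axes, data still shared
deep = df.copy(deep=True)      # data copied block by block

df.iloc[0, 0] = 99
print(shallow.iloc[0, 0], deep.iloc[0, 0])  # 99 1
```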
19,608
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.as_array
def as_array(self, transpose=False, items=None):
    """Convert the blockmanager data into a numpy array.

    Parameters
    ----------
    transpose : boolean, default False
        If True, transpose the return array
    items : list of strings or None
        Names of block items that will be included in the returned
        array. ``None`` means that all block items will be used

    Returns
    -------
    arr : ndarray
    """
    if len(self.blocks) == 0:
        arr = np.empty(self.shape, dtype=float)
        return arr.transpose() if transpose else arr

    if items is not None:
        mgr = self.reindex_axis(items, axis=0)
    else:
        mgr = self

    if self._is_single_block and mgr.blocks[0].is_datetimetz:
        # TODO(Block.get_values): Make DatetimeTZBlock.get_values
        # always be object dtype. Some callers seem to want the
        # DatetimeArray (previously DTI)
        arr = mgr.blocks[0].get_values(dtype=object)
    elif self._is_single_block or not self.is_mixed_type:
        arr = np.asarray(mgr.blocks[0].get_values())
    else:
        arr = mgr._interleave()

    return arr.transpose() if transpose else arr
python
[ "def", "as_array", "(", "self", ",", "transpose", "=", "False", ",", "items", "=", "None", ")", ":", "if", "len", "(", "self", ".", "blocks", ")", "==", "0", ":", "arr", "=", "np", ".", "empty", "(", "self", ".", "shape", ",", "dtype", "=", "float", ")", "return", "arr", ".", "transpose", "(", ")", "if", "transpose", "else", "arr", "if", "items", "is", "not", "None", ":", "mgr", "=", "self", ".", "reindex_axis", "(", "items", ",", "axis", "=", "0", ")", "else", ":", "mgr", "=", "self", "if", "self", ".", "_is_single_block", "and", "mgr", ".", "blocks", "[", "0", "]", ".", "is_datetimetz", ":", "# TODO(Block.get_values): Make DatetimeTZBlock.get_values", "# always be object dtype. Some callers seem to want the", "# DatetimeArray (previously DTI)", "arr", "=", "mgr", ".", "blocks", "[", "0", "]", ".", "get_values", "(", "dtype", "=", "object", ")", "elif", "self", ".", "_is_single_block", "or", "not", "self", ".", "is_mixed_type", ":", "arr", "=", "np", ".", "asarray", "(", "mgr", ".", "blocks", "[", "0", "]", ".", "get_values", "(", ")", ")", "else", ":", "arr", "=", "mgr", ".", "_interleave", "(", ")", "return", "arr", ".", "transpose", "(", ")", "if", "transpose", "else", "arr" ]
Convert the blockmanager data into a numpy array.

Parameters
----------
transpose : boolean, default False
    If True, transpose the return array
items : list of strings or None
    Names of block items that will be included in the returned
    array. ``None`` means that all block items will be used

Returns
-------
arr : ndarray
[ "Convert", "the", "blockmanager", "data", "into", "an", "numpy", "array", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L732-L766
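`as_array` is what sits behind `DataFrame.values`: a single-dtype frame can hand back its one block directly, while mixed dtypes force the `_interleave` path and a common (often object) dtype:

```python
import pandas as pd

homogeneous = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
mixed = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})

print(homogeneous.values.dtype)  # int64: single block returned as-is
print(mixed.values.dtype)        # object: blocks interleaved and upcast
```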
19,609
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager._interleave
def _interleave(self):
    """
    Return ndarray from blocks with specified item order
    Items must be contained in the blocks
    """
    from pandas.core.dtypes.common import is_sparse
    dtype = _interleaved_dtype(self.blocks)

    # TODO: https://github.com/pandas-dev/pandas/issues/22791
    # Give EAs some input on what happens here. Sparse needs this.
    if is_sparse(dtype):
        dtype = dtype.subtype
    elif is_extension_array_dtype(dtype):
        dtype = 'object'

    result = np.empty(self.shape, dtype=dtype)

    itemmask = np.zeros(self.shape[0])

    for blk in self.blocks:
        rl = blk.mgr_locs
        result[rl.indexer] = blk.get_values(dtype)
        itemmask[rl.indexer] = 1

    if not itemmask.all():
        raise AssertionError('Some items were not contained in blocks')

    return result
python
[ "def", "_interleave", "(", "self", ")", ":", "from", "pandas", ".", "core", ".", "dtypes", ".", "common", "import", "is_sparse", "dtype", "=", "_interleaved_dtype", "(", "self", ".", "blocks", ")", "# TODO: https://github.com/pandas-dev/pandas/issues/22791", "# Give EAs some input on what happens here. Sparse needs this.", "if", "is_sparse", "(", "dtype", ")", ":", "dtype", "=", "dtype", ".", "subtype", "elif", "is_extension_array_dtype", "(", "dtype", ")", ":", "dtype", "=", "'object'", "result", "=", "np", ".", "empty", "(", "self", ".", "shape", ",", "dtype", "=", "dtype", ")", "itemmask", "=", "np", ".", "zeros", "(", "self", ".", "shape", "[", "0", "]", ")", "for", "blk", "in", "self", ".", "blocks", ":", "rl", "=", "blk", ".", "mgr_locs", "result", "[", "rl", ".", "indexer", "]", "=", "blk", ".", "get_values", "(", "dtype", ")", "itemmask", "[", "rl", ".", "indexer", "]", "=", "1", "if", "not", "itemmask", ".", "all", "(", ")", ":", "raise", "AssertionError", "(", "'Some items were not contained in blocks'", ")", "return", "result" ]
Return ndarray from blocks with specified item order
Items must be contained in the blocks
[ "Return", "ndarray", "from", "blocks", "with", "specified", "item", "order", "Items", "must", "be", "contained", "in", "the", "blocks" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L768-L795
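A self-contained numpy sketch of the interleave step itself: each block writes its rows into a preallocated result at its manager locations, and a mask asserts full coverage (toy data, not the pandas block types):

```python
import numpy as np

shape = (3, 4)  # 3 items x 4 rows
blocks = [
    (np.array([0, 2]), np.arange(8).reshape(2, 4)),  # (locs, values)
    (np.array([1]), np.full((1, 4), -1)),
]

result = np.empty(shape, dtype=np.int64)
itemmask = np.zeros(shape[0])
for locs, values in blocks:
    result[locs] = values   # scatter block rows to their item positions
    itemmask[locs] = 1

assert itemmask.all(), 'Some items were not contained in blocks'
print(result)
```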
19,610
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.fast_xs
def fast_xs(self, loc):
    """
    get a cross sectional for a given location in the
    items ; handle dups

    return the result, it *could* be a view in the case of a
    single block
    """
    if len(self.blocks) == 1:
        return self.blocks[0].iget((slice(None), loc))

    items = self.items

    # non-unique (GH4726)
    if not items.is_unique:
        result = self._interleave()
        if self.ndim == 2:
            result = result.T
        return result[loc]

    # unique
    dtype = _interleaved_dtype(self.blocks)

    n = len(items)
    if is_extension_array_dtype(dtype):
        # we'll eventually construct an ExtensionArray.
        result = np.empty(n, dtype=object)
    else:
        result = np.empty(n, dtype=dtype)

    for blk in self.blocks:
        # Such assignment may incorrectly coerce NaT to None
        # result[blk.mgr_locs] = blk._slice((slice(None), loc))
        for i, rl in enumerate(blk.mgr_locs):
            result[rl] = blk._try_coerce_result(blk.iget((i, loc)))

    if is_extension_array_dtype(dtype):
        result = dtype.construct_array_type()._from_sequence(
            result, dtype=dtype
        )

    return result
python
[ "def", "fast_xs", "(", "self", ",", "loc", ")", ":", "if", "len", "(", "self", ".", "blocks", ")", "==", "1", ":", "return", "self", ".", "blocks", "[", "0", "]", ".", "iget", "(", "(", "slice", "(", "None", ")", ",", "loc", ")", ")", "items", "=", "self", ".", "items", "# non-unique (GH4726)", "if", "not", "items", ".", "is_unique", ":", "result", "=", "self", ".", "_interleave", "(", ")", "if", "self", ".", "ndim", "==", "2", ":", "result", "=", "result", ".", "T", "return", "result", "[", "loc", "]", "# unique", "dtype", "=", "_interleaved_dtype", "(", "self", ".", "blocks", ")", "n", "=", "len", "(", "items", ")", "if", "is_extension_array_dtype", "(", "dtype", ")", ":", "# we'll eventually construct an ExtensionArray.", "result", "=", "np", ".", "empty", "(", "n", ",", "dtype", "=", "object", ")", "else", ":", "result", "=", "np", ".", "empty", "(", "n", ",", "dtype", "=", "dtype", ")", "for", "blk", "in", "self", ".", "blocks", ":", "# Such assignment may incorrectly coerce NaT to None", "# result[blk.mgr_locs] = blk._slice((slice(None), loc))", "for", "i", ",", "rl", "in", "enumerate", "(", "blk", ".", "mgr_locs", ")", ":", "result", "[", "rl", "]", "=", "blk", ".", "_try_coerce_result", "(", "blk", ".", "iget", "(", "(", "i", ",", "loc", ")", ")", ")", "if", "is_extension_array_dtype", "(", "dtype", ")", ":", "result", "=", "dtype", ".", "construct_array_type", "(", ")", ".", "_from_sequence", "(", "result", ",", "dtype", "=", "dtype", ")", "return", "result" ]
get a cross sectional for a given location in the items ; handle dups

return the result, it *could* be a view in the case of a single block
[ "get", "a", "cross", "sectional", "for", "a", "given", "location", "in", "the", "items", ";", "handle", "dups" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L864-L905
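`fast_xs` backs single-row lookups such as `df.iloc[i]`; across mixed-dtype blocks the cross section is assembled element by element into the interleaved dtype:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})

row = df.iloc[0]     # one row cut across an int block and an object block
print(row.dtype)     # object: the common dtype across blocks
print(row.tolist())  # [1, 'x']
```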
19,611
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.consolidate
def consolidate(self):
    """
    Join together blocks having same dtype

    Returns
    -------
    y : BlockManager
    """
    if self.is_consolidated():
        return self

    bm = self.__class__(self.blocks, self.axes)
    bm._is_consolidated = False
    bm._consolidate_inplace()
    return bm
python
[ "def", "consolidate", "(", "self", ")", ":", "if", "self", ".", "is_consolidated", "(", ")", ":", "return", "self", "bm", "=", "self", ".", "__class__", "(", "self", ".", "blocks", ",", "self", ".", "axes", ")", "bm", ".", "_is_consolidated", "=", "False", "bm", ".", "_consolidate_inplace", "(", ")", "return", "bm" ]
Join together blocks having same dtype

Returns
-------
y : BlockManager
[ "Join", "together", "blocks", "having", "same", "dtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L907-L921
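Why consolidation exists: every column insertion appends a fresh block, so repeated inserts fragment the manager. The sketch below peeks at the block count via this era's private `_data` attribute (an assumption about internal API):

```python
import pandas as pd
import numpy as np

df = pd.DataFrame({"a": np.arange(3)})
for i in range(3):
    df["c%d" % i] = np.arange(3)   # each insert appends one new block

print(len(df._data.blocks))                # 4 same-dtype blocks
print(len(df._data.consolidate().blocks))  # 1 after joining by dtype
```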
19,612
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.iget
def iget(self, i, fastpath=True):
    """
    Return the data as a SingleBlockManager if fastpath=True and possible

    Otherwise return as a ndarray
    """
    block = self.blocks[self._blknos[i]]
    values = block.iget(self._blklocs[i])
    if not fastpath or not block._box_to_block_values or values.ndim != 1:
        return values

    # fastpath shortcut for select a single-dim from a 2-dim BM
    return SingleBlockManager(
        [block.make_block_same_class(values,
                                     placement=slice(0, len(values)),
                                     ndim=1)],
        self.axes[1])
python
[ "def", "iget", "(", "self", ",", "i", ",", "fastpath", "=", "True", ")", ":", "block", "=", "self", ".", "blocks", "[", "self", ".", "_blknos", "[", "i", "]", "]", "values", "=", "block", ".", "iget", "(", "self", ".", "_blklocs", "[", "i", "]", ")", "if", "not", "fastpath", "or", "not", "block", ".", "_box_to_block_values", "or", "values", ".", "ndim", "!=", "1", ":", "return", "values", "# fastpath shortcut for select a single-dim from a 2-dim BM", "return", "SingleBlockManager", "(", "[", "block", ".", "make_block_same_class", "(", "values", ",", "placement", "=", "slice", "(", "0", ",", "len", "(", "values", ")", ")", ",", "ndim", "=", "1", ")", "]", ",", "self", ".", "axes", "[", "1", "]", ")" ]
Return the data as a SingleBlockManager if fastpath=True and possible

Otherwise return as a ndarray
[ "Return", "the", "data", "as", "a", "SingleBlockManager", "if", "fastpath", "=", "True", "and", "possible" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L959-L975
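Single-column accesses such as `df.iloc[:, i]` are the sort of caller this fastpath serves: one 1-dim block gets wrapped in a `SingleBlockManager` instead of being copied out as a raw array:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})

col = df.iloc[:, 0]   # one column, backed by a single 1-dim block
print(col.tolist())   # [1, 2]
```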
19,613
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.insert
def insert(self, loc, item, value, allow_duplicates=False):
    """
    Insert item at selected position.

    Parameters
    ----------
    loc : int
    item : hashable
    value : array_like
    allow_duplicates: bool
        If False, trying to insert non-unique item will raise

    """
    if not allow_duplicates and item in self.items:
        # Should this be a different kind of error??
        raise ValueError('cannot insert {}, already exists'.format(item))

    if not isinstance(loc, int):
        raise TypeError("loc must be int")

    # insert to the axis; this could possibly raise a TypeError
    new_axis = self.items.insert(loc, item)

    block = make_block(values=value, ndim=self.ndim,
                       placement=slice(loc, loc + 1))

    for blkno, count in _fast_count_smallints(self._blknos[loc:]):
        blk = self.blocks[blkno]
        if count == len(blk.mgr_locs):
            blk.mgr_locs = blk.mgr_locs.add(1)
        else:
            new_mgr_locs = blk.mgr_locs.as_array.copy()
            new_mgr_locs[new_mgr_locs >= loc] += 1
            blk.mgr_locs = new_mgr_locs

    if loc == self._blklocs.shape[0]:
        # np.append is a lot faster, let's use it if we can.
        self._blklocs = np.append(self._blklocs, 0)
        self._blknos = np.append(self._blknos, len(self.blocks))
    else:
        self._blklocs = np.insert(self._blklocs, loc, 0)
        self._blknos = np.insert(self._blknos, loc, len(self.blocks))

    self.axes[0] = new_axis
    self.blocks += (block,)
    self._shape = None

    self._known_consolidated = False

    if len(self.blocks) > 100:
        self._consolidate_inplace()
python
[ "def", "insert", "(", "self", ",", "loc", ",", "item", ",", "value", ",", "allow_duplicates", "=", "False", ")", ":", "if", "not", "allow_duplicates", "and", "item", "in", "self", ".", "items", ":", "# Should this be a different kind of error??", "raise", "ValueError", "(", "'cannot insert {}, already exists'", ".", "format", "(", "item", ")", ")", "if", "not", "isinstance", "(", "loc", ",", "int", ")", ":", "raise", "TypeError", "(", "\"loc must be int\"", ")", "# insert to the axis; this could possibly raise a TypeError", "new_axis", "=", "self", ".", "items", ".", "insert", "(", "loc", ",", "item", ")", "block", "=", "make_block", "(", "values", "=", "value", ",", "ndim", "=", "self", ".", "ndim", ",", "placement", "=", "slice", "(", "loc", ",", "loc", "+", "1", ")", ")", "for", "blkno", ",", "count", "in", "_fast_count_smallints", "(", "self", ".", "_blknos", "[", "loc", ":", "]", ")", ":", "blk", "=", "self", ".", "blocks", "[", "blkno", "]", "if", "count", "==", "len", "(", "blk", ".", "mgr_locs", ")", ":", "blk", ".", "mgr_locs", "=", "blk", ".", "mgr_locs", ".", "add", "(", "1", ")", "else", ":", "new_mgr_locs", "=", "blk", ".", "mgr_locs", ".", "as_array", ".", "copy", "(", ")", "new_mgr_locs", "[", "new_mgr_locs", ">=", "loc", "]", "+=", "1", "blk", ".", "mgr_locs", "=", "new_mgr_locs", "if", "loc", "==", "self", ".", "_blklocs", ".", "shape", "[", "0", "]", ":", "# np.append is a lot faster, let's use it if we can.", "self", ".", "_blklocs", "=", "np", ".", "append", "(", "self", ".", "_blklocs", ",", "0", ")", "self", ".", "_blknos", "=", "np", ".", "append", "(", "self", ".", "_blknos", ",", "len", "(", "self", ".", "blocks", ")", ")", "else", ":", "self", ".", "_blklocs", "=", "np", ".", "insert", "(", "self", ".", "_blklocs", ",", "loc", ",", "0", ")", "self", ".", "_blknos", "=", "np", ".", "insert", "(", "self", ".", "_blknos", ",", "loc", ",", "len", "(", "self", ".", "blocks", ")", ")", "self", ".", "axes", "[", "0", "]", "=", "new_axis", "self", ".", "blocks", "+=", "(", "block", ",", ")", "self", ".", "_shape", "=", "None", "self", ".", "_known_consolidated", "=", "False", "if", "len", "(", "self", ".", "blocks", ")", ">", "100", ":", "self", ".", "_consolidate_inplace", "(", ")" ]
Insert item at selected position.

Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
    If False, trying to insert non-unique item will raise
[ "Insert", "item", "at", "selected", "position", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1130-L1180
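The public route here is `DataFrame.insert`, whose duplicate check mirrors the `ValueError` raised above:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
df.insert(0, "b", [3, 4])       # loc, item, value
print(list(df.columns))         # ['b', 'a']

try:
    df.insert(0, "a", [5, 6])   # non-unique item, allow_duplicates=False
except ValueError as err:
    print(err)                  # cannot insert a, already exists
```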
19,614
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.reindex_axis
def reindex_axis(self, new_index, axis, method=None, limit=None,
                 fill_value=None, copy=True):
    """
    Conform block manager to new index.
    """
    new_index = ensure_index(new_index)
    new_index, indexer = self.axes[axis].reindex(new_index, method=method,
                                                 limit=limit)

    return self.reindex_indexer(new_index, indexer, axis=axis,
                                fill_value=fill_value, copy=copy)
python
[ "def", "reindex_axis", "(", "self", ",", "new_index", ",", "axis", ",", "method", "=", "None", ",", "limit", "=", "None", ",", "fill_value", "=", "None", ",", "copy", "=", "True", ")", ":", "new_index", "=", "ensure_index", "(", "new_index", ")", "new_index", ",", "indexer", "=", "self", ".", "axes", "[", "axis", "]", ".", "reindex", "(", "new_index", ",", "method", "=", "method", ",", "limit", "=", "limit", ")", "return", "self", ".", "reindex_indexer", "(", "new_index", ",", "indexer", ",", "axis", "=", "axis", ",", "fill_value", "=", "fill_value", ",", "copy", "=", "copy", ")" ]
Conform block manager to new index.
[ "Conform", "block", "manager", "to", "new", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1182-L1192
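The public `DataFrame.reindex` exercises this path; labels absent from the old axis come back as missing values (filled per `fill_value`):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=[0, 1])

# conforming to a larger index introduces NaN for the new label
print(df.reindex([0, 1, 2])["a"].tolist())  # [1.0, 2.0, nan]
```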
19,615
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.take
def take(self, indexer, axis=1, verify=True, convert=True):
    """
    Take items along any axis.
    """
    self._consolidate_inplace()
    indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
                         dtype='int64')
               if isinstance(indexer, slice)
               else np.asanyarray(indexer, dtype='int64'))

    n = self.shape[axis]
    if convert:
        indexer = maybe_convert_indices(indexer, n)

    if verify:
        if ((indexer == -1) | (indexer >= n)).any():
            raise Exception('Indices must be nonzero and less than '
                            'the axis length')

    new_labels = self.axes[axis].take(indexer)
    return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
                                axis=axis, allow_dups=True)
python
[ "def", "take", "(", "self", ",", "indexer", ",", "axis", "=", "1", ",", "verify", "=", "True", ",", "convert", "=", "True", ")", ":", "self", ".", "_consolidate_inplace", "(", ")", "indexer", "=", "(", "np", ".", "arange", "(", "indexer", ".", "start", ",", "indexer", ".", "stop", ",", "indexer", ".", "step", ",", "dtype", "=", "'int64'", ")", "if", "isinstance", "(", "indexer", ",", "slice", ")", "else", "np", ".", "asanyarray", "(", "indexer", ",", "dtype", "=", "'int64'", ")", ")", "n", "=", "self", ".", "shape", "[", "axis", "]", "if", "convert", ":", "indexer", "=", "maybe_convert_indices", "(", "indexer", ",", "n", ")", "if", "verify", ":", "if", "(", "(", "indexer", "==", "-", "1", ")", "|", "(", "indexer", ">=", "n", ")", ")", ".", "any", "(", ")", ":", "raise", "Exception", "(", "'Indices must be nonzero and less than '", "'the axis length'", ")", "new_labels", "=", "self", ".", "axes", "[", "axis", "]", ".", "take", "(", "indexer", ")", "return", "self", ".", "reindex_indexer", "(", "new_axis", "=", "new_labels", ",", "indexer", "=", "indexer", ",", "axis", "=", "axis", ",", "allow_dups", "=", "True", ")" ]
Take items along any axis.
[ "Take", "items", "along", "any", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1325-L1346
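`DataFrame.take` is the public wrapper; note the manager-level default `axis=1` differs from the public method's default `axis=0` (rows):

```python
import pandas as pd

df = pd.DataFrame({"a": [10, 20, 30], "b": [1, 2, 3]})

print(df.take([2, 0]).index.tolist())      # [2, 0]: positional row take
print(list(df.take([1], axis=1).columns))  # ['b']: positional column take
```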
19,616
pandas-dev/pandas
pandas/core/internals/managers.py
BlockManager.unstack
def unstack(self, unstacker_func, fill_value):
    """Return a blockmanager with all blocks unstacked.

    Parameters
    ----------
    unstacker_func : callable
        A (partially-applied) ``pd.core.reshape._Unstacker`` class.
    fill_value : Any
        fill_value for newly introduced missing values.

    Returns
    -------
    unstacked : BlockManager
    """
    n_rows = self.shape[-1]
    dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
    new_columns = dummy.get_new_columns()
    new_index = dummy.get_new_index()
    new_blocks = []
    columns_mask = []

    for blk in self.blocks:
        blocks, mask = blk._unstack(
            partial(unstacker_func,
                    value_columns=self.items[blk.mgr_locs.indexer]),
            new_columns,
            n_rows,
            fill_value
        )
        new_blocks.extend(blocks)
        columns_mask.extend(mask)

    new_columns = new_columns[columns_mask]

    bm = BlockManager(new_blocks, [new_columns, new_index])
    return bm
python
[ "def", "unstack", "(", "self", ",", "unstacker_func", ",", "fill_value", ")", ":", "n_rows", "=", "self", ".", "shape", "[", "-", "1", "]", "dummy", "=", "unstacker_func", "(", "np", ".", "empty", "(", "(", "0", ",", "0", ")", ")", ",", "value_columns", "=", "self", ".", "items", ")", "new_columns", "=", "dummy", ".", "get_new_columns", "(", ")", "new_index", "=", "dummy", ".", "get_new_index", "(", ")", "new_blocks", "=", "[", "]", "columns_mask", "=", "[", "]", "for", "blk", "in", "self", ".", "blocks", ":", "blocks", ",", "mask", "=", "blk", ".", "_unstack", "(", "partial", "(", "unstacker_func", ",", "value_columns", "=", "self", ".", "items", "[", "blk", ".", "mgr_locs", ".", "indexer", "]", ")", ",", "new_columns", ",", "n_rows", ",", "fill_value", ")", "new_blocks", ".", "extend", "(", "blocks", ")", "columns_mask", ".", "extend", "(", "mask", ")", "new_columns", "=", "new_columns", "[", "columns_mask", "]", "bm", "=", "BlockManager", "(", "new_blocks", ",", "[", "new_columns", ",", "new_index", "]", ")", "return", "bm" ]
Return a blockmanager with all blocks unstacked. Parameters ---------- unstacker_func : callable A (partially-applied) ``pd.core.reshape._Unstacker`` class. fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager
[ "Return", "a", "blockmanager", "with", "all", "blocks", "unstacked", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1392-L1428
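Usage sketch (added; the record only documents the internal entry point): Series.unstack and DataFrame.unstack are the public callers of BlockManager.unstack, and fill_value is forwarded down to it.

import pandas as pd

idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
                                names=['outer', 'inner'])
s = pd.Series([10, 20, 30], index=idx)

# ('b', 2) is absent, so unstacking introduces a missing cell;
# fill_value plugs it instead of NaN.
print(s.unstack(fill_value=0))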
19,617
pandas-dev/pandas
pandas/core/internals/managers.py
SingleBlockManager.delete
def delete(self, item): """ Delete single item from SingleBlockManager. Ensures that self.blocks doesn't become empty. """ loc = self.items.get_loc(item) self._block.delete(loc) self.axes[0] = self.axes[0].delete(loc)
python
def delete(self, item): """ Delete single item from SingleBlockManager. Ensures that self.blocks doesn't become empty. """ loc = self.items.get_loc(item) self._block.delete(loc) self.axes[0] = self.axes[0].delete(loc)
[ "def", "delete", "(", "self", ",", "item", ")", ":", "loc", "=", "self", ".", "items", ".", "get_loc", "(", "item", ")", "self", ".", "_block", ".", "delete", "(", "loc", ")", "self", ".", "axes", "[", "0", "]", "=", "self", ".", "axes", "[", "0", "]", ".", "delete", "(", "loc", ")" ]
Delete single item from SingleBlockManager. Ensures that self.blocks doesn't become empty.
[ "Delete", "single", "item", "from", "SingleBlockManager", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1577-L1585
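Usage sketch (added): the public route into SingleBlockManager.delete is label deletion on a Series.

import pandas as pd

s = pd.Series([1, 2, 3], index=['x', 'y', 'z'])

# __delitem__ resolves 'y' to a location and calls
# SingleBlockManager.delete, which shrinks the single block in
# place instead of leaving self.blocks empty.
del s['y']
print(s)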
19,618
pandas-dev/pandas
pandas/core/internals/managers.py
SingleBlockManager.concat
def concat(self, to_concat, new_axis): """ Concatenate a list of SingleBlockManagers into a single SingleBlockManager. Used for pd.concat of Series objects with axis=0. Parameters ---------- to_concat : list of SingleBlockManagers new_axis : Index of the result Returns ------- SingleBlockManager """ non_empties = [x for x in to_concat if len(x) > 0] # check if all series are of the same block type: if len(non_empties) > 0: blocks = [obj.blocks[0] for obj in non_empties] if len({b.dtype for b in blocks}) == 1: new_block = blocks[0].concat_same_type(blocks) else: values = [x.values for x in blocks] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) else: values = [x._block.values for x in to_concat] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) mgr = SingleBlockManager(new_block, new_axis) return mgr
python
def concat(self, to_concat, new_axis): """ Concatenate a list of SingleBlockManagers into a single SingleBlockManager. Used for pd.concat of Series objects with axis=0. Parameters ---------- to_concat : list of SingleBlockManagers new_axis : Index of the result Returns ------- SingleBlockManager """ non_empties = [x for x in to_concat if len(x) > 0] # check if all series are of the same block type: if len(non_empties) > 0: blocks = [obj.blocks[0] for obj in non_empties] if len({b.dtype for b in blocks}) == 1: new_block = blocks[0].concat_same_type(blocks) else: values = [x.values for x in blocks] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) else: values = [x._block.values for x in to_concat] values = _concat._concat_compat(values) new_block = make_block( values, placement=slice(0, len(values), 1)) mgr = SingleBlockManager(new_block, new_axis) return mgr
[ "def", "concat", "(", "self", ",", "to_concat", ",", "new_axis", ")", ":", "non_empties", "=", "[", "x", "for", "x", "in", "to_concat", "if", "len", "(", "x", ")", ">", "0", "]", "# check if all series are of the same block type:", "if", "len", "(", "non_empties", ")", ">", "0", ":", "blocks", "=", "[", "obj", ".", "blocks", "[", "0", "]", "for", "obj", "in", "non_empties", "]", "if", "len", "(", "{", "b", ".", "dtype", "for", "b", "in", "blocks", "}", ")", "==", "1", ":", "new_block", "=", "blocks", "[", "0", "]", ".", "concat_same_type", "(", "blocks", ")", "else", ":", "values", "=", "[", "x", ".", "values", "for", "x", "in", "blocks", "]", "values", "=", "_concat", ".", "_concat_compat", "(", "values", ")", "new_block", "=", "make_block", "(", "values", ",", "placement", "=", "slice", "(", "0", ",", "len", "(", "values", ")", ",", "1", ")", ")", "else", ":", "values", "=", "[", "x", ".", "_block", ".", "values", "for", "x", "in", "to_concat", "]", "values", "=", "_concat", ".", "_concat_compat", "(", "values", ")", "new_block", "=", "make_block", "(", "values", ",", "placement", "=", "slice", "(", "0", ",", "len", "(", "values", ")", ",", "1", ")", ")", "mgr", "=", "SingleBlockManager", "(", "new_block", ",", "new_axis", ")", "return", "mgr" ]
Concatenate a list of SingleBlockManagers into a single SingleBlockManager. Used for pd.concat of Series objects with axis=0. Parameters ---------- to_concat : list of SingleBlockManagers new_axis : Index of the result Returns ------- SingleBlockManager
[ "Concatenate", "a", "list", "of", "SingleBlockManagers", "into", "a", "single", "SingleBlockManager", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/managers.py#L1594-L1630
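Usage sketch (added): pd.concat of Series along axis=0 is the public caller. When all inputs share a dtype the concat_same_type fast path is taken; otherwise _concat_compat coerces to a common dtype.

import pandas as pd

a = pd.Series([1, 2], dtype='int64')
b = pd.Series([3.5], dtype='float64')

print(pd.concat([a, a], ignore_index=True).dtype)  # int64 (same-type path)
print(pd.concat([a, b], ignore_index=True).dtype)  # float64 (compat path)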
19,619
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.from_array
def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False): """Construct SparseSeries from array. .. deprecated:: 0.23.0 Use the pd.SparseSeries(..) constructor instead. """ warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.SparseSeries(..) " "constructor instead.", FutureWarning, stacklevel=2) return cls(arr, index=index, name=name, copy=copy, fill_value=fill_value, fastpath=fastpath)
python
def from_array(cls, arr, index=None, name=None, copy=False, fill_value=None, fastpath=False): """Construct SparseSeries from array. .. deprecated:: 0.23.0 Use the pd.SparseSeries(..) constructor instead. """ warnings.warn("'from_array' is deprecated and will be removed in a " "future version. Please use the pd.SparseSeries(..) " "constructor instead.", FutureWarning, stacklevel=2) return cls(arr, index=index, name=name, copy=copy, fill_value=fill_value, fastpath=fastpath)
[ "def", "from_array", "(", "cls", ",", "arr", ",", "index", "=", "None", ",", "name", "=", "None", ",", "copy", "=", "False", ",", "fill_value", "=", "None", ",", "fastpath", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"'from_array' is deprecated and will be removed in a \"", "\"future version. Please use the pd.SparseSeries(..) \"", "\"constructor instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "cls", "(", "arr", ",", "index", "=", "index", ",", "name", "=", "name", ",", "copy", "=", "copy", ",", "fill_value", "=", "fill_value", ",", "fastpath", "=", "fastpath", ")" ]
Construct SparseSeries from array. .. deprecated:: 0.23.0 Use the pd.SparseSeries(..) constructor instead.
[ "Construct", "SparseSeries", "from", "array", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L181-L192
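Usage sketch (added; SparseSeries was removed in later pandas, so this assumes the 0.24/0.25-era API this snapshot documents):

import warnings
import numpy as np
import pandas as pd

arr = np.array([1.0, np.nan, 3.0])

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    s = pd.SparseSeries.from_array(arr)
    print(caught[-1].category.__name__)  # FutureWarning

# The recommended replacement is the constructor itself:
s = pd.SparseSeries(arr)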
19,620
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.as_sparse_array
def as_sparse_array(self, kind=None, fill_value=None, copy=False): """ return my self as a sparse array, do not copy by default """ if fill_value is None: fill_value = self.fill_value if kind is None: kind = self.kind return SparseArray(self.values, sparse_index=self.sp_index, fill_value=fill_value, kind=kind, copy=copy)
python
def as_sparse_array(self, kind=None, fill_value=None, copy=False): """ return my self as a sparse array, do not copy by default """ if fill_value is None: fill_value = self.fill_value if kind is None: kind = self.kind return SparseArray(self.values, sparse_index=self.sp_index, fill_value=fill_value, kind=kind, copy=copy)
[ "def", "as_sparse_array", "(", "self", ",", "kind", "=", "None", ",", "fill_value", "=", "None", ",", "copy", "=", "False", ")", ":", "if", "fill_value", "is", "None", ":", "fill_value", "=", "self", ".", "fill_value", "if", "kind", "is", "None", ":", "kind", "=", "self", ".", "kind", "return", "SparseArray", "(", "self", ".", "values", ",", "sparse_index", "=", "self", ".", "sp_index", ",", "fill_value", "=", "fill_value", ",", "kind", "=", "kind", ",", "copy", "=", "copy", ")" ]
return my self as a sparse array, do not copy by default
[ "return", "my", "self", "as", "a", "sparse", "array", "do", "not", "copy", "by", "default" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L210-L218
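Usage sketch (added): kind and fill_value default to the series' own settings, and no copy is made unless requested.

import numpy as np
import pandas as pd

s = pd.SparseSeries([1.0, np.nan, 2.0])

arr = s.as_sparse_array()
print(type(arr).__name__, arr.fill_value)  # SparseArray nan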
19,621
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries._reduce
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): """ perform a reduction operation """ return op(self.get_values(), skipna=skipna, **kwds)
python
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds): """ perform a reduction operation """ return op(self.get_values(), skipna=skipna, **kwds)
[ "def", "_reduce", "(", "self", ",", "op", ",", "name", ",", "axis", "=", "0", ",", "skipna", "=", "True", ",", "numeric_only", "=", "None", ",", "filter_type", "=", "None", ",", "*", "*", "kwds", ")", ":", "return", "op", "(", "self", ".", "get_values", "(", ")", ",", "skipna", "=", "skipna", ",", "*", "*", "kwds", ")" ]
perform a reduction operation
[ "perform", "a", "reduction", "operation" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L227-L230
19,622
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries._ixs
def _ixs(self, i, axis=0): """ Return the i-th value or values in the SparseSeries by location Parameters ---------- i : int, slice, or sequence of integers Returns ------- value : scalar (int) or Series (slice, sequence) """ label = self.index[i] if isinstance(label, Index): return self.take(i, axis=axis) else: return self._get_val_at(i)
python
def _ixs(self, i, axis=0): """ Return the i-th value or values in the SparseSeries by location Parameters ---------- i : int, slice, or sequence of integers Returns ------- value : scalar (int) or Series (slice, sequence) """ label = self.index[i] if isinstance(label, Index): return self.take(i, axis=axis) else: return self._get_val_at(i)
[ "def", "_ixs", "(", "self", ",", "i", ",", "axis", "=", "0", ")", ":", "label", "=", "self", ".", "index", "[", "i", "]", "if", "isinstance", "(", "label", ",", "Index", ")", ":", "return", "self", ".", "take", "(", "i", ",", "axis", "=", "axis", ")", "else", ":", "return", "self", ".", "_get_val_at", "(", "i", ")" ]
Return the i-th value or values in the SparseSeries by location Parameters ---------- i : int, slice, or sequence of integers Returns ------- value : scalar (int) or Series (slice, sequence)
[ "Return", "the", "i", "-", "th", "value", "or", "values", "in", "the", "SparseSeries", "by", "location" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L268-L284
19,623
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.abs
def abs(self): """ Return an object with absolute value taken. Only applicable to objects that are all numeric Returns ------- abs: same type as caller """ return self._constructor(np.abs(self.values), index=self.index).__finalize__(self)
python
def abs(self): """ Return an object with absolute value taken. Only applicable to objects that are all numeric Returns ------- abs: same type as caller """ return self._constructor(np.abs(self.values), index=self.index).__finalize__(self)
[ "def", "abs", "(", "self", ")", ":", "return", "self", ".", "_constructor", "(", "np", ".", "abs", "(", "self", ".", "values", ")", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")" ]
Return an object with absolute value taken. Only applicable to objects that are all numeric Returns ------- abs: same type as caller
[ "Return", "an", "object", "with", "absolute", "value", "taken", ".", "Only", "applicable", "to", "objects", "that", "are", "all", "numeric" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L308-L318
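Usage sketch (added): abs() applies np.abs to the underlying values and rewraps them, so the result is again sparse.

import numpy as np
import pandas as pd

s = pd.SparseSeries([-1.0, np.nan, 2.0])
print(s.abs())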
19,624
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.get
def get(self, label, default=None): """ Returns value occupying requested label, default to specified missing value if not present. Analogous to dict.get Parameters ---------- label : object Label value looking for default : object, optional Value to return if label not in index Returns ------- y : scalar """ if label in self.index: loc = self.index.get_loc(label) return self._get_val_at(loc) else: return default
python
def get(self, label, default=None): """ Returns value occupying requested label, default to specified missing value if not present. Analogous to dict.get Parameters ---------- label : object Label value looking for default : object, optional Value to return if label not in index Returns ------- y : scalar """ if label in self.index: loc = self.index.get_loc(label) return self._get_val_at(loc) else: return default
[ "def", "get", "(", "self", ",", "label", ",", "default", "=", "None", ")", ":", "if", "label", "in", "self", ".", "index", ":", "loc", "=", "self", ".", "index", ".", "get_loc", "(", "label", ")", "return", "self", ".", "_get_val_at", "(", "loc", ")", "else", ":", "return", "default" ]
Returns value occupying requested label, default to specified missing value if not present. Analogous to dict.get Parameters ---------- label : object Label value looking for default : object, optional Value to return if label not in index Returns ------- y : scalar
[ "Returns", "value", "occupying", "requested", "label", "default", "to", "specified", "missing", "value", "if", "not", "present", ".", "Analogous", "to", "dict", ".", "get" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L320-L340
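Usage sketch (added), showing the dict.get-style default:

import pandas as pd

s = pd.SparseSeries([1.0, 2.0], index=['a', 'b'])

print(s.get('a'))            # 1.0
print(s.get('missing', -1))  # -1, like dict.get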
19,625
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.get_value
def get_value(self, label, takeable=False): """ Retrieve single value at passed index label .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- index : label takeable : interpret the index as indexers, default False Returns ------- value : scalar value """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(label, takeable=takeable)
python
def get_value(self, label, takeable=False): """ Retrieve single value at passed index label .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- index : label takeable : interpret the index as indexers, default False Returns ------- value : scalar value """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(label, takeable=takeable)
[ "def", "get_value", "(", "self", ",", "label", ",", "takeable", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"get_value is deprecated and will be removed \"", "\"in a future release. Please use \"", "\".at[] or .iat[] accessors instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_get_value", "(", "label", ",", "takeable", "=", "takeable", ")" ]
Retrieve single value at passed index label .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- index : label takeable : interpret the index as indexers, default False Returns ------- value : scalar value
[ "Retrieve", "single", "value", "at", "passed", "index", "label" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L342-L364
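Usage sketch (added), showing the accessors the deprecation message points to:

import pandas as pd

s = pd.SparseSeries([1.0, 2.0], index=['a', 'b'])

print(s.at['b'])  # by label, replaces get_value(label)
print(s.iat[1])   # by position, replaces get_value(..., takeable=True)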
19,626
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.set_value
def set_value(self, label, value, takeable=False): """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- label : object Partial indexing with MultiIndex not allowed value : object Scalar value takeable : interpret the index as indexers, default False Notes ----- This method *always* returns a new object. It is not particularly efficient but is provided for API compatibility with Series Returns ------- series : SparseSeries """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(label, value, takeable=takeable)
python
def set_value(self, label, value, takeable=False): """ Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- label : object Partial indexing with MultiIndex not allowed value : object Scalar value takeable : interpret the index as indexers, default False Notes ----- This method *always* returns a new object. It is not particularly efficient but is provided for API compatibility with Series Returns ------- series : SparseSeries """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(label, value, takeable=takeable)
[ "def", "set_value", "(", "self", ",", "label", ",", "value", ",", "takeable", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"set_value is deprecated and will be removed \"", "\"in a future release. Please use \"", "\".at[] or .iat[] accessors instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_set_value", "(", "label", ",", "value", ",", "takeable", "=", "takeable", ")" ]
Quickly set single value at passed label. If label is not contained, a new object is created with the label placed at the end of the result index .. deprecated:: 0.21.0 Please use .at[] or .iat[] accessors. Parameters ---------- label : object Partial indexing with MultiIndex not allowed value : object Scalar value takeable : interpret the index as indexers, default False Notes ----- This method *always* returns a new object. It is not particularly efficient but is provided for API compatibility with Series Returns ------- series : SparseSeries
[ "Quickly", "set", "single", "value", "at", "passed", "label", ".", "If", "label", "is", "not", "contained", "a", "new", "object", "is", "created", "with", "the", "label", "placed", "at", "the", "end", "of", "the", "result", "index" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L371-L402
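Usage sketch (added; shown on a dense Series for simplicity, since .at is the documented replacement on both):

import pandas as pd

s = pd.Series([1.0, 2.0], index=['a', 'b'])

# set_value is deprecated; .at assignment is the supported spelling.
# Per the docstring above, the sparse version always builds a new
# object internally, so it is not an efficient hot-loop API.
s.at['c'] = 3.0
print(s)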
19,627
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.to_dense
def to_dense(self): """ Convert SparseSeries to a Series. Returns ------- s : Series """ return Series(self.values.to_dense(), index=self.index, name=self.name)
python
def to_dense(self): """ Convert SparseSeries to a Series. Returns ------- s : Series """ return Series(self.values.to_dense(), index=self.index, name=self.name)
[ "def", "to_dense", "(", "self", ")", ":", "return", "Series", "(", "self", ".", "values", ".", "to_dense", "(", ")", ",", "index", "=", "self", ".", "index", ",", "name", "=", "self", ".", "name", ")" ]
Convert SparseSeries to a Series. Returns ------- s : Series
[ "Convert", "SparseSeries", "to", "a", "Series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L434-L443
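Usage sketch (added):

import numpy as np
import pandas as pd

sparse = pd.SparseSeries([1.0, np.nan, 3.0], name='vals')

dense = sparse.to_dense()
print(type(dense).__name__)  # Series
print(dense)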
19,628
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.copy
def copy(self, deep=True): """ Make a copy of the SparseSeries. Only the actual sparse values need to be copied """ # TODO: https://github.com/pandas-dev/pandas/issues/22314 # We skip the block manager till that is resolved. new_data = self.values.copy(deep=deep) return self._constructor(new_data, sparse_index=self.sp_index, fill_value=self.fill_value, index=self.index.copy(), name=self.name).__finalize__(self)
python
def copy(self, deep=True): """ Make a copy of the SparseSeries. Only the actual sparse values need to be copied """ # TODO: https://github.com/pandas-dev/pandas/issues/22314 # We skip the block manager till that is resolved. new_data = self.values.copy(deep=deep) return self._constructor(new_data, sparse_index=self.sp_index, fill_value=self.fill_value, index=self.index.copy(), name=self.name).__finalize__(self)
[ "def", "copy", "(", "self", ",", "deep", "=", "True", ")", ":", "# TODO: https://github.com/pandas-dev/pandas/issues/22314", "# We skip the block manager till that is resolved.", "new_data", "=", "self", ".", "values", ".", "copy", "(", "deep", "=", "deep", ")", "return", "self", ".", "_constructor", "(", "new_data", ",", "sparse_index", "=", "self", ".", "sp_index", ",", "fill_value", "=", "self", ".", "fill_value", ",", "index", "=", "self", ".", "index", ".", "copy", "(", ")", ",", "name", "=", "self", ".", "name", ")", ".", "__finalize__", "(", "self", ")" ]
Make a copy of the SparseSeries. Only the actual sparse values need to be copied
[ "Make", "a", "copy", "of", "the", "SparseSeries", ".", "Only", "the", "actual", "sparse", "values", "need", "to", "be", "copied" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L449-L460
19,629
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.sparse_reindex
def sparse_reindex(self, new_index): """ Conform sparse values to new SparseIndex Parameters ---------- new_index : {BlockIndex, IntIndex} Returns ------- reindexed : SparseSeries """ if not isinstance(new_index, splib.SparseIndex): raise TypeError("new index must be a SparseIndex") values = self.values values = values.sp_index.to_int_index().reindex( values.sp_values.astype('float64'), values.fill_value, new_index) values = SparseArray(values, sparse_index=new_index, fill_value=self.values.fill_value) return self._constructor(values, index=self.index).__finalize__(self)
python
def sparse_reindex(self, new_index): """ Conform sparse values to new SparseIndex Parameters ---------- new_index : {BlockIndex, IntIndex} Returns ------- reindexed : SparseSeries """ if not isinstance(new_index, splib.SparseIndex): raise TypeError("new index must be a SparseIndex") values = self.values values = values.sp_index.to_int_index().reindex( values.sp_values.astype('float64'), values.fill_value, new_index) values = SparseArray(values, sparse_index=new_index, fill_value=self.values.fill_value) return self._constructor(values, index=self.index).__finalize__(self)
[ "def", "sparse_reindex", "(", "self", ",", "new_index", ")", ":", "if", "not", "isinstance", "(", "new_index", ",", "splib", ".", "SparseIndex", ")", ":", "raise", "TypeError", "(", "\"new index must be a SparseIndex\"", ")", "values", "=", "self", ".", "values", "values", "=", "values", ".", "sp_index", ".", "to_int_index", "(", ")", ".", "reindex", "(", "values", ".", "sp_values", ".", "astype", "(", "'float64'", ")", ",", "values", ".", "fill_value", ",", "new_index", ")", "values", "=", "SparseArray", "(", "values", ",", "sparse_index", "=", "new_index", ",", "fill_value", "=", "self", ".", "values", ".", "fill_value", ")", "return", "self", ".", "_constructor", "(", "values", ",", "index", "=", "self", ".", "index", ")", ".", "__finalize__", "(", "self", ")" ]
Conform sparse values to new SparseIndex Parameters ---------- new_index : {BlockIndex, IntIndex} Returns ------- reindexed : SparseSeries
[ "Conform", "sparse", "values", "to", "new", "SparseIndex" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L470-L490
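Usage sketch (added, and somewhat speculative: a BlockIndex/IntIndex is rarely built by hand, so this borrows the sp_index of another series of the same length):

import numpy as np
import pandas as pd

s = pd.SparseSeries([1.0, np.nan, np.nan, 2.0])
other = pd.SparseSeries([np.nan, 5.0, np.nan, np.nan])

# Conform s's sparse values to other's SparseIndex; passing a
# label index here would raise TypeError.
out = s.sparse_reindex(other.sp_index)
print(out.sp_index)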
19,630
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.dropna
def dropna(self, axis=0, inplace=False, **kwargs): """ Analogous to Series.dropna. If fill_value=NaN, returns a dense Series """ # TODO: make more efficient # Validate axis self._get_axis_number(axis or 0) dense_valid = self.to_dense().dropna() if inplace: raise NotImplementedError("Cannot perform inplace dropna" " operations on a SparseSeries") if isna(self.fill_value): return dense_valid else: dense_valid = dense_valid[dense_valid != self.fill_value] return dense_valid.to_sparse(fill_value=self.fill_value)
python
def dropna(self, axis=0, inplace=False, **kwargs): """ Analogous to Series.dropna. If fill_value=NaN, returns a dense Series """ # TODO: make more efficient # Validate axis self._get_axis_number(axis or 0) dense_valid = self.to_dense().dropna() if inplace: raise NotImplementedError("Cannot perform inplace dropna" " operations on a SparseSeries") if isna(self.fill_value): return dense_valid else: dense_valid = dense_valid[dense_valid != self.fill_value] return dense_valid.to_sparse(fill_value=self.fill_value)
[ "def", "dropna", "(", "self", ",", "axis", "=", "0", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# TODO: make more efficient", "# Validate axis", "self", ".", "_get_axis_number", "(", "axis", "or", "0", ")", "dense_valid", "=", "self", ".", "to_dense", "(", ")", ".", "dropna", "(", ")", "if", "inplace", ":", "raise", "NotImplementedError", "(", "\"Cannot perform inplace dropna\"", "\" operations on a SparseSeries\"", ")", "if", "isna", "(", "self", ".", "fill_value", ")", ":", "return", "dense_valid", "else", ":", "dense_valid", "=", "dense_valid", "[", "dense_valid", "!=", "self", ".", "fill_value", "]", "return", "dense_valid", ".", "to_sparse", "(", "fill_value", "=", "self", ".", "fill_value", ")" ]
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
[ "Analogous", "to", "Series", ".", "dropna", ".", "If", "fill_value", "=", "NaN", "returns", "a", "dense", "Series" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L537-L552
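Usage sketch (added), illustrating the docstring's caveat that a NaN fill_value yields a dense result:

import numpy as np
import pandas as pd

s = pd.SparseSeries([1.0, np.nan, 2.0])
print(type(s.dropna()).__name__)  # Series, not SparseSeries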
19,631
pandas-dev/pandas
pandas/core/sparse/series.py
SparseSeries.combine_first
def combine_first(self, other): """ Combine Series values, choosing the calling Series's values first. Result index will be the union of the two indexes Parameters ---------- other : Series Returns ------- y : Series """ if isinstance(other, SparseSeries): other = other.to_dense() dense_combined = self.to_dense().combine_first(other) return dense_combined.to_sparse(fill_value=self.fill_value)
python
def combine_first(self, other): """ Combine Series values, choosing the calling Series's values first. Result index will be the union of the two indexes Parameters ---------- other : Series Returns ------- y : Series """ if isinstance(other, SparseSeries): other = other.to_dense() dense_combined = self.to_dense().combine_first(other) return dense_combined.to_sparse(fill_value=self.fill_value)
[ "def", "combine_first", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "SparseSeries", ")", ":", "other", "=", "other", ".", "to_dense", "(", ")", "dense_combined", "=", "self", ".", "to_dense", "(", ")", ".", "combine_first", "(", "other", ")", "return", "dense_combined", ".", "to_sparse", "(", "fill_value", "=", "self", ".", "fill_value", ")" ]
Combine Series values, choosing the calling Series's values first. Result index will be the union of the two indexes Parameters ---------- other : Series Returns ------- y : Series
[ "Combine", "Series", "values", "choosing", "the", "calling", "Series", "s", "values", "first", ".", "Result", "index", "will", "be", "the", "union", "of", "the", "two", "indexes" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/series.py#L554-L571
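Usage sketch (added):

import numpy as np
import pandas as pd

a = pd.SparseSeries([1.0, np.nan], index=['x', 'y'])
b = pd.Series([10.0, 20.0, 30.0], index=['x', 'y', 'z'])

# a's values win where present; the index is the union of both, and
# the work happens densely before converting back to sparse.
print(a.combine_first(b))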
19,632
pandas-dev/pandas
pandas/core/tools/datetimes.py
_maybe_cache
def _maybe_cache(arg, format, cache, convert_listlike): """ Create a cache of unique dates from an array of dates Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series format : string Strftime format to parse time cache : boolean True attempts to create a cache of converted values convert_listlike : function Conversion function to apply on dates Returns ------- cache_array : Series Cache of converted, unique dates. Can be empty """ from pandas import Series cache_array = Series() if cache: # Perform a quicker unique check from pandas import Index unique_dates = Index(arg).unique() if len(unique_dates) < len(arg): cache_dates = convert_listlike(unique_dates.to_numpy(), True, format) cache_array = Series(cache_dates, index=unique_dates) return cache_array
python
def _maybe_cache(arg, format, cache, convert_listlike): """ Create a cache of unique dates from an array of dates Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series format : string Strftime format to parse time cache : boolean True attempts to create a cache of converted values convert_listlike : function Conversion function to apply on dates Returns ------- cache_array : Series Cache of converted, unique dates. Can be empty """ from pandas import Series cache_array = Series() if cache: # Perform a quicker unique check from pandas import Index unique_dates = Index(arg).unique() if len(unique_dates) < len(arg): cache_dates = convert_listlike(unique_dates.to_numpy(), True, format) cache_array = Series(cache_dates, index=unique_dates) return cache_array
[ "def", "_maybe_cache", "(", "arg", ",", "format", ",", "cache", ",", "convert_listlike", ")", ":", "from", "pandas", "import", "Series", "cache_array", "=", "Series", "(", ")", "if", "cache", ":", "# Perform a quicker unique check", "from", "pandas", "import", "Index", "unique_dates", "=", "Index", "(", "arg", ")", ".", "unique", "(", ")", "if", "len", "(", "unique_dates", ")", "<", "len", "(", "arg", ")", ":", "cache_dates", "=", "convert_listlike", "(", "unique_dates", ".", "to_numpy", "(", ")", ",", "True", ",", "format", ")", "cache_array", "=", "Series", "(", "cache_dates", ",", "index", "=", "unique_dates", ")", "return", "cache_array" ]
Create a cache of unique dates from an array of dates Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series format : string Strftime format to parse time cache : boolean True attempts to create a cache of converted values convert_listlike : function Conversion function to apply on dates Returns ------- cache_array : Series Cache of converted, unique dates. Can be empty
[ "Create", "a", "cache", "of", "unique", "dates", "from", "an", "array", "of", "dates" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L31-L60
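Usage sketch (added): _maybe_cache is internal, but cache=True on pd.to_datetime is the public switch that reaches it.

import pandas as pd

dates = ['2019-01-01', '2019-01-02'] * 1000  # heavily duplicated

# Only the 2 unique strings are parsed; the 2000-element result is
# then built by mapping the small cache Series over the input.
result = pd.to_datetime(dates, cache=True)
print(result[:2])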
19,633
pandas-dev/pandas
pandas/core/tools/datetimes.py
_convert_and_box_cache
def _convert_and_box_cache(arg, cache_array, box, errors, name=None): """ Convert array of dates with a cache and box the result Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series cache_array : Series Cache of converted, unique dates box : boolean True boxes result as an Index-like, False returns an ndarray errors : string 'ignore' plus box=True will convert result to Index name : string, default None Name for a DatetimeIndex Returns ------- result : datetime of converted dates Returns: - Index-like if box=True - ndarray if box=False """ from pandas import Series, DatetimeIndex, Index result = Series(arg).map(cache_array) if box: if errors == 'ignore': return Index(result, name=name) else: return DatetimeIndex(result, name=name) return result.values
python
def _convert_and_box_cache(arg, cache_array, box, errors, name=None): """ Convert array of dates with a cache and box the result Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series cache_array : Series Cache of converted, unique dates box : boolean True boxes result as an Index-like, False returns an ndarray errors : string 'ignore' plus box=True will convert result to Index name : string, default None Name for a DatetimeIndex Returns ------- result : datetime of converted dates Returns: - Index-like if box=True - ndarray if box=False """ from pandas import Series, DatetimeIndex, Index result = Series(arg).map(cache_array) if box: if errors == 'ignore': return Index(result, name=name) else: return DatetimeIndex(result, name=name) return result.values
[ "def", "_convert_and_box_cache", "(", "arg", ",", "cache_array", ",", "box", ",", "errors", ",", "name", "=", "None", ")", ":", "from", "pandas", "import", "Series", ",", "DatetimeIndex", ",", "Index", "result", "=", "Series", "(", "arg", ")", ".", "map", "(", "cache_array", ")", "if", "box", ":", "if", "errors", "==", "'ignore'", ":", "return", "Index", "(", "result", ",", "name", "=", "name", ")", "else", ":", "return", "DatetimeIndex", "(", "result", ",", "name", "=", "name", ")", "return", "result", ".", "values" ]
Convert array of dates with a cache and box the result Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series cache_array : Series Cache of converted, unique dates box : boolean True boxes result as an Index-like, False returns an ndarray errors : string 'ignore' plus box=True will convert result to Index name : string, default None Name for a DatetimeIndex Returns ------- result : datetime of converted dates Returns: - Index-like if box=True - ndarray if box=False
[ "Convert", "array", "of", "dates", "with", "a", "cache", "and", "box", "the", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L63-L94
19,634
pandas-dev/pandas
pandas/core/tools/datetimes.py
_return_parsed_timezone_results
def _return_parsed_timezone_results(result, timezones, box, tz, name): """ Return results from array_strptime if a %z or %Z directive was passed. Parameters ---------- result : ndarray int64 date representations of the dates timezones : ndarray pytz timezone objects box : boolean True boxes result as an Index-like, False returns an ndarray tz : object None or pytz timezone object name : string, default None Name for a DatetimeIndex Returns ------- tz_result : ndarray of parsed dates with timezone Returns: - Index-like if box=True - ndarray of Timestamps if box=False """ if tz is not None: raise ValueError("Cannot pass a tz argument when " "parsing strings with timezone " "information.") tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]) if box: from pandas import Index return Index(tz_results, name=name) return tz_results
python
def _return_parsed_timezone_results(result, timezones, box, tz, name): """ Return results from array_strptime if a %z or %Z directive was passed. Parameters ---------- result : ndarray int64 date representations of the dates timezones : ndarray pytz timezone objects box : boolean True boxes result as an Index-like, False returns an ndarray tz : object None or pytz timezone object name : string, default None Name for a DatetimeIndex Returns ------- tz_result : ndarray of parsed dates with timezone Returns: - Index-like if box=True - ndarray of Timestamps if box=False """ if tz is not None: raise ValueError("Cannot pass a tz argument when " "parsing strings with timezone " "information.") tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]) if box: from pandas import Index return Index(tz_results, name=name) return tz_results
[ "def", "_return_parsed_timezone_results", "(", "result", ",", "timezones", ",", "box", ",", "tz", ",", "name", ")", ":", "if", "tz", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot pass a tz argument when \"", "\"parsing strings with timezone \"", "\"information.\"", ")", "tz_results", "=", "np", ".", "array", "(", "[", "Timestamp", "(", "res", ")", ".", "tz_localize", "(", "zone", ")", "for", "res", ",", "zone", "in", "zip", "(", "result", ",", "timezones", ")", "]", ")", "if", "box", ":", "from", "pandas", "import", "Index", "return", "Index", "(", "tz_results", ",", "name", "=", "name", ")", "return", "tz_results" ]
Return results from array_strptime if a %z or %Z directive was passed. Parameters ---------- result : ndarray int64 date representations of the dates timezones : ndarray pytz timezone objects box : boolean True boxes result as an Index-like, False returns an ndarray tz : object None or pytz timezone object name : string, default None Name for a DatetimeIndex Returns ------- tz_result : ndarray of parsed dates with timezone Returns: - Index-like if box=True - ndarray of Timestamps if box=False
[ "Return", "results", "from", "array_strptime", "if", "a", "%z", "or", "%Z", "directive", "was", "passed", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L97-L132
19,635
pandas-dev/pandas
pandas/core/tools/datetimes.py
_adjust_to_origin
def _adjust_to_origin(arg, origin, unit): """ Helper function for to_datetime. Adjust input argument to the specified origin Parameters ---------- arg : list, tuple, ndarray, Series, Index date to be adjusted origin : 'julian' or Timestamp origin offset for the arg unit : string passed unit from to_datetime, must be 'D' Returns ------- ndarray or scalar of adjusted date(s) """ if origin == 'julian': original = arg j0 = Timestamp(0).to_julian_date() if unit != 'D': raise ValueError("unit must be 'D' for origin='julian'") try: arg = arg - j0 except TypeError: raise ValueError("incompatible 'arg' type for given " "'origin'='julian'") # premptively check this for a nice range j_max = Timestamp.max.to_julian_date() - j0 j_min = Timestamp.min.to_julian_date() - j0 if np.any(arg > j_max) or np.any(arg < j_min): raise tslibs.OutOfBoundsDatetime( "{original} is Out of Bounds for " "origin='julian'".format(original=original)) else: # arg must be numeric if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or is_numeric_dtype(np.asarray(arg))): raise ValueError( "'{arg}' is not compatible with origin='{origin}'; " "it must be numeric with a unit specified ".format( arg=arg, origin=origin)) # we are going to offset back to unix / epoch time try: offset = Timestamp(origin) except tslibs.OutOfBoundsDatetime: raise tslibs.OutOfBoundsDatetime( "origin {origin} is Out of Bounds".format(origin=origin)) except ValueError: raise ValueError("origin {origin} cannot be converted " "to a Timestamp".format(origin=origin)) if offset.tz is not None: raise ValueError( "origin offset {} must be tz-naive".format(offset)) offset -= Timestamp(0) # convert the offset to the unit of the arg # this should be lossless in terms of precision offset = offset // tslibs.Timedelta(1, unit=unit) # scalars & ndarray-like can handle the addition if is_list_like(arg) and not isinstance( arg, (ABCSeries, ABCIndexClass, np.ndarray)): arg = np.asarray(arg) arg = arg + offset return arg
python
def _adjust_to_origin(arg, origin, unit): """ Helper function for to_datetime. Adjust input argument to the specified origin Parameters ---------- arg : list, tuple, ndarray, Series, Index date to be adjusted origin : 'julian' or Timestamp origin offset for the arg unit : string passed unit from to_datetime, must be 'D' Returns ------- ndarray or scalar of adjusted date(s) """ if origin == 'julian': original = arg j0 = Timestamp(0).to_julian_date() if unit != 'D': raise ValueError("unit must be 'D' for origin='julian'") try: arg = arg - j0 except TypeError: raise ValueError("incompatible 'arg' type for given " "'origin'='julian'") # premptively check this for a nice range j_max = Timestamp.max.to_julian_date() - j0 j_min = Timestamp.min.to_julian_date() - j0 if np.any(arg > j_max) or np.any(arg < j_min): raise tslibs.OutOfBoundsDatetime( "{original} is Out of Bounds for " "origin='julian'".format(original=original)) else: # arg must be numeric if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or is_numeric_dtype(np.asarray(arg))): raise ValueError( "'{arg}' is not compatible with origin='{origin}'; " "it must be numeric with a unit specified ".format( arg=arg, origin=origin)) # we are going to offset back to unix / epoch time try: offset = Timestamp(origin) except tslibs.OutOfBoundsDatetime: raise tslibs.OutOfBoundsDatetime( "origin {origin} is Out of Bounds".format(origin=origin)) except ValueError: raise ValueError("origin {origin} cannot be converted " "to a Timestamp".format(origin=origin)) if offset.tz is not None: raise ValueError( "origin offset {} must be tz-naive".format(offset)) offset -= Timestamp(0) # convert the offset to the unit of the arg # this should be lossless in terms of precision offset = offset // tslibs.Timedelta(1, unit=unit) # scalars & ndarray-like can handle the addition if is_list_like(arg) and not isinstance( arg, (ABCSeries, ABCIndexClass, np.ndarray)): arg = np.asarray(arg) arg = arg + offset return arg
[ "def", "_adjust_to_origin", "(", "arg", ",", "origin", ",", "unit", ")", ":", "if", "origin", "==", "'julian'", ":", "original", "=", "arg", "j0", "=", "Timestamp", "(", "0", ")", ".", "to_julian_date", "(", ")", "if", "unit", "!=", "'D'", ":", "raise", "ValueError", "(", "\"unit must be 'D' for origin='julian'\"", ")", "try", ":", "arg", "=", "arg", "-", "j0", "except", "TypeError", ":", "raise", "ValueError", "(", "\"incompatible 'arg' type for given \"", "\"'origin'='julian'\"", ")", "# premptively check this for a nice range", "j_max", "=", "Timestamp", ".", "max", ".", "to_julian_date", "(", ")", "-", "j0", "j_min", "=", "Timestamp", ".", "min", ".", "to_julian_date", "(", ")", "-", "j0", "if", "np", ".", "any", "(", "arg", ">", "j_max", ")", "or", "np", ".", "any", "(", "arg", "<", "j_min", ")", ":", "raise", "tslibs", ".", "OutOfBoundsDatetime", "(", "\"{original} is Out of Bounds for \"", "\"origin='julian'\"", ".", "format", "(", "original", "=", "original", ")", ")", "else", ":", "# arg must be numeric", "if", "not", "(", "(", "is_scalar", "(", "arg", ")", "and", "(", "is_integer", "(", "arg", ")", "or", "is_float", "(", "arg", ")", ")", ")", "or", "is_numeric_dtype", "(", "np", ".", "asarray", "(", "arg", ")", ")", ")", ":", "raise", "ValueError", "(", "\"'{arg}' is not compatible with origin='{origin}'; \"", "\"it must be numeric with a unit specified \"", ".", "format", "(", "arg", "=", "arg", ",", "origin", "=", "origin", ")", ")", "# we are going to offset back to unix / epoch time", "try", ":", "offset", "=", "Timestamp", "(", "origin", ")", "except", "tslibs", ".", "OutOfBoundsDatetime", ":", "raise", "tslibs", ".", "OutOfBoundsDatetime", "(", "\"origin {origin} is Out of Bounds\"", ".", "format", "(", "origin", "=", "origin", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"origin {origin} cannot be converted \"", "\"to a Timestamp\"", ".", "format", "(", "origin", "=", "origin", ")", ")", "if", "offset", ".", "tz", "is", "not", "None", ":", "raise", "ValueError", "(", "\"origin offset {} must be tz-naive\"", ".", "format", "(", "offset", ")", ")", "offset", "-=", "Timestamp", "(", "0", ")", "# convert the offset to the unit of the arg", "# this should be lossless in terms of precision", "offset", "=", "offset", "//", "tslibs", ".", "Timedelta", "(", "1", ",", "unit", "=", "unit", ")", "# scalars & ndarray-like can handle the addition", "if", "is_list_like", "(", "arg", ")", "and", "not", "isinstance", "(", "arg", ",", "(", "ABCSeries", ",", "ABCIndexClass", ",", "np", ".", "ndarray", ")", ")", ":", "arg", "=", "np", ".", "asarray", "(", "arg", ")", "arg", "=", "arg", "+", "offset", "return", "arg" ]
Helper function for to_datetime. Adjust input argument to the specified origin Parameters ---------- arg : list, tuple, ndarray, Series, Index date to be adjusted origin : 'julian' or Timestamp origin offset for the arg unit : string passed unit from to_datetime, must be 'D' Returns ------- ndarray or scalar of adjusted date(s)
[ "Helper", "function", "for", "to_datetime", ".", "Adjust", "input", "argument", "to", "the", "specified", "origin" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L329-L399
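Usage sketch (added), exercising both origin branches handled by _adjust_to_origin:

import pandas as pd

# Timestamp origin: values are counted in `unit` from that date.
print(pd.to_datetime([1, 2], unit='D', origin=pd.Timestamp('1960-01-01')))

# 'julian' origin requires unit='D'; anything else raises ValueError.
try:
    pd.to_datetime([2451545], unit='s', origin='julian')
except ValueError as exc:
    print(exc)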
19,636
pandas-dev/pandas
pandas/core/tools/datetimes.py
to_datetime
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=False): """ Convert argument to datetime. Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series .. versionadded:: 0.18.1 or DataFrame/dict-like errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input dayfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10. Warning: dayfirst=True is not strict, but will prefer to parse with day first (this is a known bug, based on dateutil behavior). yearfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. - If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12. - If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil). Warning: yearfirst=True is not strict, but will prefer to parse with year first (this is a known bug, based on dateutil behavior). .. versionadded:: 0.16.1 utc : boolean, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well). box : boolean, default True - If True returns a DatetimeIndex or Index-like object - If False returns ndarray of values. .. deprecated:: 0.25.0 Use :meth:`.to_numpy` or :meth:`Timestamp.to_datetime64` instead to get an ndarray of values or numpy.datetime64, respectively. format : string, default None strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse all the way up to nanoseconds. See strftime documentation for more information on choices: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior exact : boolean, True by default - If True, require an exact format match. - If False, allow the format to match anywhere in the target string. unit : string, default 'ns' unit of the arg (D,s,ms,us,ns) denote the unit, which is an integer or float number. This will be based off the origin. Example, with unit='ms' and origin='unix' (the default), this would calculate the number of milliseconds to the unix epoch start. infer_datetime_format : boolean, default False If True and no `format` is given, attempt to infer the format of the datetime strings, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by ~5-10x. origin : scalar, default is 'unix' Define the reference date. The numeric values would be parsed as number of units (defined by `unit`) since this reference date. - If 'unix' (or POSIX) time; origin is set to 1970-01-01. - If 'julian', unit must be 'D', and origin is set to beginning of Julian Calendar. Julian day number 0 is assigned to the day starting at noon on January 1, 4713 BC. - If Timestamp convertible, origin is set to Timestamp identified by origin. .. versionadded:: 0.20.0 cache : boolean, default False If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. .. versionadded:: 0.23.0 Returns ------- ret : datetime if parsing succeeded. Return type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp In case when it is not possible to return designated types (e.g. when any element of input is before Timestamp.min or after Timestamp.max) return will have datetime.datetime type (or corresponding array/Series). See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_timedelta : Convert argument to timedelta. Examples -------- Assembling a datetime from multiple columns of a DataFrame. The keys can be common abbreviations like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns']) or plurals of the same >>> df = pd.DataFrame({'year': [2015, 2016], 'month': [2, 3], 'day': [4, 5]}) >>> pd.to_datetime(df) 0 2015-02-04 1 2016-03-05 dtype: datetime64[ns] If a date does not meet the `timestamp limitations <http://pandas.pydata.org/pandas-docs/stable/timeseries.html #timeseries-timestamp-limits>`_, passing errors='ignore' will return the original input instead of raising any exception. Passing errors='coerce' will force an out-of-bounds date to NaT, in addition to forcing non-dates (or non-parseable dates) to NaT. >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore') datetime.datetime(1300, 1, 1, 0, 0) >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce') NaT Passing infer_datetime_format=True can often-times speedup a parsing if its not an ISO8601 format exactly, but in a regular format. >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000) >>> s.head() 0 3/11/2000 1 3/12/2000 2 3/13/2000 3 3/11/2000 4 3/12/2000 dtype: object >>> %timeit pd.to_datetime(s,infer_datetime_format=True) 100 loops, best of 3: 10.4 ms per loop >>> %timeit pd.to_datetime(s,infer_datetime_format=False) 1 loop, best of 3: 471 ms per loop Using a unix epoch time >>> pd.to_datetime(1490195805, unit='s') Timestamp('2017-03-22 15:16:45') >>> pd.to_datetime(1490195805433502912, unit='ns') Timestamp('2017-03-22 15:16:45.433502912') .. warning:: For float arg, precision rounding might happen. To prevent unexpected behavior use a fixed-width exact type. Using a non-unix epoch origin >>> pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')) 0 1960-01-02 1 1960-01-03 2 1960-01-04 """ if arg is None: return None if origin != 'unix': arg = _adjust_to_origin(arg, origin, unit) tz = 'utc' if utc else None convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit, dayfirst=dayfirst, yearfirst=yearfirst, errors=errors, exact=exact, infer_datetime_format=infer_datetime_format) if isinstance(arg, Timestamp): result = arg if tz is not None: if arg.tz is not None: result = result.tz_convert(tz) else: result = result.tz_localize(tz) elif isinstance(arg, ABCSeries): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = arg.map(cache_array) else: values = convert_listlike(arg._values, True, format) result = arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)): result = _assemble_from_unit_mappings(arg, errors, box, tz) elif isinstance(arg, ABCIndexClass): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, box, errors, name=arg.name) else: convert_listlike = partial(convert_listlike, name=arg.name) result = convert_listlike(arg, box, format) elif is_list_like(arg): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, box, errors) else: result = convert_listlike(arg, box, format) else: result = convert_listlike(np.array([arg]), box, format)[0] return result
python
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=False): """ Convert argument to datetime. Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series .. versionadded:: 0.18.1 or DataFrame/dict-like errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input dayfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10. Warning: dayfirst=True is not strict, but will prefer to parse with day first (this is a known bug, based on dateutil behavior). yearfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. - If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12. - If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil). Warning: yearfirst=True is not strict, but will prefer to parse with year first (this is a known bug, based on dateutil behavior). .. versionadded:: 0.16.1 utc : boolean, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well). box : boolean, default True - If True returns a DatetimeIndex or Index-like object - If False returns ndarray of values. .. deprecated:: 0.25.0 Use :meth:`.to_numpy` or :meth:`Timestamp.to_datetime64` instead to get an ndarray of values or numpy.datetime64, respectively. format : string, default None strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse all the way up to nanoseconds. See strftime documentation for more information on choices: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior exact : boolean, True by default - If True, require an exact format match. - If False, allow the format to match anywhere in the target string. unit : string, default 'ns' unit of the arg (D,s,ms,us,ns) denote the unit, which is an integer or float number. This will be based off the origin. Example, with unit='ms' and origin='unix' (the default), this would calculate the number of milliseconds to the unix epoch start. infer_datetime_format : boolean, default False If True and no `format` is given, attempt to infer the format of the datetime strings, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by ~5-10x. origin : scalar, default is 'unix' Define the reference date. The numeric values would be parsed as number of units (defined by `unit`) since this reference date. - If 'unix' (or POSIX) time; origin is set to 1970-01-01. - If 'julian', unit must be 'D', and origin is set to beginning of Julian Calendar. Julian day number 0 is assigned to the day starting at noon on January 1, 4713 BC. - If Timestamp convertible, origin is set to Timestamp identified by origin. .. versionadded:: 0.20.0 cache : boolean, default False If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. .. versionadded:: 0.23.0 Returns ------- ret : datetime if parsing succeeded. 
Return type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp In case when it is not possible to return designated types (e.g. when any element of the input is before Timestamp.min or after Timestamp.max), the return will have datetime.datetime type (or a corresponding array/Series). See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_timedelta : Convert argument to timedelta. Examples -------- Assembling a datetime from multiple columns of a DataFrame. The keys can be common abbreviations like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns'] or plurals of the same >>> df = pd.DataFrame({'year': [2015, 2016], 'month': [2, 3], 'day': [4, 5]}) >>> pd.to_datetime(df) 0 2015-02-04 1 2016-03-05 dtype: datetime64[ns] If a date does not meet the `timestamp limitations <http://pandas.pydata.org/pandas-docs/stable/timeseries.html #timeseries-timestamp-limits>`_, passing errors='ignore' will return the original input instead of raising any exception. Passing errors='coerce' will force an out-of-bounds date to NaT, in addition to forcing non-dates (or non-parseable dates) to NaT. >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore') datetime.datetime(1300, 1, 1, 0, 0) >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce') NaT Passing infer_datetime_format=True can often speed up parsing if the input is not exactly in ISO 8601 format but does follow a regular pattern. >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000) >>> s.head() 0 3/11/2000 1 3/12/2000 2 3/13/2000 3 3/11/2000 4 3/12/2000 dtype: object >>> %timeit pd.to_datetime(s,infer_datetime_format=True) 100 loops, best of 3: 10.4 ms per loop >>> %timeit pd.to_datetime(s,infer_datetime_format=False) 1 loop, best of 3: 471 ms per loop Using a unix epoch time >>> pd.to_datetime(1490195805, unit='s') Timestamp('2017-03-22 15:16:45') >>> pd.to_datetime(1490195805433502912, unit='ns') Timestamp('2017-03-22 15:16:45.433502912') .. warning:: For float arg, precision rounding might happen. To prevent unexpected behavior use a fixed-width exact type. 
Using a non-unix epoch origin >>> pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')) 0 1960-01-02 1 1960-01-03 2 1960-01-04 """ if arg is None: return None if origin != 'unix': arg = _adjust_to_origin(arg, origin, unit) tz = 'utc' if utc else None convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit, dayfirst=dayfirst, yearfirst=yearfirst, errors=errors, exact=exact, infer_datetime_format=infer_datetime_format) if isinstance(arg, Timestamp): result = arg if tz is not None: if arg.tz is not None: result = result.tz_convert(tz) else: result = result.tz_localize(tz) elif isinstance(arg, ABCSeries): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = arg.map(cache_array) else: values = convert_listlike(arg._values, True, format) result = arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)): result = _assemble_from_unit_mappings(arg, errors, box, tz) elif isinstance(arg, ABCIndexClass): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, box, errors, name=arg.name) else: convert_listlike = partial(convert_listlike, name=arg.name) result = convert_listlike(arg, box, format) elif is_list_like(arg): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, box, errors) else: result = convert_listlike(arg, box, format) else: result = convert_listlike(np.array([arg]), box, format)[0] return result
[ "def", "to_datetime", "(", "arg", ",", "errors", "=", "'raise'", ",", "dayfirst", "=", "False", ",", "yearfirst", "=", "False", ",", "utc", "=", "None", ",", "box", "=", "True", ",", "format", "=", "None", ",", "exact", "=", "True", ",", "unit", "=", "None", ",", "infer_datetime_format", "=", "False", ",", "origin", "=", "'unix'", ",", "cache", "=", "False", ")", ":", "if", "arg", "is", "None", ":", "return", "None", "if", "origin", "!=", "'unix'", ":", "arg", "=", "_adjust_to_origin", "(", "arg", ",", "origin", ",", "unit", ")", "tz", "=", "'utc'", "if", "utc", "else", "None", "convert_listlike", "=", "partial", "(", "_convert_listlike_datetimes", ",", "tz", "=", "tz", ",", "unit", "=", "unit", ",", "dayfirst", "=", "dayfirst", ",", "yearfirst", "=", "yearfirst", ",", "errors", "=", "errors", ",", "exact", "=", "exact", ",", "infer_datetime_format", "=", "infer_datetime_format", ")", "if", "isinstance", "(", "arg", ",", "Timestamp", ")", ":", "result", "=", "arg", "if", "tz", "is", "not", "None", ":", "if", "arg", ".", "tz", "is", "not", "None", ":", "result", "=", "result", ".", "tz_convert", "(", "tz", ")", "else", ":", "result", "=", "result", ".", "tz_localize", "(", "tz", ")", "elif", "isinstance", "(", "arg", ",", "ABCSeries", ")", ":", "cache_array", "=", "_maybe_cache", "(", "arg", ",", "format", ",", "cache", ",", "convert_listlike", ")", "if", "not", "cache_array", ".", "empty", ":", "result", "=", "arg", ".", "map", "(", "cache_array", ")", "else", ":", "values", "=", "convert_listlike", "(", "arg", ".", "_values", ",", "True", ",", "format", ")", "result", "=", "arg", ".", "_constructor", "(", "values", ",", "index", "=", "arg", ".", "index", ",", "name", "=", "arg", ".", "name", ")", "elif", "isinstance", "(", "arg", ",", "(", "ABCDataFrame", ",", "abc", ".", "MutableMapping", ")", ")", ":", "result", "=", "_assemble_from_unit_mappings", "(", "arg", ",", "errors", ",", "box", ",", "tz", ")", "elif", "isinstance", "(", "arg", ",", "ABCIndexClass", ")", ":", "cache_array", "=", "_maybe_cache", "(", "arg", ",", "format", ",", "cache", ",", "convert_listlike", ")", "if", "not", "cache_array", ".", "empty", ":", "result", "=", "_convert_and_box_cache", "(", "arg", ",", "cache_array", ",", "box", ",", "errors", ",", "name", "=", "arg", ".", "name", ")", "else", ":", "convert_listlike", "=", "partial", "(", "convert_listlike", ",", "name", "=", "arg", ".", "name", ")", "result", "=", "convert_listlike", "(", "arg", ",", "box", ",", "format", ")", "elif", "is_list_like", "(", "arg", ")", ":", "cache_array", "=", "_maybe_cache", "(", "arg", ",", "format", ",", "cache", ",", "convert_listlike", ")", "if", "not", "cache_array", ".", "empty", ":", "result", "=", "_convert_and_box_cache", "(", "arg", ",", "cache_array", ",", "box", ",", "errors", ")", "else", ":", "result", "=", "convert_listlike", "(", "arg", ",", "box", ",", "format", ")", "else", ":", "result", "=", "convert_listlike", "(", "np", ".", "array", "(", "[", "arg", "]", ")", ",", "box", ",", "format", ")", "[", "0", "]", "return", "result" ]
Convert argument to datetime. Parameters ---------- arg : integer, float, string, datetime, list, tuple, 1-d array, Series .. versionadded:: 0.18.1 or DataFrame/dict-like errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input dayfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. If True, parses dates with the day first, eg 10/11/12 is parsed as 2012-11-10. Warning: dayfirst=True is not strict, but will prefer to parse with day first (this is a known bug, based on dateutil behavior). yearfirst : boolean, default False Specify a date parse order if `arg` is str or its list-likes. - If True parses dates with the year first, eg 10/11/12 is parsed as 2010-11-12. - If both dayfirst and yearfirst are True, yearfirst is preceded (same as dateutil). Warning: yearfirst=True is not strict, but will prefer to parse with year first (this is a known bug, based on dateutil behavior). .. versionadded:: 0.16.1 utc : boolean, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well). box : boolean, default True - If True returns a DatetimeIndex or Index-like object - If False returns ndarray of values. .. deprecated:: 0.25.0 Use :meth:`.to_numpy` or :meth:`Timestamp.to_datetime64` instead to get an ndarray of values or numpy.datetime64, respectively. format : string, default None strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse all the way up to nanoseconds. See strftime documentation for more information on choices: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior exact : boolean, True by default - If True, require an exact format match. - If False, allow the format to match anywhere in the target string. unit : string, default 'ns' unit of the arg (D,s,ms,us,ns) denote the unit, which is an integer or float number. This will be based off the origin. Example, with unit='ms' and origin='unix' (the default), this would calculate the number of milliseconds to the unix epoch start. infer_datetime_format : boolean, default False If True and no `format` is given, attempt to infer the format of the datetime strings, and if it can be inferred, switch to a faster method of parsing them. In some cases this can increase the parsing speed by ~5-10x. origin : scalar, default is 'unix' Define the reference date. The numeric values would be parsed as number of units (defined by `unit`) since this reference date. - If 'unix' (or POSIX) time; origin is set to 1970-01-01. - If 'julian', unit must be 'D', and origin is set to beginning of Julian Calendar. Julian day number 0 is assigned to the day starting at noon on January 1, 4713 BC. - If Timestamp convertible, origin is set to Timestamp identified by origin. .. versionadded:: 0.20.0 cache : boolean, default False If True, use a cache of unique, converted dates to apply the datetime conversion. May produce significant speed-up when parsing duplicate date strings, especially ones with timezone offsets. .. versionadded:: 0.23.0 Returns ------- ret : datetime if parsing succeeded. Return type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp In case when it is not possible to return designated types (e.g. 
when any element of the input is before Timestamp.min or after Timestamp.max), the return will have datetime.datetime type (or a corresponding array/Series). See Also -------- DataFrame.astype : Cast argument to a specified dtype. to_timedelta : Convert argument to timedelta. Examples -------- Assembling a datetime from multiple columns of a DataFrame. The keys can be common abbreviations like ['year', 'month', 'day', 'minute', 'second', 'ms', 'us', 'ns'] or plurals of the same >>> df = pd.DataFrame({'year': [2015, 2016], 'month': [2, 3], 'day': [4, 5]}) >>> pd.to_datetime(df) 0 2015-02-04 1 2016-03-05 dtype: datetime64[ns] If a date does not meet the `timestamp limitations <http://pandas.pydata.org/pandas-docs/stable/timeseries.html #timeseries-timestamp-limits>`_, passing errors='ignore' will return the original input instead of raising any exception. Passing errors='coerce' will force an out-of-bounds date to NaT, in addition to forcing non-dates (or non-parseable dates) to NaT. >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore') datetime.datetime(1300, 1, 1, 0, 0) >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce') NaT Passing infer_datetime_format=True can often speed up parsing if the input is not exactly in ISO 8601 format but does follow a regular pattern. >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000) >>> s.head() 0 3/11/2000 1 3/12/2000 2 3/13/2000 3 3/11/2000 4 3/12/2000 dtype: object >>> %timeit pd.to_datetime(s,infer_datetime_format=True) 100 loops, best of 3: 10.4 ms per loop >>> %timeit pd.to_datetime(s,infer_datetime_format=False) 1 loop, best of 3: 471 ms per loop Using a unix epoch time >>> pd.to_datetime(1490195805, unit='s') Timestamp('2017-03-22 15:16:45') >>> pd.to_datetime(1490195805433502912, unit='ns') Timestamp('2017-03-22 15:16:45.433502912') .. warning:: For float arg, precision rounding might happen. To prevent unexpected behavior use a fixed-width exact type. Using a non-unix epoch origin >>> pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')) 0 1960-01-02 1 1960-01-03 2 1960-01-04
[ "Convert", "argument", "to", "datetime", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L403-L622
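A short runnable sketch of the conversion paths documented above, using only the public pd.to_datetime API; the expected outputs in the comments follow the docstring's own examples.

import pandas as pd

# Integers are interpreted as counts of `unit` since `origin`
# (the unix epoch by default).
print(pd.to_datetime(1490195805, unit='s'))  # 2017-03-22 15:16:45

# A non-unix origin shifts the epoch.
print(pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01')))

# errors='coerce' maps unparseable or out-of-bounds input to NaT.
print(pd.to_datetime(['2000-01-31', 'not a date'], errors='coerce'))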
19,637
pandas-dev/pandas
pandas/util/_decorators.py
deprecate
def deprecate(name, alternative, version, alt_name=None, klass=None, stacklevel=2, msg=None): """ Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.' """ alt_name = alt_name or alternative.__name__ klass = klass or FutureWarning warning_msg = msg or '{} is deprecated, use {} instead'.format(name, alt_name) @wraps(alternative) def wrapper(*args, **kwargs): warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) # adding deprecated directive to the docstring msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) doc_error_msg = ('deprecate needs a correctly formatted docstring in ' 'the target function (should have a one liner short ' 'summary, and opening quotes should be in their own ' 'line). Found:\n{}'.format(alternative.__doc__)) # when python is running in optimized mode (i.e. `-OO`), docstrings are # removed, so we check that a docstring with correct formatting is used # but we allow empty docstrings if alternative.__doc__: if alternative.__doc__.count('\n') < 3: raise AssertionError(doc_error_msg) empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3) if empty1 or empty2 and not summary: raise AssertionError(doc_error_msg) wrapper.__doc__ = dedent(""" {summary} .. deprecated:: {depr_version} {depr_msg} {rest_of_docstring}""").format(summary=summary.strip(), depr_version=version, depr_msg=msg, rest_of_docstring=dedent(doc)) return wrapper
python
def deprecate(name, alternative, version, alt_name=None, klass=None, stacklevel=2, msg=None): """ Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference of alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated. Use {alt_name} instead.' """ alt_name = alt_name or alternative.__name__ klass = klass or FutureWarning warning_msg = msg or '{} is deprecated, use {} instead'.format(name, alt_name) @wraps(alternative) def wrapper(*args, **kwargs): warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) # adding deprecated directive to the docstring msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name) doc_error_msg = ('deprecate needs a correctly formatted docstring in ' 'the target function (should have a one liner short ' 'summary, and opening quotes should be in their own ' 'line). Found:\n{}'.format(alternative.__doc__)) # when python is running in optimized mode (i.e. `-OO`), docstrings are # removed, so we check that a docstring with correct formatting is used # but we allow empty docstrings if alternative.__doc__: if alternative.__doc__.count('\n') < 3: raise AssertionError(doc_error_msg) empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3) if empty1 or empty2 and not summary: raise AssertionError(doc_error_msg) wrapper.__doc__ = dedent(""" {summary} .. deprecated:: {depr_version} {depr_msg} {rest_of_docstring}""").format(summary=summary.strip(), depr_version=version, depr_msg=msg, rest_of_docstring=dedent(doc)) return wrapper
[ "def", "deprecate", "(", "name", ",", "alternative", ",", "version", ",", "alt_name", "=", "None", ",", "klass", "=", "None", ",", "stacklevel", "=", "2", ",", "msg", "=", "None", ")", ":", "alt_name", "=", "alt_name", "or", "alternative", ".", "__name__", "klass", "=", "klass", "or", "FutureWarning", "warning_msg", "=", "msg", "or", "'{} is deprecated, use {} instead'", ".", "format", "(", "name", ",", "alt_name", ")", "@", "wraps", "(", "alternative", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "warnings", ".", "warn", "(", "warning_msg", ",", "klass", ",", "stacklevel", "=", "stacklevel", ")", "return", "alternative", "(", "*", "args", ",", "*", "*", "kwargs", ")", "# adding deprecated directive to the docstring", "msg", "=", "msg", "or", "'Use `{alt_name}` instead.'", ".", "format", "(", "alt_name", "=", "alt_name", ")", "doc_error_msg", "=", "(", "'deprecate needs a correctly formatted docstring in '", "'the target function (should have a one liner short '", "'summary, and opening quotes should be in their own '", "'line). Found:\\n{}'", ".", "format", "(", "alternative", ".", "__doc__", ")", ")", "# when python is running in optimized mode (i.e. `-OO`), docstrings are", "# removed, so we check that a docstring with correct formatting is used", "# but we allow empty docstrings", "if", "alternative", ".", "__doc__", ":", "if", "alternative", ".", "__doc__", ".", "count", "(", "'\\n'", ")", "<", "3", ":", "raise", "AssertionError", "(", "doc_error_msg", ")", "empty1", ",", "summary", ",", "empty2", ",", "doc", "=", "alternative", ".", "__doc__", ".", "split", "(", "'\\n'", ",", "3", ")", "if", "empty1", "or", "empty2", "and", "not", "summary", ":", "raise", "AssertionError", "(", "doc_error_msg", ")", "wrapper", ".", "__doc__", "=", "dedent", "(", "\"\"\"\n {summary}\n\n .. deprecated:: {depr_version}\n {depr_msg}\n\n {rest_of_docstring}\"\"\"", ")", ".", "format", "(", "summary", "=", "summary", ".", "strip", "(", ")", ",", "depr_version", "=", "version", ",", "depr_msg", "=", "msg", ",", "rest_of_docstring", "=", "dedent", "(", "doc", ")", ")", "return", "wrapper" ]
Return a new function that emits a deprecation warning on use. To use this method for a deprecated function, another function `alternative` with the same signature must exist. The deprecated function will emit a deprecation warning, and in the docstring it will contain the deprecation directive with the provided version so it can be detected for future removal. Parameters ---------- name : str Name of function to deprecate. alternative : func Function to use instead. version : str Version of pandas in which the method has been deprecated. alt_name : str, optional Name to use in preference to alternative.__name__. klass : Warning, default FutureWarning stacklevel : int, default 2 msg : str The message to display in the warning. Default is '{name} is deprecated, use {alt_name} instead'.
[ "Return", "a", "new", "function", "that", "emits", "a", "deprecation", "warning", "on", "use", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_decorators.py#L9-L74
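A minimal sketch of how this decorator could be wired up. pandas.util._decorators is a private module at this commit, so the import is an assumption about internal layout rather than a supported API; the docstring on new_sum is shaped to satisfy the formatting check described above.

import warnings
from pandas.util._decorators import deprecate  # private module at this commit

def new_sum(a, b):
    """
    Add two numbers.

    Opening quotes on their own line followed by a one-line
    summary, as the docstring check in `deprecate` requires.
    """
    return a + b

# old_sum forwards to new_sum and warns with FutureWarning on each call.
old_sum = deprecate('old_sum', new_sum, '1.0')

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    assert old_sum(1, 2) == 3
    assert issubclass(caught[-1].category, FutureWarning)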
19,638
pandas-dev/pandas
pandas/util/_decorators.py
deprecate_kwarg
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2): """ Decorator to deprecate a keyword argument of a function. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : str or None Name of preferred argument in function. Use None to raise warning that ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. Examples -------- The following deprecates 'cols', using 'columns' instead >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') ... def f(columns=''): ... print(columns) ... >>> f(columns='should work ok') should work ok >>> f(cols='should raise warning') FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning >>> f(cols='should error', columns="can\'t pass do both") TypeError: Can only specify 'cols' or 'columns', not both >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... >>> f(old='yes') FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! To raise a warning that a keyword will be removed entirely in the future >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) ... def f(cols='', another_param=''): ... print(cols) ... >>> f(cols='should raise warning') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning >>> f(another_param='should not raise warning') should not raise warning >>> f(cols='should raise warning', another_param='') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning """ if mapping is not None and not hasattr(mapping, 'get') and \ not callable(mapping): raise TypeError("mapping from old to new argument values " "must be dict or callable!") def _deprecate_kwarg(func): @wraps(func) def wrapper(*args, **kwargs): old_arg_value = kwargs.pop(old_arg_name, None) if new_arg_name is None and old_arg_value is not None: msg = ( "the '{old_name}' keyword is deprecated and will be " "removed in a future version. " "Please take steps to stop the use of '{old_name}'" ).format(old_name=old_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) kwargs[old_arg_name] = old_arg_value return func(*args, **kwargs) if old_arg_value is not None: if mapping is not None: if hasattr(mapping, 'get'): new_arg_value = mapping.get(old_arg_value, old_arg_value) else: new_arg_value = mapping(old_arg_value) msg = ("the {old_name}={old_val!r} keyword is deprecated, " "use {new_name}={new_val!r} instead" ).format(old_name=old_arg_name, old_val=old_arg_value, new_name=new_arg_name, new_val=new_arg_value) else: new_arg_value = old_arg_value msg = ("the '{old_name}' keyword is deprecated, " "use '{new_name}' instead" ).format(old_name=old_arg_name, new_name=new_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) if kwargs.get(new_arg_name, None) is not None: msg = ("Can only specify '{old_name}' or '{new_name}', " "not both").format(old_name=old_arg_name, new_name=new_arg_name) raise TypeError(msg) else: kwargs[new_arg_name] = new_arg_value return func(*args, **kwargs) return wrapper return _deprecate_kwarg
python
def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2): """ Decorator to deprecate a keyword argument of a function. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : str or None Name of preferred argument in function. Use None to raise warning that ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. Examples -------- The following deprecates 'cols', using 'columns' instead >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') ... def f(columns=''): ... print(columns) ... >>> f(columns='should work ok') should work ok >>> f(cols='should raise warning') FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning >>> f(cols='should error', columns="can\'t pass do both") TypeError: Can only specify 'cols' or 'columns', not both >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... >>> f(old='yes') FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! To raise a warning that a keyword will be removed entirely in the future >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) ... def f(cols='', another_param=''): ... print(cols) ... >>> f(cols='should raise warning') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning >>> f(another_param='should not raise warning') should not raise warning >>> f(cols='should raise warning', another_param='') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version please takes steps to stop use of 'cols' should raise warning """ if mapping is not None and not hasattr(mapping, 'get') and \ not callable(mapping): raise TypeError("mapping from old to new argument values " "must be dict or callable!") def _deprecate_kwarg(func): @wraps(func) def wrapper(*args, **kwargs): old_arg_value = kwargs.pop(old_arg_name, None) if new_arg_name is None and old_arg_value is not None: msg = ( "the '{old_name}' keyword is deprecated and will be " "removed in a future version. " "Please take steps to stop the use of '{old_name}'" ).format(old_name=old_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) kwargs[old_arg_name] = old_arg_value return func(*args, **kwargs) if old_arg_value is not None: if mapping is not None: if hasattr(mapping, 'get'): new_arg_value = mapping.get(old_arg_value, old_arg_value) else: new_arg_value = mapping(old_arg_value) msg = ("the {old_name}={old_val!r} keyword is deprecated, " "use {new_name}={new_val!r} instead" ).format(old_name=old_arg_name, old_val=old_arg_value, new_name=new_arg_name, new_val=new_arg_value) else: new_arg_value = old_arg_value msg = ("the '{old_name}' keyword is deprecated, " "use '{new_name}' instead" ).format(old_name=old_arg_name, new_name=new_arg_name) warnings.warn(msg, FutureWarning, stacklevel=stacklevel) if kwargs.get(new_arg_name, None) is not None: msg = ("Can only specify '{old_name}' or '{new_name}', " "not both").format(old_name=old_arg_name, new_name=new_arg_name) raise TypeError(msg) else: kwargs[new_arg_name] = new_arg_value return func(*args, **kwargs) return wrapper return _deprecate_kwarg
[ "def", "deprecate_kwarg", "(", "old_arg_name", ",", "new_arg_name", ",", "mapping", "=", "None", ",", "stacklevel", "=", "2", ")", ":", "if", "mapping", "is", "not", "None", "and", "not", "hasattr", "(", "mapping", ",", "'get'", ")", "and", "not", "callable", "(", "mapping", ")", ":", "raise", "TypeError", "(", "\"mapping from old to new argument values \"", "\"must be dict or callable!\"", ")", "def", "_deprecate_kwarg", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "old_arg_value", "=", "kwargs", ".", "pop", "(", "old_arg_name", ",", "None", ")", "if", "new_arg_name", "is", "None", "and", "old_arg_value", "is", "not", "None", ":", "msg", "=", "(", "\"the '{old_name}' keyword is deprecated and will be \"", "\"removed in a future version. \"", "\"Please take steps to stop the use of '{old_name}'\"", ")", ".", "format", "(", "old_name", "=", "old_arg_name", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "stacklevel", ")", "kwargs", "[", "old_arg_name", "]", "=", "old_arg_value", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "old_arg_value", "is", "not", "None", ":", "if", "mapping", "is", "not", "None", ":", "if", "hasattr", "(", "mapping", ",", "'get'", ")", ":", "new_arg_value", "=", "mapping", ".", "get", "(", "old_arg_value", ",", "old_arg_value", ")", "else", ":", "new_arg_value", "=", "mapping", "(", "old_arg_value", ")", "msg", "=", "(", "\"the {old_name}={old_val!r} keyword is deprecated, \"", "\"use {new_name}={new_val!r} instead\"", ")", ".", "format", "(", "old_name", "=", "old_arg_name", ",", "old_val", "=", "old_arg_value", ",", "new_name", "=", "new_arg_name", ",", "new_val", "=", "new_arg_value", ")", "else", ":", "new_arg_value", "=", "old_arg_value", "msg", "=", "(", "\"the '{old_name}' keyword is deprecated, \"", "\"use '{new_name}' instead\"", ")", ".", "format", "(", "old_name", "=", "old_arg_name", ",", "new_name", "=", "new_arg_name", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "stacklevel", ")", "if", "kwargs", ".", "get", "(", "new_arg_name", ",", "None", ")", "is", "not", "None", ":", "msg", "=", "(", "\"Can only specify '{old_name}' or '{new_name}', \"", "\"not both\"", ")", ".", "format", "(", "old_name", "=", "old_arg_name", ",", "new_name", "=", "new_arg_name", ")", "raise", "TypeError", "(", "msg", ")", "else", ":", "kwargs", "[", "new_arg_name", "]", "=", "new_arg_value", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "_deprecate_kwarg" ]
Decorator to deprecate a keyword argument of a function. Parameters ---------- old_arg_name : str Name of argument in function to deprecate new_arg_name : str or None Name of preferred argument in function. Use None to raise warning that ``old_arg_name`` keyword is deprecated. mapping : dict or callable If mapping is present, use it to translate old arguments to new arguments. A callable must do its own value checking; values not found in a dict will be forwarded unchanged. Examples -------- The following deprecates 'cols', using 'columns' instead >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') ... def f(columns=''): ... print(columns) ... >>> f(columns='should work ok') should work ok >>> f(cols='should raise warning') FutureWarning: cols is deprecated, use columns instead warnings.warn(msg, FutureWarning) should raise warning >>> f(cols='should error', columns="can't pass both") TypeError: Can only specify 'cols' or 'columns', not both >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) ... def f(new=False): ... print('yes!' if new else 'no!') ... >>> f(old='yes') FutureWarning: old='yes' is deprecated, use new=True instead warnings.warn(msg, FutureWarning) yes! To raise a warning that a keyword will be removed entirely in the future >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) ... def f(cols='', another_param=''): ... print(cols) ... >>> f(cols='should raise warning') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'cols' should raise warning >>> f(another_param='should not raise warning') should not raise warning >>> f(cols='should raise warning', another_param='') FutureWarning: the 'cols' keyword is deprecated and will be removed in a future version. Please take steps to stop the use of 'cols' should raise warning
[ "Decorator", "to", "deprecate", "a", "keyword", "argument", "of", "a", "function", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_decorators.py#L77-L190
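Assuming the same private pandas.util._decorators import as above, a sketch of the keyword-renaming path and the both-keywords error:

import warnings
from pandas.util._decorators import deprecate_kwarg  # private module

@deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
def pick(columns=None):
    return columns

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    # The old keyword is remapped onto the new one, with a FutureWarning.
    assert pick(cols=['a', 'b']) == ['a', 'b']
    assert issubclass(caught[-1].category, FutureWarning)

    # Supplying both the old and the new keyword is rejected.
    try:
        pick(cols=['a'], columns=['b'])
    except TypeError as err:
        print(err)  # Can only specify 'cols' or 'columns', not both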
19,639
pandas-dev/pandas
pandas/util/_decorators.py
make_signature
def make_signature(func): """ Returns a tuple containing the parameter list with defaults and parameter list. Examples -------- >>> def f(a, b, c=2): ... return a * b * c >>> print(make_signature(f)) (['a', 'b', 'c=2'], ['a', 'b', 'c']) """ spec = inspect.getfullargspec(func) if spec.defaults is None: n_wo_defaults = len(spec.args) defaults = ('',) * n_wo_defaults else: n_wo_defaults = len(spec.args) - len(spec.defaults) defaults = ('',) * n_wo_defaults + tuple(spec.defaults) args = [] for var, default in zip(spec.args, defaults): args.append(var if default == '' else var + '=' + repr(default)) if spec.varargs: args.append('*' + spec.varargs) if spec.varkw: args.append('**' + spec.varkw) return args, spec.args
python
def make_signature(func): """ Returns a tuple containing the parameter list with defaults and parameter list. Examples -------- >>> def f(a, b, c=2): ... return a * b * c >>> print(make_signature(f)) (['a', 'b', 'c=2'], ['a', 'b', 'c']) """ spec = inspect.getfullargspec(func) if spec.defaults is None: n_wo_defaults = len(spec.args) defaults = ('',) * n_wo_defaults else: n_wo_defaults = len(spec.args) - len(spec.defaults) defaults = ('',) * n_wo_defaults + tuple(spec.defaults) args = [] for var, default in zip(spec.args, defaults): args.append(var if default == '' else var + '=' + repr(default)) if spec.varargs: args.append('*' + spec.varargs) if spec.varkw: args.append('**' + spec.varkw) return args, spec.args
[ "def", "make_signature", "(", "func", ")", ":", "spec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "if", "spec", ".", "defaults", "is", "None", ":", "n_wo_defaults", "=", "len", "(", "spec", ".", "args", ")", "defaults", "=", "(", "''", ",", ")", "*", "n_wo_defaults", "else", ":", "n_wo_defaults", "=", "len", "(", "spec", ".", "args", ")", "-", "len", "(", "spec", ".", "defaults", ")", "defaults", "=", "(", "''", ",", ")", "*", "n_wo_defaults", "+", "tuple", "(", "spec", ".", "defaults", ")", "args", "=", "[", "]", "for", "var", ",", "default", "in", "zip", "(", "spec", ".", "args", ",", "defaults", ")", ":", "args", ".", "append", "(", "var", "if", "default", "==", "''", "else", "var", "+", "'='", "+", "repr", "(", "default", ")", ")", "if", "spec", ".", "varargs", ":", "args", ".", "append", "(", "'*'", "+", "spec", ".", "varargs", ")", "if", "spec", ".", "varkw", ":", "args", ".", "append", "(", "'**'", "+", "spec", ".", "varkw", ")", "return", "args", ",", "spec", ".", "args" ]
Returns a tuple containing the parameter list with defaults and parameter list. Examples -------- >>> def f(a, b, c=2): ... return a * b * c >>> print(make_signature(f)) (['a', 'b', 'c=2'], ['a', 'b', 'c'])
[ "Returns", "a", "tuple", "containing", "the", "parameter", "list", "with", "defaults", "and", "parameter", "list", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_decorators.py#L324-L351
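make_signature is an internal helper at this commit (the import below is an assumption, not public API); a hypothetical call showing the tuple it returns:

from pandas.util._decorators import make_signature  # private helper

def f(a, b, c=2, *args, **kwargs):
    return a * b * c

# First element: argument list with defaults, varargs and varkw rendered;
# second element: just the plain positional argument names.
print(make_signature(f))
# (['a', 'b', 'c=2', '*args', '**kwargs'], ['a', 'b', 'c'])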
19,640
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex.from_range
def from_range(cls, data, name=None, dtype=None, **kwargs): """ Create RangeIndex from a range object. """ if not isinstance(data, range): raise TypeError( '{0}(...) must be called with object coercible to a ' 'range, {1} was passed'.format(cls.__name__, repr(data))) start, stop, step = data.start, data.stop, data.step return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
python
def from_range(cls, data, name=None, dtype=None, **kwargs): """ Create RangeIndex from a range object. """ if not isinstance(data, range): raise TypeError( '{0}(...) must be called with object coercible to a ' 'range, {1} was passed'.format(cls.__name__, repr(data))) start, stop, step = data.start, data.stop, data.step return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
[ "def", "from_range", "(", "cls", ",", "data", ",", "name", "=", "None", ",", "dtype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "data", ",", "range", ")", ":", "raise", "TypeError", "(", "'{0}(...) must be called with object coercible to a '", "'range, {1} was passed'", ".", "format", "(", "cls", ".", "__name__", ",", "repr", "(", "data", ")", ")", ")", "start", ",", "stop", ",", "step", "=", "data", ".", "start", ",", "data", ".", "stop", ",", "data", ".", "step", "return", "RangeIndex", "(", "start", ",", "stop", ",", "step", ",", "dtype", "=", "dtype", ",", "name", "=", "name", ",", "*", "*", "kwargs", ")" ]
Create RangeIndex from a range object.
[ "Create", "RangeIndex", "from", "a", "range", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L128-L136
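A quick check of the classmethod's contract: a range object is unpacked into start/stop/step, and anything else is rejected.

import pandas as pd

idx = pd.RangeIndex.from_range(range(2, 20, 3), name='codes')
print(idx)        # RangeIndex(start=2, stop=20, step=3, name='codes')
print(list(idx))  # [2, 5, 8, 11, 14, 17]

try:
    pd.RangeIndex.from_range([2, 5, 8])  # a list, not a range object
except TypeError as err:
    print(err)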
19,641
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex.min
def min(self, axis=None, skipna=True): """The minimum value of the RangeIndex""" nv.validate_minmax_axis(axis) return self._minmax('min')
python
def min(self, axis=None, skipna=True): """The minimum value of the RangeIndex""" nv.validate_minmax_axis(axis) return self._minmax('min')
[ "def", "min", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ")", ":", "nv", ".", "validate_minmax_axis", "(", "axis", ")", "return", "self", ".", "_minmax", "(", "'min'", ")" ]
The minimum value of the RangeIndex
[ "The", "minimum", "value", "of", "the", "RangeIndex" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L325-L328
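Since start/stop/step fully determine the values, the minimum can be answered arithmetically without scanning; a sketch on a decreasing index:

import pandas as pd

rng = pd.RangeIndex(start=10, stop=0, step=-3)  # 10, 7, 4, 1
print(rng.min())  # 1 -- derived from start/stop/step, no value scan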
19,642
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex.max
def max(self, axis=None, skipna=True): """The maximum value of the RangeIndex""" nv.validate_minmax_axis(axis) return self._minmax('max')
python
def max(self, axis=None, skipna=True): """The maximum value of the RangeIndex""" nv.validate_minmax_axis(axis) return self._minmax('max')
[ "def", "max", "(", "self", ",", "axis", "=", "None", ",", "skipna", "=", "True", ")", ":", "nv", ".", "validate_minmax_axis", "(", "axis", ")", "return", "self", ".", "_minmax", "(", "'max'", ")" ]
The maximum value of the RangeIndex
[ "The", "maximum", "value", "of", "the", "RangeIndex" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L330-L333
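The mirror-image call on an increasing index:

import pandas as pd

rng = pd.RangeIndex(start=0, stop=10, step=3)  # 0, 3, 6, 9
print(rng.max())  # 9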
19,643
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex.argsort
def argsort(self, *args, **kwargs): """ Returns the indices that would sort the index and its underlying data. Returns ------- argsorted : numpy array See Also -------- numpy.ndarray.argsort """ nv.validate_argsort(args, kwargs) if self._step > 0: return np.arange(len(self)) else: return np.arange(len(self) - 1, -1, -1)
python
def argsort(self, *args, **kwargs): """ Returns the indices that would sort the index and its underlying data. Returns ------- argsorted : numpy array See Also -------- numpy.ndarray.argsort """ nv.validate_argsort(args, kwargs) if self._step > 0: return np.arange(len(self)) else: return np.arange(len(self) - 1, -1, -1)
[ "def", "argsort", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_argsort", "(", "args", ",", "kwargs", ")", "if", "self", ".", "_step", ">", "0", ":", "return", "np", ".", "arange", "(", "len", "(", "self", ")", ")", "else", ":", "return", "np", ".", "arange", "(", "len", "(", "self", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")" ]
Returns the indices that would sort the index and its underlying data. Returns ------- argsorted : numpy array See Also -------- numpy.ndarray.argsort
[ "Returns", "the", "indices", "that", "would", "sort", "the", "index", "and", "its", "underlying", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L335-L353
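For a RangeIndex the sort order is known up front: identity positions for a positive step, reversed positions for a negative one. A sketch:

import numpy as np
import pandas as pd

rng = pd.RangeIndex(start=4, stop=0, step=-1)  # 4, 3, 2, 1
order = rng.argsort()
print(order)                   # [3 2 1 0]
print(np.asarray(rng)[order])  # [1 2 3 4] -- the sorted values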
19,644
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex._min_fitting_element
def _min_fitting_element(self, lower_limit): """Returns the smallest element greater than or equal to the limit""" no_steps = -(-(lower_limit - self._start) // abs(self._step)) return self._start + abs(self._step) * no_steps
python
def _min_fitting_element(self, lower_limit): """Returns the smallest element greater than or equal to the limit""" no_steps = -(-(lower_limit - self._start) // abs(self._step)) return self._start + abs(self._step) * no_steps
[ "def", "_min_fitting_element", "(", "self", ",", "lower_limit", ")", ":", "no_steps", "=", "-", "(", "-", "(", "lower_limit", "-", "self", ".", "_start", ")", "//", "abs", "(", "self", ".", "_step", ")", ")", "return", "self", ".", "_start", "+", "abs", "(", "self", ".", "_step", ")", "*", "no_steps" ]
Returns the smallest element greater than or equal to the limit
[ "Returns", "the", "smallest", "element", "greater", "than", "or", "equal", "to", "the", "limit" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L439-L442
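The -(-a // b) expression here is integer ceiling division. A hypothetical standalone re-derivation of the same arithmetic (not the pandas method itself) makes that easy to verify:

def min_fitting_element(start, step, lower_limit):
    # Smallest element of start, start + |step|, start + 2*|step|, ...
    # that is >= lower_limit; -(-x // y) rounds x / y up for y > 0.
    no_steps = -(-(lower_limit - start) // abs(step))
    return start + abs(step) * no_steps

assert min_fitting_element(0, 3, 7) == 9   # grid 0, 3, 6, 9, ...
assert min_fitting_element(2, 5, 2) == 2   # limit already on the grid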
19,645
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex._max_fitting_element
def _max_fitting_element(self, upper_limit): """Returns the largest element smaller than or equal to the limit""" no_steps = (upper_limit - self._start) // abs(self._step) return self._start + abs(self._step) * no_steps
python
def _max_fitting_element(self, upper_limit): """Returns the largest element smaller than or equal to the limit""" no_steps = (upper_limit - self._start) // abs(self._step) return self._start + abs(self._step) * no_steps
[ "def", "_max_fitting_element", "(", "self", ",", "upper_limit", ")", ":", "no_steps", "=", "(", "upper_limit", "-", "self", ".", "_start", ")", "//", "abs", "(", "self", ".", "_step", ")", "return", "self", ".", "_start", "+", "abs", "(", "self", ".", "_step", ")", "*", "no_steps" ]
Returns the largest element smaller than or equal to the limit
[ "Returns", "the", "largest", "element", "smaller", "than", "or", "equal", "to", "the", "limit" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L444-L447
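The companion helper rounds down with plain floor division; the same kind of standalone check:

def max_fitting_element(start, step, upper_limit):
    # Largest element of start, start + |step|, ... that is <= upper_limit.
    no_steps = (upper_limit - start) // abs(step)
    return start + abs(step) * no_steps

assert max_fitting_element(0, 3, 7) == 6    # grid 0, 3, 6 <= 7
assert max_fitting_element(2, 5, 17) == 17  # limit lands on the grid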
19,646
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex.union
def union(self, other, sort=None): """ Form the union of two Index objects and sorts if possible Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort resulting index. ``sort=None`` returns a mononotically increasing ``RangeIndex`` if possible or a sorted ``Int64Index`` if not. ``sort=False`` always returns an unsorted ``Int64Index`` .. versionadded:: 0.25.0 Returns ------- union : Index """ self._assert_can_do_setop(other) if len(other) == 0 or self.equals(other) or len(self) == 0: return super().union(other, sort=sort) if isinstance(other, RangeIndex) and sort is None: start_s, step_s = self._start, self._step end_s = self._start + self._step * (len(self) - 1) start_o, step_o = other._start, other._step end_o = other._start + other._step * (len(other) - 1) if self._step < 0: start_s, step_s, end_s = end_s, -step_s, start_s if other._step < 0: start_o, step_o, end_o = end_o, -step_o, start_o if len(self) == 1 and len(other) == 1: step_s = step_o = abs(self._start - other._start) elif len(self) == 1: step_s = step_o elif len(other) == 1: step_o = step_s start_r = min(start_s, start_o) end_r = max(end_s, end_o) if step_o == step_s: if ((start_s - start_o) % step_s == 0 and (start_s - end_o) <= step_s and (start_o - end_s) <= step_s): return RangeIndex(start_r, end_r + step_s, step_s) if ((step_s % 2 == 0) and (abs(start_s - start_o) <= step_s / 2) and (abs(end_s - end_o) <= step_s / 2)): return RangeIndex(start_r, end_r + step_s / 2, step_s / 2) elif step_o % step_s == 0: if ((start_o - start_s) % step_s == 0 and (start_o + step_s >= start_s) and (end_o - step_s <= end_s)): return RangeIndex(start_r, end_r + step_s, step_s) elif step_s % step_o == 0: if ((start_s - start_o) % step_o == 0 and (start_s + step_o >= start_o) and (end_s - step_o <= end_o)): return RangeIndex(start_r, end_r + step_o, step_o) return self._int64index.union(other, sort=sort)
python
def union(self, other, sort=None): """ Form the union of two Index objects and sorts if possible Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort resulting index. ``sort=None`` returns a mononotically increasing ``RangeIndex`` if possible or a sorted ``Int64Index`` if not. ``sort=False`` always returns an unsorted ``Int64Index`` .. versionadded:: 0.25.0 Returns ------- union : Index """ self._assert_can_do_setop(other) if len(other) == 0 or self.equals(other) or len(self) == 0: return super().union(other, sort=sort) if isinstance(other, RangeIndex) and sort is None: start_s, step_s = self._start, self._step end_s = self._start + self._step * (len(self) - 1) start_o, step_o = other._start, other._step end_o = other._start + other._step * (len(other) - 1) if self._step < 0: start_s, step_s, end_s = end_s, -step_s, start_s if other._step < 0: start_o, step_o, end_o = end_o, -step_o, start_o if len(self) == 1 and len(other) == 1: step_s = step_o = abs(self._start - other._start) elif len(self) == 1: step_s = step_o elif len(other) == 1: step_o = step_s start_r = min(start_s, start_o) end_r = max(end_s, end_o) if step_o == step_s: if ((start_s - start_o) % step_s == 0 and (start_s - end_o) <= step_s and (start_o - end_s) <= step_s): return RangeIndex(start_r, end_r + step_s, step_s) if ((step_s % 2 == 0) and (abs(start_s - start_o) <= step_s / 2) and (abs(end_s - end_o) <= step_s / 2)): return RangeIndex(start_r, end_r + step_s / 2, step_s / 2) elif step_o % step_s == 0: if ((start_o - start_s) % step_s == 0 and (start_o + step_s >= start_s) and (end_o - step_s <= end_s)): return RangeIndex(start_r, end_r + step_s, step_s) elif step_s % step_o == 0: if ((start_s - start_o) % step_o == 0 and (start_s + step_o >= start_o) and (end_s - step_o <= end_o)): return RangeIndex(start_r, end_r + step_o, step_o) return self._int64index.union(other, sort=sort)
[ "def", "union", "(", "self", ",", "other", ",", "sort", "=", "None", ")", ":", "self", ".", "_assert_can_do_setop", "(", "other", ")", "if", "len", "(", "other", ")", "==", "0", "or", "self", ".", "equals", "(", "other", ")", "or", "len", "(", "self", ")", "==", "0", ":", "return", "super", "(", ")", ".", "union", "(", "other", ",", "sort", "=", "sort", ")", "if", "isinstance", "(", "other", ",", "RangeIndex", ")", "and", "sort", "is", "None", ":", "start_s", ",", "step_s", "=", "self", ".", "_start", ",", "self", ".", "_step", "end_s", "=", "self", ".", "_start", "+", "self", ".", "_step", "*", "(", "len", "(", "self", ")", "-", "1", ")", "start_o", ",", "step_o", "=", "other", ".", "_start", ",", "other", ".", "_step", "end_o", "=", "other", ".", "_start", "+", "other", ".", "_step", "*", "(", "len", "(", "other", ")", "-", "1", ")", "if", "self", ".", "_step", "<", "0", ":", "start_s", ",", "step_s", ",", "end_s", "=", "end_s", ",", "-", "step_s", ",", "start_s", "if", "other", ".", "_step", "<", "0", ":", "start_o", ",", "step_o", ",", "end_o", "=", "end_o", ",", "-", "step_o", ",", "start_o", "if", "len", "(", "self", ")", "==", "1", "and", "len", "(", "other", ")", "==", "1", ":", "step_s", "=", "step_o", "=", "abs", "(", "self", ".", "_start", "-", "other", ".", "_start", ")", "elif", "len", "(", "self", ")", "==", "1", ":", "step_s", "=", "step_o", "elif", "len", "(", "other", ")", "==", "1", ":", "step_o", "=", "step_s", "start_r", "=", "min", "(", "start_s", ",", "start_o", ")", "end_r", "=", "max", "(", "end_s", ",", "end_o", ")", "if", "step_o", "==", "step_s", ":", "if", "(", "(", "start_s", "-", "start_o", ")", "%", "step_s", "==", "0", "and", "(", "start_s", "-", "end_o", ")", "<=", "step_s", "and", "(", "start_o", "-", "end_s", ")", "<=", "step_s", ")", ":", "return", "RangeIndex", "(", "start_r", ",", "end_r", "+", "step_s", ",", "step_s", ")", "if", "(", "(", "step_s", "%", "2", "==", "0", ")", "and", "(", "abs", "(", "start_s", "-", "start_o", ")", "<=", "step_s", "/", "2", ")", "and", "(", "abs", "(", "end_s", "-", "end_o", ")", "<=", "step_s", "/", "2", ")", ")", ":", "return", "RangeIndex", "(", "start_r", ",", "end_r", "+", "step_s", "/", "2", ",", "step_s", "/", "2", ")", "elif", "step_o", "%", "step_s", "==", "0", ":", "if", "(", "(", "start_o", "-", "start_s", ")", "%", "step_s", "==", "0", "and", "(", "start_o", "+", "step_s", ">=", "start_s", ")", "and", "(", "end_o", "-", "step_s", "<=", "end_s", ")", ")", ":", "return", "RangeIndex", "(", "start_r", ",", "end_r", "+", "step_s", ",", "step_s", ")", "elif", "step_s", "%", "step_o", "==", "0", ":", "if", "(", "(", "start_s", "-", "start_o", ")", "%", "step_o", "==", "0", "and", "(", "start_s", "+", "step_o", ">=", "start_o", ")", "and", "(", "end_s", "-", "step_o", "<=", "end_o", ")", ")", ":", "return", "RangeIndex", "(", "start_r", ",", "end_r", "+", "step_o", ",", "step_o", ")", "return", "self", ".", "_int64index", ".", "union", "(", "other", ",", "sort", "=", "sort", ")" ]
Form the union of two Index objects and sorts if possible Parameters ---------- other : Index or array-like sort : False or None, default None Whether to sort the resulting index. ``sort=None`` returns a monotonically increasing ``RangeIndex`` if possible or a sorted ``Int64Index`` if not. ``sort=False`` always returns an unsorted ``Int64Index`` .. versionadded:: 0.25.0 Returns ------- union : Index
[ "Form", "the", "union", "of", "two", "Index", "objects", "and", "sorts", "if", "possible" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L466-L527
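A sketch of the two outcomes described above, with outputs as of this commit: compatible ranges keep the RangeIndex representation, while incompatible steps fall back to Int64Index.

import pandas as pd

left = pd.RangeIndex(start=0, stop=10, step=2)   # 0, 2, 4, 6, 8
right = pd.RangeIndex(start=4, stop=16, step=2)  # 4, 6, ..., 14

# Same step, aligned grids, overlapping spans -> still a RangeIndex.
print(left.union(right))  # RangeIndex(start=0, stop=16, step=2)

# Steps 2 and 3 cannot be expressed as one range -> Int64Index.
print(left.union(pd.RangeIndex(start=1, stop=10, step=3)))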
19,647
pandas-dev/pandas
pandas/core/indexes/range.py
RangeIndex._add_numeric_methods_binary
def _add_numeric_methods_binary(cls): """ add in numeric methods, specialized to RangeIndex """ def _make_evaluate_binop(op, step=False): """ Parameters ---------- op : callable that accepts 2 parms perform the binary op step : callable, optional, default to False op to apply to the step parm if not None if False, use the existing step """ def _evaluate_numeric_binop(self, other): if isinstance(other, (ABCSeries, ABCDataFrame)): return NotImplemented elif isinstance(other, ABCTimedeltaIndex): # Defer to TimedeltaIndex implementation return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)): # GH#19333 is_integer evaluated True on timedelta64, # so we need to catch these explicitly return op(self._int64index, other) elif is_timedelta64_dtype(other): # Must be an np.ndarray; GH#22390 return op(self._int64index, other) other = self._validate_for_numeric_binop(other, op) attrs = self._get_attributes_dict() attrs = self._maybe_update_attributes(attrs) left, right = self, other try: # apply if we have an override if step: with np.errstate(all='ignore'): rstep = step(left._step, right) # we don't have a representable op # so return a base index if not is_integer(rstep) or not rstep: raise ValueError else: rstep = left._step with np.errstate(all='ignore'): rstart = op(left._start, right) rstop = op(left._stop, right) result = RangeIndex(rstart, rstop, rstep, **attrs) # for compat with numpy / Int64Index # even if we can represent as a RangeIndex, return # as a Float64Index if we have float-like descriptors if not all(is_integer(x) for x in [rstart, rstop, rstep]): result = result.astype('float64') return result except (ValueError, TypeError, ZeroDivisionError): # Defer to Int64Index implementation return op(self._int64index, other) # TODO: Do attrs get handled reliably? name = '__{name}__'.format(name=op.__name__) return compat.set_function_name(_evaluate_numeric_binop, name, cls) cls.__add__ = _make_evaluate_binop(operator.add) cls.__radd__ = _make_evaluate_binop(ops.radd) cls.__sub__ = _make_evaluate_binop(operator.sub) cls.__rsub__ = _make_evaluate_binop(ops.rsub) cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul) cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul) cls.__truediv__ = _make_evaluate_binop(operator.truediv, step=operator.truediv) cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv, step=ops.rtruediv)
python
def _add_numeric_methods_binary(cls): """ add in numeric methods, specialized to RangeIndex """ def _make_evaluate_binop(op, step=False): """ Parameters ---------- op : callable that accepts 2 parms perform the binary op step : callable, optional, default to False op to apply to the step parm if not None if False, use the existing step """ def _evaluate_numeric_binop(self, other): if isinstance(other, (ABCSeries, ABCDataFrame)): return NotImplemented elif isinstance(other, ABCTimedeltaIndex): # Defer to TimedeltaIndex implementation return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)): # GH#19333 is_integer evaluated True on timedelta64, # so we need to catch these explicitly return op(self._int64index, other) elif is_timedelta64_dtype(other): # Must be an np.ndarray; GH#22390 return op(self._int64index, other) other = self._validate_for_numeric_binop(other, op) attrs = self._get_attributes_dict() attrs = self._maybe_update_attributes(attrs) left, right = self, other try: # apply if we have an override if step: with np.errstate(all='ignore'): rstep = step(left._step, right) # we don't have a representable op # so return a base index if not is_integer(rstep) or not rstep: raise ValueError else: rstep = left._step with np.errstate(all='ignore'): rstart = op(left._start, right) rstop = op(left._stop, right) result = RangeIndex(rstart, rstop, rstep, **attrs) # for compat with numpy / Int64Index # even if we can represent as a RangeIndex, return # as a Float64Index if we have float-like descriptors if not all(is_integer(x) for x in [rstart, rstop, rstep]): result = result.astype('float64') return result except (ValueError, TypeError, ZeroDivisionError): # Defer to Int64Index implementation return op(self._int64index, other) # TODO: Do attrs get handled reliably? name = '__{name}__'.format(name=op.__name__) return compat.set_function_name(_evaluate_numeric_binop, name, cls) cls.__add__ = _make_evaluate_binop(operator.add) cls.__radd__ = _make_evaluate_binop(ops.radd) cls.__sub__ = _make_evaluate_binop(operator.sub) cls.__rsub__ = _make_evaluate_binop(ops.rsub) cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul) cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul) cls.__truediv__ = _make_evaluate_binop(operator.truediv, step=operator.truediv) cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv, step=ops.rtruediv)
[ "def", "_add_numeric_methods_binary", "(", "cls", ")", ":", "def", "_make_evaluate_binop", "(", "op", ",", "step", "=", "False", ")", ":", "\"\"\"\n Parameters\n ----------\n op : callable that accepts 2 parms\n perform the binary op\n step : callable, optional, default to False\n op to apply to the step parm if not None\n if False, use the existing step\n \"\"\"", "def", "_evaluate_numeric_binop", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "(", "ABCSeries", ",", "ABCDataFrame", ")", ")", ":", "return", "NotImplemented", "elif", "isinstance", "(", "other", ",", "ABCTimedeltaIndex", ")", ":", "# Defer to TimedeltaIndex implementation", "return", "NotImplemented", "elif", "isinstance", "(", "other", ",", "(", "timedelta", ",", "np", ".", "timedelta64", ")", ")", ":", "# GH#19333 is_integer evaluated True on timedelta64,", "# so we need to catch these explicitly", "return", "op", "(", "self", ".", "_int64index", ",", "other", ")", "elif", "is_timedelta64_dtype", "(", "other", ")", ":", "# Must be an np.ndarray; GH#22390", "return", "op", "(", "self", ".", "_int64index", ",", "other", ")", "other", "=", "self", ".", "_validate_for_numeric_binop", "(", "other", ",", "op", ")", "attrs", "=", "self", ".", "_get_attributes_dict", "(", ")", "attrs", "=", "self", ".", "_maybe_update_attributes", "(", "attrs", ")", "left", ",", "right", "=", "self", ",", "other", "try", ":", "# apply if we have an override", "if", "step", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "rstep", "=", "step", "(", "left", ".", "_step", ",", "right", ")", "# we don't have a representable op", "# so return a base index", "if", "not", "is_integer", "(", "rstep", ")", "or", "not", "rstep", ":", "raise", "ValueError", "else", ":", "rstep", "=", "left", ".", "_step", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "rstart", "=", "op", "(", "left", ".", "_start", ",", "right", ")", "rstop", "=", "op", "(", "left", ".", "_stop", ",", "right", ")", "result", "=", "RangeIndex", "(", "rstart", ",", "rstop", ",", "rstep", ",", "*", "*", "attrs", ")", "# for compat with numpy / Int64Index", "# even if we can represent as a RangeIndex, return", "# as a Float64Index if we have float-like descriptors", "if", "not", "all", "(", "is_integer", "(", "x", ")", "for", "x", "in", "[", "rstart", ",", "rstop", ",", "rstep", "]", ")", ":", "result", "=", "result", ".", "astype", "(", "'float64'", ")", "return", "result", "except", "(", "ValueError", ",", "TypeError", ",", "ZeroDivisionError", ")", ":", "# Defer to Int64Index implementation", "return", "op", "(", "self", ".", "_int64index", ",", "other", ")", "# TODO: Do attrs get handled reliably?", "name", "=", "'__{name}__'", ".", "format", "(", "name", "=", "op", ".", "__name__", ")", "return", "compat", ".", "set_function_name", "(", "_evaluate_numeric_binop", ",", "name", ",", "cls", ")", "cls", ".", "__add__", "=", "_make_evaluate_binop", "(", "operator", ".", "add", ")", "cls", ".", "__radd__", "=", "_make_evaluate_binop", "(", "ops", ".", "radd", ")", "cls", ".", "__sub__", "=", "_make_evaluate_binop", "(", "operator", ".", "sub", ")", "cls", ".", "__rsub__", "=", "_make_evaluate_binop", "(", "ops", ".", "rsub", ")", "cls", ".", "__mul__", "=", "_make_evaluate_binop", "(", "operator", ".", "mul", ",", "step", "=", "operator", ".", "mul", ")", "cls", ".", "__rmul__", "=", "_make_evaluate_binop", "(", "ops", ".", "rmul", ",", "step", "=", "ops", ".", "rmul", ")", "cls", ".", "__truediv__", "=", 
"_make_evaluate_binop", "(", "operator", ".", "truediv", ",", "step", "=", "operator", ".", "truediv", ")", "cls", ".", "__rtruediv__", "=", "_make_evaluate_binop", "(", "ops", ".", "rtruediv", ",", "step", "=", "ops", ".", "rtruediv", ")" ]
add in numeric methods, specialized to RangeIndex
[ "add", "in", "numeric", "methods", "specialized", "to", "RangeIndex" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L644-L727
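A quick, hedged sketch of the behavior this record implements: arithmetic on a RangeIndex stays a lazy RangeIndex while start/stop/step remain representable as integers, and otherwise falls back to the Int64Index path via the except branch. The printed reprs below are approximate and assume a pandas build of this vintage (0.24.x).

import pandas as pd

idx = pd.RangeIndex(start=0, stop=10, step=2)   # 0, 2, 4, 6, 8

# __add__ shifts start/stop; no step callable is passed, so step is kept.
print(idx + 3)    # RangeIndex(start=3, stop=13, step=2)

# __mul__ passes step=operator.mul, so the step is scaled as well.
print(idx * 2)    # RangeIndex(start=0, stop=20, step=4)

# __truediv__ yields a non-integer step here (2 / 2 -> 1.0), so the
# ValueError path defers to Int64Index and the result comes back float.
print(idx / 2)    # Float64Index([0.0, 1.0, 2.0, 3.0, 4.0], dtype='float64')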
19,648
pandas-dev/pandas
pandas/io/formats/printing.py
adjoin
def adjoin(space, *lists, **kwargs): """ Glues together two sets of strings using the amount of space requested. The idea is to prettify. Parameters ---------- space : int number of spaces for padding lists : str list of str which are being joined strlen : callable function used to calculate the length of each str. Needed for unicode handling. justfunc : callable function used to justify str. Needed for unicode handling. """ strlen = kwargs.pop('strlen', len) justfunc = kwargs.pop('justfunc', justify) out_lines = [] newLists = [] lengths = [max(map(strlen, x)) + space for x in lists[:-1]] # not the last one lengths.append(max(map(len, lists[-1]))) maxLen = max(map(len, lists)) for i, lst in enumerate(lists): nl = justfunc(lst, lengths[i], mode='left') nl.extend([' ' * lengths[i]] * (maxLen - len(lst))) newLists.append(nl) toJoin = zip(*newLists) for lines in toJoin: out_lines.append(_join_unicode(lines)) return _join_unicode(out_lines, sep='\n')
python
def adjoin(space, *lists, **kwargs): """ Glues together two sets of strings using the amount of space requested. The idea is to prettify. Parameters ---------- space : int number of spaces for padding lists : str list of str which are being joined strlen : callable function used to calculate the length of each str. Needed for unicode handling. justfunc : callable function used to justify str. Needed for unicode handling. """ strlen = kwargs.pop('strlen', len) justfunc = kwargs.pop('justfunc', justify) out_lines = [] newLists = [] lengths = [max(map(strlen, x)) + space for x in lists[:-1]] # not the last one lengths.append(max(map(len, lists[-1]))) maxLen = max(map(len, lists)) for i, lst in enumerate(lists): nl = justfunc(lst, lengths[i], mode='left') nl.extend([' ' * lengths[i]] * (maxLen - len(lst))) newLists.append(nl) toJoin = zip(*newLists) for lines in toJoin: out_lines.append(_join_unicode(lines)) return _join_unicode(out_lines, sep='\n')
[ "def", "adjoin", "(", "space", ",", "*", "lists", ",", "*", "*", "kwargs", ")", ":", "strlen", "=", "kwargs", ".", "pop", "(", "'strlen'", ",", "len", ")", "justfunc", "=", "kwargs", ".", "pop", "(", "'justfunc'", ",", "justify", ")", "out_lines", "=", "[", "]", "newLists", "=", "[", "]", "lengths", "=", "[", "max", "(", "map", "(", "strlen", ",", "x", ")", ")", "+", "space", "for", "x", "in", "lists", "[", ":", "-", "1", "]", "]", "# not the last one", "lengths", ".", "append", "(", "max", "(", "map", "(", "len", ",", "lists", "[", "-", "1", "]", ")", ")", ")", "maxLen", "=", "max", "(", "map", "(", "len", ",", "lists", ")", ")", "for", "i", ",", "lst", "in", "enumerate", "(", "lists", ")", ":", "nl", "=", "justfunc", "(", "lst", ",", "lengths", "[", "i", "]", ",", "mode", "=", "'left'", ")", "nl", ".", "extend", "(", "[", "' '", "*", "lengths", "[", "i", "]", "]", "*", "(", "maxLen", "-", "len", "(", "lst", ")", ")", ")", "newLists", ".", "append", "(", "nl", ")", "toJoin", "=", "zip", "(", "*", "newLists", ")", "for", "lines", "in", "toJoin", ":", "out_lines", ".", "append", "(", "_join_unicode", "(", "lines", ")", ")", "return", "_join_unicode", "(", "out_lines", ",", "sep", "=", "'\\n'", ")" ]
Glues together two sets of strings using the amount of space requested. The idea is to prettify. Parameters ---------- space : int number of spaces for padding lists : str list of str which are being joined strlen : callable function used to calculate the length of each str. Needed for unicode handling. justfunc : callable function used to justify str. Needed for unicode handling.
[ "Glues", "together", "two", "sets", "of", "strings", "using", "the", "amount", "of", "space", "requested", ".", "The", "idea", "is", "to", "prettify", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L12-L44
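As a usage sketch for the record above: adjoin is an internal helper (imported here from pandas.io.formats.printing, the path in this version; it may move), gluing per-column string lists into aligned rows. The output shown in comments is approximate.

from pandas.io.formats.printing import adjoin  # private helper; location may change

# Each positional list is one column; `space` pads every column but the last.
print(adjoin(2, ['a', 'bb', 'ccc'], ['dd', 'e', 'f']))
# a    dd
# bb   e
# ccc  f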
19,649
pandas-dev/pandas
pandas/io/gbq.py
read_gbq
def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False, auth_local_webserver=False, dialect=None, location=None, configuration=None, credentials=None, use_bqstorage_api=None, private_key=None, verbose=None): """ Load data from Google BigQuery. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- query : str SQL-like query to return data values. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. index_col : str, optional Name of result column to use for index in results DataFrame. col_order : list(str), optional List of BigQuery column names in the desired order for results DataFrame. reauth : boolean, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. auth_local_webserver : boolean, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. dialect : str, default 'legacy' Note: The default value is changing to 'standard' in a future version. SQL syntax dialect to use. Value can be one of: ``'legacy'`` Use BigQuery's legacy SQL dialect. For more information see `BigQuery Legacy SQL Reference <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__. ``'standard'`` Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery Standard SQL Reference <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__. .. versionchanged:: 0.24.0 location : str, optional Location where the query job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of any datasets used in the query. *New in version 0.5.0 of pandas-gbq*. configuration : dict, optional Query config parameters for job processing. For example: configuration = {'query': {'useQueryCache': False}} For more information see `BigQuery REST API Reference <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. .. versionadded:: 0.24.0 use_bqstorage_api : bool, default False Use the `BigQuery Storage API <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to download query results quickly, but at an increased cost. To use this API, first `enable it in the Cloud Console <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__. 
You must also have the `bigquery.readsessions.create <https://cloud.google.com/bigquery/docs/access-control#roles>`__ permission on the project you are billing queries to. This feature requires version 0.10.0 or later of the ``pandas-gbq`` package. It also requires the ``google-cloud-bigquery-storage`` and ``fastavro`` packages. .. versionadded:: 0.25.0 private_key : str, deprecated Deprecated in pandas-gbq version 0.8.0. Use the ``credentials`` parameter and :func:`google.oauth2.service_account.Credentials.from_service_account_info` or :func:`google.oauth2.service_account.Credentials.from_service_account_file` instead. Service account private key in JSON format. Can be file path or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). verbose : None, deprecated Deprecated in pandas-gbq version 0.4.0. Use the `logging module to adjust verbosity instead <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. Returns ------- df: DataFrame DataFrame representing results of query. See Also -------- pandas_gbq.read_gbq : This function in the pandas-gbq library. DataFrame.to_gbq : Write a DataFrame to Google BigQuery. """ pandas_gbq = _try_import() kwargs = {} # START: new kwargs. Don't populate unless explicitly set. if use_bqstorage_api is not None: kwargs["use_bqstorage_api"] = use_bqstorage_api # END: new kwargs # START: deprecated kwargs. Don't populate unless explicitly set. if verbose is not None: kwargs["verbose"] = verbose if private_key is not None: kwargs["private_key"] = private_key # END: deprecated kwargs return pandas_gbq.read_gbq( query, project_id=project_id, index_col=index_col, col_order=col_order, reauth=reauth, auth_local_webserver=auth_local_webserver, dialect=dialect, location=location, configuration=configuration, credentials=credentials, **kwargs)
python
def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False, auth_local_webserver=False, dialect=None, location=None, configuration=None, credentials=None, use_bqstorage_api=None, private_key=None, verbose=None): """ Load data from Google BigQuery. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- query : str SQL-like query to return data values. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. index_col : str, optional Name of result column to use for index in results DataFrame. col_order : list(str), optional List of BigQuery column names in the desired order for results DataFrame. reauth : boolean, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. auth_local_webserver : boolean, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. dialect : str, default 'legacy' Note: The default value is changing to 'standard' in a future version. SQL syntax dialect to use. Value can be one of: ``'legacy'`` Use BigQuery's legacy SQL dialect. For more information see `BigQuery Legacy SQL Reference <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__. ``'standard'`` Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery Standard SQL Reference <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__. .. versionchanged:: 0.24.0 location : str, optional Location where the query job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of any datasets used in the query. *New in version 0.5.0 of pandas-gbq*. configuration : dict, optional Query config parameters for job processing. For example: configuration = {'query': {'useQueryCache': False}} For more information see `BigQuery REST API Reference <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. .. versionadded:: 0.24.0 use_bqstorage_api : bool, default False Use the `BigQuery Storage API <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to download query results quickly, but at an increased cost. To use this API, first `enable it in the Cloud Console <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__. 
You must also have the `bigquery.readsessions.create <https://cloud.google.com/bigquery/docs/access-control#roles>`__ permission on the project you are billing queries to. This feature requires version 0.10.0 or later of the ``pandas-gbq`` package. It also requires the ``google-cloud-bigquery-storage`` and ``fastavro`` packages. .. versionadded:: 0.25.0 private_key : str, deprecated Deprecated in pandas-gbq version 0.8.0. Use the ``credentials`` parameter and :func:`google.oauth2.service_account.Credentials.from_service_account_info` or :func:`google.oauth2.service_account.Credentials.from_service_account_file` instead. Service account private key in JSON format. Can be file path or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). verbose : None, deprecated Deprecated in pandas-gbq version 0.4.0. Use the `logging module to adjust verbosity instead <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. Returns ------- df: DataFrame DataFrame representing results of query. See Also -------- pandas_gbq.read_gbq : This function in the pandas-gbq library. DataFrame.to_gbq : Write a DataFrame to Google BigQuery. """ pandas_gbq = _try_import() kwargs = {} # START: new kwargs. Don't populate unless explicitly set. if use_bqstorage_api is not None: kwargs["use_bqstorage_api"] = use_bqstorage_api # END: new kwargs # START: deprecated kwargs. Don't populate unless explicitly set. if verbose is not None: kwargs["verbose"] = verbose if private_key is not None: kwargs["private_key"] = private_key # END: deprecated kwargs return pandas_gbq.read_gbq( query, project_id=project_id, index_col=index_col, col_order=col_order, reauth=reauth, auth_local_webserver=auth_local_webserver, dialect=dialect, location=location, configuration=configuration, credentials=credentials, **kwargs)
[ "def", "read_gbq", "(", "query", ",", "project_id", "=", "None", ",", "index_col", "=", "None", ",", "col_order", "=", "None", ",", "reauth", "=", "False", ",", "auth_local_webserver", "=", "False", ",", "dialect", "=", "None", ",", "location", "=", "None", ",", "configuration", "=", "None", ",", "credentials", "=", "None", ",", "use_bqstorage_api", "=", "None", ",", "private_key", "=", "None", ",", "verbose", "=", "None", ")", ":", "pandas_gbq", "=", "_try_import", "(", ")", "kwargs", "=", "{", "}", "# START: new kwargs. Don't populate unless explicitly set.", "if", "use_bqstorage_api", "is", "not", "None", ":", "kwargs", "[", "\"use_bqstorage_api\"", "]", "=", "use_bqstorage_api", "# END: new kwargs", "# START: deprecated kwargs. Don't populate unless explicitly set.", "if", "verbose", "is", "not", "None", ":", "kwargs", "[", "\"verbose\"", "]", "=", "verbose", "if", "private_key", "is", "not", "None", ":", "kwargs", "[", "\"private_key\"", "]", "=", "private_key", "# END: deprecated kwargs", "return", "pandas_gbq", ".", "read_gbq", "(", "query", ",", "project_id", "=", "project_id", ",", "index_col", "=", "index_col", ",", "col_order", "=", "col_order", ",", "reauth", "=", "reauth", ",", "auth_local_webserver", "=", "auth_local_webserver", ",", "dialect", "=", "dialect", ",", "location", "=", "location", ",", "configuration", "=", "configuration", ",", "credentials", "=", "credentials", ",", "*", "*", "kwargs", ")" ]
Load data from Google BigQuery. This function requires the `pandas-gbq package <https://pandas-gbq.readthedocs.io>`__. See the `How to authenticate with Google BigQuery <https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__ guide for authentication instructions. Parameters ---------- query : str SQL-like query to return data values. project_id : str, optional Google BigQuery Account project ID. Optional when available from the environment. index_col : str, optional Name of result column to use for index in results DataFrame. col_order : list(str), optional List of BigQuery column names in the desired order for results DataFrame. reauth : boolean, default False Force Google BigQuery to re-authenticate the user. This is useful if multiple accounts are used. auth_local_webserver : boolean, default False Use the `local webserver flow`_ instead of the `console flow`_ when getting user credentials. .. _local webserver flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server .. _console flow: http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console *New in version 0.2.0 of pandas-gbq*. dialect : str, default 'legacy' Note: The default value is changing to 'standard' in a future version. SQL syntax dialect to use. Value can be one of: ``'legacy'`` Use BigQuery's legacy SQL dialect. For more information see `BigQuery Legacy SQL Reference <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__. ``'standard'`` Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery Standard SQL Reference <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__. .. versionchanged:: 0.24.0 location : str, optional Location where the query job should run. See the `BigQuery locations documentation <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a list of available locations. The location must match that of any datasets used in the query. *New in version 0.5.0 of pandas-gbq*. configuration : dict, optional Query config parameters for job processing. For example: configuration = {'query': {'useQueryCache': False}} For more information see `BigQuery REST API Reference <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__. credentials : google.auth.credentials.Credentials, optional Credentials for accessing Google APIs. Use this parameter to override default credentials, such as to use Compute Engine :class:`google.auth.compute_engine.Credentials` or Service Account :class:`google.oauth2.service_account.Credentials` directly. *New in version 0.8.0 of pandas-gbq*. .. versionadded:: 0.24.0 use_bqstorage_api : bool, default False Use the `BigQuery Storage API <https://cloud.google.com/bigquery/docs/reference/storage/>`__ to download query results quickly, but at an increased cost. To use this API, first `enable it in the Cloud Console <https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__. You must also have the `bigquery.readsessions.create <https://cloud.google.com/bigquery/docs/access-control#roles>`__ permission on the project you are billing queries to. This feature requires version 0.10.0 or later of the ``pandas-gbq`` package. It also requires the ``google-cloud-bigquery-storage`` and ``fastavro`` packages. ..
versionadded:: 0.25.0 private_key : str, deprecated Deprecated in pandas-gbq version 0.8.0. Use the ``credentials`` parameter and :func:`google.oauth2.service_account.Credentials.from_service_account_info` or :func:`google.oauth2.service_account.Credentials.from_service_account_file` instead. Service account private key in JSON format. Can be file path or string contents. This is useful for remote server authentication (eg. Jupyter/IPython notebook on remote host). verbose : None, deprecated Deprecated in pandas-gbq version 0.4.0. Use the `logging module to adjust verbosity instead <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__. Returns ------- df: DataFrame DataFrame representing results of query. See Also -------- pandas_gbq.read_gbq : This function in the pandas-gbq library. DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
[ "Load", "data", "from", "Google", "BigQuery", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/gbq.py#L24-L167
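A hedged usage sketch for read_gbq: the project id below is a placeholder, credentials are assumed to come from the environment, the query targets a well-known BigQuery public dataset, and pandas-gbq must be installed for this to run at all.

import pandas as pd

sql = """
SELECT name, SUM(number) AS total
FROM `bigquery-public-data.usa_names.usa_1910_2013`
GROUP BY name
ORDER BY total DESC
LIMIT 10
"""

# 'my-billing-project' is a placeholder; dialect='standard' avoids the
# legacy-SQL default this version still carries.
df = pd.read_gbq(sql, project_id='my-billing-project', dialect='standard')
print(df.head())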
19,650
pandas-dev/pandas
pandas/plotting/_misc.py
andrews_curves
def andrews_curves(frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds): """ Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve. Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0) class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. kwds : keywords Options to pass to matplotlib plotting method Returns ------- :class:`matplotlib.axes.Axes` """ from math import sqrt, pi import matplotlib.pyplot as plt def function(amplitudes): def f(t): x1 = amplitudes[0] result = x1 / sqrt(2.0) # Take the rest of the coefficients and resize them # appropriately. Take a copy of amplitudes as otherwise numpy # deletes the element from amplitudes itself. coeffs = np.delete(np.copy(amplitudes), 0) coeffs.resize(int((coeffs.size + 1) / 2), 2) # Generate the harmonics and arguments for the sin and cos # functions. harmonics = np.arange(0, coeffs.shape[0]) + 1 trig_args = np.outer(harmonics, t) result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) + coeffs[:, 1, np.newaxis] * np.cos(trig_args), axis=0) return result return f n = len(frame) class_col = frame[class_column] classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-pi, pi, samples) used_legends = set() color_values = _get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) colors = dict(zip(classes, color_values)) if ax is None: ax = plt.gca(xlim=(-pi, pi)) for i in range(n): row = df.iloc[i].values f = function(row) y = f(t) kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(t, y, color=colors[kls], label=label, **kwds) else: ax.plot(t, y, color=colors[kls], **kwds) ax.legend(loc='upper right') ax.grid() return ax
python
def andrews_curves(frame, class_column, ax=None, samples=200, color=None, colormap=None, **kwds): """ Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve. Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0) class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. kwds : keywords Options to pass to matplotlib plotting method Returns ------- :class:`matplotlib.axes.Axes` """ from math import sqrt, pi import matplotlib.pyplot as plt def function(amplitudes): def f(t): x1 = amplitudes[0] result = x1 / sqrt(2.0) # Take the rest of the coefficients and resize them # appropriately. Take a copy of amplitudes as otherwise numpy # deletes the element from amplitudes itself. coeffs = np.delete(np.copy(amplitudes), 0) coeffs.resize(int((coeffs.size + 1) / 2), 2) # Generate the harmonics and arguments for the sin and cos # functions. harmonics = np.arange(0, coeffs.shape[0]) + 1 trig_args = np.outer(harmonics, t) result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) + coeffs[:, 1, np.newaxis] * np.cos(trig_args), axis=0) return result return f n = len(frame) class_col = frame[class_column] classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-pi, pi, samples) used_legends = set() color_values = _get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) colors = dict(zip(classes, color_values)) if ax is None: ax = plt.gca(xlim=(-pi, pi)) for i in range(n): row = df.iloc[i].values f = function(row) y = f(t) kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(t, y, color=colors[kls], label=label, **kwds) else: ax.plot(t, y, color=colors[kls], **kwds) ax.legend(loc='upper right') ax.grid() return ax
[ "def", "andrews_curves", "(", "frame", ",", "class_column", ",", "ax", "=", "None", ",", "samples", "=", "200", ",", "color", "=", "None", ",", "colormap", "=", "None", ",", "*", "*", "kwds", ")", ":", "from", "math", "import", "sqrt", ",", "pi", "import", "matplotlib", ".", "pyplot", "as", "plt", "def", "function", "(", "amplitudes", ")", ":", "def", "f", "(", "t", ")", ":", "x1", "=", "amplitudes", "[", "0", "]", "result", "=", "x1", "/", "sqrt", "(", "2.0", ")", "# Take the rest of the coefficients and resize them", "# appropriately. Take a copy of amplitudes as otherwise numpy", "# deletes the element from amplitudes itself.", "coeffs", "=", "np", ".", "delete", "(", "np", ".", "copy", "(", "amplitudes", ")", ",", "0", ")", "coeffs", ".", "resize", "(", "int", "(", "(", "coeffs", ".", "size", "+", "1", ")", "/", "2", ")", ",", "2", ")", "# Generate the harmonics and arguments for the sin and cos", "# functions.", "harmonics", "=", "np", ".", "arange", "(", "0", ",", "coeffs", ".", "shape", "[", "0", "]", ")", "+", "1", "trig_args", "=", "np", ".", "outer", "(", "harmonics", ",", "t", ")", "result", "+=", "np", ".", "sum", "(", "coeffs", "[", ":", ",", "0", ",", "np", ".", "newaxis", "]", "*", "np", ".", "sin", "(", "trig_args", ")", "+", "coeffs", "[", ":", ",", "1", ",", "np", ".", "newaxis", "]", "*", "np", ".", "cos", "(", "trig_args", ")", ",", "axis", "=", "0", ")", "return", "result", "return", "f", "n", "=", "len", "(", "frame", ")", "class_col", "=", "frame", "[", "class_column", "]", "classes", "=", "frame", "[", "class_column", "]", ".", "drop_duplicates", "(", ")", "df", "=", "frame", ".", "drop", "(", "class_column", ",", "axis", "=", "1", ")", "t", "=", "np", ".", "linspace", "(", "-", "pi", ",", "pi", ",", "samples", ")", "used_legends", "=", "set", "(", ")", "color_values", "=", "_get_standard_colors", "(", "num_colors", "=", "len", "(", "classes", ")", ",", "colormap", "=", "colormap", ",", "color_type", "=", "'random'", ",", "color", "=", "color", ")", "colors", "=", "dict", "(", "zip", "(", "classes", ",", "color_values", ")", ")", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", "xlim", "=", "(", "-", "pi", ",", "pi", ")", ")", "for", "i", "in", "range", "(", "n", ")", ":", "row", "=", "df", ".", "iloc", "[", "i", "]", ".", "values", "f", "=", "function", "(", "row", ")", "y", "=", "f", "(", "t", ")", "kls", "=", "class_col", ".", "iat", "[", "i", "]", "label", "=", "pprint_thing", "(", "kls", ")", "if", "label", "not", "in", "used_legends", ":", "used_legends", ".", "add", "(", "label", ")", "ax", ".", "plot", "(", "t", ",", "y", ",", "color", "=", "colors", "[", "kls", "]", ",", "label", "=", "label", ",", "*", "*", "kwds", ")", "else", ":", "ax", ".", "plot", "(", "t", ",", "y", ",", "color", "=", "colors", "[", "kls", "]", ",", "*", "*", "kwds", ")", "ax", ".", "legend", "(", "loc", "=", "'upper right'", ")", "ax", ".", "grid", "(", ")", "return", "ax" ]
Generate a matplotlib plot of Andrews curves, for visualising clusters of multivariate data. Andrews curves have the functional form: f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ... Where x coefficients correspond to the values of each dimension and t is linearly spaced between -pi and +pi. Each row of frame then corresponds to a single curve. Parameters ---------- frame : DataFrame Data to be plotted, preferably normalized to (0.0, 1.0) class_column : Name of the column containing class names ax : matplotlib axes object, default None samples : Number of points to plot in each curve color : list or tuple, optional Colors to use for the different classes colormap : str or matplotlib colormap object, default None Colormap to select colors from. If string, load colormap with that name from matplotlib. kwds : keywords Options to pass to matplotlib plotting method Returns ------- :class:`matplotlib.axes.Axes`
[ "Generate", "a", "matplotlib", "plot", "of", "Andrews", "curves", "for", "visualising", "clusters", "of", "multivariate", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L270-L356
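A small, hedged example of andrews_curves on synthetic data (standing in for something like the iris set); values are kept roughly in (0, 1) as the docstring recommends:

import numpy as np
import pandas as pd
from pandas.plotting import andrews_curves

rng = np.random.RandomState(0)
df = pd.DataFrame(rng.uniform(size=(30, 4)), columns=list('abcd'))
df['class'] = np.repeat(['x', 'y', 'z'], 10)   # three synthetic classes

# One curve per row, colored by class; returns the matplotlib Axes.
ax = andrews_curves(df, 'class', samples=100)
ax.figure.savefig('andrews.png')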
19,651
pandas-dev/pandas
pandas/plotting/_misc.py
bootstrap_plot
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): """ Bootstrap plot on mean, median and mid-range statistics. The bootstrap plot is used to estimate the uncertainty of a statistic by relying on random sampling with replacement [1]_. This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size. .. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Parameters ---------- series : pandas.Series Pandas Series from which to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be less than or equal to the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds : Options to pass to matplotlib plotting method. Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- DataFrame.plot : Basic plotting for DataFrame objects. Series.plot : Basic plotting for Series objects. Examples -------- .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP """ import random import matplotlib.pyplot as plt # random.sample(ndarray, int) fails on python 3.3, sigh data = list(series.values) samplings = [random.sample(data, size) for _ in range(samples)] means = np.array([np.mean(sampling) for sampling in samplings]) medians = np.array([np.median(sampling) for sampling in samplings]) midranges = np.array([(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]) if fig is None: fig = plt.figure() x = lrange(samples) axes = [] ax1 = fig.add_subplot(2, 3, 1) ax1.set_xlabel("Sample") axes.append(ax1) ax1.plot(x, means, **kwds) ax2 = fig.add_subplot(2, 3, 2) ax2.set_xlabel("Sample") axes.append(ax2) ax2.plot(x, medians, **kwds) ax3 = fig.add_subplot(2, 3, 3) ax3.set_xlabel("Sample") axes.append(ax3) ax3.plot(x, midranges, **kwds) ax4 = fig.add_subplot(2, 3, 4) ax4.set_xlabel("Mean") axes.append(ax4) ax4.hist(means, **kwds) ax5 = fig.add_subplot(2, 3, 5) ax5.set_xlabel("Median") axes.append(ax5) ax5.hist(medians, **kwds) ax6 = fig.add_subplot(2, 3, 6) ax6.set_xlabel("Midrange") axes.append(ax6) ax6.hist(midranges, **kwds) for axis in axes: plt.setp(axis.get_xticklabels(), fontsize=8) plt.setp(axis.get_yticklabels(), fontsize=8) return fig
python
def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds): """ Bootstrap plot on mean, median and mid-range statistics. The bootstrap plot is used to estimate the uncertainty of a statistic by relying on random sampling with replacement [1]_. This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size. .. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Parameters ---------- series : pandas.Series Pandas Series from which to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be less than or equal to the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds : Options to pass to matplotlib plotting method. Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- DataFrame.plot : Basic plotting for DataFrame objects. Series.plot : Basic plotting for Series objects. Examples -------- .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP """ import random import matplotlib.pyplot as plt # random.sample(ndarray, int) fails on python 3.3, sigh data = list(series.values) samplings = [random.sample(data, size) for _ in range(samples)] means = np.array([np.mean(sampling) for sampling in samplings]) medians = np.array([np.median(sampling) for sampling in samplings]) midranges = np.array([(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]) if fig is None: fig = plt.figure() x = lrange(samples) axes = [] ax1 = fig.add_subplot(2, 3, 1) ax1.set_xlabel("Sample") axes.append(ax1) ax1.plot(x, means, **kwds) ax2 = fig.add_subplot(2, 3, 2) ax2.set_xlabel("Sample") axes.append(ax2) ax2.plot(x, medians, **kwds) ax3 = fig.add_subplot(2, 3, 3) ax3.set_xlabel("Sample") axes.append(ax3) ax3.plot(x, midranges, **kwds) ax4 = fig.add_subplot(2, 3, 4) ax4.set_xlabel("Mean") axes.append(ax4) ax4.hist(means, **kwds) ax5 = fig.add_subplot(2, 3, 5) ax5.set_xlabel("Median") axes.append(ax5) ax5.hist(medians, **kwds) ax6 = fig.add_subplot(2, 3, 6) ax6.set_xlabel("Midrange") axes.append(ax6) ax6.hist(midranges, **kwds) for axis in axes: plt.setp(axis.get_xticklabels(), fontsize=8) plt.setp(axis.get_yticklabels(), fontsize=8) return fig
[ "def", "bootstrap_plot", "(", "series", ",", "fig", "=", "None", ",", "size", "=", "50", ",", "samples", "=", "500", ",", "*", "*", "kwds", ")", ":", "import", "random", "import", "matplotlib", ".", "pyplot", "as", "plt", "# random.sample(ndarray, int) fails on python 3.3, sigh", "data", "=", "list", "(", "series", ".", "values", ")", "samplings", "=", "[", "random", ".", "sample", "(", "data", ",", "size", ")", "for", "_", "in", "range", "(", "samples", ")", "]", "means", "=", "np", ".", "array", "(", "[", "np", ".", "mean", "(", "sampling", ")", "for", "sampling", "in", "samplings", "]", ")", "medians", "=", "np", ".", "array", "(", "[", "np", ".", "median", "(", "sampling", ")", "for", "sampling", "in", "samplings", "]", ")", "midranges", "=", "np", ".", "array", "(", "[", "(", "min", "(", "sampling", ")", "+", "max", "(", "sampling", ")", ")", "*", "0.5", "for", "sampling", "in", "samplings", "]", ")", "if", "fig", "is", "None", ":", "fig", "=", "plt", ".", "figure", "(", ")", "x", "=", "lrange", "(", "samples", ")", "axes", "=", "[", "]", "ax1", "=", "fig", ".", "add_subplot", "(", "2", ",", "3", ",", "1", ")", "ax1", ".", "set_xlabel", "(", "\"Sample\"", ")", "axes", ".", "append", "(", "ax1", ")", "ax1", ".", "plot", "(", "x", ",", "means", ",", "*", "*", "kwds", ")", "ax2", "=", "fig", ".", "add_subplot", "(", "2", ",", "3", ",", "2", ")", "ax2", ".", "set_xlabel", "(", "\"Sample\"", ")", "axes", ".", "append", "(", "ax2", ")", "ax2", ".", "plot", "(", "x", ",", "medians", ",", "*", "*", "kwds", ")", "ax3", "=", "fig", ".", "add_subplot", "(", "2", ",", "3", ",", "3", ")", "ax3", ".", "set_xlabel", "(", "\"Sample\"", ")", "axes", ".", "append", "(", "ax3", ")", "ax3", ".", "plot", "(", "x", ",", "midranges", ",", "*", "*", "kwds", ")", "ax4", "=", "fig", ".", "add_subplot", "(", "2", ",", "3", ",", "4", ")", "ax4", ".", "set_xlabel", "(", "\"Mean\"", ")", "axes", ".", "append", "(", "ax4", ")", "ax4", ".", "hist", "(", "means", ",", "*", "*", "kwds", ")", "ax5", "=", "fig", ".", "add_subplot", "(", "2", ",", "3", ",", "5", ")", "ax5", ".", "set_xlabel", "(", "\"Median\"", ")", "axes", ".", "append", "(", "ax5", ")", "ax5", ".", "hist", "(", "medians", ",", "*", "*", "kwds", ")", "ax6", "=", "fig", ".", "add_subplot", "(", "2", ",", "3", ",", "6", ")", "ax6", ".", "set_xlabel", "(", "\"Midrange\"", ")", "axes", ".", "append", "(", "ax6", ")", "ax6", ".", "hist", "(", "midranges", ",", "*", "*", "kwds", ")", "for", "axis", "in", "axes", ":", "plt", ".", "setp", "(", "axis", ".", "get_xticklabels", "(", ")", ",", "fontsize", "=", "8", ")", "plt", ".", "setp", "(", "axis", ".", "get_yticklabels", "(", ")", ",", "fontsize", "=", "8", ")", "return", "fig" ]
Bootstrap plot on mean, median and mid-range statistics. The bootstrap plot is used to estimate the uncertainty of a statistic by relying on random sampling with replacement [1]_. This function will generate bootstrapping plots for mean, median and mid-range statistics for the given number of samples of the given size. .. [1] "Bootstrapping (statistics)" in \ https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 Parameters ---------- series : pandas.Series Pandas Series from which to get the samplings for the bootstrapping. fig : matplotlib.figure.Figure, default None If given, it will use the `fig` reference for plotting instead of creating a new one with default parameters. size : int, default 50 Number of data points to consider during each sampling. It must be less than or equal to the length of the `series`. samples : int, default 500 Number of times the bootstrap procedure is performed. **kwds : Options to pass to matplotlib plotting method. Returns ------- matplotlib.figure.Figure Matplotlib figure. See Also -------- DataFrame.plot : Basic plotting for DataFrame objects. Series.plot : Basic plotting for Series objects. Examples -------- .. plot:: :context: close-figs >>> s = pd.Series(np.random.uniform(size=100)) >>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP
[ "Bootstrap", "plot", "on", "mean", "median", "and", "mid", "-", "range", "statistics", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L359-L447
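A short usage sketch for bootstrap_plot; note that `size` must not exceed the series length, since random.sample draws without replacement within each resample:

import numpy as np
import pandas as pd
from pandas.plotting import bootstrap_plot

s = pd.Series(np.random.RandomState(0).uniform(size=100))
fig = bootstrap_plot(s, size=50, samples=500)   # size <= len(s)
fig.savefig('bootstrap.png')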
19,652
pandas-dev/pandas
pandas/plotting/_misc.py
autocorrelation_plot
def autocorrelation_plot(series, ax=None, **kwds): """ Autocorrelation plot for time series. Parameters ---------- series : Time series ax : Matplotlib axes object, optional kwds : keywords Options to pass to matplotlib plotting method Returns ------- :class:`matplotlib.axes.Axes` """ import matplotlib.pyplot as plt n = len(series) data = np.asarray(series) if ax is None: ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0)) mean = np.mean(data) c0 = np.sum((data - mean) ** 2) / float(n) def r(h): return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0 x = np.arange(n) + 1 y = lmap(r, x) z95 = 1.959963984540054 z99 = 2.5758293035489004 ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey') ax.axhline(y=z95 / np.sqrt(n), color='grey') ax.axhline(y=0.0, color='black') ax.axhline(y=-z95 / np.sqrt(n), color='grey') ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey') ax.set_xlabel("Lag") ax.set_ylabel("Autocorrelation") ax.plot(x, y, **kwds) if 'label' in kwds: ax.legend() ax.grid() return ax
python
def autocorrelation_plot(series, ax=None, **kwds): """ Autocorrelation plot for time series. Parameters ---------- series : Time series ax : Matplotlib axes object, optional kwds : keywords Options to pass to matplotlib plotting method Returns ------- :class:`matplotlib.axes.Axes` """ import matplotlib.pyplot as plt n = len(series) data = np.asarray(series) if ax is None: ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0)) mean = np.mean(data) c0 = np.sum((data - mean) ** 2) / float(n) def r(h): return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / float(n) / c0 x = np.arange(n) + 1 y = lmap(r, x) z95 = 1.959963984540054 z99 = 2.5758293035489004 ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey') ax.axhline(y=z95 / np.sqrt(n), color='grey') ax.axhline(y=0.0, color='black') ax.axhline(y=-z95 / np.sqrt(n), color='grey') ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey') ax.set_xlabel("Lag") ax.set_ylabel("Autocorrelation") ax.plot(x, y, **kwds) if 'label' in kwds: ax.legend() ax.grid() return ax
[ "def", "autocorrelation_plot", "(", "series", ",", "ax", "=", "None", ",", "*", "*", "kwds", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "n", "=", "len", "(", "series", ")", "data", "=", "np", ".", "asarray", "(", "series", ")", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", "xlim", "=", "(", "1", ",", "n", ")", ",", "ylim", "=", "(", "-", "1.0", ",", "1.0", ")", ")", "mean", "=", "np", ".", "mean", "(", "data", ")", "c0", "=", "np", ".", "sum", "(", "(", "data", "-", "mean", ")", "**", "2", ")", "/", "float", "(", "n", ")", "def", "r", "(", "h", ")", ":", "return", "(", "(", "data", "[", ":", "n", "-", "h", "]", "-", "mean", ")", "*", "(", "data", "[", "h", ":", "]", "-", "mean", ")", ")", ".", "sum", "(", ")", "/", "float", "(", "n", ")", "/", "c0", "x", "=", "np", ".", "arange", "(", "n", ")", "+", "1", "y", "=", "lmap", "(", "r", ",", "x", ")", "z95", "=", "1.959963984540054", "z99", "=", "2.5758293035489004", "ax", ".", "axhline", "(", "y", "=", "z99", "/", "np", ".", "sqrt", "(", "n", ")", ",", "linestyle", "=", "'--'", ",", "color", "=", "'grey'", ")", "ax", ".", "axhline", "(", "y", "=", "z95", "/", "np", ".", "sqrt", "(", "n", ")", ",", "color", "=", "'grey'", ")", "ax", ".", "axhline", "(", "y", "=", "0.0", ",", "color", "=", "'black'", ")", "ax", ".", "axhline", "(", "y", "=", "-", "z95", "/", "np", ".", "sqrt", "(", "n", ")", ",", "color", "=", "'grey'", ")", "ax", ".", "axhline", "(", "y", "=", "-", "z99", "/", "np", ".", "sqrt", "(", "n", ")", ",", "linestyle", "=", "'--'", ",", "color", "=", "'grey'", ")", "ax", ".", "set_xlabel", "(", "\"Lag\"", ")", "ax", ".", "set_ylabel", "(", "\"Autocorrelation\"", ")", "ax", ".", "plot", "(", "x", ",", "y", ",", "*", "*", "kwds", ")", "if", "'label'", "in", "kwds", ":", "ax", ".", "legend", "(", ")", "ax", ".", "grid", "(", ")", "return", "ax" ]
Autocorrelation plot for time series. Parameters ---------- series : Time series ax : Matplotlib axes object, optional kwds : keywords Options to pass to matplotlib plotting method Returns ------- :class:`matplotlib.axes.Axes`
[ "Autocorrelation", "plot", "for", "time", "series", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L596-L637
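To make the estimator above concrete, here is a standalone recomputation of r(h) = sum((x[:n-h] - mean) * (x[h:] - mean)) / (n * c0) for a single lag, mirroring the body of autocorrelation_plot:

import numpy as np

x = np.sin(np.linspace(0, 8 * np.pi, 200))   # smooth signal: strong low-lag autocorrelation
n = len(x)
mean = x.mean()
c0 = ((x - mean) ** 2).sum() / n             # lag-0 autocovariance, as in the code above

h = 5
r_h = ((x[:n - h] - mean) * (x[h:] - mean)).sum() / n / c0
print(r_h)   # close to 1.0 for a small lag on a smooth series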
19,653
pandas-dev/pandas
pandas/core/computation/align.py
_any_pandas_objects
def _any_pandas_objects(terms): """Check a sequence of terms for instances of PandasObject.""" return any(isinstance(term.value, pd.core.generic.PandasObject) for term in terms)
python
def _any_pandas_objects(terms): """Check a sequence of terms for instances of PandasObject.""" return any(isinstance(term.value, pd.core.generic.PandasObject) for term in terms)
[ "def", "_any_pandas_objects", "(", "terms", ")", ":", "return", "any", "(", "isinstance", "(", "term", ".", "value", ",", "pd", ".", "core", ".", "generic", ".", "PandasObject", ")", "for", "term", "in", "terms", ")" ]
Check a sequence of terms for instances of PandasObject.
[ "Check", "a", "sequence", "of", "terms", "for", "instances", "of", "PandasObject", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/align.py#L36-L39
19,654
pandas-dev/pandas
pandas/core/computation/align.py
_align
def _align(terms): """Align a set of terms""" try: # flatten the parse tree (a nested list, really) terms = list(com.flatten(terms)) except TypeError: # can't iterate so it must just be a constant or single variable if isinstance(terms.value, pd.core.generic.NDFrame): typ = type(terms.value) return typ, _zip_axes_from_type(typ, terms.value.axes) return np.result_type(terms.type), None # if all resolved variables are numeric scalars if all(term.is_scalar for term in terms): return _result_type_many(*(term.value for term in terms)).type, None # perform the main alignment typ, axes = _align_core(terms) return typ, axes
python
def _align(terms): """Align a set of terms""" try: # flatten the parse tree (a nested list, really) terms = list(com.flatten(terms)) except TypeError: # can't iterate so it must just be a constant or single variable if isinstance(terms.value, pd.core.generic.NDFrame): typ = type(terms.value) return typ, _zip_axes_from_type(typ, terms.value.axes) return np.result_type(terms.type), None # if all resolved variables are numeric scalars if all(term.is_scalar for term in terms): return _result_type_many(*(term.value for term in terms)).type, None # perform the main alignment typ, axes = _align_core(terms) return typ, axes
[ "def", "_align", "(", "terms", ")", ":", "try", ":", "# flatten the parse tree (a nested list, really)", "terms", "=", "list", "(", "com", ".", "flatten", "(", "terms", ")", ")", "except", "TypeError", ":", "# can't iterate so it must just be a constant or single variable", "if", "isinstance", "(", "terms", ".", "value", ",", "pd", ".", "core", ".", "generic", ".", "NDFrame", ")", ":", "typ", "=", "type", "(", "terms", ".", "value", ")", "return", "typ", ",", "_zip_axes_from_type", "(", "typ", ",", "terms", ".", "value", ".", "axes", ")", "return", "np", ".", "result_type", "(", "terms", ".", "type", ")", ",", "None", "# if all resolved variables are numeric scalars", "if", "all", "(", "term", ".", "is_scalar", "for", "term", "in", "terms", ")", ":", "return", "_result_type_many", "(", "*", "(", "term", ".", "value", "for", "term", "in", "terms", ")", ")", ".", "type", ",", "None", "# perform the main alignment", "typ", ",", "axes", "=", "_align_core", "(", "terms", ")", "return", "typ", ",", "axes" ]
Align a set of terms
[ "Align", "a", "set", "of", "terms" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/align.py#L114-L132
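_align itself is internal, but its effect is easy to observe through pd.eval, which routes operands through this alignment step; differently indexed frames are aligned just as in ordinary DataFrame arithmetic:

import pandas as pd

a = pd.DataFrame({'x': [1.0, 2.0]}, index=[0, 1])
b = pd.DataFrame({'x': [10.0, 20.0]}, index=[1, 2])

# Indexes are unioned to {0, 1, 2}; non-overlapping rows become NaN.
print(pd.eval('a + b'))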
19,655
pandas-dev/pandas
pandas/plotting/_timeseries.py
tsplot
def tsplot(series, plotf, ax=None, **kwargs): """ Plots a Series on the given Matplotlib axes or the current axes Parameters ---------- series : Series plotf : callable ax : Axes, optional Notes ----- Supports same kwargs as Axes.plot .. deprecated:: 0.23.0 Use Series.plot() instead """ import warnings warnings.warn("'tsplot' is deprecated and will be removed in a " "future version. Please use Series.plot() instead.", FutureWarning, stacklevel=2) # Use inferred freq if possible; need a test case for inferred if ax is None: import matplotlib.pyplot as plt ax = plt.gca() freq, series = _maybe_resample(series, ax, kwargs) # Set ax with freq info _decorate_axes(ax, freq, kwargs) ax._plot_data.append((series, plotf, kwargs)) lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs) # set date formatter, locators and rescale limits format_dateaxis(ax, ax.freq, series.index) return lines
python
def tsplot(series, plotf, ax=None, **kwargs): """ Plots a Series on the given Matplotlib axes or the current axes Parameters ---------- series : Series plotf : callable ax : Axes, optional Notes ----- Supports same kwargs as Axes.plot .. deprecated:: 0.23.0 Use Series.plot() instead """ import warnings warnings.warn("'tsplot' is deprecated and will be removed in a " "future version. Please use Series.plot() instead.", FutureWarning, stacklevel=2) # Use inferred freq if possible; need a test case for inferred if ax is None: import matplotlib.pyplot as plt ax = plt.gca() freq, series = _maybe_resample(series, ax, kwargs) # Set ax with freq info _decorate_axes(ax, freq, kwargs) ax._plot_data.append((series, plotf, kwargs)) lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs) # set date formatter, locators and rescale limits format_dateaxis(ax, ax.freq, series.index) return lines
[ "def", "tsplot", "(", "series", ",", "plotf", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "\"'tsplot' is deprecated and will be removed in a \"", "\"future version. Please use Series.plot() instead.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "# Used inferred freq is possible, need a test case for inferred", "if", "ax", "is", "None", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "ax", "=", "plt", ".", "gca", "(", ")", "freq", ",", "series", "=", "_maybe_resample", "(", "series", ",", "ax", ",", "kwargs", ")", "# Set ax with freq info", "_decorate_axes", "(", "ax", ",", "freq", ",", "kwargs", ")", "ax", ".", "_plot_data", ".", "append", "(", "(", "series", ",", "plotf", ",", "kwargs", ")", ")", "lines", "=", "plotf", "(", "ax", ",", "series", ".", "index", ".", "_mpl_repr", "(", ")", ",", "series", ".", "values", ",", "*", "*", "kwargs", ")", "# set date formatter, locators and rescale limits", "format_dateaxis", "(", "ax", ",", "ax", ".", "freq", ",", "series", ".", "index", ")", "return", "lines" ]
Plots a Series on the given Matplotlib axes or the current axes Parameters ---------- series : Series plotf : callable ax : Axes, optional Notes ----- Supports same kwargs as Axes.plot .. deprecated:: 0.23.0 Use Series.plot() instead
[ "Plots", "a", "Series", "on", "the", "given", "Matplotlib", "axes", "or", "the", "current", "axes" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_timeseries.py#L26-L62
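Since tsplot is deprecated, a minimal sketch of the replacement its warning points to — Series.plot(), which does the frequency bookkeeping and date-axis formatting itself:

import numpy as np
import pandas as pd

idx = pd.date_range('2000-01-01', periods=100, freq='D')
s = pd.Series(np.random.RandomState(2).standard_normal(100), index=idx)

ax = s.plot()                # date-aware x axis, no deprecated API
ax.figure.savefig('ts.png')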
19,656
pandas-dev/pandas
pandas/plotting/_timeseries.py
_decorate_axes
def _decorate_axes(ax, freq, kwargs): """Initialize axes for time-series plotting""" if not hasattr(ax, '_plot_data'): ax._plot_data = [] ax.freq = freq xaxis = ax.get_xaxis() xaxis.freq = freq if not hasattr(ax, 'legendlabels'): ax.legendlabels = [kwargs.get('label', None)] else: ax.legendlabels.append(kwargs.get('label', None)) ax.view_interval = None ax.date_axis_info = None
python
def _decorate_axes(ax, freq, kwargs): """Initialize axes for time-series plotting""" if not hasattr(ax, '_plot_data'): ax._plot_data = [] ax.freq = freq xaxis = ax.get_xaxis() xaxis.freq = freq if not hasattr(ax, 'legendlabels'): ax.legendlabels = [kwargs.get('label', None)] else: ax.legendlabels.append(kwargs.get('label', None)) ax.view_interval = None ax.date_axis_info = None
[ "def", "_decorate_axes", "(", "ax", ",", "freq", ",", "kwargs", ")", ":", "if", "not", "hasattr", "(", "ax", ",", "'_plot_data'", ")", ":", "ax", ".", "_plot_data", "=", "[", "]", "ax", ".", "freq", "=", "freq", "xaxis", "=", "ax", ".", "get_xaxis", "(", ")", "xaxis", ".", "freq", "=", "freq", "if", "not", "hasattr", "(", "ax", ",", "'legendlabels'", ")", ":", "ax", ".", "legendlabels", "=", "[", "kwargs", ".", "get", "(", "'label'", ",", "None", ")", "]", "else", ":", "ax", ".", "legendlabels", ".", "append", "(", "kwargs", ".", "get", "(", "'label'", ",", "None", ")", ")", "ax", ".", "view_interval", "=", "None", "ax", ".", "date_axis_info", "=", "None" ]
Initialize axes for time-series plotting
[ "Initialize", "axes", "for", "time", "-", "series", "plotting" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_timeseries.py#L157-L170
19,657
pandas-dev/pandas
pandas/core/frame.py
DataFrame._is_homogeneous_type
def _is_homogeneous_type(self): """ Whether all the columns in a DataFrame have the same type. Returns ------- bool Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if self._data.any_extension_types: return len({block.dtype for block in self._data.blocks}) == 1 else: return not self._data.is_mixed_type
python
def _is_homogeneous_type(self): """ Whether all the columns in a DataFrame have the same type. Returns ------- bool Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False """ if self._data.any_extension_types: return len({block.dtype for block in self._data.blocks}) == 1 else: return not self._data.is_mixed_type
[ "def", "_is_homogeneous_type", "(", "self", ")", ":", "if", "self", ".", "_data", ".", "any_extension_types", ":", "return", "len", "(", "{", "block", ".", "dtype", "for", "block", "in", "self", ".", "_data", ".", "blocks", "}", ")", "==", "1", "else", ":", "return", "not", "self", ".", "_data", ".", "is_mixed_type" ]
Whether all the columns in a DataFrame have the same type. Returns ------- bool Examples -------- >>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type True >>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type False Items with the same type but different sizes are considered different types. >>> DataFrame({ ... "A": np.array([1, 2], dtype=np.int32), ... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type False
[ "Whether", "all", "the", "columns", "in", "a", "DataFrame", "have", "the", "same", "type", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L514-L540
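A hedged illustration of the extension-dtype branch above: two categorical columns count as homogeneous only when their CategoricalDtype (categories and orderedness) matches exactly. _is_homogeneous_type is private, so this is for illustration only.

import pandas as pd

df1 = pd.DataFrame({'A': pd.Categorical(['a', 'b']),
                    'B': pd.Categorical(['a', 'b'])})
df2 = pd.DataFrame({'A': pd.Categorical(['a', 'b']),
                    'B': pd.Categorical(['b', 'c'])})

print(df1._is_homogeneous_type)   # True: identical categorical dtypes
print(df2._is_homogeneous_type)   # False: different category sets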
19,658
pandas-dev/pandas
pandas/core/frame.py
DataFrame._repr_html_
def _repr_html_(self): """ Return an HTML representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO("") self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace('<', r'&lt;', 1) val = val.replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, notebook=True) else: return None
python
def _repr_html_(self): """ Return an HTML representation for a particular DataFrame. Mainly for IPython notebook. """ if self._info_repr(): buf = StringIO("") self.info(buf=buf) # need to escape the <class>, should be the first line. val = buf.getvalue().replace('<', r'&lt;', 1) val = val.replace('>', r'&gt;', 1) return '<pre>' + val + '</pre>' if get_option("display.notebook_repr_html"): max_rows = get_option("display.max_rows") max_cols = get_option("display.max_columns") show_dimensions = get_option("display.show_dimensions") return self.to_html(max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, notebook=True) else: return None
[ "def", "_repr_html_", "(", "self", ")", ":", "if", "self", ".", "_info_repr", "(", ")", ":", "buf", "=", "StringIO", "(", "\"\"", ")", "self", ".", "info", "(", "buf", "=", "buf", ")", "# need to escape the <class>, should be the first line.", "val", "=", "buf", ".", "getvalue", "(", ")", ".", "replace", "(", "'<'", ",", "r'&lt;'", ",", "1", ")", "val", "=", "val", ".", "replace", "(", "'>'", ",", "r'&gt;'", ",", "1", ")", "return", "'<pre>'", "+", "val", "+", "'</pre>'", "if", "get_option", "(", "\"display.notebook_repr_html\"", ")", ":", "max_rows", "=", "get_option", "(", "\"display.max_rows\"", ")", "max_cols", "=", "get_option", "(", "\"display.max_columns\"", ")", "show_dimensions", "=", "get_option", "(", "\"display.show_dimensions\"", ")", "return", "self", ".", "to_html", "(", "max_rows", "=", "max_rows", ",", "max_cols", "=", "max_cols", ",", "show_dimensions", "=", "show_dimensions", ",", "notebook", "=", "True", ")", "else", ":", "return", "None" ]
Return an HTML representation for a particular DataFrame. Mainly for IPython notebook.
[ "Return", "a", "html", "representation", "for", "a", "particular", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L635-L657
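A small sketch of the notebook hook above, assuming pandas as pd; pd.option_context is the public way to toggle the display option the method reads. Outputs in comments are expectations.

import pandas as pd

df = pd.DataFrame({"A": range(3)})

# IPython calls this hook when rendering a cell; by default it
# returns an HTML <table> string.
print(df._repr_html_()[:30])

# With the notebook option off, the hook returns None and IPython
# falls back to the plain-text repr.
with pd.option_context("display.notebook_repr_html", False):
    print(df._repr_html_())  # None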
19,659
pandas-dev/pandas
pandas/core/frame.py
DataFrame.itertuples
def itertuples(self, index=True, name="Pandas"): """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Yields ------- collections.namedtuple Yields a namedtuple for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.iteritems : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. With a large number of columns (>255), regular tuples are returned. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) # Python 3 supports at most 255 arguments to constructor if name is not None and len(self.columns) + index < 256: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays)
python
def itertuples(self, index=True, name="Pandas"): """ Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Yields ------- collections.namedtuple Yields a namedtuple for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.iteritems : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. With a large number of columns (>255), regular tuples are returned. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2) """ arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, "Index") # use integer indexing because of possible duplicate column names arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) # Python 3 supports at most 255 arguments to constructor if name is not None and len(self.columns) + index < 256: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) # fallback to regular tuples return zip(*arrays)
[ "def", "itertuples", "(", "self", ",", "index", "=", "True", ",", "name", "=", "\"Pandas\"", ")", ":", "arrays", "=", "[", "]", "fields", "=", "list", "(", "self", ".", "columns", ")", "if", "index", ":", "arrays", ".", "append", "(", "self", ".", "index", ")", "fields", ".", "insert", "(", "0", ",", "\"Index\"", ")", "# use integer indexing because of possible duplicate column names", "arrays", ".", "extend", "(", "self", ".", "iloc", "[", ":", ",", "k", "]", "for", "k", "in", "range", "(", "len", "(", "self", ".", "columns", ")", ")", ")", "# Python 3 supports at most 255 arguments to constructor", "if", "name", "is", "not", "None", "and", "len", "(", "self", ".", "columns", ")", "+", "index", "<", "256", ":", "itertuple", "=", "collections", ".", "namedtuple", "(", "name", ",", "fields", ",", "rename", "=", "True", ")", "return", "map", "(", "itertuple", ".", "_make", ",", "zip", "(", "*", "arrays", ")", ")", "# fallback to regular tuples", "return", "zip", "(", "*", "arrays", ")" ]
Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. name : str or None, default "Pandas" The name of the returned namedtuples or None to return regular tuples. Yields ------- collections.namedtuple Yields a namedtuple for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.iteritems : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. With a large number of columns (>255), regular tuples are returned. Examples -------- >>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]}, ... index=['dog', 'hawk']) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) ... Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the `index` parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) ... Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the `name` parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name='Animal'): ... print(row) ... Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2)
[ "Iterate", "over", "DataFrame", "rows", "as", "namedtuples", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L832-L910
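A short usage sketch for DataFrame.itertuples, assuming pandas as pd; comments show expected values.

import pandas as pd

df = pd.DataFrame({"num_legs": [4, 2], "num_wings": [0, 2]},
                  index=["dog", "hawk"])

# Fields are attribute-accessible; Index comes first unless index=False.
totals = {row.Index: row.num_legs + row.num_wings
          for row in df.itertuples()}
print(totals)  # {'dog': 4, 'hawk': 4}

# name=None yields plain tuples (also the fallback beyond 255 columns).
print(list(df.itertuples(index=False, name=None)))  # [(4, 0), (2, 2)]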
19,660
pandas-dev/pandas
pandas/core/frame.py
DataFrame.dot
def dot(self, other): """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of another Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame or a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method gives the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method also works if other is a np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, ' '{s} vs {r}'.format(s=lvals.shape, r=rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: # pragma: no cover raise TypeError('unsupported type: {oth}'.format(oth=type(other)))
python
def dot(self, other): """ Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of another Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame or a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method gives the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method also works if other is a np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2 """ if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if (len(common) > len(self.columns) or len(common) > len(other.index)): raise ValueError('matrices are not aligned') left = self.reindex(columns=common, copy=False) right = other.reindex(index=common, copy=False) lvals = left.values rvals = right.values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError('Dot product shape mismatch, ' '{s} vs {r}'.format(s=lvals.shape, r=rvals.shape)) if isinstance(other, DataFrame): return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns) elif isinstance(other, Series): return Series(np.dot(lvals, rvals), index=left.index) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index) else: return Series(result, index=left.index) else: # pragma: no cover raise TypeError('unsupported type: {oth}'.format(oth=type(other)))
[ "def", "dot", "(", "self", ",", "other", ")", ":", "if", "isinstance", "(", "other", ",", "(", "Series", ",", "DataFrame", ")", ")", ":", "common", "=", "self", ".", "columns", ".", "union", "(", "other", ".", "index", ")", "if", "(", "len", "(", "common", ")", ">", "len", "(", "self", ".", "columns", ")", "or", "len", "(", "common", ")", ">", "len", "(", "other", ".", "index", ")", ")", ":", "raise", "ValueError", "(", "'matrices are not aligned'", ")", "left", "=", "self", ".", "reindex", "(", "columns", "=", "common", ",", "copy", "=", "False", ")", "right", "=", "other", ".", "reindex", "(", "index", "=", "common", ",", "copy", "=", "False", ")", "lvals", "=", "left", ".", "values", "rvals", "=", "right", ".", "values", "else", ":", "left", "=", "self", "lvals", "=", "self", ".", "values", "rvals", "=", "np", ".", "asarray", "(", "other", ")", "if", "lvals", ".", "shape", "[", "1", "]", "!=", "rvals", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'Dot product shape mismatch, '", "'{s} vs {r}'", ".", "format", "(", "s", "=", "lvals", ".", "shape", ",", "r", "=", "rvals", ".", "shape", ")", ")", "if", "isinstance", "(", "other", ",", "DataFrame", ")", ":", "return", "self", ".", "_constructor", "(", "np", ".", "dot", "(", "lvals", ",", "rvals", ")", ",", "index", "=", "left", ".", "index", ",", "columns", "=", "other", ".", "columns", ")", "elif", "isinstance", "(", "other", ",", "Series", ")", ":", "return", "Series", "(", "np", ".", "dot", "(", "lvals", ",", "rvals", ")", ",", "index", "=", "left", ".", "index", ")", "elif", "isinstance", "(", "rvals", ",", "(", "np", ".", "ndarray", ",", "Index", ")", ")", ":", "result", "=", "np", ".", "dot", "(", "lvals", ",", "rvals", ")", "if", "result", ".", "ndim", "==", "2", ":", "return", "self", ".", "_constructor", "(", "result", ",", "index", "=", "left", ".", "index", ")", "else", ":", "return", "Series", "(", "result", ",", "index", "=", "left", ".", "index", ")", "else", ":", "# pragma: no cover", "raise", "TypeError", "(", "'unsupported type: {oth}'", ".", "format", "(", "oth", "=", "type", "(", "other", ")", ")", ")" ]
Compute the matrix multiplication between the DataFrame and other. This method computes the matrix product between the DataFrame and the values of another Series, DataFrame or a numpy array. It can also be called using ``self @ other`` in Python >= 3.5. Parameters ---------- other : Series, DataFrame or array-like The other object to compute the matrix product with. Returns ------- Series or DataFrame If other is a Series, return the matrix product between self and other as a Series. If other is a DataFrame or a numpy.array, return the matrix product of self and other in a DataFrame or a np.array. See Also -------- Series.dot: Similar method for Series. Notes ----- The dimensions of DataFrame and other must be compatible in order to compute the matrix multiplication. The dot method for Series computes the inner product, instead of the matrix product here. Examples -------- Here we multiply a DataFrame with a Series. >>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]]) >>> s = pd.Series([1, 1, 2, 1]) >>> df.dot(s) 0 -4 1 5 dtype: int64 Here we multiply a DataFrame with another DataFrame. >>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(other) 0 1 0 1 4 1 2 2 Note that the dot method gives the same result as @ >>> df @ other 0 1 0 1 4 1 2 2 The dot method also works if other is a np.array. >>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]]) >>> df.dot(arr) 0 1 0 1 4 1 2 2
[ "Compute", "the", "matrix", "mutiplication", "between", "the", "DataFrame", "and", "other", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L920-L1018
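A sketch of DataFrame.dot covering the operand kinds the code above dispatches on (Series, raw array, and misaligned labels), assuming pandas as pd and numpy as np; comments show expected values.

import numpy as np
import pandas as pd

df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
s = pd.Series([1, 1, 2, 1])

print(df.dot(s).tolist())          # [-4, 5]
print((df @ np.ones(4)).tolist())  # [-2.0, 4.0]; @ routes to dot()

# Labelled operands must align exactly; mismatches raise instead of
# silently reindexing with NaN.
try:
    df.dot(pd.Series([1, 1], index=[10, 11]))
except ValueError as err:
    print(err)                     # matrices are not aligned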
19,661
pandas-dev/pandas
pandas/core/frame.py
DataFrame.from_dict
def from_dict(cls, data, orient='columns', dtype=None, columns=None): """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from ndarray (structured dtype), list of tuples, dict, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d """ index = None orient = orient.lower() if orient == 'index': if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient == 'columns': if columns is not None: raise ValueError("cannot use columns parameter with " "orient='columns'") else: # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype)
python
def from_dict(cls, data, orient='columns', dtype=None, columns=None): """ Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from ndarray (structured dtype), list of tuples, dict, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d """ index = None orient = orient.lower() if orient == 'index': if len(data) > 0: # TODO speed up Series case if isinstance(list(data.values())[0], (Series, dict)): data = _from_nested_dict(data) else: data, index = list(data.values()), list(data.keys()) elif orient == 'columns': if columns is not None: raise ValueError("cannot use columns parameter with " "orient='columns'") else: # pragma: no cover raise ValueError('only recognize index or columns for orient') return cls(data, index=index, columns=columns, dtype=dtype)
[ "def", "from_dict", "(", "cls", ",", "data", ",", "orient", "=", "'columns'", ",", "dtype", "=", "None", ",", "columns", "=", "None", ")", ":", "index", "=", "None", "orient", "=", "orient", ".", "lower", "(", ")", "if", "orient", "==", "'index'", ":", "if", "len", "(", "data", ")", ">", "0", ":", "# TODO speed up Series case", "if", "isinstance", "(", "list", "(", "data", ".", "values", "(", ")", ")", "[", "0", "]", ",", "(", "Series", ",", "dict", ")", ")", ":", "data", "=", "_from_nested_dict", "(", "data", ")", "else", ":", "data", ",", "index", "=", "list", "(", "data", ".", "values", "(", ")", ")", ",", "list", "(", "data", ".", "keys", "(", ")", ")", "elif", "orient", "==", "'columns'", ":", "if", "columns", "is", "not", "None", ":", "raise", "ValueError", "(", "\"cannot use columns parameter with \"", "\"orient='columns'\"", ")", "else", ":", "# pragma: no cover", "raise", "ValueError", "(", "'only recognize index or columns for orient'", ")", "return", "cls", "(", "data", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "dtype", "=", "dtype", ")" ]
Construct DataFrame from dict of array-like or dicts. Creates DataFrame object from dictionary by columns or by index allowing dtype specification. Parameters ---------- data : dict Of the form {field : array-like} or {field : dict}. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the passed dict should be the columns of the resulting DataFrame, pass 'columns' (default). Otherwise if the keys should be rows, pass 'index'. dtype : dtype, default None Data type to force, otherwise infer. columns : list, default None Column labels to use when ``orient='index'``. Raises a ValueError if used with ``orient='columns'``. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- DataFrame.from_records : DataFrame from ndarray (structured dtype), list of tuples, dict, or DataFrame. DataFrame : DataFrame object creation using constructor. Examples -------- By default the keys of the dict become the DataFrame columns: >>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data) col_1 col_2 0 3 a 1 2 b 2 1 c 3 0 d Specify ``orient='index'`` to create the DataFrame using dictionary keys as rows: >>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']} >>> pd.DataFrame.from_dict(data, orient='index') 0 1 2 3 row_1 3 2 1 0 row_2 a b c d When using the 'index' orientation, the column names can be specified manually: >>> pd.DataFrame.from_dict(data, orient='index', ... columns=['A', 'B', 'C', 'D']) A B C D row_1 3 2 1 0 row_2 a b c d
[ "Construct", "DataFrame", "from", "dict", "of", "array", "-", "like", "or", "dicts", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1036-L1115
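A sketch of both orientations of from_dict, assuming pandas as pd; the ValueError mirrors the guard in the code above.

import pandas as pd

data = {"row_1": [3, 2, 1, 0], "row_2": ["a", "b", "c", "d"]}

# orient='index' turns keys into row labels; columns= names the columns.
df = pd.DataFrame.from_dict(data, orient="index",
                            columns=["A", "B", "C", "D"])
print(df)

# columns= is rejected with the default orient='columns'.
try:
    pd.DataFrame.from_dict(data, columns=["A"])
except ValueError as err:
    print(err)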
19,662
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_numpy
def to_numpy(self, dtype=None, copy=False): """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the resulting dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ result = np.array(self.values, dtype=dtype, copy=copy) return result
python
def to_numpy(self, dtype=None, copy=False): """ Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the resulting dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) """ result = np.array(self.values, dtype=dtype, copy=copy) return result
[ "def", "to_numpy", "(", "self", ",", "dtype", "=", "None", ",", "copy", "=", "False", ")", ":", "result", "=", "np", ".", "array", "(", "self", ".", "values", ",", "dtype", "=", "dtype", ",", "copy", "=", "copy", ")", "return", "result" ]
Convert the DataFrame to a NumPy array. .. versionadded:: 0.24.0 By default, the dtype of the returned array will be the common NumPy dtype of all types in the DataFrame. For example, if the dtypes are ``float16`` and ``float32``, the resulting dtype will be ``float32``. This may require copying data and coercing values, which may be expensive. Parameters ---------- dtype : str or numpy.dtype, optional The dtype to pass to :meth:`numpy.asarray` copy : bool, default False Whether to ensure that the returned value is not a view on another array. Note that ``copy=False`` does not *ensure* that ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that a copy is made, even if not strictly necessary. Returns ------- numpy.ndarray See Also -------- Series.to_numpy : Similar method for Series. Examples -------- >>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy() array([[1, 3], [2, 4]]) With heterogeneous data, the lowest common type will have to be used. >>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]}) >>> df.to_numpy() array([[1. , 3. ], [2. , 4.5]]) For a mix of numeric and non-numeric types, the output array will have object dtype. >>> df['C'] = pd.date_range('2000', periods=2) >>> df.to_numpy() array([[1, 3.0, Timestamp('2000-01-01 00:00:00')], [2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
[ "Convert", "the", "DataFrame", "to", "a", "NumPy", "array", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1117-L1170
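A sketch of to_numpy's dtype coercion and copy semantics, assuming pandas as pd.

import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})

# int64 and float64 coerce to the lowest common type, float64.
print(df.to_numpy().dtype)  # float64

# copy=True guarantees the result does not alias the frame's data.
safe = df.to_numpy(copy=True)
safe[0, 0] = 99.0
print(df.iloc[0, 0])        # still 1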
19,663
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_dict
def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in self.items()) elif orient.lower().startswith('l'): return into_c((k, v.tolist()) for k, v in self.items()) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', [ list(map(com.maybe_box_datetimelike, t)) for t in self.itertuples(index=False, name=None) ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items()) elif orient.lower().startswith('r'): columns = self.columns.tolist() rows = (dict(zip(columns, row)) for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items()) for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: raise ValueError( "DataFrame index must be unique for orient='index'." ) return into_c((t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples(name=None)) else: raise ValueError("orient '{o}' not understood".format(o=orient))
python
def to_dict(self, orient='dict', into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] """ if not self.columns.is_unique: warnings.warn("DataFrame columns are not unique, some " "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in self.items()) elif orient.lower().startswith('l'): return into_c((k, v.tolist()) for k, v in self.items()) elif orient.lower().startswith('sp'): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', [ list(map(com.maybe_box_datetimelike, t)) for t in self.itertuples(index=False, name=None) ]))) elif orient.lower().startswith('s'): return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items()) elif orient.lower().startswith('r'): columns = self.columns.tolist() rows = (dict(zip(columns, row)) for row in self.itertuples(index=False, name=None)) return [ into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items()) for row in rows] elif orient.lower().startswith('i'): if not self.index.is_unique: raise ValueError( "DataFrame index must be unique for orient='index'." ) return into_c((t[0], dict(zip(self.columns, t[1:]))) for t in self.itertuples(name=None)) else: raise ValueError("orient '{o}' not understood".format(o=orient))
[ "def", "to_dict", "(", "self", ",", "orient", "=", "'dict'", ",", "into", "=", "dict", ")", ":", "if", "not", "self", ".", "columns", ".", "is_unique", ":", "warnings", ".", "warn", "(", "\"DataFrame columns are not unique, some \"", "\"columns will be omitted.\"", ",", "UserWarning", ",", "stacklevel", "=", "2", ")", "# GH16122", "into_c", "=", "com", ".", "standardize_mapping", "(", "into", ")", "if", "orient", ".", "lower", "(", ")", ".", "startswith", "(", "'d'", ")", ":", "return", "into_c", "(", "(", "k", ",", "v", ".", "to_dict", "(", "into", ")", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ")", "elif", "orient", ".", "lower", "(", ")", ".", "startswith", "(", "'l'", ")", ":", "return", "into_c", "(", "(", "k", ",", "v", ".", "tolist", "(", ")", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ")", "elif", "orient", ".", "lower", "(", ")", ".", "startswith", "(", "'sp'", ")", ":", "return", "into_c", "(", "(", "(", "'index'", ",", "self", ".", "index", ".", "tolist", "(", ")", ")", ",", "(", "'columns'", ",", "self", ".", "columns", ".", "tolist", "(", ")", ")", ",", "(", "'data'", ",", "[", "list", "(", "map", "(", "com", ".", "maybe_box_datetimelike", ",", "t", ")", ")", "for", "t", "in", "self", ".", "itertuples", "(", "index", "=", "False", ",", "name", "=", "None", ")", "]", ")", ")", ")", "elif", "orient", ".", "lower", "(", ")", ".", "startswith", "(", "'s'", ")", ":", "return", "into_c", "(", "(", "k", ",", "com", ".", "maybe_box_datetimelike", "(", "v", ")", ")", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ")", "elif", "orient", ".", "lower", "(", ")", ".", "startswith", "(", "'r'", ")", ":", "columns", "=", "self", ".", "columns", ".", "tolist", "(", ")", "rows", "=", "(", "dict", "(", "zip", "(", "columns", ",", "row", ")", ")", "for", "row", "in", "self", ".", "itertuples", "(", "index", "=", "False", ",", "name", "=", "None", ")", ")", "return", "[", "into_c", "(", "(", "k", ",", "com", ".", "maybe_box_datetimelike", "(", "v", ")", ")", "for", "k", ",", "v", "in", "row", ".", "items", "(", ")", ")", "for", "row", "in", "rows", "]", "elif", "orient", ".", "lower", "(", ")", ".", "startswith", "(", "'i'", ")", ":", "if", "not", "self", ".", "index", ".", "is_unique", ":", "raise", "ValueError", "(", "\"DataFrame index must be unique for orient='index'.\"", ")", "return", "into_c", "(", "(", "t", "[", "0", "]", ",", "dict", "(", "zip", "(", "self", ".", "columns", ",", "t", "[", "1", ":", "]", ")", ")", ")", "for", "t", "in", "self", ".", "itertuples", "(", "name", "=", "None", ")", ")", "else", ":", "raise", "ValueError", "(", "\"orient '{o}' not understood\"", ".", "format", "(", "o", "=", "orient", ")", ")" ]
Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. .. versionadded:: 0.21.0 Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. See Also -------- DataFrame.from_dict: Create a DataFrame from a dictionary. DataFrame.to_json: Convert a DataFrame to JSON format. Examples -------- >>> df = pd.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df.to_dict() {'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}} You can specify the return orientation. >>> df.to_dict('series') {'col1': row1 1 row2 2 Name: col1, dtype: int64, 'col2': row1 0.50 row2 0.75 Name: col2, dtype: float64} >>> df.to_dict('split') {'index': ['row1', 'row2'], 'columns': ['col1', 'col2'], 'data': [[1, 0.5], [2, 0.75]]} >>> df.to_dict('records') [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}] >>> df.to_dict('index') {'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}} You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) [defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}), defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
[ "Convert", "the", "DataFrame", "to", "a", "dictionary", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1172-L1298
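A sketch of two to_dict orientations and the unique-index guard, assuming pandas as pd; comments show expected values.

import pandas as pd

df = pd.DataFrame({"col1": [1, 2], "col2": [0.5, 0.75]},
                  index=["row1", "row2"])

print(df.to_dict("records"))  # [{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]

# orient='index' requires unique row labels, per the guard in the code above.
dup = df.rename(index={"row2": "row1"})
try:
    dup.to_dict("index")
except ValueError as err:
    print(err)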
19,664
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_records
def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None .. deprecated:: 0.23.0 Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = "<S{}".format(df.index.str.len().max()) >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if convert_datetime64 is not None: warnings.warn("The 'convert_datetime64' parameter is " "deprecated and will be removed in a future " "version", FutureWarning, stacklevel=2) if index: if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. copy copy copy ix_vals = lmap(np.array, zip(*self.index.values)) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = lmap(str, index_names) + lmap(str, self.columns) else: arrays = [self[c].get_values() for c in self.columns] names = lmap(str, self.columns) index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index < index_len: dtype_mapping = index_dtypes name = index_names[index] else: index -= index_len dtype_mapping = column_dtypes name = self.columns[index] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index in dtype_mapping: dtype_mapping = dtype_mapping[index] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" msg = ("Invalid dtype {dtype} specified for " "{element} {name}").format(dtype=dtype_mapping, element=element, name=name) raise ValueError(msg) return np.rec.fromarrays( arrays, dtype={'names': names, 'formats': formats} )
python
def to_records(self, index=True, convert_datetime64=None, column_dtypes=None, index_dtypes=None): """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None .. deprecated:: 0.23.0 Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = "<S{}".format(df.index.str.len().max()) >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) """ if convert_datetime64 is not None: warnings.warn("The 'convert_datetime64' parameter is " "deprecated and will be removed in a future " "version", FutureWarning, stacklevel=2) if index: if is_datetime64_any_dtype(self.index) and convert_datetime64: ix_vals = [self.index.to_pydatetime()] else: if isinstance(self.index, MultiIndex): # array of tuples to numpy cols. copy copy copy ix_vals = lmap(np.array, zip(*self.index.values)) else: ix_vals = [self.index.values] arrays = ix_vals + [self[c].get_values() for c in self.columns] count = 0 index_names = list(self.index.names) if isinstance(self.index, MultiIndex): for i, n in enumerate(index_names): if n is None: index_names[i] = 'level_%d' % count count += 1 elif index_names[0] is None: index_names = ['index'] names = lmap(str, index_names) + lmap(str, self.columns) else: arrays = [self[c].get_values() for c in self.columns] names = lmap(str, self.columns) index_names = [] index_len = len(index_names) formats = [] for i, v in enumerate(arrays): index = i # When the names and arrays are collected, we # first collect those in the DataFrame's index, # followed by those in its columns. # # Thus, the total length of the array is: # len(index_names) + len(DataFrame.columns). # # This check allows us to see whether we are # handling a name / array in the index or column. if index < index_len: dtype_mapping = index_dtypes name = index_names[index] else: index -= index_len dtype_mapping = column_dtypes name = self.columns[index] # We have a dictionary, so we get the data type # associated with the index or column (which can # be denoted by its name in the DataFrame or its # position in DataFrame's array of indices or # columns, whichever is applicable. if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index in dtype_mapping: dtype_mapping = dtype_mapping[index] else: dtype_mapping = None # If no mapping can be found, use the array's # dtype attribute for formatting. # # A valid dtype must either be a type or # string naming a type. if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = "row" if i < index_len else "column" msg = ("Invalid dtype {dtype} specified for " "{element} {name}").format(dtype=dtype_mapping, element=element, name=name) raise ValueError(msg) return np.rec.fromarrays( arrays, dtype={'names': names, 'formats': formats} )
[ "def", "to_records", "(", "self", ",", "index", "=", "True", ",", "convert_datetime64", "=", "None", ",", "column_dtypes", "=", "None", ",", "index_dtypes", "=", "None", ")", ":", "if", "convert_datetime64", "is", "not", "None", ":", "warnings", ".", "warn", "(", "\"The 'convert_datetime64' parameter is \"", "\"deprecated and will be removed in a future \"", "\"version\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "if", "index", ":", "if", "is_datetime64_any_dtype", "(", "self", ".", "index", ")", "and", "convert_datetime64", ":", "ix_vals", "=", "[", "self", ".", "index", ".", "to_pydatetime", "(", ")", "]", "else", ":", "if", "isinstance", "(", "self", ".", "index", ",", "MultiIndex", ")", ":", "# array of tuples to numpy cols. copy copy copy", "ix_vals", "=", "lmap", "(", "np", ".", "array", ",", "zip", "(", "*", "self", ".", "index", ".", "values", ")", ")", "else", ":", "ix_vals", "=", "[", "self", ".", "index", ".", "values", "]", "arrays", "=", "ix_vals", "+", "[", "self", "[", "c", "]", ".", "get_values", "(", ")", "for", "c", "in", "self", ".", "columns", "]", "count", "=", "0", "index_names", "=", "list", "(", "self", ".", "index", ".", "names", ")", "if", "isinstance", "(", "self", ".", "index", ",", "MultiIndex", ")", ":", "for", "i", ",", "n", "in", "enumerate", "(", "index_names", ")", ":", "if", "n", "is", "None", ":", "index_names", "[", "i", "]", "=", "'level_%d'", "%", "count", "count", "+=", "1", "elif", "index_names", "[", "0", "]", "is", "None", ":", "index_names", "=", "[", "'index'", "]", "names", "=", "lmap", "(", "str", ",", "index_names", ")", "+", "lmap", "(", "str", ",", "self", ".", "columns", ")", "else", ":", "arrays", "=", "[", "self", "[", "c", "]", ".", "get_values", "(", ")", "for", "c", "in", "self", ".", "columns", "]", "names", "=", "lmap", "(", "str", ",", "self", ".", "columns", ")", "index_names", "=", "[", "]", "index_len", "=", "len", "(", "index_names", ")", "formats", "=", "[", "]", "for", "i", ",", "v", "in", "enumerate", "(", "arrays", ")", ":", "index", "=", "i", "# When the names and arrays are collected, we", "# first collect those in the DataFrame's index,", "# followed by those in its columns.", "#", "# Thus, the total length of the array is:", "# len(index_names) + len(DataFrame.columns).", "#", "# This check allows us to see whether we are", "# handling a name / array in the index or column.", "if", "index", "<", "index_len", ":", "dtype_mapping", "=", "index_dtypes", "name", "=", "index_names", "[", "index", "]", "else", ":", "index", "-=", "index_len", "dtype_mapping", "=", "column_dtypes", "name", "=", "self", ".", "columns", "[", "index", "]", "# We have a dictionary, so we get the data type", "# associated with the index or column (which can", "# be denoted by its name in the DataFrame or its", "# position in DataFrame's array of indices or", "# columns, whichever is applicable.", "if", "is_dict_like", "(", "dtype_mapping", ")", ":", "if", "name", "in", "dtype_mapping", ":", "dtype_mapping", "=", "dtype_mapping", "[", "name", "]", "elif", "index", "in", "dtype_mapping", ":", "dtype_mapping", "=", "dtype_mapping", "[", "index", "]", "else", ":", "dtype_mapping", "=", "None", "# If no mapping can be found, use the array's", "# dtype attribute for formatting.", "#", "# A valid dtype must either be a type or", "# string naming a type.", "if", "dtype_mapping", "is", "None", ":", "formats", ".", "append", "(", "v", ".", "dtype", ")", "elif", "isinstance", "(", "dtype_mapping", ",", "(", "type", ",", "np", ".", "dtype", 
",", "str", ")", ")", ":", "formats", ".", "append", "(", "dtype_mapping", ")", "else", ":", "element", "=", "\"row\"", "if", "i", "<", "index_len", "else", "\"column\"", "msg", "=", "(", "\"Invalid dtype {dtype} specified for \"", "\"{element} {name}\"", ")", ".", "format", "(", "dtype", "=", "dtype_mapping", ",", "element", "=", "element", ",", "name", "=", "name", ")", "raise", "ValueError", "(", "msg", ")", "return", "np", ".", "rec", ".", "fromarrays", "(", "arrays", ",", "dtype", "=", "{", "'names'", ":", "names", ",", "'formats'", ":", "formats", "}", ")" ]
Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. convert_datetime64 : bool, default None .. deprecated:: 0.23.0 Whether to convert the index to datetime.datetime if it is a DatetimeIndex. column_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all columns. If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None .. versionadded:: 0.24.0 If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) If the DataFrame index has no label then the recarray field name is set to 'index'. If the index has a label then this is used as the field name: >>> df.index = df.index.rename("I") >>> df.to_records() rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')]) As well as for the index: >>> df.to_records(index_dtypes="<S2") rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')]) >>> index_dtypes = "<S{}".format(df.index.str.len().max()) >>> df.to_records(index_dtypes=index_dtypes) rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
[ "Convert", "DataFrame", "to", "a", "NumPy", "record", "array", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1535-L1717
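A minimal usage sketch for the record above (illustrative data, not from the record): narrowing one column with ``column_dtypes`` and rebuilding the frame from the resulting record array.

import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, index=['a', 'b'])
rec = df.to_records(column_dtypes={'A': 'int32'})  # store 'A' as int32
assert rec.dtype.names == ('index', 'A', 'B')      # unlabeled index -> 'index'
roundtrip = pd.DataFrame.from_records(rec, index='index')  # rebuild the frame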
19,665
pandas-dev/pandas
pandas/core/frame.py
DataFrame.from_items
def from_items(cls, items, columns=None, orient='columns'): """ Construct a DataFrame from a list of tuples. .. deprecated:: 0.23.0 `from_items` is deprecated and will be removed in a future version. Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>` instead. :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>` may be used to preserve the key order. Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- DataFrame """ warnings.warn("from_items is deprecated. Please use " "DataFrame.from_dict(dict(items), ...) instead. " "DataFrame.from_dict(OrderedDict(items)) may be used to " "preserve the key order.", FutureWarning, stacklevel=2) keys, values = lzip(*items) if orient == 'columns': if columns is not None: columns = ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = ensure_index(keys) arrays = values # GH 17312 # Provide more informative error msg when scalar values passed try: return cls._from_arrays(arrays, columns, None) except ValueError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed try: arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) except TypeError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') else: # pragma: no cover raise ValueError("'orient' must be either 'columns' or 'index'")
python
def from_items(cls, items, columns=None, orient='columns'): """ Construct a DataFrame from a list of tuples. .. deprecated:: 0.23.0 `from_items` is deprecated and will be removed in a future version. Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>` instead. :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>` may be used to preserve the key order. Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- DataFrame """ warnings.warn("from_items is deprecated. Please use " "DataFrame.from_dict(dict(items), ...) instead. " "DataFrame.from_dict(OrderedDict(items)) may be used to " "preserve the key order.", FutureWarning, stacklevel=2) keys, values = lzip(*items) if orient == 'columns': if columns is not None: columns = ensure_index(columns) idict = dict(items) if len(idict) < len(items): if not columns.equals(ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: columns = ensure_index(keys) arrays = values # GH 17312 # Provide more informative error msg when scalar values passed try: return cls._from_arrays(arrays, columns, None) except ValueError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') elif orient == 'index': if columns is None: raise TypeError("Must pass columns with orient='index'") keys = ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed try: arr = np.array(values, dtype=object).T data = [lib.maybe_convert_objects(v) for v in arr] return cls._from_arrays(data, columns, keys) except TypeError: if not is_nested_list_like(values): raise ValueError('The value in each (key, value) pair ' 'must be an array, Series, or dict') else: # pragma: no cover raise ValueError("'orient' must be either 'columns' or 'index'")
[ "def", "from_items", "(", "cls", ",", "items", ",", "columns", "=", "None", ",", "orient", "=", "'columns'", ")", ":", "warnings", ".", "warn", "(", "\"from_items is deprecated. Please use \"", "\"DataFrame.from_dict(dict(items), ...) instead. \"", "\"DataFrame.from_dict(OrderedDict(items)) may be used to \"", "\"preserve the key order.\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "keys", ",", "values", "=", "lzip", "(", "*", "items", ")", "if", "orient", "==", "'columns'", ":", "if", "columns", "is", "not", "None", ":", "columns", "=", "ensure_index", "(", "columns", ")", "idict", "=", "dict", "(", "items", ")", "if", "len", "(", "idict", ")", "<", "len", "(", "items", ")", ":", "if", "not", "columns", ".", "equals", "(", "ensure_index", "(", "keys", ")", ")", ":", "raise", "ValueError", "(", "'With non-unique item names, passed '", "'columns must be identical'", ")", "arrays", "=", "values", "else", ":", "arrays", "=", "[", "idict", "[", "k", "]", "for", "k", "in", "columns", "if", "k", "in", "idict", "]", "else", ":", "columns", "=", "ensure_index", "(", "keys", ")", "arrays", "=", "values", "# GH 17312", "# Provide more informative error msg when scalar values passed", "try", ":", "return", "cls", ".", "_from_arrays", "(", "arrays", ",", "columns", ",", "None", ")", "except", "ValueError", ":", "if", "not", "is_nested_list_like", "(", "values", ")", ":", "raise", "ValueError", "(", "'The value in each (key, value) pair '", "'must be an array, Series, or dict'", ")", "elif", "orient", "==", "'index'", ":", "if", "columns", "is", "None", ":", "raise", "TypeError", "(", "\"Must pass columns with orient='index'\"", ")", "keys", "=", "ensure_index", "(", "keys", ")", "# GH 17312", "# Provide more informative error msg when scalar values passed", "try", ":", "arr", "=", "np", ".", "array", "(", "values", ",", "dtype", "=", "object", ")", ".", "T", "data", "=", "[", "lib", ".", "maybe_convert_objects", "(", "v", ")", "for", "v", "in", "arr", "]", "return", "cls", ".", "_from_arrays", "(", "data", ",", "columns", ",", "keys", ")", "except", "TypeError", ":", "if", "not", "is_nested_list_like", "(", "values", ")", ":", "raise", "ValueError", "(", "'The value in each (key, value) pair '", "'must be an array, Series, or dict'", ")", "else", ":", "# pragma: no cover", "raise", "ValueError", "(", "\"'orient' must be either 'columns' or 'index'\"", ")" ]
Construct a DataFrame from a list of tuples. .. deprecated:: 0.23.0 `from_items` is deprecated and will be removed in a future version. Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>` instead. :meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>` may be used to preserve the key order. Convert (key, value) pairs to DataFrame. The keys will be the axis index (usually the columns, but depends on the specified orientation). The values should be arrays or Series. Parameters ---------- items : sequence of (key, value) pairs Values should be arrays or Series. columns : sequence of column labels, optional Must be passed if orient='index'. orient : {'columns', 'index'}, default 'columns' The "orientation" of the data. If the keys of the input correspond to column labels, pass 'columns' (default). Otherwise if the keys correspond to the index, pass 'index'. Returns ------- DataFrame
[ "Construct", "a", "DataFrame", "from", "a", "list", "of", "tuples", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1720-L1805
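A sketch of the migration that the deprecation warning above recommends; the items list is illustrative.

from collections import OrderedDict
import pandas as pd

items = [('A', [1, 2, 3]), ('B', [4, 5, 6])]
# deprecated: pd.DataFrame.from_items(items)
df = pd.DataFrame.from_dict(OrderedDict(items))  # preserves the key order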
19,666
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_sparse
def to_sparse(self, fill_value=None, kind='block'): """ Convert to SparseDataFrame. Implement the sparse version of the DataFrame meaning that any data matching a specific value it's omitted in the representation. The sparse DataFrame allows for a more efficient storage. Parameters ---------- fill_value : float, default None The specific value that should be omitted in the representation. kind : {'block', 'integer'}, default 'block' The kind of the SparseIndex tracking where data is not equal to the fill value: - 'block' tracks only the locations and sizes of blocks of data. - 'integer' keeps an array with all the locations of the data. In most cases 'block' is recommended, since it's more memory efficient. Returns ------- SparseDataFrame The sparse representation of the DataFrame. See Also -------- DataFrame.to_dense : Converts the DataFrame back to the its dense form. Examples -------- >>> df = pd.DataFrame([(np.nan, np.nan), ... (1., np.nan), ... (np.nan, 1.)]) >>> df 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(df) <class 'pandas.core.frame.DataFrame'> >>> sdf = df.to_sparse() >>> sdf 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(sdf) <class 'pandas.core.sparse.frame.SparseDataFrame'> """ from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame(self._series, index=self.index, columns=self.columns, default_kind=kind, default_fill_value=fill_value)
python
def to_sparse(self, fill_value=None, kind='block'): """ Convert to SparseDataFrame. Implement the sparse version of the DataFrame meaning that any data matching a specific value it's omitted in the representation. The sparse DataFrame allows for a more efficient storage. Parameters ---------- fill_value : float, default None The specific value that should be omitted in the representation. kind : {'block', 'integer'}, default 'block' The kind of the SparseIndex tracking where data is not equal to the fill value: - 'block' tracks only the locations and sizes of blocks of data. - 'integer' keeps an array with all the locations of the data. In most cases 'block' is recommended, since it's more memory efficient. Returns ------- SparseDataFrame The sparse representation of the DataFrame. See Also -------- DataFrame.to_dense : Converts the DataFrame back to the its dense form. Examples -------- >>> df = pd.DataFrame([(np.nan, np.nan), ... (1., np.nan), ... (np.nan, 1.)]) >>> df 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(df) <class 'pandas.core.frame.DataFrame'> >>> sdf = df.to_sparse() >>> sdf 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(sdf) <class 'pandas.core.sparse.frame.SparseDataFrame'> """ from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame(self._series, index=self.index, columns=self.columns, default_kind=kind, default_fill_value=fill_value)
[ "def", "to_sparse", "(", "self", ",", "fill_value", "=", "None", ",", "kind", "=", "'block'", ")", ":", "from", "pandas", ".", "core", ".", "sparse", ".", "api", "import", "SparseDataFrame", "return", "SparseDataFrame", "(", "self", ".", "_series", ",", "index", "=", "self", ".", "index", ",", "columns", "=", "self", ".", "columns", ",", "default_kind", "=", "kind", ",", "default_fill_value", "=", "fill_value", ")" ]
Convert to SparseDataFrame. Implement the sparse version of the DataFrame, meaning that any data matching a specific value is omitted from the representation. The sparse DataFrame allows for more efficient storage. Parameters ---------- fill_value : float, default None The specific value that should be omitted in the representation. kind : {'block', 'integer'}, default 'block' The kind of the SparseIndex tracking where data is not equal to the fill value: - 'block' tracks only the locations and sizes of blocks of data. - 'integer' keeps an array with all the locations of the data. In most cases 'block' is recommended, since it's more memory efficient. Returns ------- SparseDataFrame The sparse representation of the DataFrame. See Also -------- DataFrame.to_dense : Converts the DataFrame back to its dense form. Examples -------- >>> df = pd.DataFrame([(np.nan, np.nan), ... (1., np.nan), ... (np.nan, 1.)]) >>> df 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(df) <class 'pandas.core.frame.DataFrame'> >>> sdf = df.to_sparse() >>> sdf 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(sdf) <class 'pandas.core.sparse.frame.SparseDataFrame'>
[ "Convert", "to", "SparseDataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1879-L1936
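A sketch of the non-default case, assuming the sparse API of this pandas vintage: treating 0 rather than NaN as the omitted value.

import pandas as pd

df = pd.DataFrame({'a': [0, 0, 1, 0], 'b': [2, 0, 0, 0]})
sdf = df.to_sparse(fill_value=0)   # zeros are left out of storage
print(sdf.density)                 # fraction of values actually stored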
19,667
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_stata
def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None, version=114, convert_strl=None): """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- fname : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 version : {114, 117}, default 114 Version to use in the output dta file. Version 114 can be used read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 114 limits string variables to 244 characters or fewer while 117 allows strings with lengths up to 2,000,000 characters. .. versionadded:: 0.23.0 convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. .. versionadded:: 0.23.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters .. versionadded:: 0.19.0 See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ kwargs = {} if version not in (114, 117): raise ValueError('Only formats 114 and 117 supported.') if version == 114: if convert_strl is not None: raise ValueError('strl support is only available when using ' 'format 117') from pandas.io.stata import StataWriter as statawriter else: from pandas.io.stata import StataWriter117 as statawriter kwargs['convert_strl'] = convert_strl writer = statawriter(fname, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, **kwargs) writer.write_file()
python
def to_stata(self, fname, convert_dates=None, write_index=True, encoding="latin-1", byteorder=None, time_stamp=None, data_label=None, variable_labels=None, version=114, convert_strl=None): """ Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- fname : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported. byteorder : str Can be ">", "<", "little", or "big". default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 version : {114, 117}, default 114 Version to use in the output dta file. Version 114 can be used read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 114 limits string variables to 244 characters or fewer while 117 allows strings with lengths up to 2,000,000 characters. .. versionadded:: 0.23.0 convert_strl : list, optional List of column names to convert to string columns to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. .. versionadded:: 0.23.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] or datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters .. versionadded:: 0.19.0 See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df.to_stata('animals.dta') # doctest: +SKIP """ kwargs = {} if version not in (114, 117): raise ValueError('Only formats 114 and 117 supported.') if version == 114: if convert_strl is not None: raise ValueError('strl support is only available when using ' 'format 117') from pandas.io.stata import StataWriter as statawriter else: from pandas.io.stata import StataWriter117 as statawriter kwargs['convert_strl'] = convert_strl writer = statawriter(fname, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, **kwargs) writer.write_file()
[ "def", "to_stata", "(", "self", ",", "fname", ",", "convert_dates", "=", "None", ",", "write_index", "=", "True", ",", "encoding", "=", "\"latin-1\"", ",", "byteorder", "=", "None", ",", "time_stamp", "=", "None", ",", "data_label", "=", "None", ",", "variable_labels", "=", "None", ",", "version", "=", "114", ",", "convert_strl", "=", "None", ")", ":", "kwargs", "=", "{", "}", "if", "version", "not", "in", "(", "114", ",", "117", ")", ":", "raise", "ValueError", "(", "'Only formats 114 and 117 supported.'", ")", "if", "version", "==", "114", ":", "if", "convert_strl", "is", "not", "None", ":", "raise", "ValueError", "(", "'strl support is only available when using '", "'format 117'", ")", "from", "pandas", ".", "io", ".", "stata", "import", "StataWriter", "as", "statawriter", "else", ":", "from", "pandas", ".", "io", ".", "stata", "import", "StataWriter117", "as", "statawriter", "kwargs", "[", "'convert_strl'", "]", "=", "convert_strl", "writer", "=", "statawriter", "(", "fname", ",", "self", ",", "convert_dates", "=", "convert_dates", ",", "byteorder", "=", "byteorder", ",", "time_stamp", "=", "time_stamp", ",", "data_label", "=", "data_label", ",", "write_index", "=", "write_index", ",", "variable_labels", "=", "variable_labels", ",", "*", "*", "kwargs", ")", "writer", ".", "write_file", "(", ")" ]
Export DataFrame object to Stata dta format. Writes the DataFrame to a Stata dataset file. "dta" files contain a Stata dataset. Parameters ---------- fname : str, buffer or path object String, path object (pathlib.Path or py._path.local.LocalPath) or object implementing a binary write() function. If using a buffer then the buffer will not be automatically closed after the file data has been written. convert_dates : dict Dictionary mapping columns containing datetime types to stata internal format to use when writing the dates. Options are 'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name. Datetime columns that do not have a conversion type specified will be converted to 'tc'. Raises NotImplementedError if a datetime column has timezone information. write_index : bool Write the index to Stata dataset. encoding : str Default is latin-1. Unicode is not supported. byteorder : str Can be ">", "<", "little", or "big". Default is `sys.byteorder`. time_stamp : datetime A datetime to use as file creation date. Default is the current time. data_label : str, optional A label for the data set. Must be 80 characters or smaller. variable_labels : dict Dictionary containing columns as keys and variable labels as values. Each label must be 80 characters or smaller. .. versionadded:: 0.19.0 version : {114, 117}, default 114 Version to use in the output dta file. Version 114 can be read by Stata 10 and later. Version 117 can be read by Stata 13 or later. Version 114 limits string variables to 244 characters or fewer while 117 allows strings with lengths up to 2,000,000 characters. .. versionadded:: 0.23.0 convert_strl : list, optional List of column names to convert to Stata StrL format. Only available if version is 117. Storing strings in the StrL format can produce smaller dta files if strings have more than 8 characters and values are repeated. .. versionadded:: 0.23.0 Raises ------ NotImplementedError * If datetimes contain timezone information * Column dtype is not representable in Stata ValueError * Columns listed in convert_dates are neither datetime64[ns] nor datetime.datetime * Column listed in convert_dates is not in DataFrame * Categorical label contains more than 32,000 characters .. versionadded:: 0.19.0 See Also -------- read_stata : Import Stata data files. io.stata.StataWriter : Low-level writer for Stata data files. io.stata.StataWriter117 : Low-level writer for version 117 files. Examples -------- >>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon', ... 'parrot'], ... 'speed': [350, 18, 361, 15]}) >>> df.to_stata('animals.dta') # doctest: +SKIP
[ "Export", "DataFrame", "object", "to", "Stata", "dta", "format", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1955-L2055
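A sketch combining the version-117-only options documented above; the file name and data are illustrative.

import pandas as pd

df = pd.DataFrame({'date': pd.date_range('2000-01-01', periods=3),
                   'note': ['a' * 300] * 3})   # longer than the 244-char limit of version 114
df.to_stata('example.dta', convert_dates={'date': 'td'},
            version=117, convert_strl=['note'])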
19,668
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_feather
def to_feather(self, fname): """ Write out the binary feather-format for DataFrames. .. versionadded:: 0.20.0 Parameters ---------- fname : str string file path """ from pandas.io.feather_format import to_feather to_feather(self, fname)
python
def to_feather(self, fname): """ Write out the binary feather-format for DataFrames. .. versionadded:: 0.20.0 Parameters ---------- fname : str string file path """ from pandas.io.feather_format import to_feather to_feather(self, fname)
[ "def", "to_feather", "(", "self", ",", "fname", ")", ":", "from", "pandas", ".", "io", ".", "feather_format", "import", "to_feather", "to_feather", "(", "self", ",", "fname", ")" ]
Write out the binary feather-format for DataFrames. .. versionadded:: 0.20.0 Parameters ---------- fname : str String file path.
[ "Write", "out", "the", "binary", "feather", "-", "format", "for", "DataFrames", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2057-L2069
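A write/read round-trip sketch; this requires the optional pyarrow dependency, and the path is illustrative.

import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3]})
df.to_feather('frame.feather')
same = pd.read_feather('frame.feather')   # reads the frame back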
19,669
pandas-dev/pandas
pandas/core/frame.py
DataFrame.to_parquet
def to_parquet(self, fname, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- fname : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the behavior depends on the chosen engine. .. versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 """ from pandas.io.parquet import to_parquet to_parquet(self, fname, engine, compression=compression, index=index, partition_cols=partition_cols, **kwargs)
python
def to_parquet(self, fname, engine='auto', compression='snappy', index=None, partition_cols=None, **kwargs): """ Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- fname : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the behavior depends on the chosen engine. .. versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset Columns are partitioned in the order they are given .. versionadded:: 0.24.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4 """ from pandas.io.parquet import to_parquet to_parquet(self, fname, engine, compression=compression, index=index, partition_cols=partition_cols, **kwargs)
[ "def", "to_parquet", "(", "self", ",", "fname", ",", "engine", "=", "'auto'", ",", "compression", "=", "'snappy'", ",", "index", "=", "None", ",", "partition_cols", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "io", ".", "parquet", "import", "to_parquet", "to_parquet", "(", "self", ",", "fname", ",", "engine", ",", "compression", "=", "compression", ",", "index", "=", "index", ",", "partition_cols", "=", "partition_cols", ",", "*", "*", "kwargs", ")" ]
Write a DataFrame to the binary parquet format. .. versionadded:: 0.21.0 This function writes the dataframe as a `parquet file <https://parquet.apache.org/>`_. You can choose different parquet backends, and have the option of compression. See :ref:`the user guide <io.parquet>` for more details. Parameters ---------- fname : str File path or Root Directory path. Will be used as Root Directory path while writing a partitioned dataset. .. versionchanged:: 0.24.0 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet library to use. If 'auto', then the option ``io.parquet.engine`` is used. The default ``io.parquet.engine`` behavior is to try 'pyarrow', falling back to 'fastparquet' if 'pyarrow' is unavailable. compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' Name of the compression to use. Use ``None`` for no compression. index : bool, default None If ``True``, include the dataframe's index(es) in the file output. If ``False``, they will not be written to the file. If ``None``, the behavior depends on the chosen engine. .. versionadded:: 0.24.0 partition_cols : list, optional, default None Column names by which to partition the dataset. Columns are partitioned in the order they are given. .. versionadded:: 0.24.0 **kwargs Additional arguments passed to the parquet library. See :ref:`pandas io <io.parquet>` for more details. See Also -------- read_parquet : Read a parquet file. DataFrame.to_csv : Write a csv file. DataFrame.to_sql : Write to a sql table. DataFrame.to_hdf : Write to hdf. Notes ----- This function requires either the `fastparquet <https://pypi.org/project/fastparquet>`_ or `pyarrow <https://arrow.apache.org/docs/python/>`_ library. Examples -------- >>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]}) >>> df.to_parquet('df.parquet.gzip', ... compression='gzip') # doctest: +SKIP >>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP col1 col2 0 1 3 1 2 4
[ "Write", "a", "DataFrame", "to", "the", "binary", "parquet", "format", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2071-L2141
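A sketch of the ``partition_cols`` behaviour, which the record's examples do not cover; the directory name and columns are illustrative, and a parquet engine (e.g. pyarrow) must be installed.

import pandas as pd

df = pd.DataFrame({'year': [2018, 2018, 2019], 'value': [1, 2, 3]})
df.to_parquet('dataset_root', partition_cols=['year'])  # one directory per year
back = pd.read_parquet('dataset_root')                  # reads all partitions back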
19,670
pandas-dev/pandas
pandas/core/frame.py
DataFrame.memory_usage
def memory_usage(self, index=True, deep=False): """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 80 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 80 int64 40000 float64 40000 complex128 80000 object 160000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) 5168 """ result = Series([c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()], index=self.columns) if index: result = Series(self.index.memory_usage(deep=deep), index=['Index']).append(result) return result
python
def memory_usage(self, index=True, deep=False): """ Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values is the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 80 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 80 int64 40000 float64 40000 complex128 80000 object 160000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) 5168 """ result = Series([c.memory_usage(index=False, deep=deep) for col, c in self.iteritems()], index=self.columns) if index: result = Series(self.index.memory_usage(deep=deep), index=['Index']).append(result) return result
[ "def", "memory_usage", "(", "self", ",", "index", "=", "True", ",", "deep", "=", "False", ")", ":", "result", "=", "Series", "(", "[", "c", ".", "memory_usage", "(", "index", "=", "False", ",", "deep", "=", "deep", ")", "for", "col", ",", "c", "in", "self", ".", "iteritems", "(", ")", "]", ",", "index", "=", "self", ".", "columns", ")", "if", "index", ":", "result", "=", "Series", "(", "self", ".", "index", ".", "memory_usage", "(", "deep", "=", "deep", ")", ",", "index", "=", "[", "'Index'", "]", ")", ".", "append", "(", "result", ")", "return", "result" ]
Return the memory usage of each column in bytes. The memory usage can optionally include the contribution of the index and elements of `object` dtype. This value is displayed in `DataFrame.info` by default. This can be suppressed by setting ``pandas.options.display.memory_usage`` to False. Parameters ---------- index : bool, default True Specifies whether to include the memory usage of the DataFrame's index in returned Series. If ``index=True``, the memory usage of the index is the first item in the output. deep : bool, default False If True, introspect the data deeply by interrogating `object` dtypes for system-level memory consumption, and include it in the returned values. Returns ------- Series A Series whose index is the original column names and whose values are the memory usage of each column in bytes. See Also -------- numpy.ndarray.nbytes : Total bytes consumed by the elements of an ndarray. Series.memory_usage : Bytes consumed by a Series. Categorical : Memory-efficient array for string values with many repeated values. DataFrame.info : Concise summary of a DataFrame. Examples -------- >>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool'] >>> data = dict([(t, np.ones(shape=5000).astype(t)) ... for t in dtypes]) >>> df = pd.DataFrame(data) >>> df.head() int64 float64 complex128 object bool 0 1 1.0 1.0+0.0j 1 True 1 1 1.0 1.0+0.0j 1 True 2 1 1.0 1.0+0.0j 1 True 3 1 1.0 1.0+0.0j 1 True 4 1 1.0 1.0+0.0j 1 True >>> df.memory_usage() Index 80 int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 >>> df.memory_usage(index=False) int64 40000 float64 40000 complex128 80000 object 40000 bool 5000 dtype: int64 The memory footprint of `object` dtype columns is ignored by default: >>> df.memory_usage(deep=True) Index 80 int64 40000 float64 40000 complex128 80000 object 160000 bool 5000 dtype: int64 Use a Categorical for efficient storage of an object-dtype column with many repeated values. >>> df['object'].astype('category').memory_usage(deep=True) 5168
[ "Return", "the", "memory", "usage", "of", "each", "column", "in", "bytes", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2453-L2542
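A sketch of a common follow-up to the method above: summing the per-column Series into a single byte count, with ``deep=True`` so object contents are measured.

import pandas as pd

df = pd.DataFrame({'n': range(1000), 's': ['x'] * 1000})
total_bytes = df.memory_usage(index=True, deep=True).sum()  # one number for the whole frame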
19,671
pandas-dev/pandas
pandas/core/frame.py
DataFrame.transpose
def transpose(self, *args, **kwargs): """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- copy : bool, default False If True, the underlying data is copied. Otherwise (default), no copy is made if possible. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, dict()) return super().transpose(1, 0, **kwargs)
python
def transpose(self, *args, **kwargs): """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- copy : bool, default False If True, the underlying data is copied. Otherwise (default), no copy is made if possible. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object """ nv.validate_transpose(args, dict()) return super().transpose(1, 0, **kwargs)
[ "def", "transpose", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_transpose", "(", "args", ",", "dict", "(", ")", ")", "return", "super", "(", ")", ".", "transpose", "(", "1", ",", "0", ",", "*", "*", "kwargs", ")" ]
Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. Parameters ---------- copy : bool, default False If True, the underlying data is copied. Otherwise (default), no copy is made if possible. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with numpy. Returns ------- DataFrame The transposed DataFrame. See Also -------- numpy.transpose : Permute the dimensions of a given array. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the `object` dtype. In such a case, a copy of the data is always made. Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = pd.DataFrame(data=d1) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T # or df1.transpose() >>> df1_transposed 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'name': ['Alice', 'Bob'], ... 'score': [9.5, 8], ... 'employed': [False, True], ... 'kids': [0, 0]} >>> df2 = pd.DataFrame(data=d2) >>> df2 name score employed kids 0 Alice 9.5 False 0 1 Bob 8.0 True 0 >>> df2_transposed = df2.T # or df2.transpose() >>> df2_transposed 0 1 name Alice Bob score 9.5 8 employed False True kids 0 0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the `object` dtype: >>> df2.dtypes name object score float64 employed bool kids int64 dtype: object >>> df2_transposed.dtypes 0 object 1 object dtype: object
[ "Transpose", "index", "and", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2544-L2640
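A sketch of the mixed-dtype caveat from the Notes section above: the transpose is homogenized to object dtype, so it is always a copy.

import pandas as pd

df = pd.DataFrame({'i': [1, 2], 's': ['a', 'b']})
t = df.T                              # same as df.transpose()
assert (t.dtypes == object).all()     # int64/object columns homogenized to object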
19,672
pandas-dev/pandas
pandas/core/frame.py
DataFrame.get_value
def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(index, col, takeable=takeable)
python
def get_value(self, index, col, takeable=False): """ Quickly retrieve single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label takeable : interpret the index/col as indexers, default False Returns ------- scalar """ warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(index, col, takeable=takeable)
[ "def", "get_value", "(", "self", ",", "index", ",", "col", ",", "takeable", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"get_value is deprecated and will be removed \"", "\"in a future release. Please use \"", "\".at[] or .iat[] accessors instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_get_value", "(", "index", ",", "col", ",", "takeable", "=", "takeable", ")" ]
Quickly retrieve single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label takeable : bool, default False If True, interpret the index/col as positional indexers rather than labels. Returns ------- scalar
[ "Quickly", "retrieve", "single", "value", "at", "passed", "column", "and", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2679-L2701
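A sketch of the replacement named in the deprecation warning; the data is illustrative.

import pandas as pd

df = pd.DataFrame({'A': [1, 2]}, index=['x', 'y'])
# deprecated: df.get_value('x', 'A')
v = df.at['x', 'A']    # label-based scalar access
v2 = df.iat[0, 0]      # positional access (the takeable=True analogue)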
19,673
pandas-dev/pandas
pandas/core/frame.py
DataFrame.set_value
def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable)
python
def set_value(self, index, col, value, takeable=False): """ Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : interpret the index/col as indexers, default False Returns ------- DataFrame If label pair is contained, will be reference to calling DataFrame, otherwise a new object. """ warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(index, col, value, takeable=takeable)
[ "def", "set_value", "(", "self", ",", "index", ",", "col", ",", "value", ",", "takeable", "=", "False", ")", ":", "warnings", ".", "warn", "(", "\"set_value is deprecated and will be removed \"", "\"in a future release. Please use \"", "\".at[] or .iat[] accessors instead\"", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "return", "self", ".", "_set_value", "(", "index", ",", "col", ",", "value", ",", "takeable", "=", "takeable", ")" ]
Put single value at passed column and index. .. deprecated:: 0.21.0 Use .at[] or .iat[] accessors instead. Parameters ---------- index : row label col : column label value : scalar takeable : bool, default False If True, interpret the index/col as positional indexers rather than labels. Returns ------- DataFrame If the label pair is contained, the result will be a reference to the calling DataFrame; otherwise a new object.
[ "Put", "single", "value", "at", "passed", "column", "and", "index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2723-L2747
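The assignment counterpart of the previous sketch, again using the accessors the warning points to.

import pandas as pd

df = pd.DataFrame({'A': [1, 2]}, index=['x', 'y'])
# deprecated: df.set_value('y', 'A', 9)
df.at['y', 'A'] = 9    # label-based scalar assignment
df.iat[0, 0] = 7       # positional assignment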
19,674
pandas-dev/pandas
pandas/core/frame.py
DataFrame.query
def query(self, expr, inplace=False, **kwargs): """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. .. versionadded:: 0.25.0 You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. .. versionadded:: 0.18.0 Returns ------- DataFrame DataFrame resulting from the provided query expression. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, str): msg = "expr must be a string to be evaluated, {0} given" raise ValueError(msg.format(type(expr))) kwargs['level'] = kwargs.pop('level', 0) + 1 kwargs['target'] = None res = self.eval(expr, **kwargs) try: new_data = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query new_data = self[res] if inplace: self._update_inplace(new_data) else: return new_data
python
def query(self, expr, inplace=False, **kwargs): """ Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. .. versionadded:: 0.25.0 You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. .. versionadded:: 0.18.0 Returns ------- DataFrame DataFrame resulting from the provided query expression. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, str): msg = "expr must be a string to be evaluated, {0} given" raise ValueError(msg.format(type(expr))) kwargs['level'] = kwargs.pop('level', 0) + 1 kwargs['target'] = None res = self.eval(expr, **kwargs) try: new_data = self.loc[res] except ValueError: # when res is multi-dimensional loc raises, but this is sometimes a # valid query new_data = self[res] if inplace: self._update_inplace(new_data) else: return new_data
[ "def", "query", "(", "self", ",", "expr", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "not", "isinstance", "(", "expr", ",", "str", ")", ":", "msg", "=", "\"expr must be a string to be evaluated, {0} given\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "type", "(", "expr", ")", ")", ")", "kwargs", "[", "'level'", "]", "=", "kwargs", ".", "pop", "(", "'level'", ",", "0", ")", "+", "1", "kwargs", "[", "'target'", "]", "=", "None", "res", "=", "self", ".", "eval", "(", "expr", ",", "*", "*", "kwargs", ")", "try", ":", "new_data", "=", "self", ".", "loc", "[", "res", "]", "except", "ValueError", ":", "# when res is multi-dimensional loc raises, but this is sometimes a", "# valid query", "new_data", "=", "self", "[", "res", "]", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "new_data", ")", "else", ":", "return", "new_data" ]
Query the columns of a DataFrame with a boolean expression. Parameters ---------- expr : str The query string to evaluate. You can refer to variables in the environment by prefixing them with an '@' character like ``@a + b``. .. versionadded:: 0.25.0 You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. **kwargs See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`DataFrame.query`. .. versionadded:: 0.18.0 Returns ------- DataFrame DataFrame resulting from the provided query expression. See Also -------- eval : Evaluate a string describing operations on DataFrame columns. DataFrame.eval : Evaluate a string describing operations on DataFrame columns. Notes ----- The result of the evaluation of this expression is first passed to :attr:`DataFrame.loc` and if that fails because of a multidimensional key (e.g., a DataFrame) then the result will be passed to :meth:`DataFrame.__getitem__`. This method uses the top-level :func:`eval` function to evaluate the passed query. The :meth:`~pandas.DataFrame.query` method uses a slightly modified Python syntax by default. For example, the ``&`` and ``|`` (bitwise) operators have the precedence of their boolean cousins, :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, however the semantics are different. You can change the semantics of the expression by passing the keyword argument ``parser='python'``. This enforces the same semantics as evaluation in Python space. Likewise, you can pass ``engine='python'`` to evaluate an expression using Python itself as a backend. This is not recommended as it is inefficient compared to using ``numexpr`` as the engine. The :attr:`DataFrame.index` and :attr:`DataFrame.columns` attributes of the :class:`~pandas.DataFrame` instance are placed in the query namespace by default, which allows you to treat both the index and columns of the frame as a column in the frame. The identifier ``index`` is used for the frame index; you can also use the name of the index to identify it in a query. Please note that Python keywords may not be used as identifiers. For further details and examples see the ``query`` documentation in :ref:`indexing <indexing.query>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10
[ "Query", "the", "columns", "of", "a", "DataFrame", "with", "a", "boolean", "expression", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2955-L3082
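A minimal usage sketch for the ``DataFrame.query`` record above, illustrating the ``@`` environment-variable syntax its docstring describes; the frame contents and the ``threshold`` name are illustrative, not taken from the source.

```python
import pandas as pd

df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})

threshold = 4  # local variable, referenced inside the expression with '@'
result = df.query('A > @threshold')
print(result)  # only the row where A == 5 survives the filter
```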
19,675
pandas-dev/pandas
pandas/core/frame.py
DataFrame.eval
def eval(self, expr, inplace=False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = \ self._get_space_character_free_column_resolvers() resolvers = column_resolvers, index_resolvers if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs)
python
def eval(self, expr, inplace=False, **kwargs): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') resolvers = kwargs.pop('resolvers', None) kwargs['level'] = kwargs.pop('level', 0) + 1 if resolvers is None: index_resolvers = self._get_index_resolvers() column_resolvers = \ self._get_space_character_free_column_resolvers() resolvers = column_resolvers, index_resolvers if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers) return _eval(expr, inplace=inplace, **kwargs)
[ "def", "eval", "(", "self", ",", "expr", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "from", "pandas", ".", "core", ".", "computation", ".", "eval", "import", "eval", "as", "_eval", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "resolvers", "=", "kwargs", ".", "pop", "(", "'resolvers'", ",", "None", ")", "kwargs", "[", "'level'", "]", "=", "kwargs", ".", "pop", "(", "'level'", ",", "0", ")", "+", "1", "if", "resolvers", "is", "None", ":", "index_resolvers", "=", "self", ".", "_get_index_resolvers", "(", ")", "column_resolvers", "=", "self", ".", "_get_space_character_free_column_resolvers", "(", ")", "resolvers", "=", "column_resolvers", ",", "index_resolvers", "if", "'target'", "not", "in", "kwargs", ":", "kwargs", "[", "'target'", "]", "=", "self", "kwargs", "[", "'resolvers'", "]", "=", "kwargs", ".", "get", "(", "'resolvers'", ",", "(", ")", ")", "+", "tuple", "(", "resolvers", ")", "return", "_eval", "(", "expr", ",", "inplace", "=", "inplace", ",", "*", "*", "kwargs", ")" ]
Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. .. versionadded:: 0.18.0. kwargs : dict See the documentation for :func:`eval` for complete details on the keyword arguments accepted by :meth:`~pandas.DataFrame.query`. Returns ------- ndarray, scalar, or pandas object The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Notes ----- For more details see the API documentation for :func:`~eval`. For detailed examples see :ref:`enhancing performance with eval <enhancingperf.eval>`. Examples -------- >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7
[ "Evaluate", "a", "string", "describing", "operations", "on", "DataFrame", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3084-L3187
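A hedged sketch of multi-line assignment with ``DataFrame.eval``; it assumes a pandas version (0.23 or later) where multi-line assignment expressions are accepted, consistent with the sha captured in this record.

```python
import pandas as pd

df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})

# Each line of the expression is a separate assignment target.
df.eval(
    """
    C = A + B
    D = A - B
    """,
    inplace=True,
)
print(df.columns.tolist())  # ['A', 'B', 'C', 'D']
```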
19,676
pandas-dev/pandas
pandas/core/frame.py
DataFrame.select_dtypes
def select_dtypes(self, include=None, exclude=None): """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' raise TypeError(msg.format(typ=type(obj).__name__)) slices = [slice(None)] * obj.ndim slices[obj._info_axis_number] = indexer return tuple(slices) if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map( lambda x: frozenset(map(infer_dtype_from_object, x)), selection) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on {inc_ex}'.format( inc_ex=(include & exclude))) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(idx, dtype): return idx, functools.partial(issubclass, dtype.type) for idx, f in itertools.starmap(is_dtype_instance_mapper, enumerate(self.dtypes)): if include: # checks for the case of empty include or exclude include_these.iloc[idx] = any(map(f, include)) if exclude: exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[_get_info_slice(self, dtype_indexer)]
python
def select_dtypes(self, include=None, exclude=None): """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0 """ def _get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' raise TypeError(msg.format(typ=type(obj).__name__)) slices = [slice(None)] * obj.ndim slices[obj._info_axis_number] = indexer return tuple(slices) if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = tuple(map(frozenset, (include, exclude))) if not any(selection): raise ValueError('at least one of include or exclude must be ' 'nonempty') # convert the myriad valid dtypes object to a single representation include, exclude = map( lambda x: frozenset(map(infer_dtype_from_object, x)), selection) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) # can't both include AND exclude! if not include.isdisjoint(exclude): raise ValueError('include and exclude overlap on {inc_ex}'.format( inc_ex=(include & exclude))) # empty include/exclude -> defaults to True # three cases (we've already raised if both are empty) # case 1: empty include, nonempty exclude # we have True, True, ... True for include, same for exclude # in the loop below we get the excluded # and when we call '&' below we get only the excluded # case 2: nonempty include, empty exclude # same as case 1, but with include # case 3: both nonempty # the "union" of the logic of case 1 and case 2: # we get the included and excluded, and return their logical and include_these = Series(not bool(include), index=self.columns) exclude_these = Series(not bool(exclude), index=self.columns) def is_dtype_instance_mapper(idx, dtype): return idx, functools.partial(issubclass, dtype.type) for idx, f in itertools.starmap(is_dtype_instance_mapper, enumerate(self.dtypes)): if include: # checks for the case of empty include or exclude include_these.iloc[idx] = any(map(f, include)) if exclude: exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these return self.loc[_get_info_slice(self, dtype_indexer)]
[ "def", "select_dtypes", "(", "self", ",", "include", "=", "None", ",", "exclude", "=", "None", ")", ":", "def", "_get_info_slice", "(", "obj", ",", "indexer", ")", ":", "\"\"\"Slice the info axis of `obj` with `indexer`.\"\"\"", "if", "not", "hasattr", "(", "obj", ",", "'_info_axis_number'", ")", ":", "msg", "=", "'object of type {typ!r} has no info axis'", "raise", "TypeError", "(", "msg", ".", "format", "(", "typ", "=", "type", "(", "obj", ")", ".", "__name__", ")", ")", "slices", "=", "[", "slice", "(", "None", ")", "]", "*", "obj", ".", "ndim", "slices", "[", "obj", ".", "_info_axis_number", "]", "=", "indexer", "return", "tuple", "(", "slices", ")", "if", "not", "is_list_like", "(", "include", ")", ":", "include", "=", "(", "include", ",", ")", "if", "include", "is", "not", "None", "else", "(", ")", "if", "not", "is_list_like", "(", "exclude", ")", ":", "exclude", "=", "(", "exclude", ",", ")", "if", "exclude", "is", "not", "None", "else", "(", ")", "selection", "=", "tuple", "(", "map", "(", "frozenset", ",", "(", "include", ",", "exclude", ")", ")", ")", "if", "not", "any", "(", "selection", ")", ":", "raise", "ValueError", "(", "'at least one of include or exclude must be '", "'nonempty'", ")", "# convert the myriad valid dtypes object to a single representation", "include", ",", "exclude", "=", "map", "(", "lambda", "x", ":", "frozenset", "(", "map", "(", "infer_dtype_from_object", ",", "x", ")", ")", ",", "selection", ")", "for", "dtypes", "in", "(", "include", ",", "exclude", ")", ":", "invalidate_string_dtypes", "(", "dtypes", ")", "# can't both include AND exclude!", "if", "not", "include", ".", "isdisjoint", "(", "exclude", ")", ":", "raise", "ValueError", "(", "'include and exclude overlap on {inc_ex}'", ".", "format", "(", "inc_ex", "=", "(", "include", "&", "exclude", ")", ")", ")", "# empty include/exclude -> defaults to True", "# three cases (we've already raised if both are empty)", "# case 1: empty include, nonempty exclude", "# we have True, True, ... True for include, same for exclude", "# in the loop below we get the excluded", "# and when we call '&' below we get only the excluded", "# case 2: nonempty include, empty exclude", "# same as case 1, but with include", "# case 3: both nonempty", "# the \"union\" of the logic of case 1 and case 2:", "# we get the included and excluded, and return their logical and", "include_these", "=", "Series", "(", "not", "bool", "(", "include", ")", ",", "index", "=", "self", ".", "columns", ")", "exclude_these", "=", "Series", "(", "not", "bool", "(", "exclude", ")", ",", "index", "=", "self", ".", "columns", ")", "def", "is_dtype_instance_mapper", "(", "idx", ",", "dtype", ")", ":", "return", "idx", ",", "functools", ".", "partial", "(", "issubclass", ",", "dtype", ".", "type", ")", "for", "idx", ",", "f", "in", "itertools", ".", "starmap", "(", "is_dtype_instance_mapper", ",", "enumerate", "(", "self", ".", "dtypes", ")", ")", ":", "if", "include", ":", "# checks for the case of empty include or exclude", "include_these", ".", "iloc", "[", "idx", "]", "=", "any", "(", "map", "(", "f", ",", "include", ")", ")", "if", "exclude", ":", "exclude_these", ".", "iloc", "[", "idx", "]", "=", "not", "any", "(", "map", "(", "f", ",", "exclude", ")", ")", "dtype_indexer", "=", "include_these", "&", "exclude_these", "return", "self", ".", "loc", "[", "_get_info_slice", "(", "self", ",", "dtype_indexer", ")", "]" ]
Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty * If ``include`` and ``exclude`` have overlapping elements * If any kind of string dtype is passed in. Notes ----- * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or ``'timedelta64'`` * To select Pandas categorical dtypes, use ``'category'`` * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c 0 True 1.0 1 False 2.0 2 True 1.0 3 False 2.0 4 True 1.0 5 False 2.0
[ "Return", "a", "subset", "of", "the", "DataFrame", "s", "columns", "based", "on", "the", "column", "dtypes", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3189-L3324
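A short sketch exercising the include/exclude paths of the ``select_dtypes`` record above; the column names and contents are illustrative.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [1.0, 2.0], 'c': ['x', 'y']})

numeric = df.select_dtypes(include=np.number)    # columns 'a' and 'b'
strings = df.select_dtypes(include=object)       # column 'c' (all object columns)
no_float = df.select_dtypes(exclude=['float'])   # columns 'a' and 'c'
print(numeric.columns.tolist(), strings.columns.tolist(), no_float.columns.tolist())
```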
19,677
pandas-dev/pandas
pandas/core/frame.py
DataFrame._box_col_values
def _box_col_values(self, values, items): """ Provide boxed values for a column. """ klass = self._constructor_sliced return klass(values, index=self.index, name=items, fastpath=True)
python
def _box_col_values(self, values, items): """ Provide boxed values for a column. """ klass = self._constructor_sliced return klass(values, index=self.index, name=items, fastpath=True)
[ "def", "_box_col_values", "(", "self", ",", "values", ",", "items", ")", ":", "klass", "=", "self", ".", "_constructor_sliced", "return", "klass", "(", "values", ",", "index", "=", "self", ".", "index", ",", "name", "=", "items", ",", "fastpath", "=", "True", ")" ]
Provide boxed values for a column.
[ "Provide", "boxed", "values", "for", "a", "column", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3333-L3338
19,678
pandas-dev/pandas
pandas/core/frame.py
DataFrame._ensure_valid_index
def _ensure_valid_index(self, value): """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value): try: value = Series(value) except (ValueError, NotImplementedError, TypeError): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan)
python
def _ensure_valid_index(self, value): """ Ensure that if we don't have an index, that we can create one from the passed value. """ # GH5632, make sure that we are a Series convertible if not len(self.index) and is_list_like(value): try: value = Series(value) except (ValueError, NotImplementedError, TypeError): raise ValueError('Cannot set a frame with no defined index ' 'and a value that cannot be converted to a ' 'Series') self._data = self._data.reindex_axis(value.index.copy(), axis=1, fill_value=np.nan)
[ "def", "_ensure_valid_index", "(", "self", ",", "value", ")", ":", "# GH5632, make sure that we are a Series convertible", "if", "not", "len", "(", "self", ".", "index", ")", "and", "is_list_like", "(", "value", ")", ":", "try", ":", "value", "=", "Series", "(", "value", ")", "except", "(", "ValueError", ",", "NotImplementedError", ",", "TypeError", ")", ":", "raise", "ValueError", "(", "'Cannot set a frame with no defined index '", "'and a value that cannot be converted to a '", "'Series'", ")", "self", ".", "_data", "=", "self", ".", "_data", ".", "reindex_axis", "(", "value", ".", "index", ".", "copy", "(", ")", ",", "axis", "=", "1", ",", "fill_value", "=", "np", ".", "nan", ")" ]
Ensure that if we don't have an index, that we can create one from the passed value.
[ "Ensure", "that", "if", "we", "don", "t", "have", "an", "index", "that", "we", "can", "create", "one", "from", "the", "passed", "value", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3400-L3415
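``_ensure_valid_index`` is a private helper; this sketch only demonstrates the user-visible behavior it enables, namely that assigning a Series to a frame with no index adopts the value's index.

```python
import pandas as pd

df = pd.DataFrame()               # empty frame, no index yet
df['a'] = pd.Series([1, 2, 3])    # assignment routes through _ensure_valid_index
print(df.index.tolist())          # [0, 1, 2] -- index adopted from the value
```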
19,679
pandas-dev/pandas
pandas/core/frame.py
DataFrame._set_item
def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy()
python
def _set_item(self, key, value): """ Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity. """ self._ensure_valid_index(value) value = self._sanitize_column(key, value) NDFrame._set_item(self, key, value) # check if we are modifying a copy # try to set first as we want an invalid # value exception to occur first if len(self): self._check_setitem_copy()
[ "def", "_set_item", "(", "self", ",", "key", ",", "value", ")", ":", "self", ".", "_ensure_valid_index", "(", "value", ")", "value", "=", "self", ".", "_sanitize_column", "(", "key", ",", "value", ")", "NDFrame", ".", "_set_item", "(", "self", ",", "key", ",", "value", ")", "# check if we are modifying a copy", "# try to set first as we want an invalid", "# value exception to occur first", "if", "len", "(", "self", ")", ":", "self", ".", "_check_setitem_copy", "(", ")" ]
Add series to DataFrame in specified column. If series is a numpy-array (not a Series/TimeSeries), it must be the same length as the DataFrames index or an error will be thrown. Series/TimeSeries will be conformed to the DataFrames index to ensure homogeneity.
[ "Add", "series", "to", "DataFrame", "in", "specified", "column", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3417-L3436
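``_set_item`` backs ``df[key] = value``; the sketch below shows the index conformance its docstring describes: an unaligned Series label is dropped and a missing one becomes NaN. The frame contents are illustrative.

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3]}, index=['x', 'y', 'z'])

# The Series is conformed to the frame's index before being stored:
# label 'w' is discarded and the missing label 'z' is filled with NaN.
df['B'] = pd.Series([10, 20, 30], index=['x', 'y', 'w'])
print(df)
```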
19,680
pandas-dev/pandas
pandas/core/frame.py
DataFrame.insert
def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional """ self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._data.insert(loc, column, value, allow_duplicates=allow_duplicates)
python
def insert(self, loc, column, value, allow_duplicates=False): """ Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional """ self._ensure_valid_index(value) value = self._sanitize_column(column, value, broadcast=False) self._data.insert(loc, column, value, allow_duplicates=allow_duplicates)
[ "def", "insert", "(", "self", ",", "loc", ",", "column", ",", "value", ",", "allow_duplicates", "=", "False", ")", ":", "self", ".", "_ensure_valid_index", "(", "value", ")", "value", "=", "self", ".", "_sanitize_column", "(", "column", ",", "value", ",", "broadcast", "=", "False", ")", "self", ".", "_data", ".", "insert", "(", "loc", ",", "column", ",", "value", ",", "allow_duplicates", "=", "allow_duplicates", ")" ]
Insert column into DataFrame at specified location. Raises a ValueError if `column` is already contained in the DataFrame, unless `allow_duplicates` is set to True. Parameters ---------- loc : int Insertion index. Must verify 0 <= loc <= len(columns) column : string, number, or hashable object label of the inserted column value : int, Series, or array-like allow_duplicates : bool, optional
[ "Insert", "column", "into", "DataFrame", "at", "specified", "location", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3438-L3457
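A brief sketch of the ``insert`` record above; the positions and labels are illustrative.

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'C': [5, 6]})

df.insert(1, 'B', [3, 4])    # place the new column between 'A' and 'C'
print(df.columns.tolist())   # ['A', 'B', 'C']

# Re-inserting an existing label raises unless duplicates are allowed explicitly.
df.insert(0, 'A', df['A'], allow_duplicates=True)
```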
19,681
pandas-dev/pandas
pandas/core/frame.py
DataFrame.assign
def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data
python
def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data
[ "def", "assign", "(", "self", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "copy", "(", ")", "# >= 3.6 preserve order of kwargs", "if", "PY36", ":", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "data", "[", "k", "]", "=", "com", ".", "apply_if_callable", "(", "v", ",", "data", ")", "else", ":", "# <= 3.5: do all calculations first...", "results", "=", "OrderedDict", "(", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "results", "[", "k", "]", "=", "com", ".", "apply_if_callable", "(", "v", ",", "data", ")", "# <= 3.5 and earlier", "results", "=", "sorted", "(", "results", ".", "items", "(", ")", ")", "# ... and then assign", "for", "k", ",", "v", "in", "results", ":", "data", "[", "k", "]", "=", "v", "return", "data" ]
r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15
[ "r", "Assign", "new", "columns", "to", "a", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3459-L3547
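A sketch of why ``assign`` returning a new frame makes it chain cleanly in pipelines; the names are illustrative and the behavior matches the docstring above.

```python
import pandas as pd

df = pd.DataFrame({'temp_c': [17.0, 25.0]})

out = (df
       .assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
       .assign(is_warm=lambda x: x.temp_f > 70.0))
print(out)
print(df.columns.tolist())  # ['temp_c'] -- the original frame is untouched
```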
19,682
pandas-dev/pandas
pandas/core/frame.py
DataFrame.lookup
def lookup(self, row_labels, col_labels): """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. Parameters ---------- row_labels : sequence The row labels to use for lookup col_labels : sequence The column labels to use for lookup Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] Returns ------- values : ndarray The found values """ n = len(row_labels) if n != len(col_labels): raise ValueError('Row labels must have same size as column labels') thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError('One or more row labels was not found') if (cidx == -1).any(): raise KeyError('One or more column labels was not found') flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result
python
def lookup(self, row_labels, col_labels): """ Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. Parameters ---------- row_labels : sequence The row labels to use for lookup col_labels : sequence The column labels to use for lookup Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] Returns ------- values : ndarray The found values """ n = len(row_labels) if n != len(col_labels): raise ValueError('Row labels must have same size as column labels') thresh = 1000 if not self._is_mixed_type or n > thresh: values = self.values ridx = self.index.get_indexer(row_labels) cidx = self.columns.get_indexer(col_labels) if (ridx == -1).any(): raise KeyError('One or more row labels was not found') if (cidx == -1).any(): raise KeyError('One or more column labels was not found') flat_index = ridx * len(self.columns) + cidx result = values.flat[flat_index] else: result = np.empty(n, dtype='O') for i, (r, c) in enumerate(zip(row_labels, col_labels)): result[i] = self._get_value(r, c) if is_object_dtype(result): result = lib.maybe_convert_objects(result) return result
[ "def", "lookup", "(", "self", ",", "row_labels", ",", "col_labels", ")", ":", "n", "=", "len", "(", "row_labels", ")", "if", "n", "!=", "len", "(", "col_labels", ")", ":", "raise", "ValueError", "(", "'Row labels must have same size as column labels'", ")", "thresh", "=", "1000", "if", "not", "self", ".", "_is_mixed_type", "or", "n", ">", "thresh", ":", "values", "=", "self", ".", "values", "ridx", "=", "self", ".", "index", ".", "get_indexer", "(", "row_labels", ")", "cidx", "=", "self", ".", "columns", ".", "get_indexer", "(", "col_labels", ")", "if", "(", "ridx", "==", "-", "1", ")", ".", "any", "(", ")", ":", "raise", "KeyError", "(", "'One or more row labels was not found'", ")", "if", "(", "cidx", "==", "-", "1", ")", ".", "any", "(", ")", ":", "raise", "KeyError", "(", "'One or more column labels was not found'", ")", "flat_index", "=", "ridx", "*", "len", "(", "self", ".", "columns", ")", "+", "cidx", "result", "=", "values", ".", "flat", "[", "flat_index", "]", "else", ":", "result", "=", "np", ".", "empty", "(", "n", ",", "dtype", "=", "'O'", ")", "for", "i", ",", "(", "r", ",", "c", ")", "in", "enumerate", "(", "zip", "(", "row_labels", ",", "col_labels", ")", ")", ":", "result", "[", "i", "]", "=", "self", ".", "_get_value", "(", "r", ",", "c", ")", "if", "is_object_dtype", "(", "result", ")", ":", "result", "=", "lib", ".", "maybe_convert_objects", "(", "result", ")", "return", "result" ]
Label-based "fancy indexing" function for DataFrame. Given equal-length arrays of row and column labels, return an array of the values corresponding to each (row, col) pair. Parameters ---------- row_labels : sequence The row labels to use for lookup col_labels : sequence The column labels to use for lookup Notes ----- Akin to:: result = [df.get_value(row, col) for row, col in zip(row_labels, col_labels)] Examples -------- values : ndarray The found values
[ "Label", "-", "based", "fancy", "indexing", "function", "for", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3659-L3708
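A sketch of the ``lookup`` record above alongside a plain-NumPy equivalent of its fast path; note that ``lookup`` was deprecated in pandas 1.2 and removed in 2.0, so the second form is the long-term spelling.

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=['x', 'y', 'z'])

vals = df.lookup(['x', 'z'], ['A', 'B'])   # one value per (row, column) pair
print(vals)                                # [1 6]

# Equivalent NumPy fancy indexing, mirroring the method's fast path.
ridx = df.index.get_indexer(['x', 'z'])
cidx = df.columns.get_indexer(['A', 'B'])
print(df.to_numpy()[ridx, cidx])           # [1 6]
```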
19,683
pandas-dev/pandas
pandas/core/frame.py
DataFrame._reindex_multi
def _reindex_multi(self, axes, copy, fill_value): """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = algorithms.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value)
python
def _reindex_multi(self, axes, copy, fill_value): """ We are guaranteed non-Nones in the axes. """ new_index, row_indexer = self.index.reindex(axes['index']) new_columns, col_indexer = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = row_indexer, col_indexer new_values = algorithms.take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, copy=copy, fill_value=fill_value)
[ "def", "_reindex_multi", "(", "self", ",", "axes", ",", "copy", ",", "fill_value", ")", ":", "new_index", ",", "row_indexer", "=", "self", ".", "index", ".", "reindex", "(", "axes", "[", "'index'", "]", ")", "new_columns", ",", "col_indexer", "=", "self", ".", "columns", ".", "reindex", "(", "axes", "[", "'columns'", "]", ")", "if", "row_indexer", "is", "not", "None", "and", "col_indexer", "is", "not", "None", ":", "indexer", "=", "row_indexer", ",", "col_indexer", "new_values", "=", "algorithms", ".", "take_2d_multi", "(", "self", ".", "values", ",", "indexer", ",", "fill_value", "=", "fill_value", ")", "return", "self", ".", "_constructor", "(", "new_values", ",", "index", "=", "new_index", ",", "columns", "=", "new_columns", ")", "else", ":", "return", "self", ".", "_reindex_with_indexers", "(", "{", "0", ":", "[", "new_index", ",", "row_indexer", "]", ",", "1", ":", "[", "new_columns", ",", "col_indexer", "]", "}", ",", "copy", "=", "copy", ",", "fill_value", "=", "fill_value", ")" ]
We are guaranteed non-Nones in the axes.
[ "We", "are", "guaranteed", "non", "-", "Nones", "in", "the", "axes", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3747-L3765
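``_reindex_multi`` is a private two-axis fast path; this sketch only shows the public call that, per the surrounding code, can reach it when both axes are reindexed at once.

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])

# Reindexing index and columns together; labels absent from the original
# frame ('z' and 'C') are created and filled with fill_value.
out = df.reindex(index=['x', 'z'], columns=['B', 'C'], fill_value=0)
print(out)
```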
19,684
pandas-dev/pandas
pandas/core/frame.py
DataFrame.drop
def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If True, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors)
python
def drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise'): """ Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If True, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8 """ return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors)
[ "def", "drop", "(", "self", ",", "labels", "=", "None", ",", "axis", "=", "0", ",", "index", "=", "None", ",", "columns", "=", "None", ",", "level", "=", "None", ",", "inplace", "=", "False", ",", "errors", "=", "'raise'", ")", ":", "return", "super", "(", ")", ".", "drop", "(", "labels", "=", "labels", ",", "axis", "=", "axis", ",", "index", "=", "index", ",", "columns", "=", "columns", ",", "level", "=", "level", ",", "inplace", "=", "inplace", ",", "errors", "=", "errors", ")" ]
Drop specified labels from rows or columns. Remove rows or columns by specifying label names and corresponding axis, or by specifying directly index or column names. When using a multi-index, labels on different levels can be removed by specifying the level. Parameters ---------- labels : single label or list-like Index or column labels to drop. axis : {0 or 'index', 1 or 'columns'}, default 0 Whether to drop labels from the index (0 or 'index') or columns (1 or 'columns'). index : single label or list-like Alternative to specifying axis (``labels, axis=0`` is equivalent to ``index=labels``). .. versionadded:: 0.21.0 columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). .. versionadded:: 0.21.0 level : int or level name, optional For MultiIndex, level from which the labels will be removed. inplace : bool, default False If True, do operation inplace and return None. errors : {'ignore', 'raise'}, default 'raise' If 'ignore', suppress error and only existing labels are dropped. Returns ------- DataFrame DataFrame without the removed index or column labels. Raises ------ KeyError If any of the labels is not found in the selected axis. See Also -------- DataFrame.loc : Label-location based indexer for selection by label. DataFrame.dropna : Return DataFrame with labels on given axis omitted where (all or any) data are missing. DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed, optionally only considering certain columns. Series.drop : Return Series with specified index labels removed. Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3, 4), ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 0 1 2 3 1 4 5 6 7 2 8 9 10 11 Drop columns >>> df.drop(['B', 'C'], axis=1) A D 0 0 3 1 4 7 2 8 11 >>> df.drop(columns=['B', 'C']) A D 0 0 3 1 4 7 2 8 11 Drop a row by index >>> df.drop([0, 1]) A B C D 2 8 9 10 11 Drop columns and/or rows of MultiIndex DataFrame >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> df = pd.DataFrame(index=midx, columns=['big', 'small'], ... data=[[45, 30], [200, 100], [1.5, 1], [30, 20], ... [250, 150], [1.5, 0.8], [320, 250], ... [1, 0.8], [0.3, 0.2]]) >>> df big small lama speed 45.0 30.0 weight 200.0 100.0 length 1.5 1.0 cow speed 30.0 20.0 weight 250.0 150.0 length 1.5 0.8 falcon speed 320.0 250.0 weight 1.0 0.8 length 0.3 0.2 >>> df.drop(index='cow', columns='small') big lama speed 45.0 weight 200.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 >>> df.drop(index='length', level=1) big small lama speed 45.0 30.0 weight 200.0 100.0 cow speed 30.0 20.0 weight 250.0 150.0 falcon speed 320.0 250.0 weight 1.0 0.8
[ "Drop", "specified", "labels", "from", "rows", "or", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3800-L3926
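A minimal usage sketch for the ``drop`` record above; the frame and labels are illustrative, not taken from the source:

import pandas as pd
import numpy as np

df = pd.DataFrame(np.arange(12).reshape(3, 4), columns=['A', 'B', 'C', 'D'])

# The axis-style and keyword-style spellings are equivalent:
assert df.drop(['B', 'C'], axis=1).equals(df.drop(columns=['B', 'C']))

# errors='ignore' skips labels that are absent instead of raising KeyError:
df.drop(columns=['B', 'Z'], errors='ignore')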
19,685
pandas-dev/pandas
pandas/core/frame.py
DataFrame.rename
def rename(self, *args, **kwargs): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('mapper', None) return super().rename(**kwargs)
python
def rename(self, *args, **kwargs): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6 """ axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename') kwargs.update(axes) # Pop these, since the values are in `kwargs` under different names kwargs.pop('axis', None) kwargs.pop('mapper', None) return super().rename(**kwargs)
[ "def", "rename", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "axes", "=", "validate_axis_style_args", "(", "self", ",", "args", ",", "kwargs", ",", "'mapper'", ",", "'rename'", ")", "kwargs", ".", "update", "(", "axes", ")", "# Pop these, since the values are in `kwargs` under different names", "kwargs", ".", "pop", "(", "'axis'", ",", "None", ")", "kwargs", ".", "pop", "(", "'mapper'", ",", "None", ")", "return", "super", "(", ")", ".", "rename", "(", "*", "*", "kwargs", ")" ]
Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don't throw an error. See the :ref:`user guide <basics.rename>` for more. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis' values. Use either ``mapper`` and ``axis`` to specify the axis to target with ``mapper``, or ``index`` and ``columns``. index : dict-like or function Alternative to specifying axis (``mapper, axis=0`` is equivalent to ``index=mapper``). columns : dict-like or function Alternative to specifying axis (``mapper, axis=1`` is equivalent to ``columns=mapper``). axis : int or str Axis to target with ``mapper``. Can be either the axis name ('index', 'columns') or number (0, 1). The default is 'index'. copy : bool, default True Also copy underlying data. inplace : bool, default False Whether to return a new DataFrame. If True then value of copy is ignored. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise'}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame DataFrame with the renamed axis labels. Raises ------ KeyError If any of the labels is not found in the selected axis and "errors='raise'". See Also -------- DataFrame.rename_axis : Set the name of the axis. Examples -------- ``DataFrame.rename`` supports two calling conventions * ``(index=index_mapper, columns=columns_mapper, ...)`` * ``(mapper, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> df.rename(index=str, columns={"A": "a", "B": "c"}) a c 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}) a B 0 1 4 1 2 5 2 3 6 >>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise") Traceback (most recent call last): KeyError: ['C'] not found in axis Using axis-style parameters >>> df.rename(str.lower, axis='columns') a b 0 1 4 1 2 5 2 3 6 >>> df.rename({1: 2, 2: 4}, axis='index') A B 0 1 4 2 2 5 4 3 6
[ "Alter", "axes", "labels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3932-L4035
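A small sketch of the two calling conventions documented above (toy frame, illustrative names):

import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})

# Keyword convention and axis-style convention produce the same result:
assert df.rename(columns={'A': 'a'}).equals(df.rename({'A': 'a'}, axis='columns'))

# With errors='raise', unknown dict keys become a KeyError instead of
# being silently ignored:
# df.rename(columns={'C': 'c'}, errors='raise')  # KeyError: ['C'] not found in axis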
19,686
pandas-dev/pandas
pandas/core/frame.py
DataFrame.dropna
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
        """
        Remove missing values.

        See the :ref:`User Guide <missing_data>` for more on which values are
        considered missing, and how to work with missing data.

        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Determine if rows or columns which contain missing values are
            removed.

            * 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing values.

            .. deprecated:: 0.23.0

               Pass tuple or list to drop on multiple axes.
               Only a single axis is allowed.

        how : {'any', 'all'}, default 'any'
            Determine if row or column is removed from DataFrame when we have
            at least one NA or all NA.

            * 'any' : If any NA values are present, drop that row or column.
            * 'all' : If all values are NA, drop that row or column.

        thresh : int, optional
            Require that many non-NA values.
        subset : array-like, optional
            Labels along other axis to consider, e.g. if you are dropping rows
            these would be a list of columns to include.
        inplace : bool, default False
            If True, do operation inplace and return None.

        Returns
        -------
        DataFrame
            DataFrame with NA entries dropped from it.

        See Also
        --------
        DataFrame.isna : Indicate missing values.
        DataFrame.notna : Indicate existing (non-missing) values.
        DataFrame.fillna : Replace missing values.
        Series.dropna : Drop missing values.
        Index.dropna : Drop missing indices.

        Examples
        --------
        >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
        ...                    "toy": [np.nan, 'Batmobile', 'Bullwhip'],
        ...                    "born": [pd.NaT, pd.Timestamp("1940-04-25"),
        ...                             pd.NaT]})
        >>> df
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Drop the rows where at least one element is missing.

        >>> df.dropna()
             name        toy       born
        1  Batman  Batmobile 1940-04-25

        Drop the columns where at least one element is missing.

        >>> df.dropna(axis='columns')
               name
        0    Alfred
        1    Batman
        2  Catwoman

        Drop the rows where all elements are missing.

        >>> df.dropna(how='all')
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Keep only the rows with at least 2 non-NA values.

        >>> df.dropna(thresh=2)
               name        toy       born
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Define in which columns to look for missing values.

        >>> df.dropna(subset=['name', 'born'])
               name        toy       born
        1    Batman  Batmobile 1940-04-25

        Keep the DataFrame with valid entries in the same variable.

        >>> df.dropna(inplace=True)
        >>> df
             name        toy       born
        1  Batman  Batmobile 1940-04-25
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if isinstance(axis, (tuple, list)):
            # GH20987
            msg = ("supplying multiple axes to axis is deprecated and "
                   "will be removed in a future version.")
            warnings.warn(msg, FutureWarning, stacklevel=2)

            result = self
            for ax in axis:
                result = result.dropna(how=how, thresh=thresh, subset=subset,
                                       axis=ax)
        else:
            axis = self._get_axis_number(axis)
            agg_axis = 1 - axis

            agg_obj = self
            if subset is not None:
                ax = self._get_axis(agg_axis)
                indices = ax.get_indexer_for(subset)
                check = indices == -1
                if check.any():
                    raise KeyError(list(np.compress(check, subset)))
                agg_obj = self.take(indices, axis=agg_axis)

            count = agg_obj.count(axis=agg_axis)

            if thresh is not None:
                mask = count >= thresh
            elif how == 'any':
                mask = count == len(agg_obj._get_axis(agg_axis))
            elif how == 'all':
                mask = count > 0
            else:
                if how is not None:
                    raise ValueError('invalid how option: {h}'.format(h=how))
                else:
                    raise TypeError('must specify how or thresh')

            result = self.loc(axis=axis)[mask]

        if inplace:
            self._update_inplace(result)
        else:
            return result
python
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
        """
        Remove missing values.

        See the :ref:`User Guide <missing_data>` for more on which values are
        considered missing, and how to work with missing data.

        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Determine if rows or columns which contain missing values are
            removed.

            * 0, or 'index' : Drop rows which contain missing values.
            * 1, or 'columns' : Drop columns which contain missing values.

            .. deprecated:: 0.23.0

               Pass tuple or list to drop on multiple axes.
               Only a single axis is allowed.

        how : {'any', 'all'}, default 'any'
            Determine if row or column is removed from DataFrame when we have
            at least one NA or all NA.

            * 'any' : If any NA values are present, drop that row or column.
            * 'all' : If all values are NA, drop that row or column.

        thresh : int, optional
            Require that many non-NA values.
        subset : array-like, optional
            Labels along other axis to consider, e.g. if you are dropping rows
            these would be a list of columns to include.
        inplace : bool, default False
            If True, do operation inplace and return None.

        Returns
        -------
        DataFrame
            DataFrame with NA entries dropped from it.

        See Also
        --------
        DataFrame.isna : Indicate missing values.
        DataFrame.notna : Indicate existing (non-missing) values.
        DataFrame.fillna : Replace missing values.
        Series.dropna : Drop missing values.
        Index.dropna : Drop missing indices.

        Examples
        --------
        >>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
        ...                    "toy": [np.nan, 'Batmobile', 'Bullwhip'],
        ...                    "born": [pd.NaT, pd.Timestamp("1940-04-25"),
        ...                             pd.NaT]})
        >>> df
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Drop the rows where at least one element is missing.

        >>> df.dropna()
             name        toy       born
        1  Batman  Batmobile 1940-04-25

        Drop the columns where at least one element is missing.

        >>> df.dropna(axis='columns')
               name
        0    Alfred
        1    Batman
        2  Catwoman

        Drop the rows where all elements are missing.

        >>> df.dropna(how='all')
               name        toy       born
        0    Alfred        NaN        NaT
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Keep only the rows with at least 2 non-NA values.

        >>> df.dropna(thresh=2)
               name        toy       born
        1    Batman  Batmobile 1940-04-25
        2  Catwoman   Bullwhip        NaT

        Define in which columns to look for missing values.

        >>> df.dropna(subset=['name', 'born'])
               name        toy       born
        1    Batman  Batmobile 1940-04-25

        Keep the DataFrame with valid entries in the same variable.

        >>> df.dropna(inplace=True)
        >>> df
             name        toy       born
        1  Batman  Batmobile 1940-04-25
        """
        inplace = validate_bool_kwarg(inplace, 'inplace')
        if isinstance(axis, (tuple, list)):
            # GH20987
            msg = ("supplying multiple axes to axis is deprecated and "
                   "will be removed in a future version.")
            warnings.warn(msg, FutureWarning, stacklevel=2)

            result = self
            for ax in axis:
                result = result.dropna(how=how, thresh=thresh, subset=subset,
                                       axis=ax)
        else:
            axis = self._get_axis_number(axis)
            agg_axis = 1 - axis

            agg_obj = self
            if subset is not None:
                ax = self._get_axis(agg_axis)
                indices = ax.get_indexer_for(subset)
                check = indices == -1
                if check.any():
                    raise KeyError(list(np.compress(check, subset)))
                agg_obj = self.take(indices, axis=agg_axis)

            count = agg_obj.count(axis=agg_axis)

            if thresh is not None:
                mask = count >= thresh
            elif how == 'any':
                mask = count == len(agg_obj._get_axis(agg_axis))
            elif how == 'all':
                mask = count > 0
            else:
                if how is not None:
                    raise ValueError('invalid how option: {h}'.format(h=how))
                else:
                    raise TypeError('must specify how or thresh')

            result = self.loc(axis=axis)[mask]

        if inplace:
            self._update_inplace(result)
        else:
            return result
[ "def", "dropna", "(", "self", ",", "axis", "=", "0", ",", "how", "=", "'any'", ",", "thresh", "=", "None", ",", "subset", "=", "None", ",", "inplace", "=", "False", ")", ":", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "if", "isinstance", "(", "axis", ",", "(", "tuple", ",", "list", ")", ")", ":", "# GH20987", "msg", "=", "(", "\"supplying multiple axes to axis is deprecated and \"", "\"will be removed in a future version.\"", ")", "warnings", ".", "warn", "(", "msg", ",", "FutureWarning", ",", "stacklevel", "=", "2", ")", "result", "=", "self", "for", "ax", "in", "axis", ":", "result", "=", "result", ".", "dropna", "(", "how", "=", "how", ",", "thresh", "=", "thresh", ",", "subset", "=", "subset", ",", "axis", "=", "ax", ")", "else", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "agg_axis", "=", "1", "-", "axis", "agg_obj", "=", "self", "if", "subset", "is", "not", "None", ":", "ax", "=", "self", ".", "_get_axis", "(", "agg_axis", ")", "indices", "=", "ax", ".", "get_indexer_for", "(", "subset", ")", "check", "=", "indices", "==", "-", "1", "if", "check", ".", "any", "(", ")", ":", "raise", "KeyError", "(", "list", "(", "np", ".", "compress", "(", "check", ",", "subset", ")", ")", ")", "agg_obj", "=", "self", ".", "take", "(", "indices", ",", "axis", "=", "agg_axis", ")", "count", "=", "agg_obj", ".", "count", "(", "axis", "=", "agg_axis", ")", "if", "thresh", "is", "not", "None", ":", "mask", "=", "count", ">=", "thresh", "elif", "how", "==", "'any'", ":", "mask", "=", "count", "==", "len", "(", "agg_obj", ".", "_get_axis", "(", "agg_axis", ")", ")", "elif", "how", "==", "'all'", ":", "mask", "=", "count", ">", "0", "else", ":", "if", "how", "is", "not", "None", ":", "raise", "ValueError", "(", "'invalid how option: {h}'", ".", "format", "(", "h", "=", "how", ")", ")", "else", ":", "raise", "TypeError", "(", "'must specify how or thresh'", ")", "result", "=", "self", ".", "loc", "(", "axis", "=", "axis", ")", "[", "mask", "]", "if", "inplace", ":", "self", ".", "_update_inplace", "(", "result", ")", "else", ":", "return", "result" ]
Remove missing values.

See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.

Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
    Determine if rows or columns which contain missing values are
    removed.

    * 0, or 'index' : Drop rows which contain missing values.
    * 1, or 'columns' : Drop columns which contain missing values.

    .. deprecated:: 0.23.0

       Pass tuple or list to drop on multiple axes.
       Only a single axis is allowed.

how : {'any', 'all'}, default 'any'
    Determine if row or column is removed from DataFrame when we have
    at least one NA or all NA.

    * 'any' : If any NA values are present, drop that row or column.
    * 'all' : If all values are NA, drop that row or column.

thresh : int, optional
    Require that many non-NA values.
subset : array-like, optional
    Labels along other axis to consider, e.g. if you are dropping rows
    these would be a list of columns to include.
inplace : bool, default False
    If True, do operation inplace and return None.

Returns
-------
DataFrame
    DataFrame with NA entries dropped from it.

See Also
--------
DataFrame.isna : Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.

Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
...                    "toy": [np.nan, 'Batmobile', 'Bullwhip'],
...                    "born": [pd.NaT, pd.Timestamp("1940-04-25"),
...                             pd.NaT]})
>>> df
       name        toy       born
0    Alfred        NaN        NaT
1    Batman  Batmobile 1940-04-25
2  Catwoman   Bullwhip        NaT

Drop the rows where at least one element is missing.

>>> df.dropna()
     name        toy       born
1  Batman  Batmobile 1940-04-25

Drop the columns where at least one element is missing.

>>> df.dropna(axis='columns')
       name
0    Alfred
1    Batman
2  Catwoman

Drop the rows where all elements are missing.

>>> df.dropna(how='all')
       name        toy       born
0    Alfred        NaN        NaT
1    Batman  Batmobile 1940-04-25
2  Catwoman   Bullwhip        NaT

Keep only the rows with at least 2 non-NA values.

>>> df.dropna(thresh=2)
       name        toy       born
1    Batman  Batmobile 1940-04-25
2  Catwoman   Bullwhip        NaT

Define in which columns to look for missing values.

>>> df.dropna(subset=['name', 'born'])
       name        toy       born
1    Batman  Batmobile 1940-04-25

Keep the DataFrame with valid entries in the same variable.

>>> df.dropna(inplace=True)
>>> df
     name        toy       born
1  Batman  Batmobile 1940-04-25
[ "Remove", "missing", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4497-L4644
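A compact sketch of how `how`, `thresh`, and `subset` interact, using an illustrative frame:

import pandas as pd
import numpy as np

df = pd.DataFrame({'x': [1.0, np.nan, 3.0], 'y': [np.nan, np.nan, 6.0]})

df.dropna()               # keeps only row 2 (the only row with no NA)
df.dropna(how='all')      # drops only row 1 (the only all-NA row)
df.dropna(thresh=1)       # keeps rows 0 and 2 (at least one non-NA value)
df.dropna(subset=['x'])   # NA-checks column 'x' only, so keeps rows 0 and 2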
19,687
pandas-dev/pandas
pandas/core/frame.py
DataFrame.drop_duplicates
def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns ------- DataFrame """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: inds, = (-duplicated)._ndarray_values.nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated]
python
def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns ------- DataFrame """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: inds, = (-duplicated)._ndarray_values.nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated]
[ "def", "drop_duplicates", "(", "self", ",", "subset", "=", "None", ",", "keep", "=", "'first'", ",", "inplace", "=", "False", ")", ":", "if", "self", ".", "empty", ":", "return", "self", ".", "copy", "(", ")", "inplace", "=", "validate_bool_kwarg", "(", "inplace", ",", "'inplace'", ")", "duplicated", "=", "self", ".", "duplicated", "(", "subset", ",", "keep", "=", "keep", ")", "if", "inplace", ":", "inds", ",", "=", "(", "-", "duplicated", ")", ".", "_ndarray_values", ".", "nonzero", "(", ")", "new_data", "=", "self", ".", "_data", ".", "take", "(", "inds", ")", "self", ".", "_update_inplace", "(", "new_data", ")", "else", ":", "return", "self", "[", "-", "duplicated", "]" ]
Return DataFrame with duplicate rows removed, optionally only
considering certain columns. Indexes, including time indexes,
are ignored.

Parameters
----------
subset : column label or sequence of labels, optional
    Only consider certain columns for identifying duplicates; by
    default, use all of the columns.
keep : {'first', 'last', False}, default 'first'
    - ``first`` : Drop duplicates except for the first occurrence.
    - ``last`` : Drop duplicates except for the last occurrence.
    - False : Drop all duplicates.
inplace : bool, default False
    Whether to drop duplicates in place or to return a copy.

Returns
-------
DataFrame
[ "Return", "DataFrame", "with", "duplicate", "rows", "removed", "optionally", "only", "considering", "certain", "columns", ".", "Indexes", "including", "time", "indexes", "are", "ignored", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4646-L4679
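A minimal sketch of the keep and subset options (illustrative data):

import pandas as pd

df = pd.DataFrame({'k': [1, 1, 2], 'v': ['a', 'a', 'b']})

df.drop_duplicates()              # keeps rows 0 and 2
df.drop_duplicates(keep='last')   # keeps rows 1 and 2
df.drop_duplicates(keep=False)    # keeps row 2 only; all duplicates dropped
df.drop_duplicates(subset=['k'])  # duplicates judged on column 'k' alone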
19,688
pandas-dev/pandas
pandas/core/frame.py
DataFrame.duplicated
def duplicated(self, subset=None, keep='first'): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- Series """ from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT if self.empty: return Series(dtype=bool) def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8', copy=False), len(shape) if subset is None: subset = self.columns elif (not np.iterable(subset) or isinstance(subset, str) or isinstance(subset, tuple) and subset in self.columns): subset = subset, # Verify all columns in subset exist in the queried dataframe # Otherwise, raise a KeyError, same as if you try to __getitem__ with a # key that doesn't exist. diff = Index(subset).difference(self.columns) if not diff.empty: raise KeyError(diff) vals = (col.values for name, col in self.iteritems() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, keep), index=self.index)
python
def duplicated(self, subset=None, keep='first'): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- Series """ from pandas.core.sorting import get_group_index from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT if self.empty: return Series(dtype=bool) def f(vals): labels, shape = algorithms.factorize( vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)) return labels.astype('i8', copy=False), len(shape) if subset is None: subset = self.columns elif (not np.iterable(subset) or isinstance(subset, str) or isinstance(subset, tuple) and subset in self.columns): subset = subset, # Verify all columns in subset exist in the queried dataframe # Otherwise, raise a KeyError, same as if you try to __getitem__ with a # key that doesn't exist. diff = Index(subset).difference(self.columns) if not diff.empty: raise KeyError(diff) vals = (col.values for name, col in self.iteritems() if name in subset) labels, shape = map(list, zip(*map(f, vals))) ids = get_group_index(labels, shape, sort=False, xnull=False) return Series(duplicated_int64(ids, keep), index=self.index)
[ "def", "duplicated", "(", "self", ",", "subset", "=", "None", ",", "keep", "=", "'first'", ")", ":", "from", "pandas", ".", "core", ".", "sorting", "import", "get_group_index", "from", "pandas", ".", "_libs", ".", "hashtable", "import", "duplicated_int64", ",", "_SIZE_HINT_LIMIT", "if", "self", ".", "empty", ":", "return", "Series", "(", "dtype", "=", "bool", ")", "def", "f", "(", "vals", ")", ":", "labels", ",", "shape", "=", "algorithms", ".", "factorize", "(", "vals", ",", "size_hint", "=", "min", "(", "len", "(", "self", ")", ",", "_SIZE_HINT_LIMIT", ")", ")", "return", "labels", ".", "astype", "(", "'i8'", ",", "copy", "=", "False", ")", ",", "len", "(", "shape", ")", "if", "subset", "is", "None", ":", "subset", "=", "self", ".", "columns", "elif", "(", "not", "np", ".", "iterable", "(", "subset", ")", "or", "isinstance", "(", "subset", ",", "str", ")", "or", "isinstance", "(", "subset", ",", "tuple", ")", "and", "subset", "in", "self", ".", "columns", ")", ":", "subset", "=", "subset", ",", "# Verify all columns in subset exist in the queried dataframe", "# Otherwise, raise a KeyError, same as if you try to __getitem__ with a", "# key that doesn't exist.", "diff", "=", "Index", "(", "subset", ")", ".", "difference", "(", "self", ".", "columns", ")", "if", "not", "diff", ".", "empty", ":", "raise", "KeyError", "(", "diff", ")", "vals", "=", "(", "col", ".", "values", "for", "name", ",", "col", "in", "self", ".", "iteritems", "(", ")", "if", "name", "in", "subset", ")", "labels", ",", "shape", "=", "map", "(", "list", ",", "zip", "(", "*", "map", "(", "f", ",", "vals", ")", ")", ")", "ids", "=", "get_group_index", "(", "labels", ",", "shape", ",", "sort", "=", "False", ",", "xnull", "=", "False", ")", "return", "Series", "(", "duplicated_int64", "(", "ids", ",", "keep", ")", ",", "index", "=", "self", ".", "index", ")" ]
Return boolean Series denoting duplicate rows, optionally only
considering certain columns.

Parameters
----------
subset : column label or sequence of labels, optional
    Only consider certain columns for identifying duplicates; by
    default, use all of the columns.
keep : {'first', 'last', False}, default 'first'
    - ``first`` : Mark duplicates as ``True`` except for the
      first occurrence.
    - ``last`` : Mark duplicates as ``True`` except for the
      last occurrence.
    - False : Mark all duplicates as ``True``.

Returns
-------
Series
[ "Return", "boolean", "Series", "denoting", "duplicate", "rows", "optionally", "only", "considering", "certain", "columns", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4681-L4732
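A minimal sketch showing how the returned boolean mask changes with subset and keep (illustrative data):

import pandas as pd

df = pd.DataFrame({'k': [1, 1, 1], 'v': ['a', 'a', 'b']})

df.duplicated()              # [False, True, False]: whole rows compared
df.duplicated(subset=['k'])  # [False, True, True]: only column 'k' compared
df.duplicated(keep=False)    # [True, True, False]: every member of a duplicate set marked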
19,689
pandas-dev/pandas
pandas/core/frame.py
DataFrame.nlargest
def nlargest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
python
def nlargest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - `first` : prioritize the first occurrence(s) - `last` : prioritize the last occurrence(s) - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(3, 'population') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT When using ``keep='last'``, ties are resolved in reverse order: >>> df.nlargest(3, 'population', keep='last') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN When using ``keep='all'``, all duplicate items are maintained: >>> df.nlargest(3, 'population', keep='all') population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN To order by the largest values in column "population" and then "GDP", we can specify multiple columns like in the next example. >>> df.nlargest(3, ['population', 'GDP']) population GDP alpha-2 France 65000000 2583560 FR Italy 59000000 1937894 IT Brunei 434000 12128 BN """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
[ "def", "nlargest", "(", "self", ",", "n", ",", "columns", ",", "keep", "=", "'first'", ")", ":", "return", "algorithms", ".", "SelectNFrame", "(", "self", ",", "n", "=", "n", ",", "keep", "=", "keep", ",", "columns", "=", "columns", ")", ".", "nlargest", "(", ")" ]
Return the first `n` rows ordered by `columns` in descending order.

Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.

This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.

Parameters
----------
n : int
    Number of rows to return.
columns : label or list of labels
    Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
    Where there are duplicate values:

    - ``first`` : prioritize the first occurrence(s)
    - ``last`` : prioritize the last occurrence(s)
    - ``all`` : do not drop any duplicates, even if it means
      selecting more than `n` items.

    .. versionadded:: 0.24.0

Returns
-------
DataFrame
    The first `n` rows ordered by the given columns in descending
    order.

See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns`
    in ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.

Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError``
is raised.

Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
...                                   434000, 434000, 337000, 11300,
...                                   11300, 11300],
...                    'GDP': [1937894, 2583560, 12011, 4520, 12128,
...                            17036, 182, 38, 311],
...                    'alpha-2': ["IT", "FR", "MT", "MV", "BN",
...                                "IS", "NR", "TV", "AI"]},
...                   index=["Italy", "France", "Malta",
...                          "Maldives", "Brunei", "Iceland",
...                          "Nauru", "Tuvalu", "Anguilla"])
>>> df
          population      GDP alpha-2
Italy       59000000  1937894      IT
France      65000000  2583560      FR
Malta         434000    12011      MT
Maldives      434000     4520      MV
Brunei        434000    12128      BN
Iceland       337000    17036      IS
Nauru          11300      182      NR
Tuvalu         11300       38      TV
Anguilla       11300      311      AI

In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".

>>> df.nlargest(3, 'population')
        population      GDP alpha-2
France    65000000  2583560      FR
Italy     59000000  1937894      IT
Malta       434000    12011      MT

When using ``keep='last'``, ties are resolved in reverse order:

>>> df.nlargest(3, 'population', keep='last')
        population      GDP alpha-2
France    65000000  2583560      FR
Italy     59000000  1937894      IT
Brunei      434000    12128      BN

When using ``keep='all'``, all duplicate items are maintained:

>>> df.nlargest(3, 'population', keep='all')
          population      GDP alpha-2
France      65000000  2583560      FR
Italy       59000000  1937894      IT
Malta         434000    12011      MT
Maldives      434000     4520      MV
Brunei        434000    12128      BN

To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.

>>> df.nlargest(3, ['population', 'GDP'])
        population      GDP alpha-2
France    65000000  2583560      FR
Italy     59000000  1937894      IT
Brunei      434000    12128      BN
[ "Return", "the", "first", "n", "rows", "ordered", "by", "columns", "in", "descending", "order", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4843-L4953
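A small sketch of the tie-handling behavior documented above (illustrative numbers):

import pandas as pd

df = pd.DataFrame({'population': [10, 10, 5], 'GDP': [1, 3, 2]})

df.nlargest(1, 'population')              # one of the tied rows, by original order
df.nlargest(1, 'population', keep='all')  # both tied rows, despite n=1
df.nlargest(2, ['population', 'GDP'])     # 'GDP' breaks the tie in 'population'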
19,690
pandas-dev/pandas
pandas/core/frame.py
DataFrame.nsmallest
def nsmallest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(3, 'population') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI When using ``keep='last'``, ties are resolved in reverse order: >>> df.nsmallest(3, 'population', keep='last') population GDP alpha-2 Anguilla 11300 311 AI Tuvalu 11300 38 TV Nauru 11300 182 NR When using ``keep='all'``, all duplicate items are maintained: >>> df.nsmallest(3, 'population', keep='all') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(3, ['population', 'GDP']) population GDP alpha-2 Tuvalu 11300 38 TV Nauru 11300 182 NR Anguilla 11300 311 AI """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest()
python
def nsmallest(self, n, columns, keep='first'): """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. keep : {'first', 'last', 'all'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. - ``all`` : do not drop any duplicates, even it means selecting more than `n` items. .. versionadded:: 0.24.0 Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = pd.DataFrame({'population': [59000000, 65000000, 434000, ... 434000, 434000, 337000, 11300, ... 11300, 11300], ... 'GDP': [1937894, 2583560 , 12011, 4520, 12128, ... 17036, 182, 38, 311], ... 'alpha-2': ["IT", "FR", "MT", "MV", "BN", ... "IS", "NR", "TV", "AI"]}, ... index=["Italy", "France", "Malta", ... "Maldives", "Brunei", "Iceland", ... "Nauru", "Tuvalu", "Anguilla"]) >>> df population GDP alpha-2 Italy 59000000 1937894 IT France 65000000 2583560 FR Malta 434000 12011 MT Maldives 434000 4520 MV Brunei 434000 12128 BN Iceland 337000 17036 IS Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(3, 'population') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI When using ``keep='last'``, ties are resolved in reverse order: >>> df.nsmallest(3, 'population', keep='last') population GDP alpha-2 Anguilla 11300 311 AI Tuvalu 11300 38 TV Nauru 11300 182 NR When using ``keep='all'``, all duplicate items are maintained: >>> df.nsmallest(3, 'population', keep='all') population GDP alpha-2 Nauru 11300 182 NR Tuvalu 11300 38 TV Anguilla 11300 311 AI To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. >>> df.nsmallest(3, ['population', 'GDP']) population GDP alpha-2 Tuvalu 11300 38 TV Nauru 11300 182 NR Anguilla 11300 311 AI """ return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest()
[ "def", "nsmallest", "(", "self", ",", "n", ",", "columns", ",", "keep", "=", "'first'", ")", ":", "return", "algorithms", ".", "SelectNFrame", "(", "self", ",", "n", "=", "n", ",", "keep", "=", "keep", ",", "columns", "=", "columns", ")", ".", "nsmallest", "(", ")" ]
Return the first `n` rows ordered by `columns` in ascending order.

Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.

This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.

Parameters
----------
n : int
    Number of items to retrieve.
columns : list or str
    Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
    Where there are duplicate values:

    - ``first`` : take the first occurrence.
    - ``last`` : take the last occurrence.
    - ``all`` : do not drop any duplicates, even if it means
      selecting more than `n` items.

    .. versionadded:: 0.24.0

Returns
-------
DataFrame

See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns`
    in descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.

Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
...                                   434000, 434000, 337000, 11300,
...                                   11300, 11300],
...                    'GDP': [1937894, 2583560, 12011, 4520, 12128,
...                            17036, 182, 38, 311],
...                    'alpha-2': ["IT", "FR", "MT", "MV", "BN",
...                                "IS", "NR", "TV", "AI"]},
...                   index=["Italy", "France", "Malta",
...                          "Maldives", "Brunei", "Iceland",
...                          "Nauru", "Tuvalu", "Anguilla"])
>>> df
          population      GDP alpha-2
Italy       59000000  1937894      IT
France      65000000  2583560      FR
Malta         434000    12011      MT
Maldives      434000     4520      MV
Brunei        434000    12128      BN
Iceland       337000    17036      IS
Nauru          11300      182      NR
Tuvalu         11300       38      TV
Anguilla       11300      311      AI

In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".

>>> df.nsmallest(3, 'population')
          population  GDP alpha-2
Nauru          11300  182      NR
Tuvalu         11300   38      TV
Anguilla       11300  311      AI

When using ``keep='last'``, ties are resolved in reverse order:

>>> df.nsmallest(3, 'population', keep='last')
          population  GDP alpha-2
Anguilla       11300  311      AI
Tuvalu         11300   38      TV
Nauru          11300  182      NR

When using ``keep='all'``, all duplicate items are maintained:

>>> df.nsmallest(3, 'population', keep='all')
          population  GDP alpha-2
Nauru          11300  182      NR
Tuvalu         11300   38      TV
Anguilla       11300  311      AI

To order by the smallest values in columns "population" and then
"GDP", we can specify multiple columns like in the next example.

>>> df.nsmallest(3, ['population', 'GDP'])
          population  GDP alpha-2
Tuvalu         11300   38      TV
Nauru          11300  182      NR
Anguilla       11300  311      AI
[ "Return", "the", "first", "n", "rows", "ordered", "by", "columns", "in", "ascending", "order", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4955-L5055
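A short sketch mirroring the equivalence stated in the docstring (illustrative numbers):

import pandas as pd

df = pd.DataFrame({'population': [10, 10, 5], 'GDP': [1, 3, 2]})

result = df.nsmallest(2, ['population', 'GDP'])
# Same rows as the slower spelling:
assert result.equals(df.sort_values(['population', 'GDP']).head(2))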
19,691
pandas-dev/pandas
pandas/core/frame.py
DataFrame.swaplevel
def swaplevel(self, i=-2, j=-1, axis=0): """ Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int, string (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- DataFrame .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. """ result = self.copy() axis = self._get_axis_number(axis) if axis == 0: result.index = result.index.swaplevel(i, j) else: result.columns = result.columns.swaplevel(i, j) return result
python
def swaplevel(self, i=-2, j=-1, axis=0): """ Swap levels i and j in a MultiIndex on a particular axis. Parameters ---------- i, j : int, string (can be mixed) Level of index to be swapped. Can pass level name as string. Returns ------- DataFrame .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. """ result = self.copy() axis = self._get_axis_number(axis) if axis == 0: result.index = result.index.swaplevel(i, j) else: result.columns = result.columns.swaplevel(i, j) return result
[ "def", "swaplevel", "(", "self", ",", "i", "=", "-", "2", ",", "j", "=", "-", "1", ",", "axis", "=", "0", ")", ":", "result", "=", "self", ".", "copy", "(", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "axis", "==", "0", ":", "result", ".", "index", "=", "result", ".", "index", ".", "swaplevel", "(", "i", ",", "j", ")", "else", ":", "result", ".", "columns", "=", "result", ".", "columns", ".", "swaplevel", "(", "i", ",", "j", ")", "return", "result" ]
Swap levels i and j in a MultiIndex on a particular axis.

Parameters
----------
i, j : int, string (can be mixed)
    Level of index to be swapped. Can pass level name as string.

    .. versionchanged:: 0.18.1

       The indexes ``i`` and ``j`` are now optional, and default to
       the two innermost levels of the index.
axis : {0 or 'index', 1 or 'columns'}, default 0
    The axis to swap levels on.

Returns
-------
DataFrame
[ "Swap", "levels", "i", "and", "j", "in", "a", "MultiIndex", "on", "a", "particular", "axis", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5057-L5082
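A minimal sketch; the MultiIndex and its level names are illustrative:

import pandas as pd

midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['outer', 'inner'])
df = pd.DataFrame({'v': range(4)}, index=midx)

df.swaplevel()                  # defaults (-2, -1) swap the two innermost levels
df.swaplevel('outer', 'inner')  # levels can be addressed by name or position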
19,692
pandas-dev/pandas
pandas/core/frame.py
DataFrame.reorder_levels
def reorder_levels(self, order, axis=0): """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). axis : int Where to reorder levels. Returns ------- type of caller (new object) """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy() if axis == 0: result.index = result.index.reorder_levels(order) else: result.columns = result.columns.reorder_levels(order) return result
python
def reorder_levels(self, order, axis=0): """ Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). axis : int Where to reorder levels. Returns ------- type of caller (new object) """ axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy() if axis == 0: result.index = result.index.reorder_levels(order) else: result.columns = result.columns.reorder_levels(order) return result
[ "def", "reorder_levels", "(", "self", ",", "order", ",", "axis", "=", "0", ")", ":", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "if", "not", "isinstance", "(", "self", ".", "_get_axis", "(", "axis", ")", ",", "MultiIndex", ")", ":", "# pragma: no cover", "raise", "TypeError", "(", "'Can only reorder levels on a hierarchical axis.'", ")", "result", "=", "self", ".", "copy", "(", ")", "if", "axis", "==", "0", ":", "result", ".", "index", "=", "result", ".", "index", ".", "reorder_levels", "(", "order", ")", "else", ":", "result", ".", "columns", "=", "result", ".", "columns", ".", "reorder_levels", "(", "order", ")", "return", "result" ]
Rearrange index levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). axis : int Where to reorder levels. Returns ------- type of caller (new object)
[ "Rearrange", "index", "levels", "using", "input", "order", ".", "May", "not", "drop", "or", "duplicate", "levels", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5084-L5112
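A minimal sketch; the index levels are illustrative:

import pandas as pd

midx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['outer', 'inner'])
df = pd.DataFrame({'v': range(4)}, index=midx)

df.reorder_levels(['inner', 'outer'])  # reference levels by name ...
df.reorder_levels([1, 0])              # ... or by position; same result
# A plain (non-MultiIndex) axis raises TypeError, per the code above.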
19,693
pandas-dev/pandas
pandas/core/frame.py
DataFrame.combine
def combine(self, other, func, fill_value=None, overwrite=True):
        """
        Perform column-wise combine with another DataFrame.

        Combines a DataFrame with `other` DataFrame using `func`
        to element-wise combine columns. The row and column indexes of the
        resulting DataFrame will be the union of the two.

        Parameters
        ----------
        other : DataFrame
            The DataFrame to merge column-wise.
        func : function
            Function that takes two series as inputs and returns a Series
            or a scalar. Used to merge the two dataframes column by column.
        fill_value : scalar value, default None
            The value to fill NaNs with prior to passing any column to the
            merge func.
        overwrite : bool, default True
            If True, columns in `self` that do not exist in `other` will be
            overwritten with NaNs.

        Returns
        -------
        DataFrame
            Combination of the provided DataFrames.

        See Also
        --------
        DataFrame.combine_first : Combine two DataFrame objects and default
            to non-null values in frame calling the method.

        Examples
        --------
        Combine using a simple function that chooses the smaller column.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
        >>> df1.combine(df2, take_smaller)
           A  B
        0  0  3
        1  0  3

        Example using a true element-wise combine function.

        >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine(df2, np.minimum)
           A  B
        0  1  2
        1  0  3

        Using `fill_value` fills Nones prior to passing the column to the
        merge function.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
        >>> df1.combine(df2, take_smaller, fill_value=-5)
           A    B
        0  0 -5.0
        1  0  4.0

        However, if the same element in both dataframes is None, that None
        is preserved.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
        >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
        >>> df1.combine(df2, take_smaller, fill_value=-5)
           A    B
        0  0 -5.0
        1  0  3.0

        Example that demonstrates the use of `overwrite` and behavior when
        the axes differ between the dataframes.

        >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
        >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
        >>> df1.combine(df2, take_smaller)
             A    B     C
        0  NaN  NaN   NaN
        1  NaN  3.0 -10.0
        2  NaN  3.0   1.0

        >>> df1.combine(df2, take_smaller, overwrite=False)
             A    B     C
        0  0.0  NaN   NaN
        1  0.0  3.0 -10.0
        2  NaN  3.0   1.0

        Demonstrating the preference of the passed-in dataframe.

        >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
        >>> df2.combine(df1, take_smaller)
             A    B   C
        0  0.0  NaN NaN
        1  0.0  3.0 NaN
        2  NaN  3.0 NaN

        >>> df2.combine(df1, take_smaller, overwrite=False)
             A    B   C
        0  0.0  NaN NaN
        1  0.0  3.0 1.0
        2  NaN  3.0 1.0
        """
        other_idxlen = len(other.index)  # save for compare

        this, other = self.align(other, copy=False)
        new_index = this.index

        if other.empty and len(new_index) == len(self.index):
            return self.copy()

        if self.empty and len(other) == other_idxlen:
            return other.copy()

        # sorts if possible
        new_columns = this.columns.union(other.columns)
        do_fill = fill_value is not None
        result = {}
        for col in new_columns:
            series = this[col]
            otherSeries = other[col]

            this_dtype = series.dtype
            other_dtype = otherSeries.dtype

            this_mask = isna(series)
            other_mask = isna(otherSeries)

            # don't overwrite columns unnecessarily
            # DO propagate if this column is not in the intersection
            if not overwrite and other_mask.all():
                result[col] = this[col].copy()
                continue

            if do_fill:
                series = series.copy()
                otherSeries = otherSeries.copy()
                series[this_mask] = fill_value
                otherSeries[other_mask] = fill_value

            if col not in self.columns:
                # If self DataFrame does not have col in other DataFrame,
                # try to promote series, which is all NaN, as other_dtype.
                new_dtype = other_dtype
                try:
                    series = series.astype(new_dtype, copy=False)
                except ValueError:
                    # e.g. new_dtype is integer types
                    pass
            else:
                # if we have different dtypes, possibly promote
                new_dtype = find_common_type([this_dtype, other_dtype])
                if not is_dtype_equal(this_dtype, new_dtype):
                    series = series.astype(new_dtype)
                if not is_dtype_equal(other_dtype, new_dtype):
                    otherSeries = otherSeries.astype(new_dtype)

            arr = func(series, otherSeries)

            arr = maybe_downcast_to_dtype(arr, this_dtype)

            result[col] = arr

        # convert_objects just in case
        return self._constructor(result, index=new_index, columns=new_columns)
python
def combine(self, other, func, fill_value=None, overwrite=True): """ Perform column-wise combine with another DataFrame. Combines a DataFrame with `other` DataFrame using `func` to element-wise combine columns. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame The DataFrame to merge column-wise. func : function Function that takes two series as inputs and return a Series or a scalar. Used to merge the two dataframes column by columns. fill_value : scalar value, default None The value to fill NaNs with prior to passing any column to the merge func. overwrite : bool, default True If True, columns in `self` that do not exist in `other` will be overwritten with NaNs. Returns ------- DataFrame Combination of the provided DataFrames. See Also -------- DataFrame.combine_first : Combine two DataFrame objects and default to non-null values in frame calling the method. Examples -------- Combine using a simple function that chooses the smaller column. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 >>> df1.combine(df2, take_smaller) A B 0 0 3 1 0 3 Example using a true element-wise combine function. >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, np.minimum) A B 0 1 2 1 0 3 Using `fill_value` fills Nones prior to passing the column to the merge function. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 4.0 However, if the same element in both dataframes is None, that None is preserved >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axis differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN 1 NaN 3.0 -10.0 2 NaN 3.0 1.0 >>> df1.combine(df2, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 -10.0 2 NaN 3.0 1.0 Demonstrating the preference of the passed in dataframe. 
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN 1 0.0 3.0 NaN 2 NaN 3.0 NaN >>> df2.combine(df1, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() # sorts if possible new_columns = this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] otherSeries = other[col] this_dtype = series.dtype other_dtype = otherSeries.dtype this_mask = isna(series) other_mask = isna(otherSeries) # don't overwrite columns unecessarily # DO propagate if this column is not in the intersection if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() otherSeries = otherSeries.copy() series[this_mask] = fill_value otherSeries[other_mask] = fill_value if col not in self.columns: # If self DataFrame does not have col in other DataFrame, # try to promote series, which is all NaN, as other_dtype. new_dtype = other_dtype try: series = series.astype(new_dtype, copy=False) except ValueError: # e.g. new_dtype is integer types pass else: # if we have different dtypes, possibly promote new_dtype = find_common_type([this_dtype, other_dtype]) if not is_dtype_equal(this_dtype, new_dtype): series = series.astype(new_dtype) if not is_dtype_equal(other_dtype, new_dtype): otherSeries = otherSeries.astype(new_dtype) arr = func(series, otherSeries) arr = maybe_downcast_to_dtype(arr, this_dtype) result[col] = arr # convert_objects just in case return self._constructor(result, index=new_index, columns=new_columns)
[ "def", "combine", "(", "self", ",", "other", ",", "func", ",", "fill_value", "=", "None", ",", "overwrite", "=", "True", ")", ":", "other_idxlen", "=", "len", "(", "other", ".", "index", ")", "# save for compare", "this", ",", "other", "=", "self", ".", "align", "(", "other", ",", "copy", "=", "False", ")", "new_index", "=", "this", ".", "index", "if", "other", ".", "empty", "and", "len", "(", "new_index", ")", "==", "len", "(", "self", ".", "index", ")", ":", "return", "self", ".", "copy", "(", ")", "if", "self", ".", "empty", "and", "len", "(", "other", ")", "==", "other_idxlen", ":", "return", "other", ".", "copy", "(", ")", "# sorts if possible", "new_columns", "=", "this", ".", "columns", ".", "union", "(", "other", ".", "columns", ")", "do_fill", "=", "fill_value", "is", "not", "None", "result", "=", "{", "}", "for", "col", "in", "new_columns", ":", "series", "=", "this", "[", "col", "]", "otherSeries", "=", "other", "[", "col", "]", "this_dtype", "=", "series", ".", "dtype", "other_dtype", "=", "otherSeries", ".", "dtype", "this_mask", "=", "isna", "(", "series", ")", "other_mask", "=", "isna", "(", "otherSeries", ")", "# don't overwrite columns unecessarily", "# DO propagate if this column is not in the intersection", "if", "not", "overwrite", "and", "other_mask", ".", "all", "(", ")", ":", "result", "[", "col", "]", "=", "this", "[", "col", "]", ".", "copy", "(", ")", "continue", "if", "do_fill", ":", "series", "=", "series", ".", "copy", "(", ")", "otherSeries", "=", "otherSeries", ".", "copy", "(", ")", "series", "[", "this_mask", "]", "=", "fill_value", "otherSeries", "[", "other_mask", "]", "=", "fill_value", "if", "col", "not", "in", "self", ".", "columns", ":", "# If self DataFrame does not have col in other DataFrame,", "# try to promote series, which is all NaN, as other_dtype.", "new_dtype", "=", "other_dtype", "try", ":", "series", "=", "series", ".", "astype", "(", "new_dtype", ",", "copy", "=", "False", ")", "except", "ValueError", ":", "# e.g. new_dtype is integer types", "pass", "else", ":", "# if we have different dtypes, possibly promote", "new_dtype", "=", "find_common_type", "(", "[", "this_dtype", ",", "other_dtype", "]", ")", "if", "not", "is_dtype_equal", "(", "this_dtype", ",", "new_dtype", ")", ":", "series", "=", "series", ".", "astype", "(", "new_dtype", ")", "if", "not", "is_dtype_equal", "(", "other_dtype", ",", "new_dtype", ")", ":", "otherSeries", "=", "otherSeries", ".", "astype", "(", "new_dtype", ")", "arr", "=", "func", "(", "series", ",", "otherSeries", ")", "arr", "=", "maybe_downcast_to_dtype", "(", "arr", ",", "this_dtype", ")", "result", "[", "col", "]", "=", "arr", "# convert_objects just in case", "return", "self", ".", "_constructor", "(", "result", ",", "index", "=", "new_index", ",", "columns", "=", "new_columns", ")" ]
Perform column-wise combine with another DataFrame. Combines a DataFrame with `other` DataFrame using `func` to element-wise combine columns. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame The DataFrame to merge column-wise. func : function Function that takes two Series as inputs and returns a Series or a scalar. Used to merge the two dataframes column by column. fill_value : scalar value, default None The value to fill NaNs with prior to passing any column to the merge func. overwrite : bool, default True If True, columns in `self` that do not exist in `other` will be overwritten with NaNs. Returns ------- DataFrame Combination of the provided DataFrames. See Also -------- DataFrame.combine_first : Combine two DataFrame objects and default to non-null values in frame calling the method. Examples -------- Combine using a simple function that chooses the smaller column. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2 >>> df1.combine(df2, take_smaller) A B 0 0 3 1 0 3 Example using a true element-wise combine function. >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, np.minimum) A B 0 1 2 1 0 3 Using `fill_value` fills Nones prior to passing the column to the merge function. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 4.0 However, if the same element in both dataframes is None, that None is preserved. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]}) >>> df1.combine(df2, take_smaller, fill_value=-5) A B 0 0 -5.0 1 0 3.0 Example that demonstrates the use of `overwrite` and behavior when the axes differ between the dataframes. >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2]) >>> df1.combine(df2, take_smaller) A B C 0 NaN NaN NaN 1 NaN 3.0 -10.0 2 NaN 3.0 1.0 >>> df1.combine(df2, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 -10.0 2 NaN 3.0 1.0 Demonstrating the preference of the passed-in dataframe. >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2]) >>> df2.combine(df1, take_smaller) A B C 0 0.0 NaN NaN 1 0.0 3.0 NaN 2 NaN 3.0 NaN >>> df2.combine(df1, take_smaller, overwrite=False) A B C 0 0.0 NaN NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0
[ "Perform", "column", "-", "wise", "combine", "with", "another", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5164-L5330
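Editor's note — usage sketch, not a field of the record above: a minimal, runnable illustration of `DataFrame.combine` as documented at this SHA. The frames `df1`/`df2` and the variable `result` are illustrative; the expected values follow the record's own docstring semantics.

import numpy as np
import pandas as pd

df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})

# `func` receives one aligned column from each frame at a time
result = df1.combine(df2, np.minimum)
print(result)  # element-wise minimum: A -> [0, 0], B -> [3, 3]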
19,694
pandas-dev/pandas
pandas/core/frame.py
DataFrame.combine_first
def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False)
python
def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False)
[ "def", "combine_first", "(", "self", ",", "other", ")", ":", "import", "pandas", ".", "core", ".", "computation", ".", "expressions", "as", "expressions", "def", "extract_values", "(", "arr", ")", ":", "# Does two things:", "# 1. maybe gets the values from the Series / Index", "# 2. convert datelike to i8", "if", "isinstance", "(", "arr", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "arr", "=", "arr", ".", "_values", "if", "needs_i8_conversion", "(", "arr", ")", ":", "if", "is_extension_array_dtype", "(", "arr", ".", "dtype", ")", ":", "arr", "=", "arr", ".", "asi8", "else", ":", "arr", "=", "arr", ".", "view", "(", "'i8'", ")", "return", "arr", "def", "combiner", "(", "x", ",", "y", ")", ":", "mask", "=", "isna", "(", "x", ")", "if", "isinstance", "(", "mask", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "mask", "=", "mask", ".", "_values", "x_values", "=", "extract_values", "(", "x", ")", "y_values", "=", "extract_values", "(", "y", ")", "# If the column y in other DataFrame is not in first DataFrame,", "# just return y_values.", "if", "y", ".", "name", "not", "in", "self", ".", "columns", ":", "return", "y_values", "return", "expressions", ".", "where", "(", "mask", ",", "y_values", ",", "x_values", ")", "return", "self", ".", "combine", "(", "other", ",", "combiner", ",", "overwrite", "=", "False", ")" ]
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0
[ "Update", "null", "elements", "with", "value", "in", "the", "same", "location", "in", "other", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5332-L5406
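Editor's note — usage sketch, not a field of the record above: `combine_first` patching nulls in the caller from `other`. The frames are taken directly from the record's docstring example.

import pandas as pd

df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})

# nulls in df1 are filled from the same locations in df2
print(df1.combine_first(df2))  # A -> [1.0, 0.0], B -> [3.0, 4.0]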
19,695
pandas-dev/pandas
pandas/core/frame.py
DataFrame.update
def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. .. versionchanged :: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-columns(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 
'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ['ignore', 'raise']: raise ValueError("The parameter errors must be either " "'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col]._values that = other[col]._values if filter_func is not None: with np.errstate(all='ignore'): mask = ~filter_func(this) | isna(that) else: if errors == 'raise': mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that)
python
def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. .. versionchanged :: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-columns(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 
'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0 """ import pandas.core.computation.expressions as expressions # TODO: Support other joins if join != 'left': # pragma: no cover raise NotImplementedError("Only left join is supported") if errors not in ['ignore', 'raise']: raise ValueError("The parameter errors must be either " "'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) other = other.reindex_like(self) for col in self.columns: this = self[col]._values that = other[col]._values if filter_func is not None: with np.errstate(all='ignore'): mask = ~filter_func(this) | isna(that) else: if errors == 'raise': mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError("Data overlaps.") if overwrite: mask = isna(that) else: mask = notna(this) # don't overwrite columns unecessarily if mask.all(): continue self[col] = expressions.where(mask, this, that)
[ "def", "update", "(", "self", ",", "other", ",", "join", "=", "'left'", ",", "overwrite", "=", "True", ",", "filter_func", "=", "None", ",", "errors", "=", "'ignore'", ")", ":", "import", "pandas", ".", "core", ".", "computation", ".", "expressions", "as", "expressions", "# TODO: Support other joins", "if", "join", "!=", "'left'", ":", "# pragma: no cover", "raise", "NotImplementedError", "(", "\"Only left join is supported\"", ")", "if", "errors", "not", "in", "[", "'ignore'", ",", "'raise'", "]", ":", "raise", "ValueError", "(", "\"The parameter errors must be either \"", "\"'ignore' or 'raise'\"", ")", "if", "not", "isinstance", "(", "other", ",", "DataFrame", ")", ":", "other", "=", "DataFrame", "(", "other", ")", "other", "=", "other", ".", "reindex_like", "(", "self", ")", "for", "col", "in", "self", ".", "columns", ":", "this", "=", "self", "[", "col", "]", ".", "_values", "that", "=", "other", "[", "col", "]", ".", "_values", "if", "filter_func", "is", "not", "None", ":", "with", "np", ".", "errstate", "(", "all", "=", "'ignore'", ")", ":", "mask", "=", "~", "filter_func", "(", "this", ")", "|", "isna", "(", "that", ")", "else", ":", "if", "errors", "==", "'raise'", ":", "mask_this", "=", "notna", "(", "that", ")", "mask_that", "=", "notna", "(", "this", ")", "if", "any", "(", "mask_this", "&", "mask_that", ")", ":", "raise", "ValueError", "(", "\"Data overlaps.\"", ")", "if", "overwrite", ":", "mask", "=", "isna", "(", "that", ")", "else", ":", "mask", "=", "notna", "(", "this", ")", "# don't overwrite columns unecessarily", "if", "mask", ".", "all", "(", ")", ":", "continue", "self", "[", "col", "]", "=", "expressions", ".", "where", "(", "mask", ",", "this", ",", "that", ")" ]
Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or object coercible into a DataFrame Should have at least one matching index/column label with the original DataFrame. If a Series is passed, its name attribute must be set, and that will be used as the column name to align with the original DataFrame. join : {'left'}, default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. filter_func : callable(1d-array) -> bool 1d-array, optional Can choose to replace values other than NA. Return True for values that should be updated. errors : {'raise', 'ignore'}, default 'ignore' If 'raise', will raise a ValueError if the DataFrame and `other` both contain non-NA data in the same place. .. versionchanged:: 0.24.0 Changed from `raise_conflict=False|True` to `errors='ignore'|'raise'`. Returns ------- None : method directly changes calling object Raises ------ ValueError * When `errors='raise'` and there's overlapping non-NA data. * When `errors` is not either `'ignore'` or `'raise'` NotImplementedError * If `join != 'left'` See Also -------- dict.update : Similar method for dictionaries. DataFrame.merge : For column(s)-on-columns(s) operations. Examples -------- >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, 5, 6], ... 'C': [7, 8, 9]}) >>> df.update(new_df) >>> df A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}) >>> df.update(new_df) >>> df A B 0 a d 1 b e 2 c f For Series, its name attribute must be set. >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df A B 0 a d 1 b y 2 c e >>> df = pd.DataFrame({'A': ['a', 'b', 'c'], ... 'B': ['x', 'y', 'z']}) >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2]) >>> df.update(new_df) >>> df A B 0 a x 1 b d 2 c e If `other` contains NaNs, the corresponding values are not updated in the original dataframe. >>> df = pd.DataFrame({'A': [1, 2, 3], ... 'B': [400, 500, 600]}) >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]}) >>> df.update(new_df) >>> df A B 0 1 4.0 1 2 500.0 2 3 6.0
[ "Modify", "in", "place", "using", "non", "-", "NA", "values", "from", "another", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5410-L5558
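Editor's note — usage sketch, not a field of the record above: `update` works in place and skips NaNs in `other`. Frames `df`/`new` are illustrative; the expected values follow the record's own docstring example.

import numpy as np
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]})
new = pd.DataFrame({'B': [4, np.nan, 6]})

df.update(new)           # in place; returns None
print(df['B'].tolist())  # [4.0, 500.0, 6.0] -- NaN in `new` leaves 500 untouched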
19,696
pandas-dev/pandas
pandas/core/frame.py
DataFrame.apply
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, args=(), **kwds): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. broadcast : bool, optional Only relevant for aggregation functions: * ``False`` or ``None`` : returns a Series whose length is the length of the index or the number of columns (based on the `axis` parameter) * ``True`` : results will be broadcast to the original shape of the frame, the original index and columns will be retained. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by result_type='broadcast'. raw : bool, default False * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. reduce : bool or None, default None Try to apply reduction procedures. If the DataFrame is empty, `apply` will use `reduce` to determine whether the result should be a Series or a DataFrame. If ``reduce=None`` (the default), `apply`'s return value will be guessed by calling `func` on an empty Series (note: while guessing, exceptions raised by `func` will be ignored). If ``reduce=True`` a Series will always be returned, and if ``reduce=False`` a DataFrame will always be returned. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by ``result_type='reduce'``. result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. However if the apply function returns a Series these are expanded to columns. .. versionadded:: 0.23.0 args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Notes ----- In the current implementation apply calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. 
Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis: >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing result_type='expand' will expand list-like results to columns of a DataFrame >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2 """ from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, broadcast=broadcast, raw=raw, reduce=reduce, result_type=result_type, args=args, kwds=kwds) return op.get_result()
python
def apply(self, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, args=(), **kwds): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. broadcast : bool, optional Only relevant for aggregation functions: * ``False`` or ``None`` : returns a Series whose length is the length of the index or the number of columns (based on the `axis` parameter) * ``True`` : results will be broadcast to the original shape of the frame, the original index and columns will be retained. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by result_type='broadcast'. raw : bool, default False * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. reduce : bool or None, default None Try to apply reduction procedures. If the DataFrame is empty, `apply` will use `reduce` to determine whether the result should be a Series or a DataFrame. If ``reduce=None`` (the default), `apply`'s return value will be guessed by calling `func` on an empty Series (note: while guessing, exceptions raised by `func` will be ignored). If ``reduce=True`` a Series will always be returned, and if ``reduce=False`` a DataFrame will always be returned. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by ``result_type='reduce'``. result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. However if the apply function returns a Series these are expanded to columns. .. versionadded:: 0.23.0 args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Notes ----- In the current implementation apply calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. 
Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis: >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing result_type='expand' will expand list-like results to columns of a DataFrame >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2 """ from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, broadcast=broadcast, raw=raw, reduce=reduce, result_type=result_type, args=args, kwds=kwds) return op.get_result()
[ "def", "apply", "(", "self", ",", "func", ",", "axis", "=", "0", ",", "broadcast", "=", "None", ",", "raw", "=", "False", ",", "reduce", "=", "None", ",", "result_type", "=", "None", ",", "args", "=", "(", ")", ",", "*", "*", "kwds", ")", ":", "from", "pandas", ".", "core", ".", "apply", "import", "frame_apply", "op", "=", "frame_apply", "(", "self", ",", "func", "=", "func", ",", "axis", "=", "axis", ",", "broadcast", "=", "broadcast", ",", "raw", "=", "raw", ",", "reduce", "=", "reduce", ",", "result_type", "=", "result_type", ",", "args", "=", "args", ",", "kwds", "=", "kwds", ")", "return", "op", ".", "get_result", "(", ")" ]
Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). By default (``result_type=None``), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the `result_type` argument. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. broadcast : bool, optional Only relevant for aggregation functions: * ``False`` or ``None`` : returns a Series whose length is the length of the index or the number of columns (based on the `axis` parameter) * ``True`` : results will be broadcast to the original shape of the frame, the original index and columns will be retained. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by result_type='broadcast'. raw : bool, default False * ``False`` : passes each row or column as a Series to the function. * ``True`` : the passed function will receive ndarray objects instead. If you are just applying a NumPy reduction function this will achieve much better performance. reduce : bool or None, default None Try to apply reduction procedures. If the DataFrame is empty, `apply` will use `reduce` to determine whether the result should be a Series or a DataFrame. If ``reduce=None`` (the default), `apply`'s return value will be guessed by calling `func` on an empty Series (note: while guessing, exceptions raised by `func` will be ignored). If ``reduce=True`` a Series will always be returned, and if ``reduce=False`` a DataFrame will always be returned. .. deprecated:: 0.23.0 This argument will be removed in a future version, replaced by ``result_type='reduce'``. result_type : {'expand', 'reduce', 'broadcast', None}, default None These only act when ``axis=1`` (columns): * 'expand' : list-like results will be turned into columns. * 'reduce' : returns a Series if possible rather than expanding list-like results. This is the opposite of 'expand'. * 'broadcast' : results will be broadcast to the original shape of the DataFrame, the original index and columns will be retained. The default behaviour (None) depends on the return value of the applied function: list-like results will be returned as a Series of those. However if the apply function returns a Series these are expanded to columns. .. versionadded:: 0.23.0 args : tuple Positional arguments to pass to `func` in addition to the array/series. **kwds Additional keyword arguments to pass as keywords arguments to `func`. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Notes ----- In the current implementation apply calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. 
Examples -------- >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> df.apply(np.sqrt) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 Using a reducing function on either axis: >>> df.apply(np.sum, axis=0) A 12 B 27 dtype: int64 >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] dtype: object Passing result_type='expand' will expand list-like results to columns of a DataFrame >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand') 0 1 0 1 2 1 1 2 2 1 2 Returning a Series inside the function is similar to passing ``result_type='expand'``. The resulting column names will be the Series index. >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1) foo bar 0 1 2 1 1 2 2 1 2 Passing ``result_type='broadcast'`` will ensure the same shape result, whether list-like or scalar is returned by the function, and broadcast it along the axis. The resulting column names will be the originals. >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast') A B 0 1 2 1 1 2 2 1 2
[ "Apply", "a", "function", "along", "an", "axis", "of", "the", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L6355-L6534
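Editor's note — usage sketch, not a field of the record above: the two most common `apply` patterns from the record's docstring, a column-wise reduction and a row-wise expansion. Names `df`/`col_sums`/`expanded` are illustrative.

import numpy as np
import pandas as pd

df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])

col_sums = df.apply(np.sum, axis=0)  # reduces each column: A -> 12, B -> 27
print(col_sums)

# with result_type='expand', list-likes returned per row become columns
expanded = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
print(expanded.shape)  # (3, 2)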
19,697
pandas-dev/pandas
pandas/core/frame.py
DataFrame.applymap
def applymap(self, func): """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. Notes ----- In the current implementation applymap calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. >>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func) return lib.map_infer(x.astype(object).values, func) return self.apply(infer)
python
def applymap(self, func): """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. Notes ----- In the current implementation applymap calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. >>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # if we have a dtype == 'M8[ns]', provide boxed values def infer(x): if x.empty: return lib.map_infer(x, func) return lib.map_infer(x.astype(object).values, func) return self.apply(infer)
[ "def", "applymap", "(", "self", ",", "func", ")", ":", "# if we have a dtype == 'M8[ns]', provide boxed values", "def", "infer", "(", "x", ")", ":", "if", "x", ".", "empty", ":", "return", "lib", ".", "map_infer", "(", "x", ",", "func", ")", "return", "lib", ".", "map_infer", "(", "x", ".", "astype", "(", "object", ")", ".", "values", ",", "func", ")", "return", "self", ".", "apply", "(", "infer", ")" ]
Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. See Also -------- DataFrame.apply : Apply a function along input axis of DataFrame. Notes ----- In the current implementation applymap calls `func` twice on the first column/row to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if `func` has side-effects, as they will take effect twice for the first column/row. Examples -------- >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> df.applymap(lambda x: len(str(x))) 0 1 0 3 4 1 5 5 Note that a vectorized version of `func` often exists, which will be much faster. You could square each number elementwise. >>> df.applymap(lambda x: x**2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 But it's better to avoid applymap in that case. >>> df ** 2 0 1 0 1.000000 4.494400 1 11.262736 20.857489
[ "Apply", "a", "function", "to", "a", "Dataframe", "elementwise", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L6536-L6600
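Editor's note — usage sketch, not a field of the record above: `applymap` applies a scalar-to-scalar function element-wise; as the record's docstring notes, a vectorized expression is preferable when one exists.

import pandas as pd

df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])

print(df.applymap(lambda x: len(str(x))))  # element-wise scalar -> scalar
print(df ** 2)  # equivalent vectorized form for squaring, and much faster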
19,698
pandas-dev/pandas
pandas/core/frame.py
DataFrame.append
def append(self, other, ignore_index=False, verify_integrity=False, sort=None): """ Append rows of `other` to the end of caller, returning a new object. Columns in `other` that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default None Sort columns if the columns of `self` and `other` are not aligned. The default sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- concat : General function to concatenate DataFrame, Series or Panel objects. Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') if other.name is None: index = None else: # other must have the same index name as self, otherwise # index name will be reset index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) try: combined_columns = self.columns.append(idx_diff) except TypeError: combined_columns = self.columns.astype(object).append(idx_diff) other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns) other = other._convert(datetime=True, timedelta=True) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.reindex(columns=self.columns) from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort)
python
def append(self, other, ignore_index=False, verify_integrity=False, sort=None): """ Append rows of `other` to the end of caller, returning a new object. Columns in `other` that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default None Sort columns if the columns of `self` and `other` are not aligned. The default sorting is deprecated and will change to not-sorting in a future version of pandas. Explicitly pass ``sort=True`` to silence the warning and sort. Explicitly pass ``sort=False`` to silence the warning and not sort. .. versionadded:: 0.23.0 Returns ------- DataFrame See Also -------- concat : General function to concatenate DataFrame, Series or Panel objects. Notes ----- If a list of dict/series is passed and the keys are all contained in the DataFrame's index, the order of the columns in the resulting DataFrame will be unchanged. Iteratively appending rows to a DataFrame can be more computationally intensive than a single concatenate. A better solution is to append those rows to a list and then concatenate the list with the original DataFrame all at once. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df A B 0 1 2 1 3 4 >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB')) >>> df.append(df2) A B 0 1 2 1 3 4 0 5 6 1 7 8 With `ignore_index` set to True: >>> df.append(df2, ignore_index=True) A B 0 1 2 1 3 4 2 5 6 3 7 8 The following, while not recommended methods for generating DataFrames, show two ways to generate a DataFrame from multiple data sources. Less efficient: >>> df = pd.DataFrame(columns=['A']) >>> for i in range(5): ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 1 1 2 2 3 3 4 4 More efficient: >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)], ... ignore_index=True) A 0 0 1 1 2 2 3 3 4 4 """ if isinstance(other, (Series, dict)): if isinstance(other, dict): other = Series(other) if other.name is None and not ignore_index: raise TypeError('Can only append a Series if ignore_index=True' ' or if the Series has a name') if other.name is None: index = None else: # other must have the same index name as self, otherwise # index name will be reset index = Index([other.name], name=self.index.name) idx_diff = other.index.difference(self.columns) try: combined_columns = self.columns.append(idx_diff) except TypeError: combined_columns = self.columns.astype(object).append(idx_diff) other = other.reindex(combined_columns, copy=False) other = DataFrame(other.values.reshape((1, len(other))), index=index, columns=combined_columns) other = other._convert(datetime=True, timedelta=True) if not self.columns.equals(combined_columns): self = self.reindex(columns=combined_columns) elif isinstance(other, list) and not isinstance(other[0], DataFrame): other = DataFrame(other) if (self.columns.get_indexer(other.columns) >= 0).all(): other = other.reindex(columns=self.columns) from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self] + other else: to_concat = [self, other] return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort)
[ "def", "append", "(", "self", ",", "other", ",", "ignore_index", "=", "False", ",", "verify_integrity", "=", "False", ",", "sort", "=", "None", ")", ":", "if", "isinstance", "(", "other", ",", "(", "Series", ",", "dict", ")", ")", ":", "if", "isinstance", "(", "other", ",", "dict", ")", ":", "other", "=", "Series", "(", "other", ")", "if", "other", ".", "name", "is", "None", "and", "not", "ignore_index", ":", "raise", "TypeError", "(", "'Can only append a Series if ignore_index=True'", "' or if the Series has a name'", ")", "if", "other", ".", "name", "is", "None", ":", "index", "=", "None", "else", ":", "# other must have the same index name as self, otherwise", "# index name will be reset", "index", "=", "Index", "(", "[", "other", ".", "name", "]", ",", "name", "=", "self", ".", "index", ".", "name", ")", "idx_diff", "=", "other", ".", "index", ".", "difference", "(", "self", ".", "columns", ")", "try", ":", "combined_columns", "=", "self", ".", "columns", ".", "append", "(", "idx_diff", ")", "except", "TypeError", ":", "combined_columns", "=", "self", ".", "columns", ".", "astype", "(", "object", ")", ".", "append", "(", "idx_diff", ")", "other", "=", "other", ".", "reindex", "(", "combined_columns", ",", "copy", "=", "False", ")", "other", "=", "DataFrame", "(", "other", ".", "values", ".", "reshape", "(", "(", "1", ",", "len", "(", "other", ")", ")", ")", ",", "index", "=", "index", ",", "columns", "=", "combined_columns", ")", "other", "=", "other", ".", "_convert", "(", "datetime", "=", "True", ",", "timedelta", "=", "True", ")", "if", "not", "self", ".", "columns", ".", "equals", "(", "combined_columns", ")", ":", "self", "=", "self", ".", "reindex", "(", "columns", "=", "combined_columns", ")", "elif", "isinstance", "(", "other", ",", "list", ")", "and", "not", "isinstance", "(", "other", "[", "0", "]", ",", "DataFrame", ")", ":", "other", "=", "DataFrame", "(", "other", ")", "if", "(", "self", ".", "columns", ".", "get_indexer", "(", "other", ".", "columns", ")", ">=", "0", ")", ".", "all", "(", ")", ":", "other", "=", "other", ".", "reindex", "(", "columns", "=", "self", ".", "columns", ")", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "if", "isinstance", "(", "other", ",", "(", "list", ",", "tuple", ")", ")", ":", "to_concat", "=", "[", "self", "]", "+", "other", "else", ":", "to_concat", "=", "[", "self", ",", "other", "]", "return", "concat", "(", "to_concat", ",", "ignore_index", "=", "ignore_index", ",", "verify_integrity", "=", "verify_integrity", ",", "sort", "=", "sort", ")" ]
Append rows of `other` to the end of caller, returning a new object.

Columns in `other` that are not in the caller are added as new columns.

Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
    The data to append.
ignore_index : boolean, default False
    If True, do not use the index labels.
verify_integrity : boolean, default False
    If True, raise ValueError on creating index with duplicates.
sort : boolean, default None
    Sort columns if the columns of `self` and `other` are not aligned.
    The default sorting is deprecated and will change to not-sorting
    in a future version of pandas. Explicitly pass ``sort=True`` to
    silence the warning and sort. Explicitly pass ``sort=False`` to
    silence the warning and not sort.

    .. versionadded:: 0.23.0

Returns
-------
DataFrame

See Also
--------
concat : General function to concatenate DataFrame, Series
    or Panel objects.

Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.

Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenation. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.

Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
   A  B
0  1  2
1  3  4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
   A  B
0  1  2
1  3  4
0  5  6
1  7  8

With `ignore_index` set to True:

>>> df.append(df2, ignore_index=True)
   A  B
0  1  2
1  3  4
2  5  6
3  7  8

The following examples, while not recommended methods for generating
DataFrames, show two ways to generate a DataFrame from multiple data
sources.

Less efficient:

>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
...     df = df.append({'A': i}, ignore_index=True)
>>> df
   A
0  0
1  1
2  2
3  3
4  4

More efficient:

>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
...           ignore_index=True)
   A
0  0
1  1
2  2
3  3
4  4
[ "Append", "rows", "of", "other", "to", "the", "end", "of", "caller", "returning", "a", "new", "object", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L6605-L6739
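A short usage sketch for the append record above, added here for illustration; it is not part of the original dataset row. It exercises the Series/dict branch visible in the code field: a dict is wrapped in a Series internally, a nameless Series demands ignore_index=True, and a named Series contributes its name as the new row label. This assumes a pandas version in which DataFrame.append still exists (the method was deprecated in pandas 1.4 and removed in 2.0 in favor of pd.concat); the data values are invented for the demo.

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))

# A dict is wrapped in a Series internally; the Series has no name,
# so ignore_index=True is required.
print(df.append({'A': 5, 'B': 6}, ignore_index=True))

# A named Series appends without ignore_index; its name becomes the
# new row's index label.
print(df.append(pd.Series({'A': 7, 'B': 8}, name='r2')))

# A nameless Series without ignore_index trips the TypeError guard at
# the top of the method.
try:
    df.append(pd.Series({'A': 9, 'B': 10}))
except TypeError as exc:
    print(exc)

As the Notes field in the docstring warns, building a frame by repeated append re-copies the data each time; collecting rows in a list and calling pd.concat once is the cheaper pattern.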
19,699
pandas-dev/pandas
pandas/core/frame.py
DataFrame.join
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
         sort=False):
    """
    Join columns of another DataFrame.

    Join columns with `other` DataFrame either on index or on a key
    column. Efficiently join multiple DataFrame objects by index at once
    by passing a list.

    Parameters
    ----------
    other : DataFrame, Series, or list of DataFrame
        Index should be similar to one of the columns in this one. If a
        Series is passed, its name attribute must be set, and that will
        be used as the column name in the resulting joined DataFrame.
    on : str, list of str, or array-like, optional
        Column or index level name(s) in the caller to join on the index
        in `other`, otherwise joins index-on-index. If multiple values
        given, the `other` DataFrame must have a MultiIndex. Can pass an
        array as the join key if it is not already contained in the
        calling DataFrame. Like an Excel VLOOKUP operation.
    how : {'left', 'right', 'outer', 'inner'}, default 'left'
        How to handle the operation of the two objects.

        * left: use calling frame's index (or column if on is specified)
        * right: use `other`'s index.
        * outer: form union of calling frame's index (or column if on is
          specified) with `other`'s index, and sort it lexicographically.
        * inner: form intersection of calling frame's index (or column if
          on is specified) with `other`'s index, preserving the order of
          the calling frame's index.
    lsuffix : str, default ''
        Suffix to use from left frame's overlapping columns.
    rsuffix : str, default ''
        Suffix to use from right frame's overlapping columns.
    sort : bool, default False
        Order result DataFrame lexicographically by the join key. If
        False, the order of the join key depends on the join type
        (how keyword).

    Returns
    -------
    DataFrame
        A dataframe containing columns from both the caller and `other`.

    See Also
    --------
    DataFrame.merge : For column(s)-on-column(s) operations.

    Notes
    -----
    Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
    passing a list of `DataFrame` objects.

    Support for specifying index levels as the `on` parameter was added
    in version 0.23.0.

    Examples
    --------
    >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
    ...                    'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
    >>> df
      key   A
    0  K0  A0
    1  K1  A1
    2  K2  A2
    3  K3  A3
    4  K4  A4
    5  K5  A5

    >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
    ...                       'B': ['B0', 'B1', 'B2']})
    >>> other
      key   B
    0  K0  B0
    1  K1  B1
    2  K2  B2

    Join DataFrames using their indexes.

    >>> df.join(other, lsuffix='_caller', rsuffix='_other')
      key_caller   A key_other    B
    0         K0  A0        K0   B0
    1         K1  A1        K1   B1
    2         K2  A2        K2   B2
    3         K3  A3       NaN  NaN
    4         K4  A4       NaN  NaN
    5         K5  A5       NaN  NaN

    If we want to join using the key columns, we need to set key to be
    the index in both `df` and `other`. The joined DataFrame will have
    key as its index.

    >>> df.set_index('key').join(other.set_index('key'))
          A    B
    key
    K0   A0   B0
    K1   A1   B1
    K2   A2   B2
    K3   A3  NaN
    K4   A4  NaN
    K5   A5  NaN

    Another option to join using the key columns is to use the `on`
    parameter. DataFrame.join always uses `other`'s index but we can use
    any column in `df`. This method preserves the original DataFrame's
    index in the result.

    >>> df.join(other.set_index('key'), on='key')
      key   A    B
    0  K0  A0   B0
    1  K1  A1   B1
    2  K2  A2   B2
    3  K3  A3  NaN
    4  K4  A4  NaN
    5  K5  A5  NaN
    """
    # For SparseDataFrame's benefit
    return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
                             rsuffix=rsuffix, sort=sort)
python
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
         sort=False):
    """
    Join columns of another DataFrame.

    Join columns with `other` DataFrame either on index or on a key
    column. Efficiently join multiple DataFrame objects by index at once
    by passing a list.

    Parameters
    ----------
    other : DataFrame, Series, or list of DataFrame
        Index should be similar to one of the columns in this one. If a
        Series is passed, its name attribute must be set, and that will
        be used as the column name in the resulting joined DataFrame.
    on : str, list of str, or array-like, optional
        Column or index level name(s) in the caller to join on the index
        in `other`, otherwise joins index-on-index. If multiple values
        given, the `other` DataFrame must have a MultiIndex. Can pass an
        array as the join key if it is not already contained in the
        calling DataFrame. Like an Excel VLOOKUP operation.
    how : {'left', 'right', 'outer', 'inner'}, default 'left'
        How to handle the operation of the two objects.

        * left: use calling frame's index (or column if on is specified)
        * right: use `other`'s index.
        * outer: form union of calling frame's index (or column if on is
          specified) with `other`'s index, and sort it lexicographically.
        * inner: form intersection of calling frame's index (or column if
          on is specified) with `other`'s index, preserving the order of
          the calling frame's index.
    lsuffix : str, default ''
        Suffix to use from left frame's overlapping columns.
    rsuffix : str, default ''
        Suffix to use from right frame's overlapping columns.
    sort : bool, default False
        Order result DataFrame lexicographically by the join key. If
        False, the order of the join key depends on the join type
        (how keyword).

    Returns
    -------
    DataFrame
        A dataframe containing columns from both the caller and `other`.

    See Also
    --------
    DataFrame.merge : For column(s)-on-column(s) operations.

    Notes
    -----
    Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
    passing a list of `DataFrame` objects.

    Support for specifying index levels as the `on` parameter was added
    in version 0.23.0.

    Examples
    --------
    >>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
    ...                    'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
    >>> df
      key   A
    0  K0  A0
    1  K1  A1
    2  K2  A2
    3  K3  A3
    4  K4  A4
    5  K5  A5

    >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
    ...                       'B': ['B0', 'B1', 'B2']})
    >>> other
      key   B
    0  K0  B0
    1  K1  B1
    2  K2  B2

    Join DataFrames using their indexes.

    >>> df.join(other, lsuffix='_caller', rsuffix='_other')
      key_caller   A key_other    B
    0         K0  A0        K0   B0
    1         K1  A1        K1   B1
    2         K2  A2        K2   B2
    3         K3  A3       NaN  NaN
    4         K4  A4       NaN  NaN
    5         K5  A5       NaN  NaN

    If we want to join using the key columns, we need to set key to be
    the index in both `df` and `other`. The joined DataFrame will have
    key as its index.

    >>> df.set_index('key').join(other.set_index('key'))
          A    B
    key
    K0   A0   B0
    K1   A1   B1
    K2   A2   B2
    K3   A3  NaN
    K4   A4  NaN
    K5   A5  NaN

    Another option to join using the key columns is to use the `on`
    parameter. DataFrame.join always uses `other`'s index but we can use
    any column in `df`. This method preserves the original DataFrame's
    index in the result.

    >>> df.join(other.set_index('key'), on='key')
      key   A    B
    0  K0  A0   B0
    1  K1  A1   B1
    2  K2  A2   B2
    3  K3  A3  NaN
    4  K4  A4  NaN
    5  K5  A5  NaN
    """
    # For SparseDataFrame's benefit
    return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
                             rsuffix=rsuffix, sort=sort)
[ "def", "join", "(", "self", ",", "other", ",", "on", "=", "None", ",", "how", "=", "'left'", ",", "lsuffix", "=", "''", ",", "rsuffix", "=", "''", ",", "sort", "=", "False", ")", ":", "# For SparseDataFrame's benefit", "return", "self", ".", "_join_compat", "(", "other", ",", "on", "=", "on", ",", "how", "=", "how", ",", "lsuffix", "=", "lsuffix", ",", "rsuffix", "=", "rsuffix", ",", "sort", "=", "sort", ")" ]
Join columns of another DataFrame.

Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once
by passing a list.

Parameters
----------
other : DataFrame, Series, or list of DataFrame
    Index should be similar to one of the columns in this one. If a
    Series is passed, its name attribute must be set, and that will
    be used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
    Column or index level name(s) in the caller to join on the index
    in `other`, otherwise joins index-on-index. If multiple values
    given, the `other` DataFrame must have a MultiIndex. Can pass an
    array as the join key if it is not already contained in the
    calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
    How to handle the operation of the two objects.

    * left: use calling frame's index (or column if on is specified)
    * right: use `other`'s index.
    * outer: form union of calling frame's index (or column if on is
      specified) with `other`'s index, and sort it lexicographically.
    * inner: form intersection of calling frame's index (or column if
      on is specified) with `other`'s index, preserving the order of
      the calling frame's index.
lsuffix : str, default ''
    Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
    Suffix to use from right frame's overlapping columns.
sort : bool, default False
    Order result DataFrame lexicographically by the join key. If
    False, the order of the join key depends on the join type
    (how keyword).

Returns
-------
DataFrame
    A dataframe containing columns from both the caller and `other`.

See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.

Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.

Support for specifying index levels as the `on` parameter was added
in version 0.23.0.

Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
...                    'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
  key   A
0  K0  A0
1  K1  A1
2  K2  A2
3  K3  A3
4  K4  A4
5  K5  A5

>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
...                       'B': ['B0', 'B1', 'B2']})
>>> other
  key   B
0  K0  B0
1  K1  B1
2  K2  B2

Join DataFrames using their indexes.

>>> df.join(other, lsuffix='_caller', rsuffix='_other')
  key_caller   A key_other    B
0         K0  A0        K0   B0
1         K1  A1        K1   B1
2         K2  A2        K2   B2
3         K3  A3       NaN  NaN
4         K4  A4       NaN  NaN
5         K5  A5       NaN  NaN

If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.

>>> df.set_index('key').join(other.set_index('key'))
      A    B
key
K0   A0   B0
K1   A1   B1
K2   A2   B2
K3   A3  NaN
K4   A4  NaN
K5   A5  NaN

Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.

>>> df.join(other.set_index('key'), on='key')
  key   A    B
0  K0  A0   B0
1  K1  A1   B1
2  K2  A2   B2
3  K3  A3  NaN
4  K4  A4  NaN
5  K5  A5  NaN
[ "Join", "columns", "of", "another", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L6741-L6862
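An illustrative sketch for the join record above; again, this is commentary added to the record, not part of the original row. Since join simply forwards to _join_compat (effectively an index-aligned merge), the two behaviors worth demonstrating are the on= column-against-index join and the suffix requirement for overlapping column names. The frame contents are invented for the example.

import pandas as pd

left = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
                     'A': ['A0', 'A1', 'A2']})
right = pd.DataFrame({'B': ['B0', 'B2']}, index=['K0', 'K2'])

# Join the caller's 'key' column against `other`'s index; the caller's
# RangeIndex is preserved, and K1 gets NaN for B.
print(left.join(right, on='key'))

# Overlapping column names raise ValueError ("columns overlap but no
# suffix specified") unless suffixes are given.
clash = pd.DataFrame({'A': ['x', 'y']})
print(left.join(clash, lsuffix='_left', rsuffix='_right'))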