column            dtype          range
id                int32          0 to 252k
repo              stringlengths  7 to 55
path              stringlengths  4 to 127
func_name         stringlengths  1 to 88
original_string   stringlengths  75 to 19.8k
language          stringclasses  1 value
code              stringlengths  75 to 19.8k
code_tokens       list
docstring         stringlengths  3 to 17.3k
docstring_tokens  list
sha               stringlengths  40 to 40
url               stringlengths  87 to 242
20,000
pandas-dev/pandas
setup.py
maybe_cythonize
def maybe_cythonize(extensions, *args, **kwargs): """ Render tempita templates before calling cythonize """ if len(sys.argv) > 1 and 'clean' in sys.argv: # Avoid running cythonize on `python setup.py clean` # See https://github.com/cython/cython/issues/1495 return extensions if not cython: # Avoid trying to look up numpy when installing from sdist # https://github.com/pandas-dev/pandas/issues/25193 # TODO: See if this can be removed after pyproject.toml added. return extensions numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') # TODO: Is this really necessary here? for ext in extensions: if (hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs): ext.include_dirs.append(numpy_incl) build_ext.render_templates(_pxifiles) return cythonize(extensions, *args, **kwargs)
python
def maybe_cythonize(extensions, *args, **kwargs): """ Render tempita templates before calling cythonize """ if len(sys.argv) > 1 and 'clean' in sys.argv: # Avoid running cythonize on `python setup.py clean` # See https://github.com/cython/cython/issues/1495 return extensions if not cython: # Avoid trying to look up numpy when installing from sdist # https://github.com/pandas-dev/pandas/issues/25193 # TODO: See if this can be removed after pyproject.toml added. return extensions numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') # TODO: Is this really necessary here? for ext in extensions: if (hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs): ext.include_dirs.append(numpy_incl) build_ext.render_templates(_pxifiles) return cythonize(extensions, *args, **kwargs)
[ "def", "maybe_cythonize", "(", "extensions", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "sys", ".", "argv", ")", ">", "1", "and", "'clean'", "in", "sys", ".", "argv", ":", "# Avoid running cythonize on `python setup.py clean`", "# See https://github.com/cython/cython/issues/1495", "return", "extensions", "if", "not", "cython", ":", "# Avoid trying to look up numpy when installing from sdist", "# https://github.com/pandas-dev/pandas/issues/25193", "# TODO: See if this can be removed after pyproject.toml added.", "return", "extensions", "numpy_incl", "=", "pkg_resources", ".", "resource_filename", "(", "'numpy'", ",", "'core/include'", ")", "# TODO: Is this really necessary here?", "for", "ext", "in", "extensions", ":", "if", "(", "hasattr", "(", "ext", ",", "'include_dirs'", ")", "and", "numpy_incl", "not", "in", "ext", ".", "include_dirs", ")", ":", "ext", ".", "include_dirs", ".", "append", "(", "numpy_incl", ")", "build_ext", ".", "render_templates", "(", "_pxifiles", ")", "return", "cythonize", "(", "extensions", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Render tempita templates before calling cythonize
[ "Render", "tempita", "templates", "before", "calling", "cythonize" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/setup.py#L490-L512
20,001
pandas-dev/pandas
pandas/core/groupby/generic.py
NDFrameGroupBy._transform_fast
def _transform_fast(self, result, obj, func_nm): """ Fast transform path for aggregations """ # if there were groups with no observations (Categorical only?) # try casting data to original dtype cast = self._transform_should_cast(func_nm) # for each col, reshape to to size of original frame # by take operation ids, _, ngroup = self.grouper.group_info output = [] for i, _ in enumerate(result.columns): res = algorithms.take_1d(result.iloc[:, i].values, ids) if cast: res = self._try_cast(res, obj.iloc[:, i]) output.append(res) return DataFrame._from_arrays(output, columns=result.columns, index=obj.index)
python
def _transform_fast(self, result, obj, func_nm): """ Fast transform path for aggregations """ # if there were groups with no observations (Categorical only?) # try casting data to original dtype cast = self._transform_should_cast(func_nm) # for each col, reshape to to size of original frame # by take operation ids, _, ngroup = self.grouper.group_info output = [] for i, _ in enumerate(result.columns): res = algorithms.take_1d(result.iloc[:, i].values, ids) if cast: res = self._try_cast(res, obj.iloc[:, i]) output.append(res) return DataFrame._from_arrays(output, columns=result.columns, index=obj.index)
[ "def", "_transform_fast", "(", "self", ",", "result", ",", "obj", ",", "func_nm", ")", ":", "# if there were groups with no observations (Categorical only?)", "# try casting data to original dtype", "cast", "=", "self", ".", "_transform_should_cast", "(", "func_nm", ")", "# for each col, reshape to to size of original frame", "# by take operation", "ids", ",", "_", ",", "ngroup", "=", "self", ".", "grouper", ".", "group_info", "output", "=", "[", "]", "for", "i", ",", "_", "in", "enumerate", "(", "result", ".", "columns", ")", ":", "res", "=", "algorithms", ".", "take_1d", "(", "result", ".", "iloc", "[", ":", ",", "i", "]", ".", "values", ",", "ids", ")", "if", "cast", ":", "res", "=", "self", ".", "_try_cast", "(", "res", ",", "obj", ".", "iloc", "[", ":", ",", "i", "]", ")", "output", ".", "append", "(", "res", ")", "return", "DataFrame", ".", "_from_arrays", "(", "output", ",", "columns", "=", "result", ".", "columns", ",", "index", "=", "obj", ".", "index", ")" ]
Fast transform path for aggregations
[ "Fast", "transform", "path", "for", "aggregations" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L526-L545
20,002
pandas-dev/pandas
pandas/core/groupby/generic.py
NDFrameGroupBy.filter
def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a DataFrame excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- f : function Function to apply to each subframe. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- filtered : DataFrame Notes ----- Each subframe is endowed the attribute 'name' in case you need to know which group you are working on. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> grouped.filter(lambda x: x['B'].mean() > 3.) A B C 1 bar 2 5.0 3 bar 4 1.0 5 bar 6 9.0 """ indices = [] obj = self._selected_obj gen = self.grouper.get_iterator(obj, axis=self.axis) for name, group in gen: object.__setattr__(group, 'name', name) res = func(group, *args, **kwargs) try: res = res.squeeze() except AttributeError: # allow e.g., scalars and frames to pass pass # interpret the result of the filter if is_bool(res) or (is_scalar(res) and isna(res)): if res and notna(res): indices.append(self._get_index(name)) else: # non scalars aren't allowed raise TypeError("filter function returned a %s, " "but expected a scalar bool" % type(res).__name__) return self._apply_filter(indices, dropna)
python
def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a DataFrame excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- f : function Function to apply to each subframe. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- filtered : DataFrame Notes ----- Each subframe is endowed the attribute 'name' in case you need to know which group you are working on. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> grouped.filter(lambda x: x['B'].mean() > 3.) A B C 1 bar 2 5.0 3 bar 4 1.0 5 bar 6 9.0 """ indices = [] obj = self._selected_obj gen = self.grouper.get_iterator(obj, axis=self.axis) for name, group in gen: object.__setattr__(group, 'name', name) res = func(group, *args, **kwargs) try: res = res.squeeze() except AttributeError: # allow e.g., scalars and frames to pass pass # interpret the result of the filter if is_bool(res) or (is_scalar(res) and isna(res)): if res and notna(res): indices.append(self._get_index(name)) else: # non scalars aren't allowed raise TypeError("filter function returned a %s, " "but expected a scalar bool" % type(res).__name__) return self._apply_filter(indices, dropna)
[ "def", "filter", "(", "self", ",", "func", ",", "dropna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# noqa", "indices", "=", "[", "]", "obj", "=", "self", ".", "_selected_obj", "gen", "=", "self", ".", "grouper", ".", "get_iterator", "(", "obj", ",", "axis", "=", "self", ".", "axis", ")", "for", "name", ",", "group", "in", "gen", ":", "object", ".", "__setattr__", "(", "group", ",", "'name'", ",", "name", ")", "res", "=", "func", "(", "group", ",", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "res", "=", "res", ".", "squeeze", "(", ")", "except", "AttributeError", ":", "# allow e.g., scalars and frames to pass", "pass", "# interpret the result of the filter", "if", "is_bool", "(", "res", ")", "or", "(", "is_scalar", "(", "res", ")", "and", "isna", "(", "res", ")", ")", ":", "if", "res", "and", "notna", "(", "res", ")", ":", "indices", ".", "append", "(", "self", ".", "_get_index", "(", "name", ")", ")", "else", ":", "# non scalars aren't allowed", "raise", "TypeError", "(", "\"filter function returned a %s, \"", "\"but expected a scalar bool\"", "%", "type", "(", "res", ")", ".", "__name__", ")", "return", "self", ".", "_apply_filter", "(", "indices", ",", "dropna", ")" ]
Return a copy of a DataFrame excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- f : function Function to apply to each subframe. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- filtered : DataFrame Notes ----- Each subframe is endowed the attribute 'name' in case you need to know which group you are working on. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> grouped.filter(lambda x: x['B'].mean() > 3.) A B C 1 bar 2 5.0 3 bar 4 1.0 5 bar 6 9.0
[ "Return", "a", "copy", "of", "a", "DataFrame", "excluding", "elements", "from", "groups", "that", "do", "not", "satisfy", "the", "boolean", "criterion", "specified", "by", "func", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L601-L661
20,003
pandas-dev/pandas
pandas/core/groupby/generic.py
SeriesGroupBy.filter
def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a Series excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function To apply to each group. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 1 2 3 4 5 6 Name: B, dtype: int64 Returns ------- filtered : Series """ if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) # Interpret np.nan as False. def true_and_notna(x, *args, **kwargs): b = wrapper(x, *args, **kwargs) return b and notna(b) try: indices = [self._get_index(name) for name, group in self if true_and_notna(group)] except ValueError: raise TypeError("the filter must return a boolean result") except TypeError: raise TypeError("the filter must return a boolean result") filtered = self._apply_filter(indices, dropna) return filtered
python
def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a Series excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function To apply to each group. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 1 2 3 4 5 6 Name: B, dtype: int64 Returns ------- filtered : Series """ if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) # Interpret np.nan as False. def true_and_notna(x, *args, **kwargs): b = wrapper(x, *args, **kwargs) return b and notna(b) try: indices = [self._get_index(name) for name, group in self if true_and_notna(group)] except ValueError: raise TypeError("the filter must return a boolean result") except TypeError: raise TypeError("the filter must return a boolean result") filtered = self._apply_filter(indices, dropna) return filtered
[ "def", "filter", "(", "self", ",", "func", ",", "dropna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# noqa", "if", "isinstance", "(", "func", ",", "str", ")", ":", "wrapper", "=", "lambda", "x", ":", "getattr", "(", "x", ",", "func", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "wrapper", "=", "lambda", "x", ":", "func", "(", "x", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# Interpret np.nan as False.", "def", "true_and_notna", "(", "x", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b", "=", "wrapper", "(", "x", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "b", "and", "notna", "(", "b", ")", "try", ":", "indices", "=", "[", "self", ".", "_get_index", "(", "name", ")", "for", "name", ",", "group", "in", "self", "if", "true_and_notna", "(", "group", ")", "]", "except", "ValueError", ":", "raise", "TypeError", "(", "\"the filter must return a boolean result\"", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "\"the filter must return a boolean result\"", ")", "filtered", "=", "self", ".", "_apply_filter", "(", "indices", ",", "dropna", ")", "return", "filtered" ]
Return a copy of a Series excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function To apply to each group. Should return True or False. dropna : Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 1 2 3 4 5 6 Name: B, dtype: int64 Returns ------- filtered : Series
[ "Return", "a", "copy", "of", "a", "Series", "excluding", "elements", "from", "groups", "that", "do", "not", "satisfy", "the", "boolean", "criterion", "specified", "by", "func", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L949-L997
20,004
pandas-dev/pandas
pandas/core/groupby/generic.py
SeriesGroupBy.nunique
def nunique(self, dropna=True): """ Return number of unique elements in the group. """ ids, _, _ = self.grouper.group_info val = self.obj.get_values() try: sorter = np.lexsort((val, ids)) except TypeError: # catches object dtypes msg = 'val.dtype must be object, got {}'.format(val.dtype) assert val.dtype == object, msg val, _ = algorithms.factorize(val, sort=False) sorter = np.lexsort((val, ids)) _isna = lambda a: a == -1 else: _isna = isna ids, val = ids[sorter], val[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] inc = np.r_[1, val[1:] != val[:-1]] # 1st item of each group is a new unique observation mask = _isna(val) if dropna: inc[idx] = 1 inc[mask] = 0 else: inc[mask & np.r_[False, mask[:-1]]] = 0 inc[idx] = 1 out = np.add.reduceat(inc, idx).astype('int64', copy=False) if len(ids): # NaN/NaT group exists if the head of ids is -1, # so remove it from res and exclude its index from idx if ids[0] == -1: res = out[1:] idx = idx[np.flatnonzero(idx)] else: res = out else: res = out[1:] ri = self.grouper.result_index # we might have duplications among the bins if len(res) != len(ri): res, out = np.zeros(len(ri), dtype=out.dtype), res res[ids[idx]] = out return Series(res, index=ri, name=self._selection_name)
python
def nunique(self, dropna=True): """ Return number of unique elements in the group. """ ids, _, _ = self.grouper.group_info val = self.obj.get_values() try: sorter = np.lexsort((val, ids)) except TypeError: # catches object dtypes msg = 'val.dtype must be object, got {}'.format(val.dtype) assert val.dtype == object, msg val, _ = algorithms.factorize(val, sort=False) sorter = np.lexsort((val, ids)) _isna = lambda a: a == -1 else: _isna = isna ids, val = ids[sorter], val[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] inc = np.r_[1, val[1:] != val[:-1]] # 1st item of each group is a new unique observation mask = _isna(val) if dropna: inc[idx] = 1 inc[mask] = 0 else: inc[mask & np.r_[False, mask[:-1]]] = 0 inc[idx] = 1 out = np.add.reduceat(inc, idx).astype('int64', copy=False) if len(ids): # NaN/NaT group exists if the head of ids is -1, # so remove it from res and exclude its index from idx if ids[0] == -1: res = out[1:] idx = idx[np.flatnonzero(idx)] else: res = out else: res = out[1:] ri = self.grouper.result_index # we might have duplications among the bins if len(res) != len(ri): res, out = np.zeros(len(ri), dtype=out.dtype), res res[ids[idx]] = out return Series(res, index=ri, name=self._selection_name)
[ "def", "nunique", "(", "self", ",", "dropna", "=", "True", ")", ":", "ids", ",", "_", ",", "_", "=", "self", ".", "grouper", ".", "group_info", "val", "=", "self", ".", "obj", ".", "get_values", "(", ")", "try", ":", "sorter", "=", "np", ".", "lexsort", "(", "(", "val", ",", "ids", ")", ")", "except", "TypeError", ":", "# catches object dtypes", "msg", "=", "'val.dtype must be object, got {}'", ".", "format", "(", "val", ".", "dtype", ")", "assert", "val", ".", "dtype", "==", "object", ",", "msg", "val", ",", "_", "=", "algorithms", ".", "factorize", "(", "val", ",", "sort", "=", "False", ")", "sorter", "=", "np", ".", "lexsort", "(", "(", "val", ",", "ids", ")", ")", "_isna", "=", "lambda", "a", ":", "a", "==", "-", "1", "else", ":", "_isna", "=", "isna", "ids", ",", "val", "=", "ids", "[", "sorter", "]", ",", "val", "[", "sorter", "]", "# group boundaries are where group ids change", "# unique observations are where sorted values change", "idx", "=", "np", ".", "r_", "[", "0", ",", "1", "+", "np", ".", "nonzero", "(", "ids", "[", "1", ":", "]", "!=", "ids", "[", ":", "-", "1", "]", ")", "[", "0", "]", "]", "inc", "=", "np", ".", "r_", "[", "1", ",", "val", "[", "1", ":", "]", "!=", "val", "[", ":", "-", "1", "]", "]", "# 1st item of each group is a new unique observation", "mask", "=", "_isna", "(", "val", ")", "if", "dropna", ":", "inc", "[", "idx", "]", "=", "1", "inc", "[", "mask", "]", "=", "0", "else", ":", "inc", "[", "mask", "&", "np", ".", "r_", "[", "False", ",", "mask", "[", ":", "-", "1", "]", "]", "]", "=", "0", "inc", "[", "idx", "]", "=", "1", "out", "=", "np", ".", "add", ".", "reduceat", "(", "inc", ",", "idx", ")", ".", "astype", "(", "'int64'", ",", "copy", "=", "False", ")", "if", "len", "(", "ids", ")", ":", "# NaN/NaT group exists if the head of ids is -1,", "# so remove it from res and exclude its index from idx", "if", "ids", "[", "0", "]", "==", "-", "1", ":", "res", "=", "out", "[", "1", ":", "]", "idx", "=", "idx", "[", "np", ".", "flatnonzero", "(", "idx", ")", "]", "else", ":", "res", "=", "out", "else", ":", "res", "=", "out", "[", "1", ":", "]", "ri", "=", "self", ".", "grouper", ".", "result_index", "# we might have duplications among the bins", "if", "len", "(", "res", ")", "!=", "len", "(", "ri", ")", ":", "res", ",", "out", "=", "np", ".", "zeros", "(", "len", "(", "ri", ")", ",", "dtype", "=", "out", ".", "dtype", ")", ",", "res", "res", "[", "ids", "[", "idx", "]", "]", "=", "out", "return", "Series", "(", "res", ",", "index", "=", "ri", ",", "name", "=", "self", ".", "_selection_name", ")" ]
Return number of unique elements in the group.
[ "Return", "number", "of", "unique", "elements", "in", "the", "group", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L999-L1054
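The record above covers `SeriesGroupBy.nunique`, whose docstring carries no example; here is a brief illustrative sketch through the public groupby API (the frame and column names are made up for illustration, not taken from the record):

```python
import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'b'],
                   'val': [1, 1, 2, 3, None]})

# Distinct non-null values per group; dropna=False counts the missing value too.
print(df.groupby('key')['val'].nunique())              # a -> 1, b -> 2
print(df.groupby('key')['val'].nunique(dropna=False))  # a -> 1, b -> 3
```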
20,005
pandas-dev/pandas
pandas/core/groupby/generic.py
SeriesGroupBy.pct_change
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None): """Calcuate pct_change of each value to previous entry in group""" # TODO: Remove this conditional when #23918 is fixed if freq: return self.apply(lambda x: x.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)) filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby(self.grouper.labels) shifted = fill_grp.shift(periods=periods, freq=freq) return (filled / shifted) - 1
python
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None): """Calcuate pct_change of each value to previous entry in group""" # TODO: Remove this conditional when #23918 is fixed if freq: return self.apply(lambda x: x.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)) filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby(self.grouper.labels) shifted = fill_grp.shift(periods=periods, freq=freq) return (filled / shifted) - 1
[ "def", "pct_change", "(", "self", ",", "periods", "=", "1", ",", "fill_method", "=", "'pad'", ",", "limit", "=", "None", ",", "freq", "=", "None", ")", ":", "# TODO: Remove this conditional when #23918 is fixed", "if", "freq", ":", "return", "self", ".", "apply", "(", "lambda", "x", ":", "x", ".", "pct_change", "(", "periods", "=", "periods", ",", "fill_method", "=", "fill_method", ",", "limit", "=", "limit", ",", "freq", "=", "freq", ")", ")", "filled", "=", "getattr", "(", "self", ",", "fill_method", ")", "(", "limit", "=", "limit", ")", "fill_grp", "=", "filled", ".", "groupby", "(", "self", ".", "grouper", ".", "labels", ")", "shifted", "=", "fill_grp", ".", "shift", "(", "periods", "=", "periods", ",", "freq", "=", "freq", ")", "return", "(", "filled", "/", "shifted", ")", "-", "1" ]
Calcuate pct_change of each value to previous entry in group
[ "Calcuate", "pct_change", "of", "each", "value", "to", "previous", "entry", "in", "group" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1202-L1213
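To make the one-line docstring above concrete, a minimal usage sketch of the public `pct_change` on a grouped Series (the grouping labels are chosen arbitrarily for illustration):

```python
import pandas as pd

s = pd.Series([1.0, 2.0, 4.0, 3.0, 6.0])
g = s.groupby([0, 0, 0, 1, 1])

# Percent change relative to the previous entry within each group;
# the first entry of each group has no prior value, so it is NaN.
print(g.pct_change())  # NaN, 1.0, 1.0, NaN, 1.0
```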
20,006
pandas-dev/pandas
pandas/core/groupby/generic.py
DataFrameGroupBy._gotitem
def _gotitem(self, key, ndim, subset=None): """ sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if ndim == 2: if subset is None: subset = self.obj return DataFrameGroupBy(subset, self.grouper, selection=key, grouper=self.grouper, exclusions=self.exclusions, as_index=self.as_index, observed=self.observed) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy(subset, selection=key, grouper=self.grouper) raise AssertionError("invalid ndim for _gotitem")
python
def _gotitem(self, key, ndim, subset=None): """ sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if ndim == 2: if subset is None: subset = self.obj return DataFrameGroupBy(subset, self.grouper, selection=key, grouper=self.grouper, exclusions=self.exclusions, as_index=self.as_index, observed=self.observed) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy(subset, selection=key, grouper=self.grouper) raise AssertionError("invalid ndim for _gotitem")
[ "def", "_gotitem", "(", "self", ",", "key", ",", "ndim", ",", "subset", "=", "None", ")", ":", "if", "ndim", "==", "2", ":", "if", "subset", "is", "None", ":", "subset", "=", "self", ".", "obj", "return", "DataFrameGroupBy", "(", "subset", ",", "self", ".", "grouper", ",", "selection", "=", "key", ",", "grouper", "=", "self", ".", "grouper", ",", "exclusions", "=", "self", ".", "exclusions", ",", "as_index", "=", "self", ".", "as_index", ",", "observed", "=", "self", ".", "observed", ")", "elif", "ndim", "==", "1", ":", "if", "subset", "is", "None", ":", "subset", "=", "self", ".", "obj", "[", "key", "]", "return", "SeriesGroupBy", "(", "subset", ",", "selection", "=", "key", ",", "grouper", "=", "self", ".", "grouper", ")", "raise", "AssertionError", "(", "\"invalid ndim for _gotitem\"", ")" ]
sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on
[ "sub", "-", "classes", "to", "define", "return", "a", "sliced", "object" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1297-L1325
20,007
pandas-dev/pandas
pandas/core/groupby/generic.py
DataFrameGroupBy._fill
def _fill(self, direction, limit=None): """Overridden method to join grouped columns in output""" res = super()._fill(direction, limit=limit) output = OrderedDict( (grp.name, grp.grouper) for grp in self.grouper.groupings) from pandas import concat return concat((self._wrap_transformed_output(output), res), axis=1)
python
def _fill(self, direction, limit=None): """Overridden method to join grouped columns in output""" res = super()._fill(direction, limit=limit) output = OrderedDict( (grp.name, grp.grouper) for grp in self.grouper.groupings) from pandas import concat return concat((self._wrap_transformed_output(output), res), axis=1)
[ "def", "_fill", "(", "self", ",", "direction", ",", "limit", "=", "None", ")", ":", "res", "=", "super", "(", ")", ".", "_fill", "(", "direction", ",", "limit", "=", "limit", ")", "output", "=", "OrderedDict", "(", "(", "grp", ".", "name", ",", "grp", ".", "grouper", ")", "for", "grp", "in", "self", ".", "grouper", ".", "groupings", ")", "from", "pandas", "import", "concat", "return", "concat", "(", "(", "self", ".", "_wrap_transformed_output", "(", "output", ")", ",", "res", ")", ",", "axis", "=", "1", ")" ]
Overridden method to join grouped columns in output
[ "Overridden", "method", "to", "join", "grouped", "columns", "in", "output" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1472-L1479
20,008
pandas-dev/pandas
pandas/core/groupby/generic.py
DataFrameGroupBy.nunique
def nunique(self, dropna=True): """ Return DataFrame with number of distinct observations per group for each column. .. versionadded:: 0.20.0 Parameters ---------- dropna : boolean, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() id value1 value2 id egg 1 1 1 ham 1 1 2 spam 1 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y """ obj = self._selected_obj def groupby_series(obj, col=None): return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique(dropna=dropna) if isinstance(obj, Series): results = groupby_series(obj) else: from pandas.core.reshape.concat import concat results = [groupby_series(obj[col], col) for col in obj.columns] results = concat(results, axis=1) results.columns.names = obj.columns.names if not self.as_index: results.index = ibase.default_index(len(results)) return results
python
def nunique(self, dropna=True): """ Return DataFrame with number of distinct observations per group for each column. .. versionadded:: 0.20.0 Parameters ---------- dropna : boolean, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() id value1 value2 id egg 1 1 1 ham 1 1 2 spam 1 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y """ obj = self._selected_obj def groupby_series(obj, col=None): return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique(dropna=dropna) if isinstance(obj, Series): results = groupby_series(obj) else: from pandas.core.reshape.concat import concat results = [groupby_series(obj[col], col) for col in obj.columns] results = concat(results, axis=1) results.columns.names = obj.columns.names if not self.as_index: results.index = ibase.default_index(len(results)) return results
[ "def", "nunique", "(", "self", ",", "dropna", "=", "True", ")", ":", "obj", "=", "self", ".", "_selected_obj", "def", "groupby_series", "(", "obj", ",", "col", "=", "None", ")", ":", "return", "SeriesGroupBy", "(", "obj", ",", "selection", "=", "col", ",", "grouper", "=", "self", ".", "grouper", ")", ".", "nunique", "(", "dropna", "=", "dropna", ")", "if", "isinstance", "(", "obj", ",", "Series", ")", ":", "results", "=", "groupby_series", "(", "obj", ")", "else", ":", "from", "pandas", ".", "core", ".", "reshape", ".", "concat", "import", "concat", "results", "=", "[", "groupby_series", "(", "obj", "[", "col", "]", ",", "col", ")", "for", "col", "in", "obj", ".", "columns", "]", "results", "=", "concat", "(", "results", ",", "axis", "=", "1", ")", "results", ".", "columns", ".", "names", "=", "obj", ".", "columns", ".", "names", "if", "not", "self", ".", "as_index", ":", "results", ".", "index", "=", "ibase", ".", "default_index", "(", "len", "(", "results", ")", ")", "return", "results" ]
Return DataFrame with number of distinct observations per group for each column. .. versionadded:: 0.20.0 Parameters ---------- dropna : boolean, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() id value1 value2 id egg 1 1 1 ham 1 1 2 spam 1 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y
[ "Return", "DataFrame", "with", "number", "of", "distinct", "observations", "per", "group", "for", "each", "column", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1499-L1564
20,009
pandas-dev/pandas
pandas/core/internals/arrays.py
extract_array
def extract_array(obj, extract_numpy=False): """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. For Numpy-backed ExtensionArrays, the ndarray is extracted. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) [a, b, c] Categories (3, object): [a, b, c] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index a PandasArray is returned. >>> extract_array(pd.Series([1, 2, 3])) <PandasArray> [1, 2, 3] Length: 3, dtype: int64 To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndexClass, ABCSeries)): obj = obj.array if extract_numpy and isinstance(obj, ABCPandasArray): obj = obj.to_numpy() return obj
python
def extract_array(obj, extract_numpy=False): """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. For Numpy-backed ExtensionArrays, the ndarray is extracted. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) [a, b, c] Categories (3, object): [a, b, c] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index a PandasArray is returned. >>> extract_array(pd.Series([1, 2, 3])) <PandasArray> [1, 2, 3] Length: 3, dtype: int64 To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndexClass, ABCSeries)): obj = obj.array if extract_numpy and isinstance(obj, ABCPandasArray): obj = obj.to_numpy() return obj
[ "def", "extract_array", "(", "obj", ",", "extract_numpy", "=", "False", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "ABCIndexClass", ",", "ABCSeries", ")", ")", ":", "obj", "=", "obj", ".", "array", "if", "extract_numpy", "and", "isinstance", "(", "obj", ",", "ABCPandasArray", ")", ":", "obj", "=", "obj", ".", "to_numpy", "(", ")", "return", "obj" ]
Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. For Numpy-backed ExtensionArrays, the ndarray is extracted. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) [a, b, c] Categories (3, object): [a, b, c] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index a PandasArray is returned. >>> extract_array(pd.Series([1, 2, 3])) <PandasArray> [1, 2, 3] Length: 3, dtype: int64 To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3])
[ "Extract", "the", "ndarray", "or", "ExtensionArray", "from", "a", "Series", "or", "Index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/arrays.py#L7-L55
20,010
pandas-dev/pandas
pandas/core/common.py
flatten
def flatten(l): """ Flatten an arbitrarily nested sequence. Parameters ---------- l : sequence The non string sequence to flatten Notes ----- This doesn't consider strings sequences. Returns ------- flattened : generator """ for el in l: if _iterable_not_string(el): for s in flatten(el): yield s else: yield el
python
def flatten(l): """ Flatten an arbitrarily nested sequence. Parameters ---------- l : sequence The non string sequence to flatten Notes ----- This doesn't consider strings sequences. Returns ------- flattened : generator """ for el in l: if _iterable_not_string(el): for s in flatten(el): yield s else: yield el
[ "def", "flatten", "(", "l", ")", ":", "for", "el", "in", "l", ":", "if", "_iterable_not_string", "(", "el", ")", ":", "for", "s", "in", "flatten", "(", "el", ")", ":", "yield", "s", "else", ":", "yield", "el" ]
Flatten an arbitrarily nested sequence. Parameters ---------- l : sequence The non string sequence to flatten Notes ----- This doesn't consider strings sequences. Returns ------- flattened : generator
[ "Flatten", "an", "arbitrarily", "nested", "sequence", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L35-L57
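`flatten` is a private pandas helper, so rather than calling it directly, here is a self-contained sketch of the same recursive idea (a standalone re-implementation for illustration, not the pandas function itself):

```python
def flatten_gen(seq):
    """Yield items from an arbitrarily nested sequence, leaving strings whole."""
    for el in seq:
        if hasattr(el, '__iter__') and not isinstance(el, (str, bytes)):
            yield from flatten_gen(el)
        else:
            yield el

print(list(flatten_gen([1, [2, (3, 4)], 'ab'])))  # [1, 2, 3, 4, 'ab']
```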
20,011
pandas-dev/pandas
pandas/core/common.py
is_bool_indexer
def is_bool_indexer(key: Any) -> bool: """ Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values. """ na_msg = 'cannot index with vector containing NA / NaN values' if (isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (is_array_like(key) and is_extension_array_dtype(key.dtype))): if key.dtype == np.object_: key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): if isna(key).any(): raise ValueError(na_msg) return False return True elif is_bool_dtype(key.dtype): # an ndarray with bool-dtype by definition has no missing values. # So we only need to check for NAs in ExtensionArrays if is_extension_array_dtype(key.dtype): if np.any(key.isna()): raise ValueError(na_msg) return True elif isinstance(key, list): try: arr = np.asarray(key) return arr.dtype == np.bool_ and len(arr) == len(key) except TypeError: # pragma: no cover return False return False
python
def is_bool_indexer(key: Any) -> bool: """ Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values. """ na_msg = 'cannot index with vector containing NA / NaN values' if (isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (is_array_like(key) and is_extension_array_dtype(key.dtype))): if key.dtype == np.object_: key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): if isna(key).any(): raise ValueError(na_msg) return False return True elif is_bool_dtype(key.dtype): # an ndarray with bool-dtype by definition has no missing values. # So we only need to check for NAs in ExtensionArrays if is_extension_array_dtype(key.dtype): if np.any(key.isna()): raise ValueError(na_msg) return True elif isinstance(key, list): try: arr = np.asarray(key) return arr.dtype == np.bool_ and len(arr) == len(key) except TypeError: # pragma: no cover return False return False
[ "def", "is_bool_indexer", "(", "key", ":", "Any", ")", "->", "bool", ":", "na_msg", "=", "'cannot index with vector containing NA / NaN values'", "if", "(", "isinstance", "(", "key", ",", "(", "ABCSeries", ",", "np", ".", "ndarray", ",", "ABCIndex", ")", ")", "or", "(", "is_array_like", "(", "key", ")", "and", "is_extension_array_dtype", "(", "key", ".", "dtype", ")", ")", ")", ":", "if", "key", ".", "dtype", "==", "np", ".", "object_", ":", "key", "=", "np", ".", "asarray", "(", "values_from_object", "(", "key", ")", ")", "if", "not", "lib", ".", "is_bool_array", "(", "key", ")", ":", "if", "isna", "(", "key", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "na_msg", ")", "return", "False", "return", "True", "elif", "is_bool_dtype", "(", "key", ".", "dtype", ")", ":", "# an ndarray with bool-dtype by definition has no missing values.", "# So we only need to check for NAs in ExtensionArrays", "if", "is_extension_array_dtype", "(", "key", ".", "dtype", ")", ":", "if", "np", ".", "any", "(", "key", ".", "isna", "(", ")", ")", ":", "raise", "ValueError", "(", "na_msg", ")", "return", "True", "elif", "isinstance", "(", "key", ",", "list", ")", ":", "try", ":", "arr", "=", "np", ".", "asarray", "(", "key", ")", "return", "arr", ".", "dtype", "==", "np", ".", "bool_", "and", "len", "(", "arr", ")", "==", "len", "(", "key", ")", "except", "TypeError", ":", "# pragma: no cover", "return", "False", "return", "False" ]
Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values.
[ "Check", "whether", "key", "is", "a", "valid", "boolean", "indexer", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L95-L142
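For orientation, the kinds of keys the helper above accepts correspond to ordinary boolean indexing; a small sketch with made-up data (assuming, as seems likely, that pandas indexing routes such keys through this check):

```python
import numpy as np
import pandas as pd

s = pd.Series([10, 20, 30])

# A Python list of booleans of matching length is a valid boolean indexer...
print(s[[True, False, True]])             # keeps rows 0 and 2
# ...and so is a boolean ndarray.
print(s[np.array([False, True, False])])  # keeps row 1
```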
20,012
pandas-dev/pandas
pandas/core/common.py
cast_scalar_indexer
def cast_scalar_indexer(val): """ To avoid numpy DeprecationWarnings, cast float to integer where valid. Parameters ---------- val : scalar Returns ------- outval : scalar """ # assumes lib.is_scalar(val) if lib.is_float(val) and val == int(val): return int(val) return val
python
def cast_scalar_indexer(val): """ To avoid numpy DeprecationWarnings, cast float to integer where valid. Parameters ---------- val : scalar Returns ------- outval : scalar """ # assumes lib.is_scalar(val) if lib.is_float(val) and val == int(val): return int(val) return val
[ "def", "cast_scalar_indexer", "(", "val", ")", ":", "# assumes lib.is_scalar(val)", "if", "lib", ".", "is_float", "(", "val", ")", "and", "val", "==", "int", "(", "val", ")", ":", "return", "int", "(", "val", ")", "return", "val" ]
To avoid numpy DeprecationWarnings, cast float to integer where valid. Parameters ---------- val : scalar Returns ------- outval : scalar
[ "To", "avoid", "numpy", "DeprecationWarnings", "cast", "float", "to", "integer", "where", "valid", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L145-L160
20,013
pandas-dev/pandas
pandas/core/common.py
index_labels_to_array
def index_labels_to_array(labels, dtype=None): """ Transform label or iterable of labels to array, for use in Index. Parameters ---------- dtype : dtype If specified, use as dtype of the resulting array, otherwise infer. Returns ------- array """ if isinstance(labels, (str, tuple)): labels = [labels] if not isinstance(labels, (list, np.ndarray)): try: labels = list(labels) except TypeError: # non-iterable labels = [labels] labels = asarray_tuplesafe(labels, dtype=dtype) return labels
python
def index_labels_to_array(labels, dtype=None): """ Transform label or iterable of labels to array, for use in Index. Parameters ---------- dtype : dtype If specified, use as dtype of the resulting array, otherwise infer. Returns ------- array """ if isinstance(labels, (str, tuple)): labels = [labels] if not isinstance(labels, (list, np.ndarray)): try: labels = list(labels) except TypeError: # non-iterable labels = [labels] labels = asarray_tuplesafe(labels, dtype=dtype) return labels
[ "def", "index_labels_to_array", "(", "labels", ",", "dtype", "=", "None", ")", ":", "if", "isinstance", "(", "labels", ",", "(", "str", ",", "tuple", ")", ")", ":", "labels", "=", "[", "labels", "]", "if", "not", "isinstance", "(", "labels", ",", "(", "list", ",", "np", ".", "ndarray", ")", ")", ":", "try", ":", "labels", "=", "list", "(", "labels", ")", "except", "TypeError", ":", "# non-iterable", "labels", "=", "[", "labels", "]", "labels", "=", "asarray_tuplesafe", "(", "labels", ",", "dtype", "=", "dtype", ")", "return", "labels" ]
Transform label or iterable of labels to array, for use in Index. Parameters ---------- dtype : dtype If specified, use as dtype of the resulting array, otherwise infer. Returns ------- array
[ "Transform", "label", "or", "iterable", "of", "labels", "to", "array", "for", "use", "in", "Index", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L259-L283
20,014
pandas-dev/pandas
pandas/core/common.py
is_null_slice
def is_null_slice(obj): """ We have a null slice. """ return (isinstance(obj, slice) and obj.start is None and obj.stop is None and obj.step is None)
python
def is_null_slice(obj): """ We have a null slice. """ return (isinstance(obj, slice) and obj.start is None and obj.stop is None and obj.step is None)
[ "def", "is_null_slice", "(", "obj", ")", ":", "return", "(", "isinstance", "(", "obj", ",", "slice", ")", "and", "obj", ".", "start", "is", "None", "and", "obj", ".", "stop", "is", "None", "and", "obj", ".", "step", "is", "None", ")" ]
We have a null slice.
[ "We", "have", "a", "null", "slice", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L292-L297
20,015
pandas-dev/pandas
pandas/core/common.py
is_full_slice
def is_full_slice(obj, l): """ We have a full length slice. """ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and obj.step is None)
python
def is_full_slice(obj, l): """ We have a full length slice. """ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and obj.step is None)
[ "def", "is_full_slice", "(", "obj", ",", "l", ")", ":", "return", "(", "isinstance", "(", "obj", ",", "slice", ")", "and", "obj", ".", "start", "==", "0", "and", "obj", ".", "stop", "==", "l", "and", "obj", ".", "step", "is", "None", ")" ]
We have a full length slice.
[ "We", "have", "a", "full", "length", "slice", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L308-L313
20,016
pandas-dev/pandas
pandas/core/common.py
apply_if_callable
def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs """ if callable(maybe_callable): return maybe_callable(obj, **kwargs) return maybe_callable
python
def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs """ if callable(maybe_callable): return maybe_callable(obj, **kwargs) return maybe_callable
[ "def", "apply_if_callable", "(", "maybe_callable", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "if", "callable", "(", "maybe_callable", ")", ":", "return", "maybe_callable", "(", "obj", ",", "*", "*", "kwargs", ")", "return", "maybe_callable" ]
Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs
[ "Evaluate", "possibly", "callable", "input", "using", "obj", "and", "kwargs", "if", "it", "is", "callable", "otherwise", "return", "as", "it", "is", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L333-L348
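A quick illustration of the pattern this helper supports, shown via the public `assign` (the assumption that `assign` relies on it internally is plausible but not verified here):

```python
import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3]})

# A callable is evaluated against the frame; a plain value is used as-is.
print(df.assign(y=lambda d: d['x'] * 2, z=0))
```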
20,017
pandas-dev/pandas
pandas/core/common.py
standardize_mapping
def standardize_mapping(into): """ Helper function to standardize a supplied mapping. .. versionadded:: 0.21.0 Parameters ---------- into : instance or subclass of collections.abc.Mapping Must be a class, an initialized collections.defaultdict, or an instance of a collections.abc.Mapping subclass. Returns ------- mapping : a collections.abc.Mapping subclass or other constructor a callable object that can accept an iterator to create the desired Mapping. See Also -------- DataFrame.to_dict Series.to_dict """ if not inspect.isclass(into): if isinstance(into, collections.defaultdict): return partial( collections.defaultdict, into.default_factory) into = type(into) if not issubclass(into, abc.Mapping): raise TypeError('unsupported type: {into}'.format(into=into)) elif into == collections.defaultdict: raise TypeError( 'to_dict() only accepts initialized defaultdicts') return into
python
def standardize_mapping(into): """ Helper function to standardize a supplied mapping. .. versionadded:: 0.21.0 Parameters ---------- into : instance or subclass of collections.abc.Mapping Must be a class, an initialized collections.defaultdict, or an instance of a collections.abc.Mapping subclass. Returns ------- mapping : a collections.abc.Mapping subclass or other constructor a callable object that can accept an iterator to create the desired Mapping. See Also -------- DataFrame.to_dict Series.to_dict """ if not inspect.isclass(into): if isinstance(into, collections.defaultdict): return partial( collections.defaultdict, into.default_factory) into = type(into) if not issubclass(into, abc.Mapping): raise TypeError('unsupported type: {into}'.format(into=into)) elif into == collections.defaultdict: raise TypeError( 'to_dict() only accepts initialized defaultdicts') return into
[ "def", "standardize_mapping", "(", "into", ")", ":", "if", "not", "inspect", ".", "isclass", "(", "into", ")", ":", "if", "isinstance", "(", "into", ",", "collections", ".", "defaultdict", ")", ":", "return", "partial", "(", "collections", ".", "defaultdict", ",", "into", ".", "default_factory", ")", "into", "=", "type", "(", "into", ")", "if", "not", "issubclass", "(", "into", ",", "abc", ".", "Mapping", ")", ":", "raise", "TypeError", "(", "'unsupported type: {into}'", ".", "format", "(", "into", "=", "into", ")", ")", "elif", "into", "==", "collections", ".", "defaultdict", ":", "raise", "TypeError", "(", "'to_dict() only accepts initialized defaultdicts'", ")", "return", "into" ]
Helper function to standardize a supplied mapping. .. versionadded:: 0.21.0 Parameters ---------- into : instance or subclass of collections.abc.Mapping Must be a class, an initialized collections.defaultdict, or an instance of a collections.abc.Mapping subclass. Returns ------- mapping : a collections.abc.Mapping subclass or other constructor a callable object that can accept an iterator to create the desired Mapping. See Also -------- DataFrame.to_dict Series.to_dict
[ "Helper", "function", "to", "standardize", "a", "supplied", "mapping", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L368-L401
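The behaviour described above surfaces through the public `to_dict(into=...)` parameter; a short sketch (column and index labels are arbitrary):

```python
import pandas as pd
from collections import OrderedDict, defaultdict

df = pd.DataFrame({'x': [1, 2]}, index=['a', 'b'])

# A Mapping subclass may be passed as a class...
print(df.to_dict(into=OrderedDict))
# ...but a defaultdict must be passed initialized, so its factory is known.
print(df.to_dict(into=defaultdict(list)))
```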
20,018
pandas-dev/pandas
pandas/core/common.py
random_state
def random_state(state=None): """ Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState """ if is_integer(state): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): return state elif state is None: return np.random else: raise ValueError("random_state must be an integer, a numpy " "RandomState, or None")
python
def random_state(state=None): """ Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState """ if is_integer(state): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): return state elif state is None: return np.random else: raise ValueError("random_state must be an integer, a numpy " "RandomState, or None")
[ "def", "random_state", "(", "state", "=", "None", ")", ":", "if", "is_integer", "(", "state", ")", ":", "return", "np", ".", "random", ".", "RandomState", "(", "state", ")", "elif", "isinstance", "(", "state", ",", "np", ".", "random", ".", "RandomState", ")", ":", "return", "state", "elif", "state", "is", "None", ":", "return", "np", ".", "random", "else", ":", "raise", "ValueError", "(", "\"random_state must be an integer, a numpy \"", "\"RandomState, or None\"", ")" ]
Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState
[ "Helper", "function", "for", "processing", "random_state", "arguments", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L404-L430
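As a usage sketch of the accepted argument forms, via the public `sample` method, which takes the same kind of `random_state` argument (the data here is arbitrary):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'x': range(5)})

# An int seed and an existing RandomState are both accepted; None uses np.random.
print(df.sample(2, random_state=42))
print(df.sample(2, random_state=np.random.RandomState(42)))
```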
20,019
pandas-dev/pandas
pandas/core/common.py
_pipe
def _pipe(obj, func, *args, **kwargs): """ Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, string) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of `callable`` that expects the object. args : iterable, optional positional arguments passed into ``func``. kwargs : dict, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. """ if isinstance(func, tuple): func, target = func if target in kwargs: msg = '%s is both the pipe target and a keyword argument' % target raise ValueError(msg) kwargs[target] = obj return func(*args, **kwargs) else: return func(obj, *args, **kwargs)
python
def _pipe(obj, func, *args, **kwargs): """ Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, string) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of `callable`` that expects the object. args : iterable, optional positional arguments passed into ``func``. kwargs : dict, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. """ if isinstance(func, tuple): func, target = func if target in kwargs: msg = '%s is both the pipe target and a keyword argument' % target raise ValueError(msg) kwargs[target] = obj return func(*args, **kwargs) else: return func(obj, *args, **kwargs)
[ "def", "_pipe", "(", "obj", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "func", ",", "tuple", ")", ":", "func", ",", "target", "=", "func", "if", "target", "in", "kwargs", ":", "msg", "=", "'%s is both the pipe target and a keyword argument'", "%", "target", "raise", "ValueError", "(", "msg", ")", "kwargs", "[", "target", "]", "=", "obj", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "func", "(", "obj", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, string) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of `callable`` that expects the object. args : iterable, optional positional arguments passed into ``func``. kwargs : dict, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``.
[ "Apply", "a", "function", "func", "to", "object", "obj", "either", "by", "passing", "obj", "as", "the", "first", "argument", "to", "the", "function", "or", "in", "the", "case", "that", "the", "func", "is", "a", "tuple", "interpret", "the", "first", "element", "of", "the", "tuple", "as", "a", "function", "and", "pass", "the", "obj", "to", "that", "function", "as", "a", "keyword", "argument", "whose", "key", "is", "the", "value", "of", "the", "second", "element", "of", "the", "tuple", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/common.py#L433-L465
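The obj-vs-tuple dispatch above is what the public .pipe methods build on; a short usage sketch (subtract_and_divide is a made-up helper for illustration):

import pandas as pd

def subtract_and_divide(x, sub, divide=1):
    return (x - sub) / divide

df = pd.DataFrame({"a": [10, 20, 30]})

# plain callable: df is passed as the first positional argument
df.pipe(subtract_and_divide, 5, divide=5)

# (callable, data_keyword) tuple: df is passed as the keyword argument "x"
df.pipe((subtract_and_divide, "x"), sub=5, divide=5)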
20,020
pandas-dev/pandas
pandas/core/nanops.py
_get_fill_value
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): """ return the correct fill value for the dtype of the values """ if fill_value is not None: return fill_value if _na_ok_dtype(dtype): if fill_value_typ is None: return np.nan else: if fill_value_typ == '+inf': return np.inf else: return -np.inf else: if fill_value_typ is None: return tslibs.iNaT else: if fill_value_typ == '+inf': # need the max int here return _int64_max else: return tslibs.iNaT
python
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): """ return the correct fill value for the dtype of the values """ if fill_value is not None: return fill_value if _na_ok_dtype(dtype): if fill_value_typ is None: return np.nan else: if fill_value_typ == '+inf': return np.inf else: return -np.inf else: if fill_value_typ is None: return tslibs.iNaT else: if fill_value_typ == '+inf': # need the max int here return _int64_max else: return tslibs.iNaT
[ "def", "_get_fill_value", "(", "dtype", ",", "fill_value", "=", "None", ",", "fill_value_typ", "=", "None", ")", ":", "if", "fill_value", "is", "not", "None", ":", "return", "fill_value", "if", "_na_ok_dtype", "(", "dtype", ")", ":", "if", "fill_value_typ", "is", "None", ":", "return", "np", ".", "nan", "else", ":", "if", "fill_value_typ", "==", "'+inf'", ":", "return", "np", ".", "inf", "else", ":", "return", "-", "np", ".", "inf", "else", ":", "if", "fill_value_typ", "is", "None", ":", "return", "tslibs", ".", "iNaT", "else", ":", "if", "fill_value_typ", "==", "'+inf'", ":", "# need the max int here", "return", "_int64_max", "else", ":", "return", "tslibs", ".", "iNaT" ]
return the correct fill value for the dtype of the values
[ "return", "the", "correct", "fill", "value", "for", "the", "dtype", "of", "the", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L180-L200
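For context on the sentinels used above (this snippet only inspects the constants, it does not call the private helper): _int64_max and iNaT are the int64 extremes, so for integer-backed datetime/timedelta data a '+inf' request maps to the int64 maximum and other requests to iNaT:

import numpy as np

np.iinfo(np.int64).max   # _int64_max, used in place of '+inf' for non-NaN dtypes
np.iinfo(np.int64).min   # the iNaT sentinel pandas uses for NaT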
20,021
pandas-dev/pandas
pandas/core/nanops.py
_get_values
def _get_values(values, skipna, fill_value=None, fill_value_typ=None, isfinite=False, copy=True, mask=None): """ utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value copy = True will force the copy """ if is_datetime64tz_dtype(values): # com.values_from_object returns M8[ns] dtype instead of tz-aware, # so this case must be handled separately from the rest dtype = values.dtype values = getattr(values, "_values", values) else: values = com.values_from_object(values) dtype = values.dtype if mask is None: if isfinite: mask = _isfinite(values) else: mask = isna(values) if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = getattr(values, "asi8", values) values = values.view(np.int64) dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value(dtype, fill_value=fill_value, fill_value_typ=fill_value_typ) if skipna: if copy: values = values.copy() if dtype_ok: np.putmask(values, mask, fill_value) # promote if needed else: values, changed = maybe_upcast_putmask(values, mask, fill_value) elif copy: values = values.copy() # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.int64 elif is_float_dtype(dtype): dtype_max = np.float64 return values, mask, dtype, dtype_max, fill_value
python
def _get_values(values, skipna, fill_value=None, fill_value_typ=None, isfinite=False, copy=True, mask=None): """ utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value copy = True will force the copy """ if is_datetime64tz_dtype(values): # com.values_from_object returns M8[ns] dtype instead of tz-aware, # so this case must be handled separately from the rest dtype = values.dtype values = getattr(values, "_values", values) else: values = com.values_from_object(values) dtype = values.dtype if mask is None: if isfinite: mask = _isfinite(values) else: mask = isna(values) if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = getattr(values, "asi8", values) values = values.view(np.int64) dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value(dtype, fill_value=fill_value, fill_value_typ=fill_value_typ) if skipna: if copy: values = values.copy() if dtype_ok: np.putmask(values, mask, fill_value) # promote if needed else: values, changed = maybe_upcast_putmask(values, mask, fill_value) elif copy: values = values.copy() # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.int64 elif is_float_dtype(dtype): dtype_max = np.float64 return values, mask, dtype, dtype_max, fill_value
[ "def", "_get_values", "(", "values", ",", "skipna", ",", "fill_value", "=", "None", ",", "fill_value_typ", "=", "None", ",", "isfinite", "=", "False", ",", "copy", "=", "True", ",", "mask", "=", "None", ")", ":", "if", "is_datetime64tz_dtype", "(", "values", ")", ":", "# com.values_from_object returns M8[ns] dtype instead of tz-aware,", "# so this case must be handled separately from the rest", "dtype", "=", "values", ".", "dtype", "values", "=", "getattr", "(", "values", ",", "\"_values\"", ",", "values", ")", "else", ":", "values", "=", "com", ".", "values_from_object", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "if", "mask", "is", "None", ":", "if", "isfinite", ":", "mask", "=", "_isfinite", "(", "values", ")", "else", ":", "mask", "=", "isna", "(", "values", ")", "if", "is_datetime_or_timedelta_dtype", "(", "values", ")", "or", "is_datetime64tz_dtype", "(", "values", ")", ":", "# changing timedelta64/datetime64 to int64 needs to happen after", "# finding `mask` above", "values", "=", "getattr", "(", "values", ",", "\"asi8\"", ",", "values", ")", "values", "=", "values", ".", "view", "(", "np", ".", "int64", ")", "dtype_ok", "=", "_na_ok_dtype", "(", "dtype", ")", "# get our fill value (in case we need to provide an alternative", "# dtype for it)", "fill_value", "=", "_get_fill_value", "(", "dtype", ",", "fill_value", "=", "fill_value", ",", "fill_value_typ", "=", "fill_value_typ", ")", "if", "skipna", ":", "if", "copy", ":", "values", "=", "values", ".", "copy", "(", ")", "if", "dtype_ok", ":", "np", ".", "putmask", "(", "values", ",", "mask", ",", "fill_value", ")", "# promote if needed", "else", ":", "values", ",", "changed", "=", "maybe_upcast_putmask", "(", "values", ",", "mask", ",", "fill_value", ")", "elif", "copy", ":", "values", "=", "values", ".", "copy", "(", ")", "# return a platform independent precision dtype", "dtype_max", "=", "dtype", "if", "is_integer_dtype", "(", "dtype", ")", "or", "is_bool_dtype", "(", "dtype", ")", ":", "dtype_max", "=", "np", ".", "int64", "elif", "is_float_dtype", "(", "dtype", ")", ":", "dtype_max", "=", "np", ".", "float64", "return", "values", ",", "mask", ",", "dtype", ",", "dtype_max", ",", "fill_value" ]
utility to get the values view, mask, dtype if necessary copy and mask using the specified fill_value copy = True will force the copy
[ "utility", "to", "get", "the", "values", "view", "mask", "dtype", "if", "necessary", "copy", "and", "mask", "using", "the", "specified", "fill_value", "copy", "=", "True", "will", "force", "the", "copy" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L203-L258
20,022
pandas-dev/pandas
pandas/core/nanops.py
_wrap_results
def _wrap_results(result, dtype, fill_value=None): """ wrap our results if needed """ if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if fill_value is None: # GH#24293 fill_value = iNaT if not isinstance(result, np.ndarray): tz = getattr(dtype, 'tz', None) assert not isna(fill_value), "Expected non-null fill_value" if result == fill_value: result = np.nan result = tslibs.Timestamp(result, tz=tz) else: result = result.view(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): if result == fill_value: result = np.nan # raise if we have a timedelta64[ns] which is too large if np.fabs(result) > _int64_max: raise ValueError("overflow in timedelta operation") result = tslibs.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) return result
python
def _wrap_results(result, dtype, fill_value=None): """ wrap our results if needed """ if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if fill_value is None: # GH#24293 fill_value = iNaT if not isinstance(result, np.ndarray): tz = getattr(dtype, 'tz', None) assert not isna(fill_value), "Expected non-null fill_value" if result == fill_value: result = np.nan result = tslibs.Timestamp(result, tz=tz) else: result = result.view(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): if result == fill_value: result = np.nan # raise if we have a timedelta64[ns] which is too large if np.fabs(result) > _int64_max: raise ValueError("overflow in timedelta operation") result = tslibs.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) return result
[ "def", "_wrap_results", "(", "result", ",", "dtype", ",", "fill_value", "=", "None", ")", ":", "if", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "if", "fill_value", "is", "None", ":", "# GH#24293", "fill_value", "=", "iNaT", "if", "not", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "tz", "=", "getattr", "(", "dtype", ",", "'tz'", ",", "None", ")", "assert", "not", "isna", "(", "fill_value", ")", ",", "\"Expected non-null fill_value\"", "if", "result", "==", "fill_value", ":", "result", "=", "np", ".", "nan", "result", "=", "tslibs", ".", "Timestamp", "(", "result", ",", "tz", "=", "tz", ")", "else", ":", "result", "=", "result", ".", "view", "(", "dtype", ")", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "if", "not", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "if", "result", "==", "fill_value", ":", "result", "=", "np", ".", "nan", "# raise if we have a timedelta64[ns] which is too large", "if", "np", ".", "fabs", "(", "result", ")", ">", "_int64_max", ":", "raise", "ValueError", "(", "\"overflow in timedelta operation\"", ")", "result", "=", "tslibs", ".", "Timedelta", "(", "result", ",", "unit", "=", "'ns'", ")", "else", ":", "result", "=", "result", ".", "astype", "(", "'i8'", ")", ".", "view", "(", "dtype", ")", "return", "result" ]
wrap our results if needed
[ "wrap", "our", "results", "if", "needed" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L276-L304
20,023
pandas-dev/pandas
pandas/core/nanops.py
_na_for_min_count
def _na_for_min_count(values, axis): """Return the missing value for `values` Parameters ---------- values : ndarray axis : int or None axis for the reduction Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing. """ # we either return np.nan or pd.NaT if is_numeric_dtype(values): values = values.astype('float64') fill_value = na_value_for_dtype(values.dtype) if values.ndim == 1: return fill_value else: result_shape = (values.shape[:axis] + values.shape[axis + 1:]) result = np.empty(result_shape, dtype=values.dtype) result.fill(fill_value) return result
python
def _na_for_min_count(values, axis): """Return the missing value for `values` Parameters ---------- values : ndarray axis : int or None axis for the reduction Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing. """ # we either return np.nan or pd.NaT if is_numeric_dtype(values): values = values.astype('float64') fill_value = na_value_for_dtype(values.dtype) if values.ndim == 1: return fill_value else: result_shape = (values.shape[:axis] + values.shape[axis + 1:]) result = np.empty(result_shape, dtype=values.dtype) result.fill(fill_value) return result
[ "def", "_na_for_min_count", "(", "values", ",", "axis", ")", ":", "# we either return np.nan or pd.NaT", "if", "is_numeric_dtype", "(", "values", ")", ":", "values", "=", "values", ".", "astype", "(", "'float64'", ")", "fill_value", "=", "na_value_for_dtype", "(", "values", ".", "dtype", ")", "if", "values", ".", "ndim", "==", "1", ":", "return", "fill_value", "else", ":", "result_shape", "=", "(", "values", ".", "shape", "[", ":", "axis", "]", "+", "values", ".", "shape", "[", "axis", "+", "1", ":", "]", ")", "result", "=", "np", ".", "empty", "(", "result_shape", ",", "dtype", "=", "values", ".", "dtype", ")", "result", ".", "fill", "(", "fill_value", ")", "return", "result" ]
Return the missing value for `values` Parameters ---------- values : ndarray axis : int or None axis for the reduction Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing.
[ "Return", "the", "missing", "value", "for", "values" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L307-L334
20,024
pandas-dev/pandas
pandas/core/nanops.py
nanany
def nanany(values, axis=None, skipna=True, mask=None): """ Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False """ values, mask, dtype, _, _ = _get_values(values, skipna, False, copy=skipna, mask=mask) return values.any(axis)
python
def nanany(values, axis=None, skipna=True, mask=None): """ Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False """ values, mask, dtype, _, _ = _get_values(values, skipna, False, copy=skipna, mask=mask) return values.any(axis)
[ "def", "nanany", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "mask", "=", "None", ")", ":", "values", ",", "mask", ",", "dtype", ",", "_", ",", "_", "=", "_get_values", "(", "values", ",", "skipna", ",", "False", ",", "copy", "=", "skipna", ",", "mask", "=", "mask", ")", "return", "values", ".", "any", "(", "axis", ")" ]
Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False
[ "Check", "if", "any", "elements", "along", "an", "axis", "evaluate", "to", "True", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L337-L367
20,025
pandas-dev/pandas
pandas/core/nanops.py
nanall
def nanall(values, axis=None, skipna=True, mask=None): """ Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False """ values, mask, dtype, _, _ = _get_values(values, skipna, True, copy=skipna, mask=mask) return values.all(axis)
python
def nanall(values, axis=None, skipna=True, mask=None): """ Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False """ values, mask, dtype, _, _ = _get_values(values, skipna, True, copy=skipna, mask=mask) return values.all(axis)
[ "def", "nanall", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "mask", "=", "None", ")", ":", "values", ",", "mask", ",", "dtype", ",", "_", ",", "_", "=", "_get_values", "(", "values", ",", "skipna", ",", "True", ",", "copy", "=", "skipna", ",", "mask", "=", "mask", ")", "return", "values", ".", "all", "(", "axis", ")" ]
Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False
[ "Check", "if", "all", "elements", "along", "an", "axis", "evaluate", "to", "True", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L370-L400
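Both boolean reductions above are easiest to see through the public Series API; with skipna=True the missing values are filled with False for any and with True for all before reducing:

import numpy as np
import pandas as pd

pd.Series([np.nan]).any()         # False: the NaN is filled with False first
pd.Series([1, 2, np.nan]).all()   # True:  the NaN is filled with True first
pd.Series([1, 0]).all()           # False, as in the docstring example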
20,026
pandas-dev/pandas
pandas/core/nanops.py
nansum
def nansum(values, axis=None, skipna=True, min_count=0, mask=None): """ Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0 """ values, mask, dtype, dtype_max, _ = _get_values(values, skipna, 0, mask=mask) dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype)
python
def nansum(values, axis=None, skipna=True, min_count=0, mask=None): """ Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0 """ values, mask, dtype, dtype_max, _ = _get_values(values, skipna, 0, mask=mask) dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype)
[ "def", "nansum", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "min_count", "=", "0", ",", "mask", "=", "None", ")", ":", "values", ",", "mask", ",", "dtype", ",", "dtype_max", ",", "_", "=", "_get_values", "(", "values", ",", "skipna", ",", "0", ",", "mask", "=", "mask", ")", "dtype_sum", "=", "dtype_max", "if", "is_float_dtype", "(", "dtype", ")", ":", "dtype_sum", "=", "dtype", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "dtype_sum", "=", "np", ".", "float64", "the_sum", "=", "values", ".", "sum", "(", "axis", ",", "dtype", "=", "dtype_sum", ")", "the_sum", "=", "_maybe_null_out", "(", "the_sum", ",", "axis", ",", "mask", ",", "min_count", "=", "min_count", ")", "return", "_wrap_results", "(", "the_sum", ",", "dtype", ")" ]
Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0
[ "Sum", "the", "elements", "along", "an", "axis", "ignoring", "NaNs" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L404-L438
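The min_count argument is what separates an all-NaN sum from a genuine zero; a quick check through the public API:

import numpy as np
import pandas as pd

s = pd.Series([np.nan, np.nan])
s.sum()              # 0.0 -- default min_count=0, an all-NaN sum collapses to zero
s.sum(min_count=1)   # nan -- fewer valid values than min_count

pd.Series([1, 2, np.nan]).sum()   # 3.0, as in the docstring example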
20,027
pandas-dev/pandas
pandas/core/nanops.py
nanmean
def nanmean(values, axis=None, skipna=True, mask=None): """ Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5 """ values, mask, dtype, dtype_max, _ = _get_values( values, skipna, 0, mask=mask) dtype_sum = dtype_max dtype_count = np.float64 if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)): dtype_sum = np.float64 elif is_float_dtype(dtype): dtype_sum = dtype dtype_count = dtype count = _get_counts(mask, axis, dtype=dtype_count) the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) if axis is not None and getattr(the_sum, 'ndim', False): with np.errstate(all="ignore"): # suppress division by zero warnings the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): the_mean[ct_mask] = np.nan else: the_mean = the_sum / count if count > 0 else np.nan return _wrap_results(the_mean, dtype)
python
def nanmean(values, axis=None, skipna=True, mask=None): """ Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5 """ values, mask, dtype, dtype_max, _ = _get_values( values, skipna, 0, mask=mask) dtype_sum = dtype_max dtype_count = np.float64 if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)): dtype_sum = np.float64 elif is_float_dtype(dtype): dtype_sum = dtype dtype_count = dtype count = _get_counts(mask, axis, dtype=dtype_count) the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) if axis is not None and getattr(the_sum, 'ndim', False): with np.errstate(all="ignore"): # suppress division by zero warnings the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): the_mean[ct_mask] = np.nan else: the_mean = the_sum / count if count > 0 else np.nan return _wrap_results(the_mean, dtype)
[ "def", "nanmean", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "mask", "=", "None", ")", ":", "values", ",", "mask", ",", "dtype", ",", "dtype_max", ",", "_", "=", "_get_values", "(", "values", ",", "skipna", ",", "0", ",", "mask", "=", "mask", ")", "dtype_sum", "=", "dtype_max", "dtype_count", "=", "np", ".", "float64", "if", "(", "is_integer_dtype", "(", "dtype", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", "or", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ")", ":", "dtype_sum", "=", "np", ".", "float64", "elif", "is_float_dtype", "(", "dtype", ")", ":", "dtype_sum", "=", "dtype", "dtype_count", "=", "dtype", "count", "=", "_get_counts", "(", "mask", ",", "axis", ",", "dtype", "=", "dtype_count", ")", "the_sum", "=", "_ensure_numeric", "(", "values", ".", "sum", "(", "axis", ",", "dtype", "=", "dtype_sum", ")", ")", "if", "axis", "is", "not", "None", "and", "getattr", "(", "the_sum", ",", "'ndim'", ",", "False", ")", ":", "with", "np", ".", "errstate", "(", "all", "=", "\"ignore\"", ")", ":", "# suppress division by zero warnings", "the_mean", "=", "the_sum", "/", "count", "ct_mask", "=", "count", "==", "0", "if", "ct_mask", ".", "any", "(", ")", ":", "the_mean", "[", "ct_mask", "]", "=", "np", ".", "nan", "else", ":", "the_mean", "=", "the_sum", "/", "count", "if", "count", ">", "0", "else", "np", ".", "nan", "return", "_wrap_results", "(", "the_mean", ",", "dtype", ")" ]
Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5
[ "Compute", "the", "mean", "of", "the", "element", "along", "an", "axis", "ignoring", "NaNs" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L443-L491
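Equivalent behaviour through the public API, showing the sum-of-valid / count-of-valid division described above:

import numpy as np
import pandas as pd

s = pd.Series([1, 2, np.nan])
s.mean()               # 1.5 -- 3.0 (sum of valid values) / 2 (count of valid values)
s.mean(skipna=False)   # nan -- the NaN propagates when skipna is disabled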
20,028
pandas-dev/pandas
pandas/core/nanops.py
nanstd
def nanstd(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0 """ result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)) return _wrap_results(result, values.dtype)
python
def nanstd(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0 """ result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)) return _wrap_results(result, values.dtype)
[ "def", "nanstd", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "ddof", "=", "1", ",", "mask", "=", "None", ")", ":", "result", "=", "np", ".", "sqrt", "(", "nanvar", "(", "values", ",", "axis", "=", "axis", ",", "skipna", "=", "skipna", ",", "ddof", "=", "ddof", ",", "mask", "=", "mask", ")", ")", "return", "_wrap_results", "(", "result", ",", "values", ".", "dtype", ")" ]
Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0
[ "Compute", "the", "standard", "deviation", "along", "given", "axis", "while", "ignoring", "NaNs" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L581-L611
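nanstd is simply the square root of nanvar, so ddof flows straight through; a quick cross-check via the public API:

import numpy as np
import pandas as pd

s = pd.Series([1, np.nan, 2, 3])
s.std()           # 1.0     -- sample std over the 3 valid values (ddof=1)
s.std(ddof=0)     # ~0.8165 -- population std
np.sqrt(s.var())  # 1.0     -- std == sqrt(var)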
20,029
pandas-dev/pandas
pandas/core/nanops.py
nanvar
def nanvar(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0 """ values = com.values_from_object(values) dtype = values.dtype if mask is None: mask = isna(values) if is_any_int_dtype(values): values = values.astype('f8') values[mask] = np.nan if is_float_dtype(values): count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(mask, axis, ddof) if skipna: values = values.copy() np.putmask(values, mask, 0) # xref GH10242 # Compute variance via two-pass algorithm, which is stable against # cancellation errors and relatively accurate for small numbers of # observations. # # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d # Return variance as np.float64 (the datatype used in the accumulator), # unless we were dealing with a float array, in which case use the same # precision as the original values array. if is_float_dtype(dtype): result = result.astype(dtype) return _wrap_results(result, values.dtype)
python
def nanvar(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0 """ values = com.values_from_object(values) dtype = values.dtype if mask is None: mask = isna(values) if is_any_int_dtype(values): values = values.astype('f8') values[mask] = np.nan if is_float_dtype(values): count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(mask, axis, ddof) if skipna: values = values.copy() np.putmask(values, mask, 0) # xref GH10242 # Compute variance via two-pass algorithm, which is stable against # cancellation errors and relatively accurate for small numbers of # observations. # # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d # Return variance as np.float64 (the datatype used in the accumulator), # unless we were dealing with a float array, in which case use the same # precision as the original values array. if is_float_dtype(dtype): result = result.astype(dtype) return _wrap_results(result, values.dtype)
[ "def", "nanvar", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "ddof", "=", "1", ",", "mask", "=", "None", ")", ":", "values", "=", "com", ".", "values_from_object", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "if", "mask", "is", "None", ":", "mask", "=", "isna", "(", "values", ")", "if", "is_any_int_dtype", "(", "values", ")", ":", "values", "=", "values", ".", "astype", "(", "'f8'", ")", "values", "[", "mask", "]", "=", "np", ".", "nan", "if", "is_float_dtype", "(", "values", ")", ":", "count", ",", "d", "=", "_get_counts_nanvar", "(", "mask", ",", "axis", ",", "ddof", ",", "values", ".", "dtype", ")", "else", ":", "count", ",", "d", "=", "_get_counts_nanvar", "(", "mask", ",", "axis", ",", "ddof", ")", "if", "skipna", ":", "values", "=", "values", ".", "copy", "(", ")", "np", ".", "putmask", "(", "values", ",", "mask", ",", "0", ")", "# xref GH10242", "# Compute variance via two-pass algorithm, which is stable against", "# cancellation errors and relatively accurate for small numbers of", "# observations.", "#", "# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance", "avg", "=", "_ensure_numeric", "(", "values", ".", "sum", "(", "axis", "=", "axis", ",", "dtype", "=", "np", ".", "float64", ")", ")", "/", "count", "if", "axis", "is", "not", "None", ":", "avg", "=", "np", ".", "expand_dims", "(", "avg", ",", "axis", ")", "sqr", "=", "_ensure_numeric", "(", "(", "avg", "-", "values", ")", "**", "2", ")", "np", ".", "putmask", "(", "sqr", ",", "mask", ",", "0", ")", "result", "=", "sqr", ".", "sum", "(", "axis", "=", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "/", "d", "# Return variance as np.float64 (the datatype used in the accumulator),", "# unless we were dealing with a float array, in which case use the same", "# precision as the original values array.", "if", "is_float_dtype", "(", "dtype", ")", ":", "result", "=", "result", ".", "astype", "(", "dtype", ")", "return", "_wrap_results", "(", "result", ",", "values", ".", "dtype", ")" ]
Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0
[ "Compute", "the", "variance", "along", "given", "axis", "while", "ignoring", "NaNs" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L616-L679
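A stripped-down NumPy sketch of the two-pass algorithm referenced above (mask the NaNs, take the mean of the valid values, then sum squared deviations); the function name is illustrative, not the pandas code path:

import numpy as np

def two_pass_nanvar(values, ddof=1):
    x = np.asarray(values, dtype=np.float64)
    mask = np.isnan(x)
    count = (~mask).sum()
    x = np.where(mask, 0.0, x)                 # zero out masked slots
    avg = x.sum() / count                      # first pass: mean of valid values
    sqr = np.where(mask, 0.0, (x - avg) ** 2)  # second pass: squared deviations
    return sqr.sum() / (count - ddof)

two_pass_nanvar([1.0, np.nan, 2.0, 3.0])   # 1.0, consistent with the docstring example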
20,030
pandas-dev/pandas
pandas/core/nanops.py
nansem
def nansem(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s) 0.5773502691896258 """ # This checks if non-numeric-like data is passed with numeric_only=False # and raises a TypeError otherwise nanvar(values, axis, skipna, ddof=ddof, mask=mask) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype) var = nanvar(values, axis, skipna, ddof=ddof) return np.sqrt(var) / np.sqrt(count)
python
def nansem(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s) 0.5773502691896258 """ # This checks if non-numeric-like data is passed with numeric_only=False # and raises a TypeError otherwise nanvar(values, axis, skipna, ddof=ddof, mask=mask) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype) var = nanvar(values, axis, skipna, ddof=ddof) return np.sqrt(var) / np.sqrt(count)
[ "def", "nansem", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "ddof", "=", "1", ",", "mask", "=", "None", ")", ":", "# This checks if non-numeric-like data is passed with numeric_only=False", "# and raises a TypeError otherwise", "nanvar", "(", "values", ",", "axis", ",", "skipna", ",", "ddof", "=", "ddof", ",", "mask", "=", "mask", ")", "if", "mask", "is", "None", ":", "mask", "=", "isna", "(", "values", ")", "if", "not", "is_float_dtype", "(", "values", ".", "dtype", ")", ":", "values", "=", "values", ".", "astype", "(", "'f8'", ")", "count", ",", "_", "=", "_get_counts_nanvar", "(", "mask", ",", "axis", ",", "ddof", ",", "values", ".", "dtype", ")", "var", "=", "nanvar", "(", "values", ",", "axis", ",", "skipna", ",", "ddof", "=", "ddof", ")", "return", "np", ".", "sqrt", "(", "var", ")", "/", "np", ".", "sqrt", "(", "count", ")" ]
Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s) 0.5773502691896258
[ "Compute", "the", "standard", "error", "in", "the", "mean", "along", "given", "axis", "while", "ignoring", "NaNs" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L683-L723
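Because the standard error is just std / sqrt(number of valid values), the result can be cross-checked through the public API:

import numpy as np
import pandas as pd

s = pd.Series([1, np.nan, 2, 3])
s.sem()                              # ~0.57735
s.std(ddof=1) / np.sqrt(s.count())   # same value: sqrt(var) / sqrt(n)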
20,031
pandas-dev/pandas
pandas/core/nanops.py
nanskew
def nanskew(values, axis=None, skipna=True, mask=None): """ Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G1. The algorithm computes this coefficient directly from the second and third central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 2]) >>> nanops.nanskew(s) 1.7320508075688787 """ values = com.values_from_object(values) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count = _get_counts(mask, axis) else: count = _get_counts(mask, axis, dtype=values.dtype) if skipna: values = values.copy() np.putmask(values, mask, 0) mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted3 = adjusted2 * adjusted m2 = adjusted2.sum(axis, dtype=np.float64) m3 = adjusted3.sum(axis, dtype=np.float64) # floating point error # # #18044 in _libs/windows.pyx calc_skew follow this behavior # to fix the fperr to treat m2 <1e-14 as zero m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) with np.errstate(invalid='ignore', divide='ignore'): result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5) dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype) if isinstance(result, np.ndarray): result = np.where(m2 == 0, 0, result) result[count < 3] = np.nan return result else: result = 0 if m2 == 0 else result if count < 3: return np.nan return result
python
def nanskew(values, axis=None, skipna=True, mask=None): """ Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G1. The algorithm computes this coefficient directly from the second and third central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 2]) >>> nanops.nanskew(s) 1.7320508075688787 """ values = com.values_from_object(values) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count = _get_counts(mask, axis) else: count = _get_counts(mask, axis, dtype=values.dtype) if skipna: values = values.copy() np.putmask(values, mask, 0) mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted3 = adjusted2 * adjusted m2 = adjusted2.sum(axis, dtype=np.float64) m3 = adjusted3.sum(axis, dtype=np.float64) # floating point error # # #18044 in _libs/windows.pyx calc_skew follow this behavior # to fix the fperr to treat m2 <1e-14 as zero m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) with np.errstate(invalid='ignore', divide='ignore'): result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5) dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype) if isinstance(result, np.ndarray): result = np.where(m2 == 0, 0, result) result[count < 3] = np.nan return result else: result = 0 if m2 == 0 else result if count < 3: return np.nan return result
[ "def", "nanskew", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "mask", "=", "None", ")", ":", "values", "=", "com", ".", "values_from_object", "(", "values", ")", "if", "mask", "is", "None", ":", "mask", "=", "isna", "(", "values", ")", "if", "not", "is_float_dtype", "(", "values", ".", "dtype", ")", ":", "values", "=", "values", ".", "astype", "(", "'f8'", ")", "count", "=", "_get_counts", "(", "mask", ",", "axis", ")", "else", ":", "count", "=", "_get_counts", "(", "mask", ",", "axis", ",", "dtype", "=", "values", ".", "dtype", ")", "if", "skipna", ":", "values", "=", "values", ".", "copy", "(", ")", "np", ".", "putmask", "(", "values", ",", "mask", ",", "0", ")", "mean", "=", "values", ".", "sum", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "/", "count", "if", "axis", "is", "not", "None", ":", "mean", "=", "np", ".", "expand_dims", "(", "mean", ",", "axis", ")", "adjusted", "=", "values", "-", "mean", "if", "skipna", ":", "np", ".", "putmask", "(", "adjusted", ",", "mask", ",", "0", ")", "adjusted2", "=", "adjusted", "**", "2", "adjusted3", "=", "adjusted2", "*", "adjusted", "m2", "=", "adjusted2", ".", "sum", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "m3", "=", "adjusted3", ".", "sum", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "# floating point error", "#", "# #18044 in _libs/windows.pyx calc_skew follow this behavior", "# to fix the fperr to treat m2 <1e-14 as zero", "m2", "=", "_zero_out_fperr", "(", "m2", ")", "m3", "=", "_zero_out_fperr", "(", "m3", ")", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ",", "divide", "=", "'ignore'", ")", ":", "result", "=", "(", "count", "*", "(", "count", "-", "1", ")", "**", "0.5", "/", "(", "count", "-", "2", ")", ")", "*", "(", "m3", "/", "m2", "**", "1.5", ")", "dtype", "=", "values", ".", "dtype", "if", "is_float_dtype", "(", "dtype", ")", ":", "result", "=", "result", ".", "astype", "(", "dtype", ")", "if", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "result", "=", "np", ".", "where", "(", "m2", "==", "0", ",", "0", ",", "result", ")", "result", "[", "count", "<", "3", "]", "=", "np", ".", "nan", "return", "result", "else", ":", "result", "=", "0", "if", "m2", "==", "0", "else", "result", "if", "count", "<", "3", ":", "return", "np", ".", "nan", "return", "result" ]
Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G1. The algorithm computes this coefficient directly from the second and third central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 2]) >>> nanops.nanskew(s) 1.7320508075688787
[ "Compute", "the", "sample", "skewness", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L816-L891
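The G1 coefficient above reduces to a few lines of NumPy once the NaNs are dropped; an illustrative re-derivation (not the pandas implementation, which also handles 2-D input and float-precision corner cases):

import numpy as np

def g1_skew(values):
    x = np.asarray(values, dtype=np.float64)
    x = x[~np.isnan(x)]
    n = len(x)
    d = x - x.mean()
    m2 = (d ** 2).sum()   # sum of squared deviations
    m3 = (d ** 3).sum()   # sum of cubed deviations
    return (n * (n - 1) ** 0.5 / (n - 2)) * (m3 / m2 ** 1.5)

g1_skew([1, np.nan, 1, 2])   # ~1.7320508, consistent with the docstring example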
20,032
pandas-dev/pandas
pandas/core/nanops.py
nankurt
def nankurt(values, axis=None, skipna=True, mask=None): """ Compute the sample excess kurtosis The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G2, computed directly from the second and fourth central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 3, 2]) >>> nanops.nankurt(s) -1.2892561983471076 """ values = com.values_from_object(values) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count = _get_counts(mask, axis) else: count = _get_counts(mask, axis, dtype=values.dtype) if skipna: values = values.copy() np.putmask(values, mask, 0) mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted4 = adjusted2 ** 2 m2 = adjusted2.sum(axis, dtype=np.float64) m4 = adjusted4.sum(axis, dtype=np.float64) with np.errstate(invalid='ignore', divide='ignore'): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) numer = count * (count + 1) * (count - 1) * m4 denom = (count - 2) * (count - 3) * m2 ** 2 # floating point error # # #18044 in _libs/windows.pyx calc_kurt follow this behavior # to fix the fperr to treat denom <1e-14 as zero numer = _zero_out_fperr(numer) denom = _zero_out_fperr(denom) if not isinstance(denom, np.ndarray): # if ``denom`` is a scalar, check these corner cases first before # doing division if count < 4: return np.nan if denom == 0: return 0 with np.errstate(invalid='ignore', divide='ignore'): result = numer / denom - adj dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype) if isinstance(result, np.ndarray): result = np.where(denom == 0, 0, result) result[count < 4] = np.nan return result
python
def nankurt(values, axis=None, skipna=True, mask=None): """ Compute the sample excess kurtosis The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G2, computed directly from the second and fourth central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 3, 2]) >>> nanops.nankurt(s) -1.2892561983471076 """ values = com.values_from_object(values) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count = _get_counts(mask, axis) else: count = _get_counts(mask, axis, dtype=values.dtype) if skipna: values = values.copy() np.putmask(values, mask, 0) mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted4 = adjusted2 ** 2 m2 = adjusted2.sum(axis, dtype=np.float64) m4 = adjusted4.sum(axis, dtype=np.float64) with np.errstate(invalid='ignore', divide='ignore'): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) numer = count * (count + 1) * (count - 1) * m4 denom = (count - 2) * (count - 3) * m2 ** 2 # floating point error # # #18044 in _libs/windows.pyx calc_kurt follow this behavior # to fix the fperr to treat denom <1e-14 as zero numer = _zero_out_fperr(numer) denom = _zero_out_fperr(denom) if not isinstance(denom, np.ndarray): # if ``denom`` is a scalar, check these corner cases first before # doing division if count < 4: return np.nan if denom == 0: return 0 with np.errstate(invalid='ignore', divide='ignore'): result = numer / denom - adj dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype) if isinstance(result, np.ndarray): result = np.where(denom == 0, 0, result) result[count < 4] = np.nan return result
[ "def", "nankurt", "(", "values", ",", "axis", "=", "None", ",", "skipna", "=", "True", ",", "mask", "=", "None", ")", ":", "values", "=", "com", ".", "values_from_object", "(", "values", ")", "if", "mask", "is", "None", ":", "mask", "=", "isna", "(", "values", ")", "if", "not", "is_float_dtype", "(", "values", ".", "dtype", ")", ":", "values", "=", "values", ".", "astype", "(", "'f8'", ")", "count", "=", "_get_counts", "(", "mask", ",", "axis", ")", "else", ":", "count", "=", "_get_counts", "(", "mask", ",", "axis", ",", "dtype", "=", "values", ".", "dtype", ")", "if", "skipna", ":", "values", "=", "values", ".", "copy", "(", ")", "np", ".", "putmask", "(", "values", ",", "mask", ",", "0", ")", "mean", "=", "values", ".", "sum", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "/", "count", "if", "axis", "is", "not", "None", ":", "mean", "=", "np", ".", "expand_dims", "(", "mean", ",", "axis", ")", "adjusted", "=", "values", "-", "mean", "if", "skipna", ":", "np", ".", "putmask", "(", "adjusted", ",", "mask", ",", "0", ")", "adjusted2", "=", "adjusted", "**", "2", "adjusted4", "=", "adjusted2", "**", "2", "m2", "=", "adjusted2", ".", "sum", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "m4", "=", "adjusted4", ".", "sum", "(", "axis", ",", "dtype", "=", "np", ".", "float64", ")", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ",", "divide", "=", "'ignore'", ")", ":", "adj", "=", "3", "*", "(", "count", "-", "1", ")", "**", "2", "/", "(", "(", "count", "-", "2", ")", "*", "(", "count", "-", "3", ")", ")", "numer", "=", "count", "*", "(", "count", "+", "1", ")", "*", "(", "count", "-", "1", ")", "*", "m4", "denom", "=", "(", "count", "-", "2", ")", "*", "(", "count", "-", "3", ")", "*", "m2", "**", "2", "# floating point error", "#", "# #18044 in _libs/windows.pyx calc_kurt follow this behavior", "# to fix the fperr to treat denom <1e-14 as zero", "numer", "=", "_zero_out_fperr", "(", "numer", ")", "denom", "=", "_zero_out_fperr", "(", "denom", ")", "if", "not", "isinstance", "(", "denom", ",", "np", ".", "ndarray", ")", ":", "# if ``denom`` is a scalar, check these corner cases first before", "# doing division", "if", "count", "<", "4", ":", "return", "np", ".", "nan", "if", "denom", "==", "0", ":", "return", "0", "with", "np", ".", "errstate", "(", "invalid", "=", "'ignore'", ",", "divide", "=", "'ignore'", ")", ":", "result", "=", "numer", "/", "denom", "-", "adj", "dtype", "=", "values", ".", "dtype", "if", "is_float_dtype", "(", "dtype", ")", ":", "result", "=", "result", ".", "astype", "(", "dtype", ")", "if", "isinstance", "(", "result", ",", "np", ".", "ndarray", ")", ":", "result", "=", "np", ".", "where", "(", "denom", "==", "0", ",", "0", ",", "result", ")", "result", "[", "count", "<", "4", "]", "=", "np", ".", "nan", "return", "result" ]
Compute the sample excess kurtosis The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G2, computed directly from the second and fourth central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 3, 2]) >>> nanops.nankurt(s) -1.2892561983471076
[ "Compute", "the", "sample", "excess", "kurtosis" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L895-L980
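The same exercise for the excess-kurtosis coefficient G2 used above; again an illustrative re-derivation rather than the pandas code path:

import numpy as np

def g2_kurt(values):
    x = np.asarray(values, dtype=np.float64)
    x = x[~np.isnan(x)]
    n = len(x)
    d = x - x.mean()
    m2 = (d ** 2).sum()
    m4 = (d ** 4).sum()
    adj = 3 * (n - 1) ** 2 / ((n - 2) * (n - 3))
    return n * (n + 1) * (n - 1) * m4 / ((n - 2) * (n - 3) * m2 ** 2) - adj

g2_kurt([1, np.nan, 1, 3, 2])   # ~-1.2892562, consistent with the docstring example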
20,033
pandas-dev/pandas
pandas/core/nanops.py
_nanpercentile_1d
def _nanpercentile_1d(values, mask, q, na_value, interpolation): """ Wraper for np.percentile that skips missing values, specialized to 1-dimensional case. Parameters ---------- values : array over which to find quantiles mask : ndarray[bool] locations in values that should be considered missing q : scalar or array of quantile indices to find na_value : scalar value to return for empty or all-null values interpolation : str Returns ------- quantiles : scalar or array """ # mask is Union[ExtensionArray, ndarray] values = values[~mask] if len(values) == 0: if lib.is_scalar(q): return na_value else: return np.array([na_value] * len(q), dtype=values.dtype) return np.percentile(values, q, interpolation=interpolation)
python
def _nanpercentile_1d(values, mask, q, na_value, interpolation): """ Wraper for np.percentile that skips missing values, specialized to 1-dimensional case. Parameters ---------- values : array over which to find quantiles mask : ndarray[bool] locations in values that should be considered missing q : scalar or array of quantile indices to find na_value : scalar value to return for empty or all-null values interpolation : str Returns ------- quantiles : scalar or array """ # mask is Union[ExtensionArray, ndarray] values = values[~mask] if len(values) == 0: if lib.is_scalar(q): return na_value else: return np.array([na_value] * len(q), dtype=values.dtype) return np.percentile(values, q, interpolation=interpolation)
[ "def", "_nanpercentile_1d", "(", "values", ",", "mask", ",", "q", ",", "na_value", ",", "interpolation", ")", ":", "# mask is Union[ExtensionArray, ndarray]", "values", "=", "values", "[", "~", "mask", "]", "if", "len", "(", "values", ")", "==", "0", ":", "if", "lib", ".", "is_scalar", "(", "q", ")", ":", "return", "na_value", "else", ":", "return", "np", ".", "array", "(", "[", "na_value", "]", "*", "len", "(", "q", ")", ",", "dtype", "=", "values", ".", "dtype", ")", "return", "np", ".", "percentile", "(", "values", ",", "q", ",", "interpolation", "=", "interpolation", ")" ]
Wrapper for np.percentile that skips missing values, specialized to 1-dimensional case. Parameters ---------- values : array over which to find quantiles mask : ndarray[bool] locations in values that should be considered missing q : scalar or array of quantile indices to find na_value : scalar value to return for empty or all-null values interpolation : str Returns ------- quantiles : scalar or array
[ "Wraper", "for", "np", ".", "percentile", "that", "skips", "missing", "values", "specialized", "to", "1", "-", "dimensional", "case", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L1203-L1232
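A minimal usage sketch for the private helper above. The import path simply mirrors the record's file (pandas/core/nanops.py) at this sha; it is not public API and may move between versions.

import numpy as np
from pandas.core.nanops import _nanpercentile_1d  # private helper, per the record above

vals = np.array([1.0, np.nan, 3.0, 4.0])
mask = np.isnan(vals)                                # positions treated as missing
_nanpercentile_1d(vals, mask, 50, np.nan, 'linear')  # -> 3.0, median of [1.0, 3.0, 4.0]
_nanpercentile_1d(vals, np.ones(4, dtype=bool), 50, np.nan, 'linear')  # all-masked -> nan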
20,034
pandas-dev/pandas
pandas/core/nanops.py
nanpercentile
def nanpercentile(values, q, axis, na_value, mask, ndim, interpolation): """ Wraper for np.percentile that skips missing values. Parameters ---------- values : array over which to find quantiles q : scalar or array of quantile indices to find axis : {0, 1} na_value : scalar value to return for empty or all-null values mask : ndarray[bool] locations in values that should be considered missing ndim : {1, 2} interpolation : str Returns ------- quantiles : scalar or array """ if not lib.is_scalar(mask) and mask.any(): if ndim == 1: return _nanpercentile_1d(values, mask, q, na_value, interpolation=interpolation) else: # for nonconsolidatable blocks mask is 1D, but values 2D if mask.ndim < values.ndim: mask = mask.reshape(values.shape) if axis == 0: values = values.T mask = mask.T result = [_nanpercentile_1d(val, m, q, na_value, interpolation=interpolation) for (val, m) in zip(list(values), list(mask))] result = np.array(result, dtype=values.dtype, copy=False).T return result else: return np.percentile(values, q, axis=axis, interpolation=interpolation)
python
def nanpercentile(values, q, axis, na_value, mask, ndim, interpolation): """ Wraper for np.percentile that skips missing values. Parameters ---------- values : array over which to find quantiles q : scalar or array of quantile indices to find axis : {0, 1} na_value : scalar value to return for empty or all-null values mask : ndarray[bool] locations in values that should be considered missing ndim : {1, 2} interpolation : str Returns ------- quantiles : scalar or array """ if not lib.is_scalar(mask) and mask.any(): if ndim == 1: return _nanpercentile_1d(values, mask, q, na_value, interpolation=interpolation) else: # for nonconsolidatable blocks mask is 1D, but values 2D if mask.ndim < values.ndim: mask = mask.reshape(values.shape) if axis == 0: values = values.T mask = mask.T result = [_nanpercentile_1d(val, m, q, na_value, interpolation=interpolation) for (val, m) in zip(list(values), list(mask))] result = np.array(result, dtype=values.dtype, copy=False).T return result else: return np.percentile(values, q, axis=axis, interpolation=interpolation)
[ "def", "nanpercentile", "(", "values", ",", "q", ",", "axis", ",", "na_value", ",", "mask", ",", "ndim", ",", "interpolation", ")", ":", "if", "not", "lib", ".", "is_scalar", "(", "mask", ")", "and", "mask", ".", "any", "(", ")", ":", "if", "ndim", "==", "1", ":", "return", "_nanpercentile_1d", "(", "values", ",", "mask", ",", "q", ",", "na_value", ",", "interpolation", "=", "interpolation", ")", "else", ":", "# for nonconsolidatable blocks mask is 1D, but values 2D", "if", "mask", ".", "ndim", "<", "values", ".", "ndim", ":", "mask", "=", "mask", ".", "reshape", "(", "values", ".", "shape", ")", "if", "axis", "==", "0", ":", "values", "=", "values", ".", "T", "mask", "=", "mask", ".", "T", "result", "=", "[", "_nanpercentile_1d", "(", "val", ",", "m", ",", "q", ",", "na_value", ",", "interpolation", "=", "interpolation", ")", "for", "(", "val", ",", "m", ")", "in", "zip", "(", "list", "(", "values", ")", ",", "list", "(", "mask", ")", ")", "]", "result", "=", "np", ".", "array", "(", "result", ",", "dtype", "=", "values", ".", "dtype", ",", "copy", "=", "False", ")", ".", "T", "return", "result", "else", ":", "return", "np", ".", "percentile", "(", "values", ",", "q", ",", "axis", "=", "axis", ",", "interpolation", "=", "interpolation", ")" ]
Wrapper for np.percentile that skips missing values. Parameters ---------- values : array over which to find quantiles q : scalar or array of quantile indices to find axis : {0, 1} na_value : scalar value to return for empty or all-null values mask : ndarray[bool] locations in values that should be considered missing ndim : {1, 2} interpolation : str Returns ------- quantiles : scalar or array
[ "Wraper", "for", "np", ".", "percentile", "that", "skips", "missing", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/nanops.py#L1235-L1272
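In everyday use this path is reached through the public quantile API rather than being called directly; a small sketch of the NaN-skipping behaviour it provides:

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [4.0, 5.0, np.nan]})
df.quantile(0.5)   # missing values are skipped per column
# a    2.0
# b    4.5
# Name: 0.5, dtype: float64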
20,035
pandas-dev/pandas
pandas/io/clipboards.py
read_clipboard
def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover r""" Read text from clipboard and pass to read_csv. See read_csv for the full argument list Parameters ---------- sep : str, default '\s+' A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. Returns ------- parsed : DataFrame """ encoding = kwargs.pop('encoding', 'utf-8') # only utf-8 is valid for passed value because that's what clipboard # supports if encoding is not None and encoding.lower().replace('-', '') != 'utf8': raise NotImplementedError( 'reading from clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_get from pandas.io.parsers import read_csv text = clipboard_get() # Try to decode (if needed, as "text" might already be a string here). try: text = text.decode(kwargs.get('encoding') or get_option('display.encoding')) except AttributeError: pass # Excel copies into clipboard with \t separation # inspect no more then the 10 first lines, if they # all contain an equal number (>0) of tabs, infer # that this came from excel and set 'sep' accordingly lines = text[:10000].split('\n')[:-1][:10] # Need to remove leading white space, since read_csv # accepts: # a b # 0 1 2 # 1 3 4 counts = {x.lstrip().count('\t') for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: sep = '\t' # Edge case where sep is specified to be None, return to default if sep is None and kwargs.get('delim_whitespace') is None: sep = r'\s+' # Regex separator currently only works with python engine. # Default to python if separator is multi-character (regex) if len(sep) > 1 and kwargs.get('engine') is None: kwargs['engine'] = 'python' elif len(sep) > 1 and kwargs.get('engine') == 'c': warnings.warn('read_clipboard with regex separator does not work' ' properly with c engine') return read_csv(StringIO(text), sep=sep, **kwargs)
python
def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover r""" Read text from clipboard and pass to read_csv. See read_csv for the full argument list Parameters ---------- sep : str, default '\s+' A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. Returns ------- parsed : DataFrame """ encoding = kwargs.pop('encoding', 'utf-8') # only utf-8 is valid for passed value because that's what clipboard # supports if encoding is not None and encoding.lower().replace('-', '') != 'utf8': raise NotImplementedError( 'reading from clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_get from pandas.io.parsers import read_csv text = clipboard_get() # Try to decode (if needed, as "text" might already be a string here). try: text = text.decode(kwargs.get('encoding') or get_option('display.encoding')) except AttributeError: pass # Excel copies into clipboard with \t separation # inspect no more then the 10 first lines, if they # all contain an equal number (>0) of tabs, infer # that this came from excel and set 'sep' accordingly lines = text[:10000].split('\n')[:-1][:10] # Need to remove leading white space, since read_csv # accepts: # a b # 0 1 2 # 1 3 4 counts = {x.lstrip().count('\t') for x in lines} if len(lines) > 1 and len(counts) == 1 and counts.pop() != 0: sep = '\t' # Edge case where sep is specified to be None, return to default if sep is None and kwargs.get('delim_whitespace') is None: sep = r'\s+' # Regex separator currently only works with python engine. # Default to python if separator is multi-character (regex) if len(sep) > 1 and kwargs.get('engine') is None: kwargs['engine'] = 'python' elif len(sep) > 1 and kwargs.get('engine') == 'c': warnings.warn('read_clipboard with regex separator does not work' ' properly with c engine') return read_csv(StringIO(text), sep=sep, **kwargs)
[ "def", "read_clipboard", "(", "sep", "=", "r'\\s+'", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "encoding", "=", "kwargs", ".", "pop", "(", "'encoding'", ",", "'utf-8'", ")", "# only utf-8 is valid for passed value because that's what clipboard", "# supports", "if", "encoding", "is", "not", "None", "and", "encoding", ".", "lower", "(", ")", ".", "replace", "(", "'-'", ",", "''", ")", "!=", "'utf8'", ":", "raise", "NotImplementedError", "(", "'reading from clipboard only supports utf-8 encoding'", ")", "from", "pandas", ".", "io", ".", "clipboard", "import", "clipboard_get", "from", "pandas", ".", "io", ".", "parsers", "import", "read_csv", "text", "=", "clipboard_get", "(", ")", "# Try to decode (if needed, as \"text\" might already be a string here).", "try", ":", "text", "=", "text", ".", "decode", "(", "kwargs", ".", "get", "(", "'encoding'", ")", "or", "get_option", "(", "'display.encoding'", ")", ")", "except", "AttributeError", ":", "pass", "# Excel copies into clipboard with \\t separation", "# inspect no more then the 10 first lines, if they", "# all contain an equal number (>0) of tabs, infer", "# that this came from excel and set 'sep' accordingly", "lines", "=", "text", "[", ":", "10000", "]", ".", "split", "(", "'\\n'", ")", "[", ":", "-", "1", "]", "[", ":", "10", "]", "# Need to remove leading white space, since read_csv", "# accepts:", "# a b", "# 0 1 2", "# 1 3 4", "counts", "=", "{", "x", ".", "lstrip", "(", ")", ".", "count", "(", "'\\t'", ")", "for", "x", "in", "lines", "}", "if", "len", "(", "lines", ")", ">", "1", "and", "len", "(", "counts", ")", "==", "1", "and", "counts", ".", "pop", "(", ")", "!=", "0", ":", "sep", "=", "'\\t'", "# Edge case where sep is specified to be None, return to default", "if", "sep", "is", "None", "and", "kwargs", ".", "get", "(", "'delim_whitespace'", ")", "is", "None", ":", "sep", "=", "r'\\s+'", "# Regex separator currently only works with python engine.", "# Default to python if separator is multi-character (regex)", "if", "len", "(", "sep", ")", ">", "1", "and", "kwargs", ".", "get", "(", "'engine'", ")", "is", "None", ":", "kwargs", "[", "'engine'", "]", "=", "'python'", "elif", "len", "(", "sep", ")", ">", "1", "and", "kwargs", ".", "get", "(", "'engine'", ")", "==", "'c'", ":", "warnings", ".", "warn", "(", "'read_clipboard with regex separator does not work'", "' properly with c engine'", ")", "return", "read_csv", "(", "StringIO", "(", "text", ")", ",", "sep", "=", "sep", ",", "*", "*", "kwargs", ")" ]
r""" Read text from clipboard and pass to read_csv. See read_csv for the full argument list Parameters ---------- sep : str, default '\s+' A string or regex delimiter. The default of '\s+' denotes one or more whitespace characters. Returns ------- parsed : DataFrame
[ "r", "Read", "text", "from", "clipboard", "and", "pass", "to", "read_csv", ".", "See", "read_csv", "for", "the", "full", "argument", "list" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/clipboards.py#L10-L72
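A minimal usage sketch for the reader above; it assumes a working clipboard backend is available (e.g. xclip or xsel on Linux, per the clipboard module's requirements):

import pandas as pd

# Copy a block of cells from a spreadsheet (tab-separated), then:
df = pd.read_clipboard()          # sep defaults to r'\s+'; an all-tab block is auto-detected
df2 = pd.read_clipboard(sep=',')  # explicit single-character separator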
20,036
pandas-dev/pandas
pandas/io/clipboards.py
to_clipboard
def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover """ Attempt to write text representation of object to the system clipboard The clipboard can be then pasted into Excel for example. Parameters ---------- obj : the object to write to the clipboard excel : boolean, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel. if False, write a string representation of the object to the clipboard sep : optional, defaults to tab other keywords are passed to to_csv Notes ----- Requirements for your platform - Linux: xclip, or xsel (with gtk or PyQt4 modules) - Windows: - OS X: """ encoding = kwargs.pop('encoding', 'utf-8') # testing if an invalid encoding is passed to clipboard if encoding is not None and encoding.lower().replace('-', '') != 'utf8': raise ValueError('clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_set if excel is None: excel = True if excel: try: if sep is None: sep = '\t' buf = StringIO() # clipboard_set (pyperclip) expects unicode obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs) text = buf.getvalue() clipboard_set(text) return except TypeError: warnings.warn('to_clipboard in excel mode requires a single ' 'character separator.') elif sep is not None: warnings.warn('to_clipboard with excel=False ignores the sep argument') if isinstance(obj, ABCDataFrame): # str(df) has various unhelpful defaults, like truncation with option_context('display.max_colwidth', 999999): objstr = obj.to_string(**kwargs) else: objstr = str(obj) clipboard_set(objstr)
python
def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover """ Attempt to write text representation of object to the system clipboard The clipboard can be then pasted into Excel for example. Parameters ---------- obj : the object to write to the clipboard excel : boolean, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel. if False, write a string representation of the object to the clipboard sep : optional, defaults to tab other keywords are passed to to_csv Notes ----- Requirements for your platform - Linux: xclip, or xsel (with gtk or PyQt4 modules) - Windows: - OS X: """ encoding = kwargs.pop('encoding', 'utf-8') # testing if an invalid encoding is passed to clipboard if encoding is not None and encoding.lower().replace('-', '') != 'utf8': raise ValueError('clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_set if excel is None: excel = True if excel: try: if sep is None: sep = '\t' buf = StringIO() # clipboard_set (pyperclip) expects unicode obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs) text = buf.getvalue() clipboard_set(text) return except TypeError: warnings.warn('to_clipboard in excel mode requires a single ' 'character separator.') elif sep is not None: warnings.warn('to_clipboard with excel=False ignores the sep argument') if isinstance(obj, ABCDataFrame): # str(df) has various unhelpful defaults, like truncation with option_context('display.max_colwidth', 999999): objstr = obj.to_string(**kwargs) else: objstr = str(obj) clipboard_set(objstr)
[ "def", "to_clipboard", "(", "obj", ",", "excel", "=", "True", ",", "sep", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover", "encoding", "=", "kwargs", ".", "pop", "(", "'encoding'", ",", "'utf-8'", ")", "# testing if an invalid encoding is passed to clipboard", "if", "encoding", "is", "not", "None", "and", "encoding", ".", "lower", "(", ")", ".", "replace", "(", "'-'", ",", "''", ")", "!=", "'utf8'", ":", "raise", "ValueError", "(", "'clipboard only supports utf-8 encoding'", ")", "from", "pandas", ".", "io", ".", "clipboard", "import", "clipboard_set", "if", "excel", "is", "None", ":", "excel", "=", "True", "if", "excel", ":", "try", ":", "if", "sep", "is", "None", ":", "sep", "=", "'\\t'", "buf", "=", "StringIO", "(", ")", "# clipboard_set (pyperclip) expects unicode", "obj", ".", "to_csv", "(", "buf", ",", "sep", "=", "sep", ",", "encoding", "=", "'utf-8'", ",", "*", "*", "kwargs", ")", "text", "=", "buf", ".", "getvalue", "(", ")", "clipboard_set", "(", "text", ")", "return", "except", "TypeError", ":", "warnings", ".", "warn", "(", "'to_clipboard in excel mode requires a single '", "'character separator.'", ")", "elif", "sep", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'to_clipboard with excel=False ignores the sep argument'", ")", "if", "isinstance", "(", "obj", ",", "ABCDataFrame", ")", ":", "# str(df) has various unhelpful defaults, like truncation", "with", "option_context", "(", "'display.max_colwidth'", ",", "999999", ")", ":", "objstr", "=", "obj", ".", "to_string", "(", "*", "*", "kwargs", ")", "else", ":", "objstr", "=", "str", "(", "obj", ")", "clipboard_set", "(", "objstr", ")" ]
Attempt to write text representation of object to the system clipboard. The clipboard can then be pasted into Excel, for example. Parameters ---------- obj : the object to write to the clipboard excel : boolean, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel. if False, write a string representation of the object to the clipboard sep : optional, defaults to tab other keywords are passed to to_csv Notes ----- Requirements for your platform - Linux: xclip, or xsel (with gtk or PyQt4 modules) - Windows: - OS X:
[ "Attempt", "to", "write", "text", "representation", "of", "object", "to", "the", "system", "clipboard", "The", "clipboard", "can", "be", "then", "pasted", "into", "Excel", "for", "example", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/clipboards.py#L75-L132
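A matching write-side sketch, again assuming a clipboard backend is available:

import pandas as pd

df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})
df.to_clipboard(excel=True)             # tab-separated CSV text, pastes cleanly into Excel
df.to_clipboard(excel=False, sep=None)  # plain string repr; a non-None sep is ignored with a warning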
20,037
pandas-dev/pandas
pandas/io/html.py
_get_skiprows
def _get_skiprows(skiprows): """Get an iterator given an integer, slice or container. Parameters ---------- skiprows : int, slice, container The iterator to use to skip rows; can also be a slice. Raises ------ TypeError * If `skiprows` is not a slice, integer, or Container Returns ------- it : iterable A proper iterator to use to skip rows of a DataFrame. """ if isinstance(skiprows, slice): return lrange(skiprows.start or 0, skiprows.stop, skiprows.step or 1) elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows): return skiprows elif skiprows is None: return 0 raise TypeError('%r is not a valid type for skipping rows' % type(skiprows).__name__)
python
def _get_skiprows(skiprows): """Get an iterator given an integer, slice or container. Parameters ---------- skiprows : int, slice, container The iterator to use to skip rows; can also be a slice. Raises ------ TypeError * If `skiprows` is not a slice, integer, or Container Returns ------- it : iterable A proper iterator to use to skip rows of a DataFrame. """ if isinstance(skiprows, slice): return lrange(skiprows.start or 0, skiprows.stop, skiprows.step or 1) elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows): return skiprows elif skiprows is None: return 0 raise TypeError('%r is not a valid type for skipping rows' % type(skiprows).__name__)
[ "def", "_get_skiprows", "(", "skiprows", ")", ":", "if", "isinstance", "(", "skiprows", ",", "slice", ")", ":", "return", "lrange", "(", "skiprows", ".", "start", "or", "0", ",", "skiprows", ".", "stop", ",", "skiprows", ".", "step", "or", "1", ")", "elif", "isinstance", "(", "skiprows", ",", "numbers", ".", "Integral", ")", "or", "is_list_like", "(", "skiprows", ")", ":", "return", "skiprows", "elif", "skiprows", "is", "None", ":", "return", "0", "raise", "TypeError", "(", "'%r is not a valid type for skipping rows'", "%", "type", "(", "skiprows", ")", ".", "__name__", ")" ]
Get an iterator given an integer, slice or container. Parameters ---------- skiprows : int, slice, container The iterator to use to skip rows; can also be a slice. Raises ------ TypeError * If `skiprows` is not a slice, integer, or Container Returns ------- it : iterable A proper iterator to use to skip rows of a DataFrame.
[ "Get", "an", "iterator", "given", "an", "integer", "slice", "or", "container", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L85-L110
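A few hedged call sketches for the private helper above (imported from pandas.io.html at this sha; lrange comes from pandas.compat and is just list(range(...))):

from pandas.io.html import _get_skiprows   # private helper, per the record above

_get_skiprows(slice(0, 6, 2))   # -> [0, 2, 4]
_get_skiprows([1, 3, 5])        # list-likes pass through unchanged
_get_skiprows(3)                # integers pass through unchanged
_get_skiprows(None)             # -> 0
# _get_skiprows('2')            # would raise TypeError: not a valid type for skipping rows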
20,038
pandas-dev/pandas
pandas/io/html.py
_read
def _read(obj): """Try to read from a url, file or string. Parameters ---------- obj : str, unicode, or file-like Returns ------- raw_text : str """ if _is_url(obj): with urlopen(obj) as url: text = url.read() elif hasattr(obj, 'read'): text = obj.read() elif isinstance(obj, (str, bytes)): text = obj try: if os.path.isfile(text): with open(text, 'rb') as f: return f.read() except (TypeError, ValueError): pass else: raise TypeError("Cannot read object of type %r" % type(obj).__name__) return text
python
def _read(obj): """Try to read from a url, file or string. Parameters ---------- obj : str, unicode, or file-like Returns ------- raw_text : str """ if _is_url(obj): with urlopen(obj) as url: text = url.read() elif hasattr(obj, 'read'): text = obj.read() elif isinstance(obj, (str, bytes)): text = obj try: if os.path.isfile(text): with open(text, 'rb') as f: return f.read() except (TypeError, ValueError): pass else: raise TypeError("Cannot read object of type %r" % type(obj).__name__) return text
[ "def", "_read", "(", "obj", ")", ":", "if", "_is_url", "(", "obj", ")", ":", "with", "urlopen", "(", "obj", ")", "as", "url", ":", "text", "=", "url", ".", "read", "(", ")", "elif", "hasattr", "(", "obj", ",", "'read'", ")", ":", "text", "=", "obj", ".", "read", "(", ")", "elif", "isinstance", "(", "obj", ",", "(", "str", ",", "bytes", ")", ")", ":", "text", "=", "obj", "try", ":", "if", "os", ".", "path", ".", "isfile", "(", "text", ")", ":", "with", "open", "(", "text", ",", "'rb'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "pass", "else", ":", "raise", "TypeError", "(", "\"Cannot read object of type %r\"", "%", "type", "(", "obj", ")", ".", "__name__", ")", "return", "text" ]
Try to read from a url, file or string. Parameters ---------- obj : str, unicode, or file-like Returns ------- raw_text : str
[ "Try", "to", "read", "from", "a", "url", "file", "or", "string", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L113-L139
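A brief sketch of the three input kinds the helper above accepts (private import path per the record; the URL is purely illustrative):

from pandas.io.html import _read   # private helper, per the record above

_read('<table><tr><td>1</td></tr></table>')   # raw HTML string is returned as-is
_read('https://example.com/page.html')        # URLs are fetched via urlopen
# file paths and file-like objects are read and their contents returned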
20,039
pandas-dev/pandas
pandas/io/html.py
_build_xpath_expr
def _build_xpath_expr(attrs): """Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes. """ # give class attribute as class_ because class is a python keyword if 'class_' in attrs: attrs['class'] = attrs.pop('class_') s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()] return '[{expr}]'.format(expr=' and '.join(s))
python
def _build_xpath_expr(attrs): """Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes. """ # give class attribute as class_ because class is a python keyword if 'class_' in attrs: attrs['class'] = attrs.pop('class_') s = ["@{key}={val!r}".format(key=k, val=v) for k, v in attrs.items()] return '[{expr}]'.format(expr=' and '.join(s))
[ "def", "_build_xpath_expr", "(", "attrs", ")", ":", "# give class attribute as class_ because class is a python keyword", "if", "'class_'", "in", "attrs", ":", "attrs", "[", "'class'", "]", "=", "attrs", ".", "pop", "(", "'class_'", ")", "s", "=", "[", "\"@{key}={val!r}\"", ".", "format", "(", "key", "=", "k", ",", "val", "=", "v", ")", "for", "k", ",", "v", "in", "attrs", ".", "items", "(", ")", "]", "return", "'[{expr}]'", ".", "format", "(", "expr", "=", "' and '", ".", "join", "(", "s", ")", ")" ]
Build an xpath expression to simulate bs4's ability to pass in kwargs to search for attributes when using the lxml parser. Parameters ---------- attrs : dict A dict of HTML attributes. These are NOT checked for validity. Returns ------- expr : unicode An XPath expression that checks for the given HTML attributes.
[ "Build", "an", "xpath", "expression", "to", "simulate", "bs4", "s", "ability", "to", "pass", "in", "kwargs", "to", "search", "for", "attributes", "when", "using", "the", "lxml", "parser", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L601-L620
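A quick sketch of the expression the helper above builds (private import path per the record):

from pandas.io.html import _build_xpath_expr   # private helper, per the record above

_build_xpath_expr({'id': 'table'})          # -> "[@id='table']"
_build_xpath_expr({'class_': 'wikitable'})  # class_ is rewritten to class before formatting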
20,040
pandas-dev/pandas
pandas/io/html.py
_parser_dispatch
def _parser_dispatch(flavor): """Choose the parser based on the input flavor. Parameters ---------- flavor : str The type of parser to use. This must be a valid backend. Returns ------- cls : _HtmlFrameParser subclass The parser class based on the requested input flavor. Raises ------ ValueError * If `flavor` is not a valid backend. ImportError * If you do not have the requested `flavor` """ valid_parsers = list(_valid_parsers.keys()) if flavor not in valid_parsers: raise ValueError('{invalid!r} is not a valid flavor, valid flavors ' 'are {valid}' .format(invalid=flavor, valid=valid_parsers)) if flavor in ('bs4', 'html5lib'): if not _HAS_HTML5LIB: raise ImportError("html5lib not found, please install it") if not _HAS_BS4: raise ImportError( "BeautifulSoup4 (bs4) not found, please install it") import bs4 if LooseVersion(bs4.__version__) <= LooseVersion('4.2.0'): raise ValueError("A minimum version of BeautifulSoup 4.2.1 " "is required") else: if not _HAS_LXML: raise ImportError("lxml not found, please install it") return _valid_parsers[flavor]
python
def _parser_dispatch(flavor): """Choose the parser based on the input flavor. Parameters ---------- flavor : str The type of parser to use. This must be a valid backend. Returns ------- cls : _HtmlFrameParser subclass The parser class based on the requested input flavor. Raises ------ ValueError * If `flavor` is not a valid backend. ImportError * If you do not have the requested `flavor` """ valid_parsers = list(_valid_parsers.keys()) if flavor not in valid_parsers: raise ValueError('{invalid!r} is not a valid flavor, valid flavors ' 'are {valid}' .format(invalid=flavor, valid=valid_parsers)) if flavor in ('bs4', 'html5lib'): if not _HAS_HTML5LIB: raise ImportError("html5lib not found, please install it") if not _HAS_BS4: raise ImportError( "BeautifulSoup4 (bs4) not found, please install it") import bs4 if LooseVersion(bs4.__version__) <= LooseVersion('4.2.0'): raise ValueError("A minimum version of BeautifulSoup 4.2.1 " "is required") else: if not _HAS_LXML: raise ImportError("lxml not found, please install it") return _valid_parsers[flavor]
[ "def", "_parser_dispatch", "(", "flavor", ")", ":", "valid_parsers", "=", "list", "(", "_valid_parsers", ".", "keys", "(", ")", ")", "if", "flavor", "not", "in", "valid_parsers", ":", "raise", "ValueError", "(", "'{invalid!r} is not a valid flavor, valid flavors '", "'are {valid}'", ".", "format", "(", "invalid", "=", "flavor", ",", "valid", "=", "valid_parsers", ")", ")", "if", "flavor", "in", "(", "'bs4'", ",", "'html5lib'", ")", ":", "if", "not", "_HAS_HTML5LIB", ":", "raise", "ImportError", "(", "\"html5lib not found, please install it\"", ")", "if", "not", "_HAS_BS4", ":", "raise", "ImportError", "(", "\"BeautifulSoup4 (bs4) not found, please install it\"", ")", "import", "bs4", "if", "LooseVersion", "(", "bs4", ".", "__version__", ")", "<=", "LooseVersion", "(", "'4.2.0'", ")", ":", "raise", "ValueError", "(", "\"A minimum version of BeautifulSoup 4.2.1 \"", "\"is required\"", ")", "else", ":", "if", "not", "_HAS_LXML", ":", "raise", "ImportError", "(", "\"lxml not found, please install it\"", ")", "return", "_valid_parsers", "[", "flavor", "]" ]
Choose the parser based on the input flavor. Parameters ---------- flavor : str The type of parser to use. This must be a valid backend. Returns ------- cls : _HtmlFrameParser subclass The parser class based on the requested input flavor. Raises ------ ValueError * If `flavor` is not a valid backend. ImportError * If you do not have the requested `flavor`
[ "Choose", "the", "parser", "based", "on", "the", "input", "flavor", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L806-L846
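A hedged sketch of the dispatch behaviour; the concrete parser classes returned are internal, so only the selection logic is illustrated:

from pandas.io.html import _parser_dispatch   # private helper, per the record above

_parser_dispatch('lxml')    # lxml-backed parser class (requires lxml)
_parser_dispatch('bs4')     # BeautifulSoup-backed parser class (requires bs4 >= 4.2.1 and html5lib)
# _parser_dispatch('magic') # would raise ValueError: not a valid flavor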
20,041
pandas-dev/pandas
pandas/io/html.py
read_html
def read_html(io, match='.+', flavor=None, header=None, index_col=None, skiprows=None, attrs=None, parse_dates=False, tupleize_cols=None, thousands=',', encoding=None, decimal='.', converters=None, na_values=None, keep_default_na=True, displayed_only=True): r"""Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters ---------- io : str or file-like A URL, a file-like object, or a raw string containing HTML. Note that lxml only accepts the http, ftp and file url protocols. If you have a URL that starts with ``'https'`` you might try removing the ``'s'``. match : str or compiled regular expression, optional The set of tables containing text matching this regex or string will be returned. Unless the HTML is extremely simple you will probably need to pass a non-empty string here. Defaults to '.+' (match any non-empty string). The default value will return all tables contained on a page. This value is converted to a regular expression so that there is consistent behavior between Beautiful Soup and lxml. flavor : str or None, container of strings The parsing engine to use. 'bs4' and 'html5lib' are synonymous with each other, they are both there for backwards compatibility. The default of ``None`` tries to use ``lxml`` to parse and if that fails it falls back on ``bs4`` + ``html5lib``. header : int or list-like or None, optional The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to make the columns headers. index_col : int or list-like or None, optional The column (or list of columns) to use to create the index. skiprows : int or list-like or slice or None, optional 0-based. Number of rows to skip after parsing the column integer. If a sequence of integers or a slice is given, will skip the rows indexed by that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. attrs : dict or None, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being passed to lxml or Beautiful Soup. However, these attributes must be valid HTML table attributes to work correctly. For example, :: attrs = {'id': 'table'} is a valid attribute dictionary because the 'id' HTML tag attribute is a valid HTML attribute for *any* HTML tag as per `this document <http://www.w3.org/TR/html-markup/global-attributes.html>`__. :: attrs = {'asdf': 'table'} is *not* a valid attribute dictionary because 'asdf' is not a valid HTML attribute even if it is a valid XML attribute. Valid HTML 4.01 table attributes can be found `here <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A working draft of the HTML 5 spec can be found `here <http://www.w3.org/TR/html-markup/table.html>`__. It contains the latest information on table attributes for the modern web. parse_dates : bool, optional See :func:`~read_csv` for more details. tupleize_cols : bool, optional If ``False`` try to parse multiple header rows into a :class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to ``False``. .. deprecated:: 0.21.0 This argument will be removed and will always convert to MultiIndex thousands : str, optional Separator to use to parse thousands. Defaults to ``','``. encoding : str or None, optional The encoding used to decode the web page. Defaults to ``None``.``None`` preserves the previous encoding behavior, which depends on the underlying parser library (e.g., the parser library will try to use the encoding provided by the document). 
decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). .. versionadded:: 0.19.0 converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the cell (not column) content, and return the transformed content. .. versionadded:: 0.19.0 na_values : iterable, default None Custom NA values .. versionadded:: 0.19.0 keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to .. versionadded:: 0.19.0 displayed_only : bool, default True Whether elements with "display: none" should be parsed .. versionadded:: 0.23.0 Returns ------- dfs : list of DataFrames See Also -------- read_csv Notes ----- Before using this function you should read the :ref:`gotchas about the HTML parsing libraries <io.html.gotchas>`. Expect to do some cleanup after you call this function. For example, you might need to manually assign column names if the column names are converted to NaN when you pass the `header=0` argument. We try to assume as little as possible about the structure of the table and push the idiosyncrasies of the HTML contained in the table to the user. This function searches for ``<table>`` elements and only for ``<tr>`` and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>`` element in the table. ``<td>`` stands for "table data". This function attempts to properly handle ``colspan`` and ``rowspan`` attributes. If the function has a ``<thead>`` argument, it is used to construct the header, otherwise the function attempts to find the header within the body (by putting rows with only ``<th>`` elements into the header). .. versionadded:: 0.21.0 Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. This function will *always* return a list of :class:`DataFrame` *or* it will fail, e.g., it will *not* return an empty list. Examples -------- See the :ref:`read_html documentation in the IO section of the docs <io.read_html>` for some examples of reading in HTML tables. """ _importers() # Type check here. We don't want to parse only to fail because of an # invalid value of an integer skiprows. if isinstance(skiprows, numbers.Integral) and skiprows < 0: raise ValueError('cannot skip rows starting from the end of the ' 'data (you passed a negative value)') _validate_header_arg(header) return _parse(flavor=flavor, io=io, match=match, header=header, index_col=index_col, skiprows=skiprows, parse_dates=parse_dates, tupleize_cols=tupleize_cols, thousands=thousands, attrs=attrs, encoding=encoding, decimal=decimal, converters=converters, na_values=na_values, keep_default_na=keep_default_na, displayed_only=displayed_only)
python
def read_html(io, match='.+', flavor=None, header=None, index_col=None, skiprows=None, attrs=None, parse_dates=False, tupleize_cols=None, thousands=',', encoding=None, decimal='.', converters=None, na_values=None, keep_default_na=True, displayed_only=True): r"""Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters ---------- io : str or file-like A URL, a file-like object, or a raw string containing HTML. Note that lxml only accepts the http, ftp and file url protocols. If you have a URL that starts with ``'https'`` you might try removing the ``'s'``. match : str or compiled regular expression, optional The set of tables containing text matching this regex or string will be returned. Unless the HTML is extremely simple you will probably need to pass a non-empty string here. Defaults to '.+' (match any non-empty string). The default value will return all tables contained on a page. This value is converted to a regular expression so that there is consistent behavior between Beautiful Soup and lxml. flavor : str or None, container of strings The parsing engine to use. 'bs4' and 'html5lib' are synonymous with each other, they are both there for backwards compatibility. The default of ``None`` tries to use ``lxml`` to parse and if that fails it falls back on ``bs4`` + ``html5lib``. header : int or list-like or None, optional The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to make the columns headers. index_col : int or list-like or None, optional The column (or list of columns) to use to create the index. skiprows : int or list-like or slice or None, optional 0-based. Number of rows to skip after parsing the column integer. If a sequence of integers or a slice is given, will skip the rows indexed by that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. attrs : dict or None, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being passed to lxml or Beautiful Soup. However, these attributes must be valid HTML table attributes to work correctly. For example, :: attrs = {'id': 'table'} is a valid attribute dictionary because the 'id' HTML tag attribute is a valid HTML attribute for *any* HTML tag as per `this document <http://www.w3.org/TR/html-markup/global-attributes.html>`__. :: attrs = {'asdf': 'table'} is *not* a valid attribute dictionary because 'asdf' is not a valid HTML attribute even if it is a valid XML attribute. Valid HTML 4.01 table attributes can be found `here <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A working draft of the HTML 5 spec can be found `here <http://www.w3.org/TR/html-markup/table.html>`__. It contains the latest information on table attributes for the modern web. parse_dates : bool, optional See :func:`~read_csv` for more details. tupleize_cols : bool, optional If ``False`` try to parse multiple header rows into a :class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to ``False``. .. deprecated:: 0.21.0 This argument will be removed and will always convert to MultiIndex thousands : str, optional Separator to use to parse thousands. Defaults to ``','``. encoding : str or None, optional The encoding used to decode the web page. Defaults to ``None``.``None`` preserves the previous encoding behavior, which depends on the underlying parser library (e.g., the parser library will try to use the encoding provided by the document). 
decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). .. versionadded:: 0.19.0 converters : dict, default None Dict of functions for converting values in certain columns. Keys can either be integers or column labels, values are functions that take one input argument, the cell (not column) content, and return the transformed content. .. versionadded:: 0.19.0 na_values : iterable, default None Custom NA values .. versionadded:: 0.19.0 keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to .. versionadded:: 0.19.0 displayed_only : bool, default True Whether elements with "display: none" should be parsed .. versionadded:: 0.23.0 Returns ------- dfs : list of DataFrames See Also -------- read_csv Notes ----- Before using this function you should read the :ref:`gotchas about the HTML parsing libraries <io.html.gotchas>`. Expect to do some cleanup after you call this function. For example, you might need to manually assign column names if the column names are converted to NaN when you pass the `header=0` argument. We try to assume as little as possible about the structure of the table and push the idiosyncrasies of the HTML contained in the table to the user. This function searches for ``<table>`` elements and only for ``<tr>`` and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>`` element in the table. ``<td>`` stands for "table data". This function attempts to properly handle ``colspan`` and ``rowspan`` attributes. If the function has a ``<thead>`` argument, it is used to construct the header, otherwise the function attempts to find the header within the body (by putting rows with only ``<th>`` elements into the header). .. versionadded:: 0.21.0 Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. This function will *always* return a list of :class:`DataFrame` *or* it will fail, e.g., it will *not* return an empty list. Examples -------- See the :ref:`read_html documentation in the IO section of the docs <io.read_html>` for some examples of reading in HTML tables. """ _importers() # Type check here. We don't want to parse only to fail because of an # invalid value of an integer skiprows. if isinstance(skiprows, numbers.Integral) and skiprows < 0: raise ValueError('cannot skip rows starting from the end of the ' 'data (you passed a negative value)') _validate_header_arg(header) return _parse(flavor=flavor, io=io, match=match, header=header, index_col=index_col, skiprows=skiprows, parse_dates=parse_dates, tupleize_cols=tupleize_cols, thousands=thousands, attrs=attrs, encoding=encoding, decimal=decimal, converters=converters, na_values=na_values, keep_default_na=keep_default_na, displayed_only=displayed_only)
[ "def", "read_html", "(", "io", ",", "match", "=", "'.+'", ",", "flavor", "=", "None", ",", "header", "=", "None", ",", "index_col", "=", "None", ",", "skiprows", "=", "None", ",", "attrs", "=", "None", ",", "parse_dates", "=", "False", ",", "tupleize_cols", "=", "None", ",", "thousands", "=", "','", ",", "encoding", "=", "None", ",", "decimal", "=", "'.'", ",", "converters", "=", "None", ",", "na_values", "=", "None", ",", "keep_default_na", "=", "True", ",", "displayed_only", "=", "True", ")", ":", "_importers", "(", ")", "# Type check here. We don't want to parse only to fail because of an", "# invalid value of an integer skiprows.", "if", "isinstance", "(", "skiprows", ",", "numbers", ".", "Integral", ")", "and", "skiprows", "<", "0", ":", "raise", "ValueError", "(", "'cannot skip rows starting from the end of the '", "'data (you passed a negative value)'", ")", "_validate_header_arg", "(", "header", ")", "return", "_parse", "(", "flavor", "=", "flavor", ",", "io", "=", "io", ",", "match", "=", "match", ",", "header", "=", "header", ",", "index_col", "=", "index_col", ",", "skiprows", "=", "skiprows", ",", "parse_dates", "=", "parse_dates", ",", "tupleize_cols", "=", "tupleize_cols", ",", "thousands", "=", "thousands", ",", "attrs", "=", "attrs", ",", "encoding", "=", "encoding", ",", "decimal", "=", "decimal", ",", "converters", "=", "converters", ",", "na_values", "=", "na_values", ",", "keep_default_na", "=", "keep_default_na", ",", "displayed_only", "=", "displayed_only", ")" ]
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects. Parameters ---------- io : str or file-like A URL, a file-like object, or a raw string containing HTML. Note that lxml only accepts the http, ftp and file url protocols. If you have a URL that starts with ``'https'`` you might try removing the ``'s'``. match : str or compiled regular expression, optional The set of tables containing text matching this regex or string will be returned. Unless the HTML is extremely simple you will probably need to pass a non-empty string here. Defaults to '.+' (match any non-empty string). The default value will return all tables contained on a page. This value is converted to a regular expression so that there is consistent behavior between Beautiful Soup and lxml. flavor : str or None, container of strings The parsing engine to use. 'bs4' and 'html5lib' are synonymous with each other, they are both there for backwards compatibility. The default of ``None`` tries to use ``lxml`` to parse and if that fails it falls back on ``bs4`` + ``html5lib``. header : int or list-like or None, optional The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to make the columns headers. index_col : int or list-like or None, optional The column (or list of columns) to use to create the index. skiprows : int or list-like or slice or None, optional 0-based. Number of rows to skip after parsing the column integer. If a sequence of integers or a slice is given, will skip the rows indexed by that sequence. Note that a single element sequence means 'skip the nth row' whereas an integer means 'skip n rows'. attrs : dict or None, optional This is a dictionary of attributes that you can pass to use to identify the table in the HTML. These are not checked for validity before being passed to lxml or Beautiful Soup. However, these attributes must be valid HTML table attributes to work correctly. For example, :: attrs = {'id': 'table'} is a valid attribute dictionary because the 'id' HTML tag attribute is a valid HTML attribute for *any* HTML tag as per `this document <http://www.w3.org/TR/html-markup/global-attributes.html>`__. :: attrs = {'asdf': 'table'} is *not* a valid attribute dictionary because 'asdf' is not a valid HTML attribute even if it is a valid XML attribute. Valid HTML 4.01 table attributes can be found `here <http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A working draft of the HTML 5 spec can be found `here <http://www.w3.org/TR/html-markup/table.html>`__. It contains the latest information on table attributes for the modern web. parse_dates : bool, optional See :func:`~read_csv` for more details. tupleize_cols : bool, optional If ``False`` try to parse multiple header rows into a :class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to ``False``. .. deprecated:: 0.21.0 This argument will be removed and will always convert to MultiIndex thousands : str, optional Separator to use to parse thousands. Defaults to ``','``. encoding : str or None, optional The encoding used to decode the web page. Defaults to ``None``.``None`` preserves the previous encoding behavior, which depends on the underlying parser library (e.g., the parser library will try to use the encoding provided by the document). decimal : str, default '.' Character to recognize as decimal point (e.g. use ',' for European data). .. versionadded:: 0.19.0 converters : dict, default None Dict of functions for converting values in certain columns. 
Keys can either be integers or column labels, values are functions that take one input argument, the cell (not column) content, and return the transformed content. .. versionadded:: 0.19.0 na_values : iterable, default None Custom NA values .. versionadded:: 0.19.0 keep_default_na : bool, default True If na_values are specified and keep_default_na is False the default NaN values are overridden, otherwise they're appended to .. versionadded:: 0.19.0 displayed_only : bool, default True Whether elements with "display: none" should be parsed .. versionadded:: 0.23.0 Returns ------- dfs : list of DataFrames See Also -------- read_csv Notes ----- Before using this function you should read the :ref:`gotchas about the HTML parsing libraries <io.html.gotchas>`. Expect to do some cleanup after you call this function. For example, you might need to manually assign column names if the column names are converted to NaN when you pass the `header=0` argument. We try to assume as little as possible about the structure of the table and push the idiosyncrasies of the HTML contained in the table to the user. This function searches for ``<table>`` elements and only for ``<tr>`` and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>`` element in the table. ``<td>`` stands for "table data". This function attempts to properly handle ``colspan`` and ``rowspan`` attributes. If the function has a ``<thead>`` argument, it is used to construct the header, otherwise the function attempts to find the header within the body (by putting rows with only ``<th>`` elements into the header). .. versionadded:: 0.21.0 Similar to :func:`~read_csv` the `header` argument is applied **after** `skiprows` is applied. This function will *always* return a list of :class:`DataFrame` *or* it will fail, e.g., it will *not* return an empty list. Examples -------- See the :ref:`read_html documentation in the IO section of the docs <io.read_html>` for some examples of reading in HTML tables.
[ "r", "Read", "HTML", "tables", "into", "a", "list", "of", "DataFrame", "objects", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L921-L1088
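A minimal usage sketch; the URL and match string below are purely illustrative, and a parser backend (lxml, or bs4 + html5lib) must be installed:

import pandas as pd

url = 'https://example.com/stats.html'                  # hypothetical page containing HTML tables
dfs = pd.read_html(url, match='Population', header=0)   # always returns a list of DataFrames
len(dfs), dfs[0].head()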
20,042
pandas-dev/pandas
pandas/io/html.py
_HtmlFrameParser.parse_tables
def parse_tables(self): """ Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables. """ tables = self._parse_tables(self._build_doc(), self.match, self.attrs) return (self._parse_thead_tbody_tfoot(table) for table in tables)
python
def parse_tables(self): """ Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables. """ tables = self._parse_tables(self._build_doc(), self.match, self.attrs) return (self._parse_thead_tbody_tfoot(table) for table in tables)
[ "def", "parse_tables", "(", "self", ")", ":", "tables", "=", "self", ".", "_parse_tables", "(", "self", ".", "_build_doc", "(", ")", ",", "self", ".", "match", ",", "self", ".", "attrs", ")", "return", "(", "self", ".", "_parse_thead_tbody_tfoot", "(", "table", ")", "for", "table", "in", "tables", ")" ]
Parse and return all tables from the DOM. Returns ------- list of parsed (header, body, footer) tuples from tables.
[ "Parse", "and", "return", "all", "tables", "from", "the", "DOM", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L208-L217
20,043
pandas-dev/pandas
pandas/io/html.py
_HtmlFrameParser._parse_thead_tbody_tfoot
def _parse_thead_tbody_tfoot(self, table_html): """ Given a table, return parsed header, body, and foot. Parameters ---------- table_html : node-like Returns ------- tuple of (header, body, footer), each a list of list-of-text rows. Notes ----- Header and body are lists-of-lists. Top level list is a list of rows. Each row is a list of str text. Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body, and footer, otherwise: - Put all rows into body - Move rows from top of body to header only if all elements inside row are <th> - Move rows from bottom of body to footer only if all elements inside row are <th> """ header_rows = self._parse_thead_tr(table_html) body_rows = self._parse_tbody_tr(table_html) footer_rows = self._parse_tfoot_tr(table_html) def row_is_all_th(row): return all(self._equals_tag(t, 'th') for t in self._parse_td(row)) if not header_rows: # The table has no <thead>. Move the top all-<th> rows from # body_rows to header_rows. (This is a common case because many # tables in the wild have no <thead> or <tfoot> while body_rows and row_is_all_th(body_rows[0]): header_rows.append(body_rows.pop(0)) header = self._expand_colspan_rowspan(header_rows) body = self._expand_colspan_rowspan(body_rows) footer = self._expand_colspan_rowspan(footer_rows) return header, body, footer
python
def _parse_thead_tbody_tfoot(self, table_html): """ Given a table, return parsed header, body, and foot. Parameters ---------- table_html : node-like Returns ------- tuple of (header, body, footer), each a list of list-of-text rows. Notes ----- Header and body are lists-of-lists. Top level list is a list of rows. Each row is a list of str text. Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body, and footer, otherwise: - Put all rows into body - Move rows from top of body to header only if all elements inside row are <th> - Move rows from bottom of body to footer only if all elements inside row are <th> """ header_rows = self._parse_thead_tr(table_html) body_rows = self._parse_tbody_tr(table_html) footer_rows = self._parse_tfoot_tr(table_html) def row_is_all_th(row): return all(self._equals_tag(t, 'th') for t in self._parse_td(row)) if not header_rows: # The table has no <thead>. Move the top all-<th> rows from # body_rows to header_rows. (This is a common case because many # tables in the wild have no <thead> or <tfoot> while body_rows and row_is_all_th(body_rows[0]): header_rows.append(body_rows.pop(0)) header = self._expand_colspan_rowspan(header_rows) body = self._expand_colspan_rowspan(body_rows) footer = self._expand_colspan_rowspan(footer_rows) return header, body, footer
[ "def", "_parse_thead_tbody_tfoot", "(", "self", ",", "table_html", ")", ":", "header_rows", "=", "self", ".", "_parse_thead_tr", "(", "table_html", ")", "body_rows", "=", "self", ".", "_parse_tbody_tr", "(", "table_html", ")", "footer_rows", "=", "self", ".", "_parse_tfoot_tr", "(", "table_html", ")", "def", "row_is_all_th", "(", "row", ")", ":", "return", "all", "(", "self", ".", "_equals_tag", "(", "t", ",", "'th'", ")", "for", "t", "in", "self", ".", "_parse_td", "(", "row", ")", ")", "if", "not", "header_rows", ":", "# The table has no <thead>. Move the top all-<th> rows from", "# body_rows to header_rows. (This is a common case because many", "# tables in the wild have no <thead> or <tfoot>", "while", "body_rows", "and", "row_is_all_th", "(", "body_rows", "[", "0", "]", ")", ":", "header_rows", ".", "append", "(", "body_rows", ".", "pop", "(", "0", ")", ")", "header", "=", "self", ".", "_expand_colspan_rowspan", "(", "header_rows", ")", "body", "=", "self", ".", "_expand_colspan_rowspan", "(", "body_rows", ")", "footer", "=", "self", ".", "_expand_colspan_rowspan", "(", "footer_rows", ")", "return", "header", ",", "body", ",", "footer" ]
Given a table, return parsed header, body, and foot. Parameters ---------- table_html : node-like Returns ------- tuple of (header, body, footer), each a list of list-of-text rows. Notes ----- Header and body are lists-of-lists. Top level list is a list of rows. Each row is a list of str text. Logic: Use <thead>, <tbody>, <tfoot> elements to identify header, body, and footer, otherwise: - Put all rows into body - Move rows from top of body to header only if all elements inside row are <th> - Move rows from bottom of body to footer only if all elements inside row are <th>
[ "Given", "a", "table", "return", "parsed", "header", "body", "and", "foot", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L375-L420
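The header-promotion rule described above can be seen through the public API; a small sketch with a table that has no <thead> but an all-<th> first row:

import pandas as pd

html = ('<table>'
        '<tr><th>col1</th><th>col2</th></tr>'   # all-<th> row, promoted to the header
        '<tr><td>1</td><td>2</td></tr>'
        '</table>')
pd.read_html(html)[0]
#    col1  col2
# 0     1     2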
20,044
pandas-dev/pandas
pandas/io/html.py
_HtmlFrameParser._handle_hidden_tables
def _handle_hidden_tables(self, tbl_list, attr_name): """ Return list of tables, potentially removing hidden elements Parameters ---------- tbl_list : list of node-like Type of list elements will vary depending upon parser used attr_name : str Name of the accessor for retrieving HTML attributes Returns ------- list of node-like Return type matches `tbl_list` """ if not self.displayed_only: return tbl_list return [x for x in tbl_list if "display:none" not in getattr(x, attr_name).get('style', '').replace(" ", "")]
python
def _handle_hidden_tables(self, tbl_list, attr_name): """ Return list of tables, potentially removing hidden elements Parameters ---------- tbl_list : list of node-like Type of list elements will vary depending upon parser used attr_name : str Name of the accessor for retrieving HTML attributes Returns ------- list of node-like Return type matches `tbl_list` """ if not self.displayed_only: return tbl_list return [x for x in tbl_list if "display:none" not in getattr(x, attr_name).get('style', '').replace(" ", "")]
[ "def", "_handle_hidden_tables", "(", "self", ",", "tbl_list", ",", "attr_name", ")", ":", "if", "not", "self", ".", "displayed_only", ":", "return", "tbl_list", "return", "[", "x", "for", "x", "in", "tbl_list", "if", "\"display:none\"", "not", "in", "getattr", "(", "x", ",", "attr_name", ")", ".", "get", "(", "'style'", ",", "''", ")", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "]" ]
Return list of tables, potentially removing hidden elements Parameters ---------- tbl_list : list of node-like Type of list elements will vary depending upon parser used attr_name : str Name of the accessor for retrieving HTML attributes Returns ------- list of node-like Return type matches `tbl_list`
[ "Return", "list", "of", "tables", "potentially", "removing", "hidden", "elements" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/html.py#L498-L518
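A rough stand-in for the same display:none filtering, assuming each table is just a dict carrying its attributes (the real parsers read them through a parser-specific accessor named by attr_name):

def drop_hidden(tables, displayed_only=True):
    # keep every table unless its inline style collapses to "display:none"
    if not displayed_only:
        return tables
    return [t for t in tables
            if "display:none" not in t.get("style", "").replace(" ", "")]

tables = [{"id": "visible"}, {"id": "hidden", "style": "display: none"}]
print(drop_hidden(tables))   # only the first table survives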
20,045
pandas-dev/pandas
pandas/core/dtypes/concat.py
_get_series_result_type
def _get_series_result_type(result, objs=None): """ return appropriate class of Series concat input is either dict or array-like """ from pandas import SparseSeries, SparseDataFrame, DataFrame # concat Series with axis 1 if isinstance(result, dict): # concat Series with axis 1 if all(isinstance(c, (SparseSeries, SparseDataFrame)) for c in result.values()): return SparseDataFrame else: return DataFrame # otherwise it is a SingleBlockManager (axis = 0) if result._block.is_sparse: return SparseSeries else: return objs[0]._constructor
python
def _get_series_result_type(result, objs=None): """ return appropriate class of Series concat input is either dict or array-like """ from pandas import SparseSeries, SparseDataFrame, DataFrame # concat Series with axis 1 if isinstance(result, dict): # concat Series with axis 1 if all(isinstance(c, (SparseSeries, SparseDataFrame)) for c in result.values()): return SparseDataFrame else: return DataFrame # otherwise it is a SingleBlockManager (axis = 0) if result._block.is_sparse: return SparseSeries else: return objs[0]._constructor
[ "def", "_get_series_result_type", "(", "result", ",", "objs", "=", "None", ")", ":", "from", "pandas", "import", "SparseSeries", ",", "SparseDataFrame", ",", "DataFrame", "# concat Series with axis 1", "if", "isinstance", "(", "result", ",", "dict", ")", ":", "# concat Series with axis 1", "if", "all", "(", "isinstance", "(", "c", ",", "(", "SparseSeries", ",", "SparseDataFrame", ")", ")", "for", "c", "in", "result", ".", "values", "(", ")", ")", ":", "return", "SparseDataFrame", "else", ":", "return", "DataFrame", "# otherwise it is a SingleBlockManager (axis = 0)", "if", "result", ".", "_block", ".", "is_sparse", ":", "return", "SparseSeries", "else", ":", "return", "objs", "[", "0", "]", ".", "_constructor" ]
return appropriate class of Series concat input is either dict or array-like
[ "return", "appropriate", "class", "of", "Series", "concat", "input", "is", "either", "dict", "or", "array", "-", "like" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L59-L79
20,046
pandas-dev/pandas
pandas/core/dtypes/concat.py
_get_frame_result_type
def _get_frame_result_type(result, objs): """ return appropriate class of DataFrame-like concat if all blocks are sparse, return SparseDataFrame otherwise, return 1st obj """ if (result.blocks and ( any(isinstance(obj, ABCSparseDataFrame) for obj in objs))): from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame else: return next(obj for obj in objs if not isinstance(obj, ABCSparseDataFrame))
python
def _get_frame_result_type(result, objs): """ return appropriate class of DataFrame-like concat if all blocks are sparse, return SparseDataFrame otherwise, return 1st obj """ if (result.blocks and ( any(isinstance(obj, ABCSparseDataFrame) for obj in objs))): from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame else: return next(obj for obj in objs if not isinstance(obj, ABCSparseDataFrame))
[ "def", "_get_frame_result_type", "(", "result", ",", "objs", ")", ":", "if", "(", "result", ".", "blocks", "and", "(", "any", "(", "isinstance", "(", "obj", ",", "ABCSparseDataFrame", ")", "for", "obj", "in", "objs", ")", ")", ")", ":", "from", "pandas", ".", "core", ".", "sparse", ".", "api", "import", "SparseDataFrame", "return", "SparseDataFrame", "else", ":", "return", "next", "(", "obj", "for", "obj", "in", "objs", "if", "not", "isinstance", "(", "obj", ",", "ABCSparseDataFrame", ")", ")" ]
return appropriate class of DataFrame-like concat; if all blocks are sparse, return SparseDataFrame, otherwise return 1st obj
[ "return", "appropriate", "class", "of", "DataFrame", "-", "like", "concat", "if", "all", "blocks", "are", "sparse", "return", "SparseDataFrame", "otherwise", "return", "1st", "obj" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L82-L95
20,047
pandas-dev/pandas
pandas/core/dtypes/concat.py
union_categoricals
def union_categoricals(to_union, sort_categories=False, ignore_order=False): """ Combine list-like of Categorical-like, unioning categories. All categories must have the same dtype. .. versionadded:: 0.19.0 Parameters ---------- to_union : list-like of Categorical, CategoricalIndex, or Series with dtype='category' sort_categories : boolean, default False If true, resulting categories will be lexsorted, otherwise they will be ordered as they appear in the data. ignore_order : boolean, default False If true, the ordered attribute of the Categoricals will be ignored. Results in an unordered categorical. .. versionadded:: 0.20.0 Returns ------- result : Categorical Raises ------ TypeError - all inputs do not have the same dtype - all inputs do not have the same ordered property - all inputs are ordered and their categories are not identical - sort_categories=True and Categoricals are ordered ValueError Empty list of categoricals passed Notes ----- To learn more about categories, see `link <http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__ Examples -------- >>> from pandas.api.types import union_categoricals If you want to combine categoricals that do not necessarily have the same categories, `union_categoricals` will combine a list-like of categoricals. The new categories will be the union of the categories being combined. >>> a = pd.Categorical(["b", "c"]) >>> b = pd.Categorical(["a", "b"]) >>> union_categoricals([a, b]) [b, c, a, b] Categories (3, object): [b, c, a] By default, the resulting categories will be ordered as they appear in the `categories` of the data. If you want the categories to be lexsorted, use `sort_categories=True` argument. >>> union_categoricals([a, b], sort_categories=True) [b, c, a, b] Categories (3, object): [a, b, c] `union_categoricals` also works with the case of combining two categoricals of the same categories and order information (e.g. what you could also `append` for). >>> a = pd.Categorical(["a", "b"], ordered=True) >>> b = pd.Categorical(["a", "b", "a"], ordered=True) >>> union_categoricals([a, b]) [a, b, a, b, a] Categories (2, object): [a < b] Raises `TypeError` because the categories are ordered and not identical. >>> a = pd.Categorical(["a", "b"], ordered=True) >>> b = pd.Categorical(["a", "b", "c"], ordered=True) >>> union_categoricals([a, b]) TypeError: to union ordered Categoricals, all categories must be the same New in version 0.20.0 Ordered categoricals with different categories or orderings can be combined by using the `ignore_ordered=True` argument. 
>>> a = pd.Categorical(["a", "b", "c"], ordered=True) >>> b = pd.Categorical(["c", "b", "a"], ordered=True) >>> union_categoricals([a, b], ignore_order=True) [a, b, c, c, b, a] Categories (3, object): [a, b, c] `union_categoricals` also works with a `CategoricalIndex`, or `Series` containing categorical data, but note that the resulting array will always be a plain `Categorical` >>> a = pd.Series(["b", "c"], dtype='category') >>> b = pd.Series(["a", "b"], dtype='category') >>> union_categoricals([a, b]) [b, c, a, b] Categories (3, object): [b, c, a] """ from pandas import Index, Categorical, CategoricalIndex, Series from pandas.core.arrays.categorical import _recode_for_categories if len(to_union) == 0: raise ValueError('No Categoricals to union') def _maybe_unwrap(x): if isinstance(x, (CategoricalIndex, Series)): return x.values elif isinstance(x, Categorical): return x else: raise TypeError("all components to combine must be Categorical") to_union = [_maybe_unwrap(x) for x in to_union] first = to_union[0] if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype) for other in to_union[1:]): raise TypeError("dtype of categories must be the same") ordered = False if all(first.is_dtype_equal(other) for other in to_union[1:]): # identical categories - fastpath categories = first.categories ordered = first.ordered if all(first.categories.equals(other.categories) for other in to_union[1:]): new_codes = np.concatenate([c.codes for c in to_union]) else: codes = [first.codes] + [_recode_for_categories(other.codes, other.categories, first.categories) for other in to_union[1:]] new_codes = np.concatenate(codes) if sort_categories and not ignore_order and ordered: raise TypeError("Cannot use sort_categories=True with " "ordered Categoricals") if sort_categories and not categories.is_monotonic_increasing: categories = categories.sort_values() indexer = categories.get_indexer(first.categories) from pandas.core.algorithms import take_1d new_codes = take_1d(indexer, new_codes, fill_value=-1) elif ignore_order or all(not c.ordered for c in to_union): # different categories - union and recode cats = first.categories.append([c.categories for c in to_union[1:]]) categories = Index(cats.unique()) if sort_categories: categories = categories.sort_values() new_codes = [_recode_for_categories(c.codes, c.categories, categories) for c in to_union] new_codes = np.concatenate(new_codes) else: # ordered - to show a proper error message if all(c.ordered for c in to_union): msg = ("to union ordered Categoricals, " "all categories must be the same") raise TypeError(msg) else: raise TypeError('Categorical.ordered must be the same') if ignore_order: ordered = False return Categorical(new_codes, categories=categories, ordered=ordered, fastpath=True)
python
def union_categoricals(to_union, sort_categories=False, ignore_order=False): """ Combine list-like of Categorical-like, unioning categories. All categories must have the same dtype. .. versionadded:: 0.19.0 Parameters ---------- to_union : list-like of Categorical, CategoricalIndex, or Series with dtype='category' sort_categories : boolean, default False If true, resulting categories will be lexsorted, otherwise they will be ordered as they appear in the data. ignore_order : boolean, default False If true, the ordered attribute of the Categoricals will be ignored. Results in an unordered categorical. .. versionadded:: 0.20.0 Returns ------- result : Categorical Raises ------ TypeError - all inputs do not have the same dtype - all inputs do not have the same ordered property - all inputs are ordered and their categories are not identical - sort_categories=True and Categoricals are ordered ValueError Empty list of categoricals passed Notes ----- To learn more about categories, see `link <http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__ Examples -------- >>> from pandas.api.types import union_categoricals If you want to combine categoricals that do not necessarily have the same categories, `union_categoricals` will combine a list-like of categoricals. The new categories will be the union of the categories being combined. >>> a = pd.Categorical(["b", "c"]) >>> b = pd.Categorical(["a", "b"]) >>> union_categoricals([a, b]) [b, c, a, b] Categories (3, object): [b, c, a] By default, the resulting categories will be ordered as they appear in the `categories` of the data. If you want the categories to be lexsorted, use `sort_categories=True` argument. >>> union_categoricals([a, b], sort_categories=True) [b, c, a, b] Categories (3, object): [a, b, c] `union_categoricals` also works with the case of combining two categoricals of the same categories and order information (e.g. what you could also `append` for). >>> a = pd.Categorical(["a", "b"], ordered=True) >>> b = pd.Categorical(["a", "b", "a"], ordered=True) >>> union_categoricals([a, b]) [a, b, a, b, a] Categories (2, object): [a < b] Raises `TypeError` because the categories are ordered and not identical. >>> a = pd.Categorical(["a", "b"], ordered=True) >>> b = pd.Categorical(["a", "b", "c"], ordered=True) >>> union_categoricals([a, b]) TypeError: to union ordered Categoricals, all categories must be the same New in version 0.20.0 Ordered categoricals with different categories or orderings can be combined by using the `ignore_ordered=True` argument. 
>>> a = pd.Categorical(["a", "b", "c"], ordered=True) >>> b = pd.Categorical(["c", "b", "a"], ordered=True) >>> union_categoricals([a, b], ignore_order=True) [a, b, c, c, b, a] Categories (3, object): [a, b, c] `union_categoricals` also works with a `CategoricalIndex`, or `Series` containing categorical data, but note that the resulting array will always be a plain `Categorical` >>> a = pd.Series(["b", "c"], dtype='category') >>> b = pd.Series(["a", "b"], dtype='category') >>> union_categoricals([a, b]) [b, c, a, b] Categories (3, object): [b, c, a] """ from pandas import Index, Categorical, CategoricalIndex, Series from pandas.core.arrays.categorical import _recode_for_categories if len(to_union) == 0: raise ValueError('No Categoricals to union') def _maybe_unwrap(x): if isinstance(x, (CategoricalIndex, Series)): return x.values elif isinstance(x, Categorical): return x else: raise TypeError("all components to combine must be Categorical") to_union = [_maybe_unwrap(x) for x in to_union] first = to_union[0] if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype) for other in to_union[1:]): raise TypeError("dtype of categories must be the same") ordered = False if all(first.is_dtype_equal(other) for other in to_union[1:]): # identical categories - fastpath categories = first.categories ordered = first.ordered if all(first.categories.equals(other.categories) for other in to_union[1:]): new_codes = np.concatenate([c.codes for c in to_union]) else: codes = [first.codes] + [_recode_for_categories(other.codes, other.categories, first.categories) for other in to_union[1:]] new_codes = np.concatenate(codes) if sort_categories and not ignore_order and ordered: raise TypeError("Cannot use sort_categories=True with " "ordered Categoricals") if sort_categories and not categories.is_monotonic_increasing: categories = categories.sort_values() indexer = categories.get_indexer(first.categories) from pandas.core.algorithms import take_1d new_codes = take_1d(indexer, new_codes, fill_value=-1) elif ignore_order or all(not c.ordered for c in to_union): # different categories - union and recode cats = first.categories.append([c.categories for c in to_union[1:]]) categories = Index(cats.unique()) if sort_categories: categories = categories.sort_values() new_codes = [_recode_for_categories(c.codes, c.categories, categories) for c in to_union] new_codes = np.concatenate(new_codes) else: # ordered - to show a proper error message if all(c.ordered for c in to_union): msg = ("to union ordered Categoricals, " "all categories must be the same") raise TypeError(msg) else: raise TypeError('Categorical.ordered must be the same') if ignore_order: ordered = False return Categorical(new_codes, categories=categories, ordered=ordered, fastpath=True)
[ "def", "union_categoricals", "(", "to_union", ",", "sort_categories", "=", "False", ",", "ignore_order", "=", "False", ")", ":", "from", "pandas", "import", "Index", ",", "Categorical", ",", "CategoricalIndex", ",", "Series", "from", "pandas", ".", "core", ".", "arrays", ".", "categorical", "import", "_recode_for_categories", "if", "len", "(", "to_union", ")", "==", "0", ":", "raise", "ValueError", "(", "'No Categoricals to union'", ")", "def", "_maybe_unwrap", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "(", "CategoricalIndex", ",", "Series", ")", ")", ":", "return", "x", ".", "values", "elif", "isinstance", "(", "x", ",", "Categorical", ")", ":", "return", "x", "else", ":", "raise", "TypeError", "(", "\"all components to combine must be Categorical\"", ")", "to_union", "=", "[", "_maybe_unwrap", "(", "x", ")", "for", "x", "in", "to_union", "]", "first", "=", "to_union", "[", "0", "]", "if", "not", "all", "(", "is_dtype_equal", "(", "other", ".", "categories", ".", "dtype", ",", "first", ".", "categories", ".", "dtype", ")", "for", "other", "in", "to_union", "[", "1", ":", "]", ")", ":", "raise", "TypeError", "(", "\"dtype of categories must be the same\"", ")", "ordered", "=", "False", "if", "all", "(", "first", ".", "is_dtype_equal", "(", "other", ")", "for", "other", "in", "to_union", "[", "1", ":", "]", ")", ":", "# identical categories - fastpath", "categories", "=", "first", ".", "categories", "ordered", "=", "first", ".", "ordered", "if", "all", "(", "first", ".", "categories", ".", "equals", "(", "other", ".", "categories", ")", "for", "other", "in", "to_union", "[", "1", ":", "]", ")", ":", "new_codes", "=", "np", ".", "concatenate", "(", "[", "c", ".", "codes", "for", "c", "in", "to_union", "]", ")", "else", ":", "codes", "=", "[", "first", ".", "codes", "]", "+", "[", "_recode_for_categories", "(", "other", ".", "codes", ",", "other", ".", "categories", ",", "first", ".", "categories", ")", "for", "other", "in", "to_union", "[", "1", ":", "]", "]", "new_codes", "=", "np", ".", "concatenate", "(", "codes", ")", "if", "sort_categories", "and", "not", "ignore_order", "and", "ordered", ":", "raise", "TypeError", "(", "\"Cannot use sort_categories=True with \"", "\"ordered Categoricals\"", ")", "if", "sort_categories", "and", "not", "categories", ".", "is_monotonic_increasing", ":", "categories", "=", "categories", ".", "sort_values", "(", ")", "indexer", "=", "categories", ".", "get_indexer", "(", "first", ".", "categories", ")", "from", "pandas", ".", "core", ".", "algorithms", "import", "take_1d", "new_codes", "=", "take_1d", "(", "indexer", ",", "new_codes", ",", "fill_value", "=", "-", "1", ")", "elif", "ignore_order", "or", "all", "(", "not", "c", ".", "ordered", "for", "c", "in", "to_union", ")", ":", "# different categories - union and recode", "cats", "=", "first", ".", "categories", ".", "append", "(", "[", "c", ".", "categories", "for", "c", "in", "to_union", "[", "1", ":", "]", "]", ")", "categories", "=", "Index", "(", "cats", ".", "unique", "(", ")", ")", "if", "sort_categories", ":", "categories", "=", "categories", ".", "sort_values", "(", ")", "new_codes", "=", "[", "_recode_for_categories", "(", "c", ".", "codes", ",", "c", ".", "categories", ",", "categories", ")", "for", "c", "in", "to_union", "]", "new_codes", "=", "np", ".", "concatenate", "(", "new_codes", ")", "else", ":", "# ordered - to show a proper error message", "if", "all", "(", "c", ".", "ordered", "for", "c", "in", "to_union", ")", ":", "msg", "=", "(", "\"to union ordered Categoricals, 
\"", "\"all categories must be the same\"", ")", "raise", "TypeError", "(", "msg", ")", "else", ":", "raise", "TypeError", "(", "'Categorical.ordered must be the same'", ")", "if", "ignore_order", ":", "ordered", "=", "False", "return", "Categorical", "(", "new_codes", ",", "categories", "=", "categories", ",", "ordered", "=", "ordered", ",", "fastpath", "=", "True", ")" ]
Combine list-like of Categorical-like, unioning categories. All categories must have the same dtype. .. versionadded:: 0.19.0 Parameters ---------- to_union : list-like of Categorical, CategoricalIndex, or Series with dtype='category' sort_categories : boolean, default False If true, resulting categories will be lexsorted, otherwise they will be ordered as they appear in the data. ignore_order : boolean, default False If true, the ordered attribute of the Categoricals will be ignored. Results in an unordered categorical. .. versionadded:: 0.20.0 Returns ------- result : Categorical Raises ------ TypeError - all inputs do not have the same dtype - all inputs do not have the same ordered property - all inputs are ordered and their categories are not identical - sort_categories=True and Categoricals are ordered ValueError Empty list of categoricals passed Notes ----- To learn more about categories, see `link <http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__ Examples -------- >>> from pandas.api.types import union_categoricals If you want to combine categoricals that do not necessarily have the same categories, `union_categoricals` will combine a list-like of categoricals. The new categories will be the union of the categories being combined. >>> a = pd.Categorical(["b", "c"]) >>> b = pd.Categorical(["a", "b"]) >>> union_categoricals([a, b]) [b, c, a, b] Categories (3, object): [b, c, a] By default, the resulting categories will be ordered as they appear in the `categories` of the data. If you want the categories to be lexsorted, use `sort_categories=True` argument. >>> union_categoricals([a, b], sort_categories=True) [b, c, a, b] Categories (3, object): [a, b, c] `union_categoricals` also works with the case of combining two categoricals of the same categories and order information (e.g. what you could also `append` for). >>> a = pd.Categorical(["a", "b"], ordered=True) >>> b = pd.Categorical(["a", "b", "a"], ordered=True) >>> union_categoricals([a, b]) [a, b, a, b, a] Categories (2, object): [a < b] Raises `TypeError` because the categories are ordered and not identical. >>> a = pd.Categorical(["a", "b"], ordered=True) >>> b = pd.Categorical(["a", "b", "c"], ordered=True) >>> union_categoricals([a, b]) TypeError: to union ordered Categoricals, all categories must be the same New in version 0.20.0 Ordered categoricals with different categories or orderings can be combined by using the `ignore_ordered=True` argument. >>> a = pd.Categorical(["a", "b", "c"], ordered=True) >>> b = pd.Categorical(["c", "b", "a"], ordered=True) >>> union_categoricals([a, b], ignore_order=True) [a, b, c, c, b, a] Categories (3, object): [a, b, c] `union_categoricals` also works with a `CategoricalIndex`, or `Series` containing categorical data, but note that the resulting array will always be a plain `Categorical` >>> a = pd.Series(["b", "c"], dtype='category') >>> b = pd.Series(["a", "b"], dtype='category') >>> union_categoricals([a, b]) [b, c, a, b] Categories (3, object): [b, c, a]
[ "Combine", "list", "-", "like", "of", "Categorical", "-", "like", "unioning", "categories", ".", "All", "categories", "must", "have", "the", "same", "dtype", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L209-L381
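A quick usage check of the public entry point; the expected categories in the comments follow the docstring above:

import pandas as pd
from pandas.api.types import union_categoricals

a = pd.Categorical(["b", "c"])
b = pd.Categorical(["a", "b"])
print(union_categoricals([a, b]))                        # categories: [b, c, a]
print(union_categoricals([a, b], sort_categories=True))  # categories: [a, b, c]

# ordered inputs with different categories need ignore_order=True
c = pd.Categorical(["a", "b"], ordered=True)
d = pd.Categorical(["c", "b"], ordered=True)
print(union_categoricals([c, d], ignore_order=True))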
20,048
pandas-dev/pandas
pandas/core/dtypes/concat.py
_concat_datetimetz
def _concat_datetimetz(to_concat, name=None): """ concat DatetimeIndex with the same tz all inputs must be DatetimeIndex it is used in DatetimeIndex.append also """ # Right now, internals will pass a List[DatetimeArray] here # for reductions like quantile. I would like to disentangle # all this before we get here. sample = to_concat[0] if isinstance(sample, ABCIndexClass): return sample._concat_same_dtype(to_concat, name=name) elif isinstance(sample, ABCDatetimeArray): return sample._concat_same_type(to_concat)
python
def _concat_datetimetz(to_concat, name=None): """ concat DatetimeIndex with the same tz all inputs must be DatetimeIndex it is used in DatetimeIndex.append also """ # Right now, internals will pass a List[DatetimeArray] here # for reductions like quantile. I would like to disentangle # all this before we get here. sample = to_concat[0] if isinstance(sample, ABCIndexClass): return sample._concat_same_dtype(to_concat, name=name) elif isinstance(sample, ABCDatetimeArray): return sample._concat_same_type(to_concat)
[ "def", "_concat_datetimetz", "(", "to_concat", ",", "name", "=", "None", ")", ":", "# Right now, internals will pass a List[DatetimeArray] here", "# for reductions like quantile. I would like to disentangle", "# all this before we get here.", "sample", "=", "to_concat", "[", "0", "]", "if", "isinstance", "(", "sample", ",", "ABCIndexClass", ")", ":", "return", "sample", ".", "_concat_same_dtype", "(", "to_concat", ",", "name", "=", "name", ")", "elif", "isinstance", "(", "sample", ",", "ABCDatetimeArray", ")", ":", "return", "sample", ".", "_concat_same_type", "(", "to_concat", ")" ]
concat DatetimeIndex with the same tz all inputs must be DatetimeIndex it is used in DatetimeIndex.append also
[ "concat", "DatetimeIndex", "with", "the", "same", "tz", "all", "inputs", "must", "be", "DatetimeIndex", "it", "is", "used", "in", "DatetimeIndex", ".", "append", "also" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L459-L473
20,049
pandas-dev/pandas
pandas/core/dtypes/concat.py
_concat_index_asobject
def _concat_index_asobject(to_concat, name=None): """ concat all inputs as object. DatetimeIndex, TimedeltaIndex and PeriodIndex are converted to object dtype before concatenation """ from pandas import Index from pandas.core.arrays import ExtensionArray klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray) to_concat = [x.astype(object) if isinstance(x, klasses) else x for x in to_concat] self = to_concat[0] attribs = self._get_attributes_dict() attribs['name'] = name to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
python
def _concat_index_asobject(to_concat, name=None): """ concat all inputs as object. DatetimeIndex, TimedeltaIndex and PeriodIndex are converted to object dtype before concatenation """ from pandas import Index from pandas.core.arrays import ExtensionArray klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray) to_concat = [x.astype(object) if isinstance(x, klasses) else x for x in to_concat] self = to_concat[0] attribs = self._get_attributes_dict() attribs['name'] = name to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
[ "def", "_concat_index_asobject", "(", "to_concat", ",", "name", "=", "None", ")", ":", "from", "pandas", "import", "Index", "from", "pandas", ".", "core", ".", "arrays", "import", "ExtensionArray", "klasses", "=", "(", "ABCDatetimeIndex", ",", "ABCTimedeltaIndex", ",", "ABCPeriodIndex", ",", "ExtensionArray", ")", "to_concat", "=", "[", "x", ".", "astype", "(", "object", ")", "if", "isinstance", "(", "x", ",", "klasses", ")", "else", "x", "for", "x", "in", "to_concat", "]", "self", "=", "to_concat", "[", "0", "]", "attribs", "=", "self", ".", "_get_attributes_dict", "(", ")", "attribs", "[", "'name'", "]", "=", "name", "to_concat", "=", "[", "x", ".", "_values", "if", "isinstance", "(", "x", ",", "Index", ")", "else", "x", "for", "x", "in", "to_concat", "]", "return", "self", ".", "_shallow_copy_with_infer", "(", "np", ".", "concatenate", "(", "to_concat", ")", ",", "*", "*", "attribs", ")" ]
concat all inputs as object. DatetimeIndex, TimedeltaIndex and PeriodIndex are converted to object dtype before concatenation
[ "concat", "all", "inputs", "as", "object", ".", "DatetimeIndex", "TimedeltaIndex", "and", "PeriodIndex", "are", "converted", "to", "object", "dtype", "before", "concatenation" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/concat.py#L481-L501
20,050
pandas-dev/pandas
pandas/util/_exceptions.py
rewrite_exception
def rewrite_exception(old_name, new_name): """Rewrite the message of an exception.""" try: yield except Exception as e: msg = e.args[0] msg = msg.replace(old_name, new_name) args = (msg,) if len(e.args) > 1: args = args + e.args[1:] e.args = args raise
python
def rewrite_exception(old_name, new_name): """Rewrite the message of an exception.""" try: yield except Exception as e: msg = e.args[0] msg = msg.replace(old_name, new_name) args = (msg,) if len(e.args) > 1: args = args + e.args[1:] e.args = args raise
[ "def", "rewrite_exception", "(", "old_name", ",", "new_name", ")", ":", "try", ":", "yield", "except", "Exception", "as", "e", ":", "msg", "=", "e", ".", "args", "[", "0", "]", "msg", "=", "msg", ".", "replace", "(", "old_name", ",", "new_name", ")", "args", "=", "(", "msg", ",", ")", "if", "len", "(", "e", ".", "args", ")", ">", "1", ":", "args", "=", "args", "+", "e", ".", "args", "[", "1", ":", "]", "e", ".", "args", "=", "args", "raise" ]
Rewrite the message of an exception.
[ "Rewrite", "the", "message", "of", "an", "exception", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_exceptions.py#L5-L16
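In the pandas source this helper is used as a context manager (the @contextlib.contextmanager decorator is not captured in the snippet above); a self-contained sketch of the same idea, with a usage example:

import contextlib

@contextlib.contextmanager
def rewrite_exception(old_name, new_name):
    # same behaviour as above: swap old_name for new_name in the message
    # of any exception raised inside the block, then re-raise it
    try:
        yield
    except Exception as e:
        msg = e.args[0].replace(old_name, new_name)
        e.args = (msg,) + e.args[1:]
        raise

try:
    with rewrite_exception("CategoricalIndex", "MyIndex"):
        raise ValueError("CategoricalIndex is not monotonic")
except ValueError as err:
    print(err)   # "MyIndex is not monotonic"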
20,051
pandas-dev/pandas
pandas/io/formats/style.py
_get_level_lengths
def _get_level_lengths(index, hidden_elements=None): """ Given an index, find the level length for each element. Optional argument is a list of index positions which should not be visible. Result is a dictionary of (level, inital_position): span """ sentinel = object() levels = index.format(sparsify=sentinel, adjoin=False, names=False) if hidden_elements is None: hidden_elements = [] lengths = {} if index.nlevels == 1: for i, value in enumerate(levels): if(i not in hidden_elements): lengths[(0, i)] = 1 return lengths for i, lvl in enumerate(levels): for j, row in enumerate(lvl): if not get_option('display.multi_sparse'): lengths[(i, j)] = 1 elif (row != sentinel) and (j not in hidden_elements): last_label = j lengths[(i, last_label)] = 1 elif (row != sentinel): # even if its hidden, keep track of it in case # length >1 and later elements are visible last_label = j lengths[(i, last_label)] = 0 elif(j not in hidden_elements): lengths[(i, last_label)] += 1 non_zero_lengths = { element: length for element, length in lengths.items() if length >= 1} return non_zero_lengths
python
def _get_level_lengths(index, hidden_elements=None): """ Given an index, find the level length for each element. Optional argument is a list of index positions which should not be visible. Result is a dictionary of (level, inital_position): span """ sentinel = object() levels = index.format(sparsify=sentinel, adjoin=False, names=False) if hidden_elements is None: hidden_elements = [] lengths = {} if index.nlevels == 1: for i, value in enumerate(levels): if(i not in hidden_elements): lengths[(0, i)] = 1 return lengths for i, lvl in enumerate(levels): for j, row in enumerate(lvl): if not get_option('display.multi_sparse'): lengths[(i, j)] = 1 elif (row != sentinel) and (j not in hidden_elements): last_label = j lengths[(i, last_label)] = 1 elif (row != sentinel): # even if its hidden, keep track of it in case # length >1 and later elements are visible last_label = j lengths[(i, last_label)] = 0 elif(j not in hidden_elements): lengths[(i, last_label)] += 1 non_zero_lengths = { element: length for element, length in lengths.items() if length >= 1} return non_zero_lengths
[ "def", "_get_level_lengths", "(", "index", ",", "hidden_elements", "=", "None", ")", ":", "sentinel", "=", "object", "(", ")", "levels", "=", "index", ".", "format", "(", "sparsify", "=", "sentinel", ",", "adjoin", "=", "False", ",", "names", "=", "False", ")", "if", "hidden_elements", "is", "None", ":", "hidden_elements", "=", "[", "]", "lengths", "=", "{", "}", "if", "index", ".", "nlevels", "==", "1", ":", "for", "i", ",", "value", "in", "enumerate", "(", "levels", ")", ":", "if", "(", "i", "not", "in", "hidden_elements", ")", ":", "lengths", "[", "(", "0", ",", "i", ")", "]", "=", "1", "return", "lengths", "for", "i", ",", "lvl", "in", "enumerate", "(", "levels", ")", ":", "for", "j", ",", "row", "in", "enumerate", "(", "lvl", ")", ":", "if", "not", "get_option", "(", "'display.multi_sparse'", ")", ":", "lengths", "[", "(", "i", ",", "j", ")", "]", "=", "1", "elif", "(", "row", "!=", "sentinel", ")", "and", "(", "j", "not", "in", "hidden_elements", ")", ":", "last_label", "=", "j", "lengths", "[", "(", "i", ",", "last_label", ")", "]", "=", "1", "elif", "(", "row", "!=", "sentinel", ")", ":", "# even if its hidden, keep track of it in case", "# length >1 and later elements are visible", "last_label", "=", "j", "lengths", "[", "(", "i", ",", "last_label", ")", "]", "=", "0", "elif", "(", "j", "not", "in", "hidden_elements", ")", ":", "lengths", "[", "(", "i", ",", "last_label", ")", "]", "+=", "1", "non_zero_lengths", "=", "{", "element", ":", "length", "for", "element", ",", "length", "in", "lengths", ".", "items", "(", ")", "if", "length", ">=", "1", "}", "return", "non_zero_lengths" ]
Given an index, find the level length for each element. Optional argument is a list of index positions which should not be visible. Result is a dictionary of (level, initial_position): span
[ "Given", "an", "index", "find", "the", "level", "length", "for", "each", "element", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1322-L1362
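An illustrative, simplified version of the span computation, assuming sparsified levels are passed as lists where None marks "same label as the row above"; the real helper uses a sentinel object and also honours hidden elements and the display.multi_sparse option:

def level_lengths(levels):
    lengths = {}
    for i, lvl in enumerate(levels):
        last = 0
        for j, label in enumerate(lvl):
            if label is not None:          # a new label starts a span
                last = j
                lengths[(i, last)] = 1
            else:                          # a sparsified repeat extends it
                lengths[(i, last)] += 1
    return lengths

# outer label "a" spans two rows, "b" one row; the inner level never repeats
print(level_lengths([["a", None, "b"], ["x", "y", "z"]]))
# {(0, 0): 2, (0, 2): 1, (1, 0): 1, (1, 1): 1, (1, 2): 1}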
20,052
pandas-dev/pandas
pandas/io/formats/style.py
Styler.format
def format(self, formatter, subset=None): """ Format the text display value of cells. .. versionadded:: 0.18.0 Parameters ---------- formatter : str, callable, or dict subset : IndexSlice An argument to ``DataFrame.loc`` that restricts which elements ``formatter`` is applied to. Returns ------- self : Styler Notes ----- ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where ``a`` is one of - str: this will be wrapped in: ``a.format(x)`` - callable: called with the value of an individual cell The default display value for numeric values is the "general" (``g``) format with ``pd.options.display.precision`` precision. Examples -------- >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) >>> df.style.format("{:.2%}") >>> df['c'] = ['a', 'b', 'c', 'd'] >>> df.style.format({'c': str.upper}) """ if subset is None: row_locs = range(len(self.data)) col_locs = range(len(self.data.columns)) else: subset = _non_reducing_slice(subset) if len(subset) == 1: subset = subset, self.data.columns sub_df = self.data.loc[subset] row_locs = self.data.index.get_indexer_for(sub_df.index) col_locs = self.data.columns.get_indexer_for(sub_df.columns) if is_dict_like(formatter): for col, col_formatter in formatter.items(): # formatter must be callable, so '{}' are converted to lambdas col_formatter = _maybe_wrap_formatter(col_formatter) col_num = self.data.columns.get_indexer_for([col])[0] for row_num in row_locs: self._display_funcs[(row_num, col_num)] = col_formatter else: # single scalar to format all cells with locs = product(*(row_locs, col_locs)) for i, j in locs: formatter = _maybe_wrap_formatter(formatter) self._display_funcs[(i, j)] = formatter return self
python
def format(self, formatter, subset=None): """ Format the text display value of cells. .. versionadded:: 0.18.0 Parameters ---------- formatter : str, callable, or dict subset : IndexSlice An argument to ``DataFrame.loc`` that restricts which elements ``formatter`` is applied to. Returns ------- self : Styler Notes ----- ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where ``a`` is one of - str: this will be wrapped in: ``a.format(x)`` - callable: called with the value of an individual cell The default display value for numeric values is the "general" (``g``) format with ``pd.options.display.precision`` precision. Examples -------- >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) >>> df.style.format("{:.2%}") >>> df['c'] = ['a', 'b', 'c', 'd'] >>> df.style.format({'c': str.upper}) """ if subset is None: row_locs = range(len(self.data)) col_locs = range(len(self.data.columns)) else: subset = _non_reducing_slice(subset) if len(subset) == 1: subset = subset, self.data.columns sub_df = self.data.loc[subset] row_locs = self.data.index.get_indexer_for(sub_df.index) col_locs = self.data.columns.get_indexer_for(sub_df.columns) if is_dict_like(formatter): for col, col_formatter in formatter.items(): # formatter must be callable, so '{}' are converted to lambdas col_formatter = _maybe_wrap_formatter(col_formatter) col_num = self.data.columns.get_indexer_for([col])[0] for row_num in row_locs: self._display_funcs[(row_num, col_num)] = col_formatter else: # single scalar to format all cells with locs = product(*(row_locs, col_locs)) for i, j in locs: formatter = _maybe_wrap_formatter(formatter) self._display_funcs[(i, j)] = formatter return self
[ "def", "format", "(", "self", ",", "formatter", ",", "subset", "=", "None", ")", ":", "if", "subset", "is", "None", ":", "row_locs", "=", "range", "(", "len", "(", "self", ".", "data", ")", ")", "col_locs", "=", "range", "(", "len", "(", "self", ".", "data", ".", "columns", ")", ")", "else", ":", "subset", "=", "_non_reducing_slice", "(", "subset", ")", "if", "len", "(", "subset", ")", "==", "1", ":", "subset", "=", "subset", ",", "self", ".", "data", ".", "columns", "sub_df", "=", "self", ".", "data", ".", "loc", "[", "subset", "]", "row_locs", "=", "self", ".", "data", ".", "index", ".", "get_indexer_for", "(", "sub_df", ".", "index", ")", "col_locs", "=", "self", ".", "data", ".", "columns", ".", "get_indexer_for", "(", "sub_df", ".", "columns", ")", "if", "is_dict_like", "(", "formatter", ")", ":", "for", "col", ",", "col_formatter", "in", "formatter", ".", "items", "(", ")", ":", "# formatter must be callable, so '{}' are converted to lambdas", "col_formatter", "=", "_maybe_wrap_formatter", "(", "col_formatter", ")", "col_num", "=", "self", ".", "data", ".", "columns", ".", "get_indexer_for", "(", "[", "col", "]", ")", "[", "0", "]", "for", "row_num", "in", "row_locs", ":", "self", ".", "_display_funcs", "[", "(", "row_num", ",", "col_num", ")", "]", "=", "col_formatter", "else", ":", "# single scalar to format all cells with", "locs", "=", "product", "(", "*", "(", "row_locs", ",", "col_locs", ")", ")", "for", "i", ",", "j", "in", "locs", ":", "formatter", "=", "_maybe_wrap_formatter", "(", "formatter", ")", "self", ".", "_display_funcs", "[", "(", "i", ",", "j", ")", "]", "=", "formatter", "return", "self" ]
Format the text display value of cells. .. versionadded:: 0.18.0 Parameters ---------- formatter : str, callable, or dict subset : IndexSlice An argument to ``DataFrame.loc`` that restricts which elements ``formatter`` is applied to. Returns ------- self : Styler Notes ----- ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where ``a`` is one of - str: this will be wrapped in: ``a.format(x)`` - callable: called with the value of an individual cell The default display value for numeric values is the "general" (``g``) format with ``pd.options.display.precision`` precision. Examples -------- >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) >>> df.style.format("{:.2%}") >>> df['c'] = ['a', 'b', 'c', 'd'] >>> df.style.format({'c': str.upper})
[ "Format", "the", "text", "display", "value", "of", "cells", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L356-L419
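A small usage example of the per-column formatting described above (0.24-era API, where Styler.render() returns the HTML string):

import pandas as pd

df = pd.DataFrame({"ratio": [0.1234, 0.5678], "name": ["alpha", "beta"]})
styler = df.style.format({"ratio": "{:.1%}", "name": str.upper})
html = styler.render()   # 'ratio' renders as 12.3% / 56.8%, 'name' as ALPHA / BETA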
20,053
pandas-dev/pandas
pandas/io/formats/style.py
Styler.render
def render(self, **kwargs): """ Render the built up styles to HTML. Parameters ---------- **kwargs Any additional keyword arguments are passed through to ``self.template.render``. This is useful when you need to provide additional variables for a custom template. .. versionadded:: 0.20 Returns ------- rendered : str The rendered HTML. Notes ----- ``Styler`` objects have defined the ``_repr_html_`` method which automatically calls ``self.render()`` when it's the last item in a Notebook cell. When calling ``Styler.render()`` directly, wrap the result in ``IPython.display.HTML`` to view the rendered HTML in the notebook. Pandas uses the following keys in render. Arguments passed in ``**kwargs`` take precedence, so think carefully if you want to override them: * head * cellstyle * body * uuid * precision * table_styles * caption * table_attributes """ self._compute() # TODO: namespace all the pandas keys d = self._translate() # filter out empty styles, every cell will have a class # but the list of props may just be [['', '']]. # so we have the neested anys below trimmed = [x for x in d['cellstyle'] if any(any(y) for y in x['props'])] d['cellstyle'] = trimmed d.update(kwargs) return self.template.render(**d)
python
def render(self, **kwargs): """ Render the built up styles to HTML. Parameters ---------- **kwargs Any additional keyword arguments are passed through to ``self.template.render``. This is useful when you need to provide additional variables for a custom template. .. versionadded:: 0.20 Returns ------- rendered : str The rendered HTML. Notes ----- ``Styler`` objects have defined the ``_repr_html_`` method which automatically calls ``self.render()`` when it's the last item in a Notebook cell. When calling ``Styler.render()`` directly, wrap the result in ``IPython.display.HTML`` to view the rendered HTML in the notebook. Pandas uses the following keys in render. Arguments passed in ``**kwargs`` take precedence, so think carefully if you want to override them: * head * cellstyle * body * uuid * precision * table_styles * caption * table_attributes """ self._compute() # TODO: namespace all the pandas keys d = self._translate() # filter out empty styles, every cell will have a class # but the list of props may just be [['', '']]. # so we have the neested anys below trimmed = [x for x in d['cellstyle'] if any(any(y) for y in x['props'])] d['cellstyle'] = trimmed d.update(kwargs) return self.template.render(**d)
[ "def", "render", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_compute", "(", ")", "# TODO: namespace all the pandas keys", "d", "=", "self", ".", "_translate", "(", ")", "# filter out empty styles, every cell will have a class", "# but the list of props may just be [['', '']].", "# so we have the neested anys below", "trimmed", "=", "[", "x", "for", "x", "in", "d", "[", "'cellstyle'", "]", "if", "any", "(", "any", "(", "y", ")", "for", "y", "in", "x", "[", "'props'", "]", ")", "]", "d", "[", "'cellstyle'", "]", "=", "trimmed", "d", ".", "update", "(", "kwargs", ")", "return", "self", ".", "template", ".", "render", "(", "*", "*", "d", ")" ]
Render the built up styles to HTML. Parameters ---------- **kwargs Any additional keyword arguments are passed through to ``self.template.render``. This is useful when you need to provide additional variables for a custom template. .. versionadded:: 0.20 Returns ------- rendered : str The rendered HTML. Notes ----- ``Styler`` objects have defined the ``_repr_html_`` method which automatically calls ``self.render()`` when it's the last item in a Notebook cell. When calling ``Styler.render()`` directly, wrap the result in ``IPython.display.HTML`` to view the rendered HTML in the notebook. Pandas uses the following keys in render. Arguments passed in ``**kwargs`` take precedence, so think carefully if you want to override them: * head * cellstyle * body * uuid * precision * table_styles * caption * table_attributes
[ "Render", "the", "built", "up", "styles", "to", "HTML", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L421-L471
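A minimal round trip through render(); extra keyword arguments only matter when a custom template expects them:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(3, 2), columns=["a", "b"])
html = df.style.render()   # HTML string built from the default template
# in a notebook: from IPython.display import HTML; HTML(html)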
20,054
pandas-dev/pandas
pandas/io/formats/style.py
Styler._update_ctx
def _update_ctx(self, attrs): """ Update the state of the Styler. Collects a mapping of {index_label: ['<property>: <value>']}. attrs : Series or DataFrame should contain strings of '<property>: <value>;<prop2>: <val2>' Whitespace shouldn't matter and the final trailing ';' shouldn't matter. """ for row_label, v in attrs.iterrows(): for col_label, col in v.iteritems(): i = self.index.get_indexer([row_label])[0] j = self.columns.get_indexer([col_label])[0] for pair in col.rstrip(";").split(";"): self.ctx[(i, j)].append(pair)
python
def _update_ctx(self, attrs): """ Update the state of the Styler. Collects a mapping of {index_label: ['<property>: <value>']}. attrs : Series or DataFrame should contain strings of '<property>: <value>;<prop2>: <val2>' Whitespace shouldn't matter and the final trailing ';' shouldn't matter. """ for row_label, v in attrs.iterrows(): for col_label, col in v.iteritems(): i = self.index.get_indexer([row_label])[0] j = self.columns.get_indexer([col_label])[0] for pair in col.rstrip(";").split(";"): self.ctx[(i, j)].append(pair)
[ "def", "_update_ctx", "(", "self", ",", "attrs", ")", ":", "for", "row_label", ",", "v", "in", "attrs", ".", "iterrows", "(", ")", ":", "for", "col_label", ",", "col", "in", "v", ".", "iteritems", "(", ")", ":", "i", "=", "self", ".", "index", ".", "get_indexer", "(", "[", "row_label", "]", ")", "[", "0", "]", "j", "=", "self", ".", "columns", ".", "get_indexer", "(", "[", "col_label", "]", ")", "[", "0", "]", "for", "pair", "in", "col", ".", "rstrip", "(", "\";\"", ")", ".", "split", "(", "\";\"", ")", ":", "self", ".", "ctx", "[", "(", "i", ",", "j", ")", "]", ".", "append", "(", "pair", ")" ]
Update the state of the Styler. Collects a mapping of {index_label: ['<property>: <value>']}. attrs : Series or DataFrame should contain strings of '<property>: <value>;<prop2>: <val2>' Neither surrounding whitespace nor a final trailing ';' matters.
[ "Update", "the", "state", "of", "the", "Styler", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L473-L489
20,055
pandas-dev/pandas
pandas/io/formats/style.py
Styler._compute
def _compute(self): """ Execute the style functions built up in `self._todo`. Relies on the conventions that all style functions go through .apply or .applymap. The append styles to apply as tuples of (application method, *args, **kwargs) """ r = self for func, args, kwargs in self._todo: r = func(self)(*args, **kwargs) return r
python
def _compute(self): """ Execute the style functions built up in `self._todo`. Relies on the conventions that all style functions go through .apply or .applymap. The append styles to apply as tuples of (application method, *args, **kwargs) """ r = self for func, args, kwargs in self._todo: r = func(self)(*args, **kwargs) return r
[ "def", "_compute", "(", "self", ")", ":", "r", "=", "self", "for", "func", ",", "args", ",", "kwargs", "in", "self", ".", "_todo", ":", "r", "=", "func", "(", "self", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "r" ]
Execute the style functions built up in `self._todo`. Relies on the convention that all style functions go through .apply or .applymap, which append the styles to apply as tuples of (application method, *args, **kwargs).
[ "Execute", "the", "style", "functions", "built", "up", "in", "self", ".", "_todo", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L520-L532
20,056
pandas-dev/pandas
pandas/io/formats/style.py
Styler.apply
def apply(self, func, axis=0, subset=None, **kwargs): """ Apply a function column-wise, row-wise, or table-wise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a Series or DataFrame (depending on ``axis``), and return an object with the same shape. Must return a DataFrame with identical index and column labels when ``axis=None`` axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler Notes ----- The output shape of ``func`` should match the input, i.e. if ``x`` is the input row, column, or table (depending on ``axis``), then ``func(x).shape == x.shape`` should be true. This is similar to ``DataFrame.apply``, except that ``axis=None`` applies the function to the entire DataFrame at once, rather than column-wise or row-wise. Examples -------- >>> def highlight_max(x): ... return ['background-color: yellow' if v == x.max() else '' for v in x] ... >>> df = pd.DataFrame(np.random.randn(5, 2)) >>> df.style.apply(highlight_max) """ self._todo.append((lambda instance: getattr(instance, '_apply'), (func, axis, subset), kwargs)) return self
python
def apply(self, func, axis=0, subset=None, **kwargs): """ Apply a function column-wise, row-wise, or table-wise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a Series or DataFrame (depending on ``axis``), and return an object with the same shape. Must return a DataFrame with identical index and column labels when ``axis=None`` axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler Notes ----- The output shape of ``func`` should match the input, i.e. if ``x`` is the input row, column, or table (depending on ``axis``), then ``func(x).shape == x.shape`` should be true. This is similar to ``DataFrame.apply``, except that ``axis=None`` applies the function to the entire DataFrame at once, rather than column-wise or row-wise. Examples -------- >>> def highlight_max(x): ... return ['background-color: yellow' if v == x.max() else '' for v in x] ... >>> df = pd.DataFrame(np.random.randn(5, 2)) >>> df.style.apply(highlight_max) """ self._todo.append((lambda instance: getattr(instance, '_apply'), (func, axis, subset), kwargs)) return self
[ "def", "apply", "(", "self", ",", "func", ",", "axis", "=", "0", ",", "subset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_todo", ".", "append", "(", "(", "lambda", "instance", ":", "getattr", "(", "instance", ",", "'_apply'", ")", ",", "(", "func", ",", "axis", ",", "subset", ")", ",", "kwargs", ")", ")", "return", "self" ]
Apply a function column-wise, row-wise, or table-wise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a Series or DataFrame (depending on ``axis``), and return an object with the same shape. Must return a DataFrame with identical index and column labels when ``axis=None`` axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler Notes ----- The output shape of ``func`` should match the input, i.e. if ``x`` is the input row, column, or table (depending on ``axis``), then ``func(x).shape == x.shape`` should be true. This is similar to ``DataFrame.apply``, except that ``axis=None`` applies the function to the entire DataFrame at once, rather than column-wise or row-wise. Examples -------- >>> def highlight_max(x): ... return ['background-color: yellow' if v == x.max() else '' for v in x] ... >>> df = pd.DataFrame(np.random.randn(5, 2)) >>> df.style.apply(highlight_max)
[ "Apply", "a", "function", "column", "-", "wise", "row", "-", "wise", "or", "table", "-", "wise", "updating", "the", "HTML", "representation", "with", "the", "result", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L567-L614
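Extending the docstring example to all three axis modes; for axis=None the function must hand back a DataFrame of CSS strings with the same shape and labels:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(5, 2), columns=["a", "b"])

def highlight_max(s):
    return ['background-color: yellow' if v == s.max() else '' for v in s]

df.style.apply(highlight_max)            # per column (axis=0)
df.style.apply(highlight_max, axis=1)    # per row

def highlight_table_max(d):
    css = np.where(d == d.values.max(), 'background-color: yellow', '')
    return pd.DataFrame(css, index=d.index, columns=d.columns)

df.style.apply(highlight_table_max, axis=None)   # whole table at once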
20,057
pandas-dev/pandas
pandas/io/formats/style.py
Styler.applymap
def applymap(self, func, subset=None, **kwargs): """ Apply a function elementwise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a scalar and return a scalar subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler See Also -------- Styler.where """ self._todo.append((lambda instance: getattr(instance, '_applymap'), (func, subset), kwargs)) return self
python
def applymap(self, func, subset=None, **kwargs): """ Apply a function elementwise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a scalar and return a scalar subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler See Also -------- Styler.where """ self._todo.append((lambda instance: getattr(instance, '_applymap'), (func, subset), kwargs)) return self
[ "def", "applymap", "(", "self", ",", "func", ",", "subset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_todo", ".", "append", "(", "(", "lambda", "instance", ":", "getattr", "(", "instance", ",", "'_applymap'", ")", ",", "(", "func", ",", "subset", ")", ",", "kwargs", ")", ")", "return", "self" ]
Apply a function elementwise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a scalar and return a scalar subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler See Also -------- Styler.where
[ "Apply", "a", "function", "elementwise", "updating", "the", "HTML", "representation", "with", "the", "result", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L625-L650
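Element-wise styling in one line, restricted to a single column via subset (subset accepts anything DataFrame.loc would):

import pandas as pd

df = pd.DataFrame({"a": [1, -2, 3], "b": [-4, 5, -6]})
df.style.applymap(lambda v: 'color: red' if v < 0 else '', subset=['b'])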
20,058
pandas-dev/pandas
pandas/io/formats/style.py
Styler.where
def where(self, cond, value, other=None, subset=None, **kwargs): """ Apply a function elementwise, updating the HTML representation with a style which is selected in accordance with the return value of a function. .. versionadded:: 0.21.0 Parameters ---------- cond : callable ``cond`` should take a scalar and return a boolean value : str applied when ``cond`` returns true other : str applied when ``cond`` returns false subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``cond`` Returns ------- self : Styler See Also -------- Styler.applymap """ if other is None: other = '' return self.applymap(lambda val: value if cond(val) else other, subset=subset, **kwargs)
python
def where(self, cond, value, other=None, subset=None, **kwargs): """ Apply a function elementwise, updating the HTML representation with a style which is selected in accordance with the return value of a function. .. versionadded:: 0.21.0 Parameters ---------- cond : callable ``cond`` should take a scalar and return a boolean value : str applied when ``cond`` returns true other : str applied when ``cond`` returns false subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``cond`` Returns ------- self : Styler See Also -------- Styler.applymap """ if other is None: other = '' return self.applymap(lambda val: value if cond(val) else other, subset=subset, **kwargs)
[ "def", "where", "(", "self", ",", "cond", ",", "value", ",", "other", "=", "None", ",", "subset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "other", "is", "None", ":", "other", "=", "''", "return", "self", ".", "applymap", "(", "lambda", "val", ":", "value", "if", "cond", "(", "val", ")", "else", "other", ",", "subset", "=", "subset", ",", "*", "*", "kwargs", ")" ]
Apply a function elementwise, updating the HTML representation with a style which is selected in accordance with the return value of a function. .. versionadded:: 0.21.0 Parameters ---------- cond : callable ``cond`` should take a scalar and return a boolean value : str applied when ``cond`` returns true other : str applied when ``cond`` returns false subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``cond`` Returns ------- self : Styler See Also -------- Styler.applymap
[ "Apply", "a", "function", "elementwise", "updating", "the", "HTML", "representation", "with", "a", "style", "which", "is", "selected", "in", "accordance", "with", "the", "return", "value", "of", "a", "function", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L652-L687
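A hedged sketch of Styler.where in use (values and styles are illustrative, not taken from the record): cells for which the condition holds receive ``value``, the rest receive ``other``.

import pandas as pd

df = pd.DataFrame({'x': [1, -2, 3], 'y': [-4, 5, -6]})
# Positive cells turn green; everything else falls back to the `other` style.
styled = df.style.where(lambda v: v > 0,
                        'background-color: lightgreen',
                        other='background-color: salmon')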
20,059
pandas-dev/pandas
pandas/io/formats/style.py
Styler.hide_columns
def hide_columns(self, subset): """ Hide columns from rendering. .. versionadded:: 0.23.0 Parameters ---------- subset : IndexSlice An argument to ``DataFrame.loc`` that identifies which columns are hidden. Returns ------- self : Styler """ subset = _non_reducing_slice(subset) hidden_df = self.data.loc[subset] self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns) return self
python
def hide_columns(self, subset): """ Hide columns from rendering. .. versionadded:: 0.23.0 Parameters ---------- subset : IndexSlice An argument to ``DataFrame.loc`` that identifies which columns are hidden. Returns ------- self : Styler """ subset = _non_reducing_slice(subset) hidden_df = self.data.loc[subset] self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns) return self
[ "def", "hide_columns", "(", "self", ",", "subset", ")", ":", "subset", "=", "_non_reducing_slice", "(", "subset", ")", "hidden_df", "=", "self", ".", "data", ".", "loc", "[", "subset", "]", "self", ".", "hidden_columns", "=", "self", ".", "columns", ".", "get_indexer_for", "(", "hidden_df", ".", "columns", ")", "return", "self" ]
Hide columns from rendering. .. versionadded:: 0.23.0 Parameters ---------- subset : IndexSlice An argument to ``DataFrame.loc`` that identifies which columns are hidden. Returns ------- self : Styler
[ "Hide", "columns", "from", "rendering", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L838-L857
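An illustrative call to Styler.hide_columns (the column names are invented): only the rendered HTML changes, the underlying data keeps every column.

import pandas as pd

df = pd.DataFrame({'keep': [1, 2], 'internal': [3, 4]})
# 'internal' disappears from the HTML output but remains in df itself.
styled = df.style.hide_columns(['internal'])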
20,060
pandas-dev/pandas
pandas/io/formats/style.py
Styler.highlight_null
def highlight_null(self, null_color='red'): """ Shade the background ``null_color`` for missing values. Parameters ---------- null_color : str Returns ------- self : Styler """ self.applymap(self._highlight_null, null_color=null_color) return self
python
def highlight_null(self, null_color='red'): """ Shade the background ``null_color`` for missing values. Parameters ---------- null_color : str Returns ------- self : Styler """ self.applymap(self._highlight_null, null_color=null_color) return self
[ "def", "highlight_null", "(", "self", ",", "null_color", "=", "'red'", ")", ":", "self", ".", "applymap", "(", "self", ".", "_highlight_null", ",", "null_color", "=", "null_color", ")", "return", "self" ]
Shade the background ``null_color`` for missing values. Parameters ---------- null_color : str Returns ------- self : Styler
[ "Shade", "the", "background", "null_color", "for", "missing", "values", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L868-L881
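A small, assumed example of Styler.highlight_null; the frame and the placement of the NaN values are only for demonstration.

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan], 'b': [np.nan, 2.0]})
# Every missing cell gets the chosen background color in the rendered table.
styled = df.style.highlight_null(null_color='orange')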
20,061
pandas-dev/pandas
pandas/io/formats/style.py
Styler._background_gradient
def _background_gradient(s, cmap='PuBu', low=0, high=0, text_color_threshold=0.408): """ Color background in a range according to the data. """ if (not isinstance(text_color_threshold, (float, int)) or not 0 <= text_color_threshold <= 1): msg = "`text_color_threshold` must be a value from 0 to 1." raise ValueError(msg) with _mpl(Styler.background_gradient) as (plt, colors): smin = s.values.min() smax = s.values.max() rng = smax - smin # extend lower / upper bounds, compresses color range norm = colors.Normalize(smin - (rng * low), smax + (rng * high)) # matplotlib colors.Normalize modifies inplace? # https://github.com/matplotlib/matplotlib/issues/5427 rgbas = plt.cm.get_cmap(cmap)(norm(s.values)) def relative_luminance(rgba): """ Calculate relative luminance of a color. The calculation adheres to the W3C standards (https://www.w3.org/WAI/GL/wiki/Relative_luminance) Parameters ---------- color : rgb or rgba tuple Returns ------- float The relative luminance as a value from 0 to 1 """ r, g, b = ( x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4) for x in rgba[:3] ) return 0.2126 * r + 0.7152 * g + 0.0722 * b def css(rgba): dark = relative_luminance(rgba) < text_color_threshold text_color = '#f1f1f1' if dark else '#000000' return 'background-color: {b};color: {c};'.format( b=colors.rgb2hex(rgba), c=text_color ) if s.ndim == 1: return [css(rgba) for rgba in rgbas] else: return pd.DataFrame( [[css(rgba) for rgba in row] for row in rgbas], index=s.index, columns=s.columns )
python
def _background_gradient(s, cmap='PuBu', low=0, high=0, text_color_threshold=0.408): """ Color background in a range according to the data. """ if (not isinstance(text_color_threshold, (float, int)) or not 0 <= text_color_threshold <= 1): msg = "`text_color_threshold` must be a value from 0 to 1." raise ValueError(msg) with _mpl(Styler.background_gradient) as (plt, colors): smin = s.values.min() smax = s.values.max() rng = smax - smin # extend lower / upper bounds, compresses color range norm = colors.Normalize(smin - (rng * low), smax + (rng * high)) # matplotlib colors.Normalize modifies inplace? # https://github.com/matplotlib/matplotlib/issues/5427 rgbas = plt.cm.get_cmap(cmap)(norm(s.values)) def relative_luminance(rgba): """ Calculate relative luminance of a color. The calculation adheres to the W3C standards (https://www.w3.org/WAI/GL/wiki/Relative_luminance) Parameters ---------- color : rgb or rgba tuple Returns ------- float The relative luminance as a value from 0 to 1 """ r, g, b = ( x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055 ** 2.4) for x in rgba[:3] ) return 0.2126 * r + 0.7152 * g + 0.0722 * b def css(rgba): dark = relative_luminance(rgba) < text_color_threshold text_color = '#f1f1f1' if dark else '#000000' return 'background-color: {b};color: {c};'.format( b=colors.rgb2hex(rgba), c=text_color ) if s.ndim == 1: return [css(rgba) for rgba in rgbas] else: return pd.DataFrame( [[css(rgba) for rgba in row] for row in rgbas], index=s.index, columns=s.columns )
[ "def", "_background_gradient", "(", "s", ",", "cmap", "=", "'PuBu'", ",", "low", "=", "0", ",", "high", "=", "0", ",", "text_color_threshold", "=", "0.408", ")", ":", "if", "(", "not", "isinstance", "(", "text_color_threshold", ",", "(", "float", ",", "int", ")", ")", "or", "not", "0", "<=", "text_color_threshold", "<=", "1", ")", ":", "msg", "=", "\"`text_color_threshold` must be a value from 0 to 1.\"", "raise", "ValueError", "(", "msg", ")", "with", "_mpl", "(", "Styler", ".", "background_gradient", ")", "as", "(", "plt", ",", "colors", ")", ":", "smin", "=", "s", ".", "values", ".", "min", "(", ")", "smax", "=", "s", ".", "values", ".", "max", "(", ")", "rng", "=", "smax", "-", "smin", "# extend lower / upper bounds, compresses color range", "norm", "=", "colors", ".", "Normalize", "(", "smin", "-", "(", "rng", "*", "low", ")", ",", "smax", "+", "(", "rng", "*", "high", ")", ")", "# matplotlib colors.Normalize modifies inplace?", "# https://github.com/matplotlib/matplotlib/issues/5427", "rgbas", "=", "plt", ".", "cm", ".", "get_cmap", "(", "cmap", ")", "(", "norm", "(", "s", ".", "values", ")", ")", "def", "relative_luminance", "(", "rgba", ")", ":", "\"\"\"\n Calculate relative luminance of a color.\n\n The calculation adheres to the W3C standards\n (https://www.w3.org/WAI/GL/wiki/Relative_luminance)\n\n Parameters\n ----------\n color : rgb or rgba tuple\n\n Returns\n -------\n float\n The relative luminance as a value from 0 to 1\n \"\"\"", "r", ",", "g", ",", "b", "=", "(", "x", "/", "12.92", "if", "x", "<=", "0.03928", "else", "(", "(", "x", "+", "0.055", ")", "/", "1.055", "**", "2.4", ")", "for", "x", "in", "rgba", "[", ":", "3", "]", ")", "return", "0.2126", "*", "r", "+", "0.7152", "*", "g", "+", "0.0722", "*", "b", "def", "css", "(", "rgba", ")", ":", "dark", "=", "relative_luminance", "(", "rgba", ")", "<", "text_color_threshold", "text_color", "=", "'#f1f1f1'", "if", "dark", "else", "'#000000'", "return", "'background-color: {b};color: {c};'", ".", "format", "(", "b", "=", "colors", ".", "rgb2hex", "(", "rgba", ")", ",", "c", "=", "text_color", ")", "if", "s", ".", "ndim", "==", "1", ":", "return", "[", "css", "(", "rgba", ")", "for", "rgba", "in", "rgbas", "]", "else", ":", "return", "pd", ".", "DataFrame", "(", "[", "[", "css", "(", "rgba", ")", "for", "rgba", "in", "row", "]", "for", "row", "in", "rgbas", "]", ",", "index", "=", "s", ".", "index", ",", "columns", "=", "s", ".", "columns", ")" ]
Color background in a range according to the data.
[ "Color", "background", "in", "a", "range", "according", "to", "the", "data", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L934-L989
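The helper above is normally reached through the public Styler.background_gradient method; the sketch below assumes that entry point, an installed matplotlib, and made-up data.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(5, 3), columns=list('abc'))
# Shades each column with a colormap; text flips to light when the cell
# background falls below the luminance threshold.
styled = df.style.background_gradient(cmap='viridis', text_color_threshold=0.408)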
20,062
pandas-dev/pandas
pandas/io/formats/style.py
Styler.set_properties
def set_properties(self, subset=None, **kwargs): """ Convenience method for setting one or more non-data dependent properties or each cell. Parameters ---------- subset : IndexSlice a valid slice for ``data`` to limit the style application to kwargs : dict property: value pairs to be set for each cell Returns ------- self : Styler Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4)) >>> df.style.set_properties(color="white", align="right") >>> df.style.set_properties(**{'background-color': 'yellow'}) """ values = ';'.join('{p}: {v}'.format(p=p, v=v) for p, v in kwargs.items()) f = lambda x: values return self.applymap(f, subset=subset)
python
def set_properties(self, subset=None, **kwargs): """ Convenience method for setting one or more non-data dependent properties or each cell. Parameters ---------- subset : IndexSlice a valid slice for ``data`` to limit the style application to kwargs : dict property: value pairs to be set for each cell Returns ------- self : Styler Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4)) >>> df.style.set_properties(color="white", align="right") >>> df.style.set_properties(**{'background-color': 'yellow'}) """ values = ';'.join('{p}: {v}'.format(p=p, v=v) for p, v in kwargs.items()) f = lambda x: values return self.applymap(f, subset=subset)
[ "def", "set_properties", "(", "self", ",", "subset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "values", "=", "';'", ".", "join", "(", "'{p}: {v}'", ".", "format", "(", "p", "=", "p", ",", "v", "=", "v", ")", "for", "p", ",", "v", "in", "kwargs", ".", "items", "(", ")", ")", "f", "=", "lambda", "x", ":", "values", "return", "self", ".", "applymap", "(", "f", ",", "subset", "=", "subset", ")" ]
Convenience method for setting one or more non-data dependent properties for each cell. Parameters ---------- subset : IndexSlice a valid slice for ``data`` to limit the style application to kwargs : dict property: value pairs to be set for each cell Returns ------- self : Styler Examples -------- >>> df = pd.DataFrame(np.random.randn(10, 4)) >>> df.style.set_properties(color="white", align="right") >>> df.style.set_properties(**{'background-color': 'yellow'})
[ "Convenience", "method", "for", "setting", "one", "or", "more", "non", "-", "data", "dependent", "properties", "or", "each", "cell", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L991-L1016
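Beyond the doctest lines already in the record, a hedged sketch of limiting set_properties to a subset of columns (the labels and CSS are illustrative).

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
# Only column 'b' receives the static CSS properties.
styled = df.style.set_properties(subset=['b'], **{'text-align': 'right'})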
20,063
pandas-dev/pandas
pandas/io/formats/style.py
Styler._bar
def _bar(s, align, colors, width=100, vmin=None, vmax=None): """ Draw bar chart in dataframe cells. """ # Get input value range. smin = s.min() if vmin is None else vmin if isinstance(smin, ABCSeries): smin = smin.min() smax = s.max() if vmax is None else vmax if isinstance(smax, ABCSeries): smax = smax.max() if align == 'mid': smin = min(0, smin) smax = max(0, smax) elif align == 'zero': # For "zero" mode, we want the range to be symmetrical around zero. smax = max(abs(smin), abs(smax)) smin = -smax # Transform to percent-range of linear-gradient normed = width * (s.values - smin) / (smax - smin + 1e-12) zero = -width * smin / (smax - smin + 1e-12) def css_bar(start, end, color): """ Generate CSS code to draw a bar from start to end. """ css = 'width: 10em; height: 80%;' if end > start: css += 'background: linear-gradient(90deg,' if start > 0: css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format( s=start, c=color ) css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format( e=min(end, width), c=color, ) return css def css(x): if pd.isna(x): return '' # avoid deprecated indexing `colors[x > zero]` color = colors[1] if x > zero else colors[0] if align == 'left': return css_bar(0, x, color) else: return css_bar(min(x, zero), max(x, zero), color) if s.ndim == 1: return [css(x) for x in normed] else: return pd.DataFrame( [[css(x) for x in row] for row in normed], index=s.index, columns=s.columns )
python
def _bar(s, align, colors, width=100, vmin=None, vmax=None): """ Draw bar chart in dataframe cells. """ # Get input value range. smin = s.min() if vmin is None else vmin if isinstance(smin, ABCSeries): smin = smin.min() smax = s.max() if vmax is None else vmax if isinstance(smax, ABCSeries): smax = smax.max() if align == 'mid': smin = min(0, smin) smax = max(0, smax) elif align == 'zero': # For "zero" mode, we want the range to be symmetrical around zero. smax = max(abs(smin), abs(smax)) smin = -smax # Transform to percent-range of linear-gradient normed = width * (s.values - smin) / (smax - smin + 1e-12) zero = -width * smin / (smax - smin + 1e-12) def css_bar(start, end, color): """ Generate CSS code to draw a bar from start to end. """ css = 'width: 10em; height: 80%;' if end > start: css += 'background: linear-gradient(90deg,' if start > 0: css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format( s=start, c=color ) css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format( e=min(end, width), c=color, ) return css def css(x): if pd.isna(x): return '' # avoid deprecated indexing `colors[x > zero]` color = colors[1] if x > zero else colors[0] if align == 'left': return css_bar(0, x, color) else: return css_bar(min(x, zero), max(x, zero), color) if s.ndim == 1: return [css(x) for x in normed] else: return pd.DataFrame( [[css(x) for x in row] for row in normed], index=s.index, columns=s.columns )
[ "def", "_bar", "(", "s", ",", "align", ",", "colors", ",", "width", "=", "100", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ")", ":", "# Get input value range.", "smin", "=", "s", ".", "min", "(", ")", "if", "vmin", "is", "None", "else", "vmin", "if", "isinstance", "(", "smin", ",", "ABCSeries", ")", ":", "smin", "=", "smin", ".", "min", "(", ")", "smax", "=", "s", ".", "max", "(", ")", "if", "vmax", "is", "None", "else", "vmax", "if", "isinstance", "(", "smax", ",", "ABCSeries", ")", ":", "smax", "=", "smax", ".", "max", "(", ")", "if", "align", "==", "'mid'", ":", "smin", "=", "min", "(", "0", ",", "smin", ")", "smax", "=", "max", "(", "0", ",", "smax", ")", "elif", "align", "==", "'zero'", ":", "# For \"zero\" mode, we want the range to be symmetrical around zero.", "smax", "=", "max", "(", "abs", "(", "smin", ")", ",", "abs", "(", "smax", ")", ")", "smin", "=", "-", "smax", "# Transform to percent-range of linear-gradient", "normed", "=", "width", "*", "(", "s", ".", "values", "-", "smin", ")", "/", "(", "smax", "-", "smin", "+", "1e-12", ")", "zero", "=", "-", "width", "*", "smin", "/", "(", "smax", "-", "smin", "+", "1e-12", ")", "def", "css_bar", "(", "start", ",", "end", ",", "color", ")", ":", "\"\"\"\n Generate CSS code to draw a bar from start to end.\n \"\"\"", "css", "=", "'width: 10em; height: 80%;'", "if", "end", ">", "start", ":", "css", "+=", "'background: linear-gradient(90deg,'", "if", "start", ">", "0", ":", "css", "+=", "' transparent {s:.1f}%, {c} {s:.1f}%, '", ".", "format", "(", "s", "=", "start", ",", "c", "=", "color", ")", "css", "+=", "'{c} {e:.1f}%, transparent {e:.1f}%)'", ".", "format", "(", "e", "=", "min", "(", "end", ",", "width", ")", ",", "c", "=", "color", ",", ")", "return", "css", "def", "css", "(", "x", ")", ":", "if", "pd", ".", "isna", "(", "x", ")", ":", "return", "''", "# avoid deprecated indexing `colors[x > zero]`", "color", "=", "colors", "[", "1", "]", "if", "x", ">", "zero", "else", "colors", "[", "0", "]", "if", "align", "==", "'left'", ":", "return", "css_bar", "(", "0", ",", "x", ",", "color", ")", "else", ":", "return", "css_bar", "(", "min", "(", "x", ",", "zero", ")", ",", "max", "(", "x", ",", "zero", ")", ",", "color", ")", "if", "s", ".", "ndim", "==", "1", ":", "return", "[", "css", "(", "x", ")", "for", "x", "in", "normed", "]", "else", ":", "return", "pd", ".", "DataFrame", "(", "[", "[", "css", "(", "x", ")", "for", "x", "in", "row", "]", "for", "row", "in", "normed", "]", ",", "index", "=", "s", ".", "index", ",", "columns", "=", "s", ".", "columns", ")" ]
Draw bar chart in dataframe cells.
[ "Draw", "bar", "chart", "in", "dataframe", "cells", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1019-L1075
20,064
pandas-dev/pandas
pandas/io/formats/style.py
Styler.bar
def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left', vmin=None, vmax=None): """ Draw bar chart in the cell backgrounds. Parameters ---------- subset : IndexSlice, optional A valid slice for `data` to limit the style application to. axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. color : str or 2-tuple/list If a str is passed, the color is the same for both negative and positive numbers. If 2-tuple/list is used, the first element is the color_negative and the second is the color_positive (eg: ['#d65f5f', '#5fba7d']). width : float, default 100 A number between 0 or 100. The largest value will cover `width` percent of the cell's width. align : {'left', 'zero',' mid'}, default 'left' How to align the bars with the cells. - 'left' : the min value starts at the left of the cell. - 'zero' : a value of zero is located at the center of the cell. - 'mid' : the center of the cell is at (max-min)/2, or if values are all negative (positive) the zero is aligned at the right (left) of the cell. .. versionadded:: 0.20.0 vmin : float, optional Minimum bar value, defining the left hand limit of the bar drawing range, lower values are clipped to `vmin`. When None (default): the minimum value of the data will be used. .. versionadded:: 0.24.0 vmax : float, optional Maximum bar value, defining the right hand limit of the bar drawing range, higher values are clipped to `vmax`. When None (default): the maximum value of the data will be used. .. versionadded:: 0.24.0 Returns ------- self : Styler """ if align not in ('left', 'zero', 'mid'): raise ValueError("`align` must be one of {'left', 'zero',' mid'}") if not (is_list_like(color)): color = [color, color] elif len(color) == 1: color = [color[0], color[0]] elif len(color) > 2: raise ValueError("`color` must be string or a list-like" " of length 2: [`color_neg`, `color_pos`]" " (eg: color=['#d65f5f', '#5fba7d'])") subset = _maybe_numeric_slice(self.data, subset) subset = _non_reducing_slice(subset) self.apply(self._bar, subset=subset, axis=axis, align=align, colors=color, width=width, vmin=vmin, vmax=vmax) return self
python
def bar(self, subset=None, axis=0, color='#d65f5f', width=100, align='left', vmin=None, vmax=None): """ Draw bar chart in the cell backgrounds. Parameters ---------- subset : IndexSlice, optional A valid slice for `data` to limit the style application to. axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. color : str or 2-tuple/list If a str is passed, the color is the same for both negative and positive numbers. If 2-tuple/list is used, the first element is the color_negative and the second is the color_positive (eg: ['#d65f5f', '#5fba7d']). width : float, default 100 A number between 0 or 100. The largest value will cover `width` percent of the cell's width. align : {'left', 'zero',' mid'}, default 'left' How to align the bars with the cells. - 'left' : the min value starts at the left of the cell. - 'zero' : a value of zero is located at the center of the cell. - 'mid' : the center of the cell is at (max-min)/2, or if values are all negative (positive) the zero is aligned at the right (left) of the cell. .. versionadded:: 0.20.0 vmin : float, optional Minimum bar value, defining the left hand limit of the bar drawing range, lower values are clipped to `vmin`. When None (default): the minimum value of the data will be used. .. versionadded:: 0.24.0 vmax : float, optional Maximum bar value, defining the right hand limit of the bar drawing range, higher values are clipped to `vmax`. When None (default): the maximum value of the data will be used. .. versionadded:: 0.24.0 Returns ------- self : Styler """ if align not in ('left', 'zero', 'mid'): raise ValueError("`align` must be one of {'left', 'zero',' mid'}") if not (is_list_like(color)): color = [color, color] elif len(color) == 1: color = [color[0], color[0]] elif len(color) > 2: raise ValueError("`color` must be string or a list-like" " of length 2: [`color_neg`, `color_pos`]" " (eg: color=['#d65f5f', '#5fba7d'])") subset = _maybe_numeric_slice(self.data, subset) subset = _non_reducing_slice(subset) self.apply(self._bar, subset=subset, axis=axis, align=align, colors=color, width=width, vmin=vmin, vmax=vmax) return self
[ "def", "bar", "(", "self", ",", "subset", "=", "None", ",", "axis", "=", "0", ",", "color", "=", "'#d65f5f'", ",", "width", "=", "100", ",", "align", "=", "'left'", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ")", ":", "if", "align", "not", "in", "(", "'left'", ",", "'zero'", ",", "'mid'", ")", ":", "raise", "ValueError", "(", "\"`align` must be one of {'left', 'zero',' mid'}\"", ")", "if", "not", "(", "is_list_like", "(", "color", ")", ")", ":", "color", "=", "[", "color", ",", "color", "]", "elif", "len", "(", "color", ")", "==", "1", ":", "color", "=", "[", "color", "[", "0", "]", ",", "color", "[", "0", "]", "]", "elif", "len", "(", "color", ")", ">", "2", ":", "raise", "ValueError", "(", "\"`color` must be string or a list-like\"", "\" of length 2: [`color_neg`, `color_pos`]\"", "\" (eg: color=['#d65f5f', '#5fba7d'])\"", ")", "subset", "=", "_maybe_numeric_slice", "(", "self", ".", "data", ",", "subset", ")", "subset", "=", "_non_reducing_slice", "(", "subset", ")", "self", ".", "apply", "(", "self", ".", "_bar", ",", "subset", "=", "subset", ",", "axis", "=", "axis", ",", "align", "=", "align", ",", "colors", "=", "color", ",", "width", "=", "width", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ")", "return", "self" ]
Draw bar chart in the cell backgrounds. Parameters ---------- subset : IndexSlice, optional A valid slice for `data` to limit the style application to. axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. color : str or 2-tuple/list If a str is passed, the color is the same for both negative and positive numbers. If 2-tuple/list is used, the first element is the color_negative and the second is the color_positive (eg: ['#d65f5f', '#5fba7d']). width : float, default 100 A number between 0 and 100. The largest value will cover `width` percent of the cell's width. align : {'left', 'zero', 'mid'}, default 'left' How to align the bars with the cells. - 'left' : the min value starts at the left of the cell. - 'zero' : a value of zero is located at the center of the cell. - 'mid' : the center of the cell is at (max-min)/2, or if values are all negative (positive) the zero is aligned at the right (left) of the cell. .. versionadded:: 0.20.0 vmin : float, optional Minimum bar value, defining the left hand limit of the bar drawing range, lower values are clipped to `vmin`. When None (default): the minimum value of the data will be used. .. versionadded:: 0.24.0 vmax : float, optional Maximum bar value, defining the right hand limit of the bar drawing range, higher values are clipped to `vmax`. When None (default): the maximum value of the data will be used. .. versionadded:: 0.24.0 Returns ------- self : Styler
[ "Draw", "bar", "chart", "in", "the", "cell", "backgrounds", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1077-L1145
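A short, assumption-based example of Styler.bar with two colors and an explicit drawing range (the data and colors are invented).

import pandas as pd

df = pd.DataFrame({'delta': [-3, -1, 2, 5]})
# With align='zero' the zero point sits at the center of each cell; bars grow
# left for negatives, right for positives, clipped to the vmin/vmax window.
styled = df.style.bar(align='zero', color=['#d65f5f', '#5fba7d'], vmin=-5, vmax=5)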
20,065
pandas-dev/pandas
pandas/io/formats/style.py
Styler.highlight_max
def highlight_max(self, subset=None, color='yellow', axis=0): """ Highlight the maximum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler """ return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
python
def highlight_max(self, subset=None, color='yellow', axis=0): """ Highlight the maximum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler """ return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
[ "def", "highlight_max", "(", "self", ",", "subset", "=", "None", ",", "color", "=", "'yellow'", ",", "axis", "=", "0", ")", ":", "return", "self", ".", "_highlight_handler", "(", "subset", "=", "subset", ",", "color", "=", "color", ",", "axis", "=", "axis", ",", "max_", "=", "True", ")" ]
Highlight the maximum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler
[ "Highlight", "the", "maximum", "by", "shading", "the", "background", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1147-L1166
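An illustrative pairing of highlight_max with its highlight_min counterpart (documented in the following record); the data and colors are arbitrary.

import pandas as pd

df = pd.DataFrame({'a': [1, 9, 3], 'b': [7, 2, 8]})
styled = (df.style
            .highlight_max(color='lightgreen', axis=0)      # per-column maxima
            .highlight_min(color='lightcoral', axis=None))  # single global minimum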
20,066
pandas-dev/pandas
pandas/io/formats/style.py
Styler.highlight_min
def highlight_min(self, subset=None, color='yellow', axis=0): """ Highlight the minimum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler """ return self._highlight_handler(subset=subset, color=color, axis=axis, max_=False)
python
def highlight_min(self, subset=None, color='yellow', axis=0): """ Highlight the minimum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler """ return self._highlight_handler(subset=subset, color=color, axis=axis, max_=False)
[ "def", "highlight_min", "(", "self", ",", "subset", "=", "None", ",", "color", "=", "'yellow'", ",", "axis", "=", "0", ")", ":", "return", "self", ".", "_highlight_handler", "(", "subset", "=", "subset", ",", "color", "=", "color", ",", "axis", "=", "axis", ",", "max_", "=", "False", ")" ]
Highlight the minimum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler
[ "Highlight", "the", "minimum", "by", "shading", "the", "background", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1168-L1187
20,067
pandas-dev/pandas
pandas/io/formats/style.py
Styler._highlight_extrema
def _highlight_extrema(data, color='yellow', max_=True): """ Highlight the min or max in a Series or DataFrame. """ attr = 'background-color: {0}'.format(color) if data.ndim == 1: # Series from .apply if max_: extrema = data == data.max() else: extrema = data == data.min() return [attr if v else '' for v in extrema] else: # DataFrame from .tee if max_: extrema = data == data.max().max() else: extrema = data == data.min().min() return pd.DataFrame(np.where(extrema, attr, ''), index=data.index, columns=data.columns)
python
def _highlight_extrema(data, color='yellow', max_=True): """ Highlight the min or max in a Series or DataFrame. """ attr = 'background-color: {0}'.format(color) if data.ndim == 1: # Series from .apply if max_: extrema = data == data.max() else: extrema = data == data.min() return [attr if v else '' for v in extrema] else: # DataFrame from .tee if max_: extrema = data == data.max().max() else: extrema = data == data.min().min() return pd.DataFrame(np.where(extrema, attr, ''), index=data.index, columns=data.columns)
[ "def", "_highlight_extrema", "(", "data", ",", "color", "=", "'yellow'", ",", "max_", "=", "True", ")", ":", "attr", "=", "'background-color: {0}'", ".", "format", "(", "color", ")", "if", "data", ".", "ndim", "==", "1", ":", "# Series from .apply", "if", "max_", ":", "extrema", "=", "data", "==", "data", ".", "max", "(", ")", "else", ":", "extrema", "=", "data", "==", "data", ".", "min", "(", ")", "return", "[", "attr", "if", "v", "else", "''", "for", "v", "in", "extrema", "]", "else", ":", "# DataFrame from .tee", "if", "max_", ":", "extrema", "=", "data", "==", "data", ".", "max", "(", ")", ".", "max", "(", ")", "else", ":", "extrema", "=", "data", "==", "data", ".", "min", "(", ")", ".", "min", "(", ")", "return", "pd", ".", "DataFrame", "(", "np", ".", "where", "(", "extrema", ",", "attr", ",", "''", ")", ",", "index", "=", "data", ".", "index", ",", "columns", "=", "data", ".", "columns", ")" ]
Highlight the min or max in a Series or DataFrame.
[ "Highlight", "the", "min", "or", "max", "in", "a", "Series", "or", "DataFrame", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1197-L1214
20,068
pandas-dev/pandas
pandas/io/formats/style.py
Styler.from_custom_template
def from_custom_template(cls, searchpath, name): """ Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set. """ loader = ChoiceLoader([ FileSystemLoader(searchpath), cls.loader, ]) class MyStyler(cls): env = Environment(loader=loader) template = env.get_template(name) return MyStyler
python
def from_custom_template(cls, searchpath, name): """ Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set. """ loader = ChoiceLoader([ FileSystemLoader(searchpath), cls.loader, ]) class MyStyler(cls): env = Environment(loader=loader) template = env.get_template(name) return MyStyler
[ "def", "from_custom_template", "(", "cls", ",", "searchpath", ",", "name", ")", ":", "loader", "=", "ChoiceLoader", "(", "[", "FileSystemLoader", "(", "searchpath", ")", ",", "cls", ".", "loader", ",", "]", ")", "class", "MyStyler", "(", "cls", ")", ":", "env", "=", "Environment", "(", "loader", "=", "loader", ")", "template", "=", "env", ".", "get_template", "(", "name", ")", "return", "MyStyler" ]
Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set.
[ "Factory", "function", "for", "creating", "a", "subclass", "of", "Styler", "with", "a", "custom", "template", "and", "Jinja", "environment", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/style.py#L1217-L1243
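A hedged sketch of Styler.from_custom_template; the 'templates' directory and 'myhtml.tpl' file are assumptions (they must exist, and the template should extend the default one), and Jinja2 must be installed.

import pandas as pd
from pandas.io.formats.style import Styler

# MyStyler renders with ./templates/myhtml.tpl instead of the built-in template.
MyStyler = Styler.from_custom_template('templates', 'myhtml.tpl')
df = pd.DataFrame({'a': [1, 2]})
html = MyStyler(df).render(table_title='Demo')  # extra kwargs reach the template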
20,069
pandas-dev/pandas
pandas/core/indexes/numeric.py
Int64Index._assert_safe_casting
def _assert_safe_casting(cls, data, subarr): """ Ensure incoming data can be represented as ints. """ if not issubclass(data.dtype.type, np.signedinteger): if not np.array_equal(data, subarr): raise TypeError('Unsafe NumPy casting, you must ' 'explicitly cast')
python
def _assert_safe_casting(cls, data, subarr): """ Ensure incoming data can be represented as ints. """ if not issubclass(data.dtype.type, np.signedinteger): if not np.array_equal(data, subarr): raise TypeError('Unsafe NumPy casting, you must ' 'explicitly cast')
[ "def", "_assert_safe_casting", "(", "cls", ",", "data", ",", "subarr", ")", ":", "if", "not", "issubclass", "(", "data", ".", "dtype", ".", "type", ",", "np", ".", "signedinteger", ")", ":", "if", "not", "np", ".", "array_equal", "(", "data", ",", "subarr", ")", ":", "raise", "TypeError", "(", "'Unsafe NumPy casting, you must '", "'explicitly cast'", ")" ]
Ensure incoming data can be represented as ints.
[ "Ensure", "incoming", "data", "can", "be", "represented", "as", "ints", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/numeric.py#L215-L222
20,070
pandas-dev/pandas
pandas/core/indexes/numeric.py
Float64Index.get_value
def get_value(self, series, key): """ we always want to get an index value, never a value """ if not is_scalar(key): raise InvalidIndexError k = com.values_from_object(key) loc = self.get_loc(k) new_values = com.values_from_object(series)[loc] return new_values
python
def get_value(self, series, key): """ we always want to get an index value, never a value """ if not is_scalar(key): raise InvalidIndexError k = com.values_from_object(key) loc = self.get_loc(k) new_values = com.values_from_object(series)[loc] return new_values
[ "def", "get_value", "(", "self", ",", "series", ",", "key", ")", ":", "if", "not", "is_scalar", "(", "key", ")", ":", "raise", "InvalidIndexError", "k", "=", "com", ".", "values_from_object", "(", "key", ")", "loc", "=", "self", ".", "get_loc", "(", "k", ")", "new_values", "=", "com", ".", "values_from_object", "(", "series", ")", "[", "loc", "]", "return", "new_values" ]
we always want to get an index value, never a value
[ "we", "always", "want", "to", "get", "an", "index", "value", "never", "a", "value" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/numeric.py#L364-L373
20,071
pandas-dev/pandas
pandas/io/pytables.py
to_hdf
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, append=None, **kwargs): """ store this object, close it if we opened it """ if append: f = lambda store: store.append(key, value, **kwargs) else: f = lambda store: store.put(key, value, **kwargs) path_or_buf = _stringify_path(path_or_buf) if isinstance(path_or_buf, str): with HDFStore(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: f(store) else: f(path_or_buf)
python
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, append=None, **kwargs): """ store this object, close it if we opened it """ if append: f = lambda store: store.append(key, value, **kwargs) else: f = lambda store: store.put(key, value, **kwargs) path_or_buf = _stringify_path(path_or_buf) if isinstance(path_or_buf, str): with HDFStore(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: f(store) else: f(path_or_buf)
[ "def", "to_hdf", "(", "path_or_buf", ",", "key", ",", "value", ",", "mode", "=", "None", ",", "complevel", "=", "None", ",", "complib", "=", "None", ",", "append", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "append", ":", "f", "=", "lambda", "store", ":", "store", ".", "append", "(", "key", ",", "value", ",", "*", "*", "kwargs", ")", "else", ":", "f", "=", "lambda", "store", ":", "store", ".", "put", "(", "key", ",", "value", ",", "*", "*", "kwargs", ")", "path_or_buf", "=", "_stringify_path", "(", "path_or_buf", ")", "if", "isinstance", "(", "path_or_buf", ",", "str", ")", ":", "with", "HDFStore", "(", "path_or_buf", ",", "mode", "=", "mode", ",", "complevel", "=", "complevel", ",", "complib", "=", "complib", ")", "as", "store", ":", "f", "(", "store", ")", "else", ":", "f", "(", "path_or_buf", ")" ]
store this object, close it if we opened it
[ "store", "this", "object", "close", "it", "if", "we", "opened", "it" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L248-L263
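A minimal write-side sketch that exercises the to_hdf helper above through DataFrame.to_hdf, assuming PyTables is installed; the file name and key are invented.

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
# Opens demo.h5, writes the frame under key 'data' as a queryable table, closes it.
df.to_hdf('demo.h5', key='data', mode='w', format='table')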
20,072
pandas-dev/pandas
pandas/io/pytables.py
read_hdf
def read_hdf(path_or_buf, key=None, mode='r', **kwargs): """ Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- path_or_buf : string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. Supports any object implementing the ``__fspath__`` protocol. This includes :class:`pathlib.Path` and py._path.local.LocalPath objects. .. versionadded:: 0.19.0 support for pathlib, py.path. .. versionadded:: 0.21.0 support for __fspath__ protocol. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, optional Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- item : object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) >>> df.to_hdf('./store.h5', 'data') >>> reread = pd.read_hdf('./store.h5') """ if mode not in ['r', 'r+', 'a']: raise ValueError('mode {0} is not allowed while performing a read. ' 'Allowed modes are r, r+ and a.'.format(mode)) # grab the scope if 'where' in kwargs: kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1) if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise IOError('The HDFStore must be open for reading.') store = path_or_buf auto_close = False else: path_or_buf = _stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError('Support for generic buffers has not ' 'been implemented.') try: exists = os.path.exists(path_or_buf) # if filepath is too long except (TypeError, ValueError): exists = False if not exists: raise FileNotFoundError( 'File {path} does not exist'.format(path=path_or_buf)) store = HDFStore(path_or_buf, mode=mode, **kwargs) # can't auto open/close if we are using an iterator # so delegate to the iterator auto_close = True try: if key is None: groups = store.groups() if len(groups) == 0: raise ValueError('No dataset in HDF5 file.') candidate_only_group = groups[0] # For the HDF file to have only one dataset, all other groups # should then be metadata groups for that candidate group. (This # assumes that the groups() method enumerates parent groups # before their children.) for group_to_check in groups[1:]: if not _is_metadata_of(group_to_check, candidate_only_group): raise ValueError('key must be provided when HDF5 file ' 'contains multiple datasets.') key = candidate_only_group._v_pathname return store.select(key, auto_close=auto_close, **kwargs) except (ValueError, TypeError, KeyError): # if there is an error, close the store try: store.close() except AttributeError: pass raise
python
def read_hdf(path_or_buf, key=None, mode='r', **kwargs): """ Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- path_or_buf : string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. Supports any object implementing the ``__fspath__`` protocol. This includes :class:`pathlib.Path` and py._path.local.LocalPath objects. .. versionadded:: 0.19.0 support for pathlib, py.path. .. versionadded:: 0.21.0 support for __fspath__ protocol. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, optional Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- item : object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) >>> df.to_hdf('./store.h5', 'data') >>> reread = pd.read_hdf('./store.h5') """ if mode not in ['r', 'r+', 'a']: raise ValueError('mode {0} is not allowed while performing a read. ' 'Allowed modes are r, r+ and a.'.format(mode)) # grab the scope if 'where' in kwargs: kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1) if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise IOError('The HDFStore must be open for reading.') store = path_or_buf auto_close = False else: path_or_buf = _stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError('Support for generic buffers has not ' 'been implemented.') try: exists = os.path.exists(path_or_buf) # if filepath is too long except (TypeError, ValueError): exists = False if not exists: raise FileNotFoundError( 'File {path} does not exist'.format(path=path_or_buf)) store = HDFStore(path_or_buf, mode=mode, **kwargs) # can't auto open/close if we are using an iterator # so delegate to the iterator auto_close = True try: if key is None: groups = store.groups() if len(groups) == 0: raise ValueError('No dataset in HDF5 file.') candidate_only_group = groups[0] # For the HDF file to have only one dataset, all other groups # should then be metadata groups for that candidate group. (This # assumes that the groups() method enumerates parent groups # before their children.) for group_to_check in groups[1:]: if not _is_metadata_of(group_to_check, candidate_only_group): raise ValueError('key must be provided when HDF5 file ' 'contains multiple datasets.') key = candidate_only_group._v_pathname return store.select(key, auto_close=auto_close, **kwargs) except (ValueError, TypeError, KeyError): # if there is an error, close the store try: store.close() except AttributeError: pass raise
[ "def", "read_hdf", "(", "path_or_buf", ",", "key", "=", "None", ",", "mode", "=", "'r'", ",", "*", "*", "kwargs", ")", ":", "if", "mode", "not", "in", "[", "'r'", ",", "'r+'", ",", "'a'", "]", ":", "raise", "ValueError", "(", "'mode {0} is not allowed while performing a read. '", "'Allowed modes are r, r+ and a.'", ".", "format", "(", "mode", ")", ")", "# grab the scope", "if", "'where'", "in", "kwargs", ":", "kwargs", "[", "'where'", "]", "=", "_ensure_term", "(", "kwargs", "[", "'where'", "]", ",", "scope_level", "=", "1", ")", "if", "isinstance", "(", "path_or_buf", ",", "HDFStore", ")", ":", "if", "not", "path_or_buf", ".", "is_open", ":", "raise", "IOError", "(", "'The HDFStore must be open for reading.'", ")", "store", "=", "path_or_buf", "auto_close", "=", "False", "else", ":", "path_or_buf", "=", "_stringify_path", "(", "path_or_buf", ")", "if", "not", "isinstance", "(", "path_or_buf", ",", "str", ")", ":", "raise", "NotImplementedError", "(", "'Support for generic buffers has not '", "'been implemented.'", ")", "try", ":", "exists", "=", "os", ".", "path", ".", "exists", "(", "path_or_buf", ")", "# if filepath is too long", "except", "(", "TypeError", ",", "ValueError", ")", ":", "exists", "=", "False", "if", "not", "exists", ":", "raise", "FileNotFoundError", "(", "'File {path} does not exist'", ".", "format", "(", "path", "=", "path_or_buf", ")", ")", "store", "=", "HDFStore", "(", "path_or_buf", ",", "mode", "=", "mode", ",", "*", "*", "kwargs", ")", "# can't auto open/close if we are using an iterator", "# so delegate to the iterator", "auto_close", "=", "True", "try", ":", "if", "key", "is", "None", ":", "groups", "=", "store", ".", "groups", "(", ")", "if", "len", "(", "groups", ")", "==", "0", ":", "raise", "ValueError", "(", "'No dataset in HDF5 file.'", ")", "candidate_only_group", "=", "groups", "[", "0", "]", "# For the HDF file to have only one dataset, all other groups", "# should then be metadata groups for that candidate group. (This", "# assumes that the groups() method enumerates parent groups", "# before their children.)", "for", "group_to_check", "in", "groups", "[", "1", ":", "]", ":", "if", "not", "_is_metadata_of", "(", "group_to_check", ",", "candidate_only_group", ")", ":", "raise", "ValueError", "(", "'key must be provided when HDF5 file '", "'contains multiple datasets.'", ")", "key", "=", "candidate_only_group", ".", "_v_pathname", "return", "store", ".", "select", "(", "key", ",", "auto_close", "=", "auto_close", ",", "*", "*", "kwargs", ")", "except", "(", "ValueError", ",", "TypeError", ",", "KeyError", ")", ":", "# if there is an error, close the store", "try", ":", "store", ".", "close", "(", ")", "except", "AttributeError", ":", "pass", "raise" ]
Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- path_or_buf : string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. Supports any object implementing the ``__fspath__`` protocol. This includes :class:`pathlib.Path` and py._path.local.LocalPath objects. .. versionadded:: 0.19.0 support for pathlib, py.path. .. versionadded:: 0.21.0 support for __fspath__ protocol. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, optional Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- item : object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) >>> df.to_hdf('./store.h5', 'data') >>> reread = pd.read_hdf('./store.h5')
[ "Read", "from", "the", "store", "close", "it", "if", "we", "opened", "it", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L266-L384
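Complementing the doctest in the record, a hedged example of the where/columns arguments (only meaningful for 'table'-format stores with indexed data columns; the file and values are illustrative).

import pandas as pd

df = pd.DataFrame({'a': range(10), 'b': range(10)})
df.to_hdf('demo.h5', key='data', mode='w', format='table', data_columns=['a'])
# The row filter is evaluated inside the store, so only matching rows are loaded.
subset = pd.read_hdf('demo.h5', key='data', where='a > 5', columns=['a'])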
20,073
pandas-dev/pandas
pandas/io/pytables.py
_is_metadata_of
def _is_metadata_of(group, parent_group): """Check if a given group is a metadata group for a given parent_group.""" if group._v_depth <= parent_group._v_depth: return False current = group while current._v_depth > 1: parent = current._v_parent if parent == parent_group and current._v_name == 'meta': return True current = current._v_parent return False
python
def _is_metadata_of(group, parent_group): """Check if a given group is a metadata group for a given parent_group.""" if group._v_depth <= parent_group._v_depth: return False current = group while current._v_depth > 1: parent = current._v_parent if parent == parent_group and current._v_name == 'meta': return True current = current._v_parent return False
[ "def", "_is_metadata_of", "(", "group", ",", "parent_group", ")", ":", "if", "group", ".", "_v_depth", "<=", "parent_group", ".", "_v_depth", ":", "return", "False", "current", "=", "group", "while", "current", ".", "_v_depth", ">", "1", ":", "parent", "=", "current", ".", "_v_parent", "if", "parent", "==", "parent_group", "and", "current", ".", "_v_name", "==", "'meta'", ":", "return", "True", "current", "=", "current", ".", "_v_parent", "return", "False" ]
Check if a given group is a metadata group for a given parent_group.
[ "Check", "if", "a", "given", "group", "is", "a", "metadata", "group", "for", "a", "given", "parent_group", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L387-L398
20,074
pandas-dev/pandas
pandas/io/pytables.py
_get_tz
def _get_tz(tz): """ for a tz-aware type, return an encoded zone """ zone = timezones.get_timezone(tz) if zone is None: zone = tz.utcoffset().total_seconds() return zone
python
def _get_tz(tz): """ for a tz-aware type, return an encoded zone """ zone = timezones.get_timezone(tz) if zone is None: zone = tz.utcoffset().total_seconds() return zone
[ "def", "_get_tz", "(", "tz", ")", ":", "zone", "=", "timezones", ".", "get_timezone", "(", "tz", ")", "if", "zone", "is", "None", ":", "zone", "=", "tz", ".", "utcoffset", "(", ")", ".", "total_seconds", "(", ")", "return", "zone" ]
for a tz-aware type, return an encoded zone
[ "for", "a", "tz", "-", "aware", "type", "return", "an", "encoded", "zone" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L4356-L4361
20,075
pandas-dev/pandas
pandas/io/pytables.py
_set_tz
def _set_tz(values, tz, preserve_UTC=False, coerce=False): """ coerce the values to a DatetimeIndex if tz is set preserve the input shape if possible Parameters ---------- values : ndarray tz : string/pickled tz object preserve_UTC : boolean, preserve the UTC of the result coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray """ if tz is not None: name = getattr(values, 'name', None) values = values.ravel() tz = timezones.get_timezone(_ensure_decoded(tz)) values = DatetimeIndex(values, name=name) if values.tz is None: values = values.tz_localize('UTC').tz_convert(tz) if preserve_UTC: if tz == 'UTC': values = list(values) elif coerce: values = np.asarray(values, dtype='M8[ns]') return values
python
def _set_tz(values, tz, preserve_UTC=False, coerce=False): """ coerce the values to a DatetimeIndex if tz is set preserve the input shape if possible Parameters ---------- values : ndarray tz : string/pickled tz object preserve_UTC : boolean, preserve the UTC of the result coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray """ if tz is not None: name = getattr(values, 'name', None) values = values.ravel() tz = timezones.get_timezone(_ensure_decoded(tz)) values = DatetimeIndex(values, name=name) if values.tz is None: values = values.tz_localize('UTC').tz_convert(tz) if preserve_UTC: if tz == 'UTC': values = list(values) elif coerce: values = np.asarray(values, dtype='M8[ns]') return values
[ "def", "_set_tz", "(", "values", ",", "tz", ",", "preserve_UTC", "=", "False", ",", "coerce", "=", "False", ")", ":", "if", "tz", "is", "not", "None", ":", "name", "=", "getattr", "(", "values", ",", "'name'", ",", "None", ")", "values", "=", "values", ".", "ravel", "(", ")", "tz", "=", "timezones", ".", "get_timezone", "(", "_ensure_decoded", "(", "tz", ")", ")", "values", "=", "DatetimeIndex", "(", "values", ",", "name", "=", "name", ")", "if", "values", ".", "tz", "is", "None", ":", "values", "=", "values", ".", "tz_localize", "(", "'UTC'", ")", ".", "tz_convert", "(", "tz", ")", "if", "preserve_UTC", ":", "if", "tz", "==", "'UTC'", ":", "values", "=", "list", "(", "values", ")", "elif", "coerce", ":", "values", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", "'M8[ns]'", ")", "return", "values" ]
coerce the values to a DatetimeIndex if tz is set; preserve the input shape if possible Parameters ---------- values : ndarray tz : string/pickled tz object preserve_UTC : boolean, preserve the UTC of the result coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
[ "coerce", "the", "values", "to", "a", "DatetimeIndex", "if", "tz", "is", "set", "preserve", "the", "input", "shape", "if", "possible" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L4364-L4390
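A public-API sketch of the conversion that _set_tz performs on stored UTC timestamps; the timestamps and the target zone are made up.

import pandas as pd

naive = pd.DatetimeIndex(['2019-01-01 12:00', '2019-01-02 12:00'])
# Stored values are treated as UTC, then converted to the recorded time zone.
localized = naive.tz_localize('UTC').tz_convert('US/Eastern')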
20,076
pandas-dev/pandas
pandas/io/pytables.py
_convert_string_array
def _convert_string_array(data, encoding, errors, itemsize=None): """ we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed """ # encode if needed if encoding is not None and len(data): data = Series(data.ravel()).str.encode( encoding, errors).values.reshape(data.shape) # create the sized dtype if itemsize is None: ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype="S{size}".format(size=itemsize)) return data
python
def _convert_string_array(data, encoding, errors, itemsize=None): """ we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed """ # encode if needed if encoding is not None and len(data): data = Series(data.ravel()).str.encode( encoding, errors).values.reshape(data.shape) # create the sized dtype if itemsize is None: ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype="S{size}".format(size=itemsize)) return data
[ "def", "_convert_string_array", "(", "data", ",", "encoding", ",", "errors", ",", "itemsize", "=", "None", ")", ":", "# encode if needed", "if", "encoding", "is", "not", "None", "and", "len", "(", "data", ")", ":", "data", "=", "Series", "(", "data", ".", "ravel", "(", ")", ")", ".", "str", ".", "encode", "(", "encoding", ",", "errors", ")", ".", "values", ".", "reshape", "(", "data", ".", "shape", ")", "# create the sized dtype", "if", "itemsize", "is", "None", ":", "ensured", "=", "ensure_object", "(", "data", ".", "ravel", "(", ")", ")", "itemsize", "=", "max", "(", "1", ",", "libwriters", ".", "max_len_string_array", "(", "ensured", ")", ")", "data", "=", "np", ".", "asarray", "(", "data", ",", "dtype", "=", "\"S{size}\"", ".", "format", "(", "size", "=", "itemsize", ")", ")", "return", "data" ]
we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed
[ "we", "take", "a", "string", "-", "like", "that", "is", "object", "dtype", "and", "coerce", "to", "a", "fixed", "size", "string", "type" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L4521-L4549
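A small illustration of the fixed-width conversion described above; the private import path is again an assumption:

import numpy as np
from pandas.io.pytables import _convert_string_array  # private helper

arr = np.array(['a', 'longer', 'string values'], dtype=object)
fixed = _convert_string_array(arr, encoding='utf-8', errors='strict')
# fixed now has a bytes dtype sized to the longest encoded string, e.g. dtype('S13')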
20,077
pandas-dev/pandas
pandas/io/pytables.py
_unconvert_string_array
def _unconvert_string_array(data, nan_rep=None, encoding=None, errors='strict'): """ inverse of _convert_string_array Parameters ---------- data : fixed length string dtyped array nan_rep : the storage repr of NaN, optional encoding : the encoding of the data, optional errors : handler for encoding errors, default 'strict' Returns ------- an object array of the decoded data """ shape = data.shape data = np.asarray(data.ravel(), dtype=object) # guard against a None encoding (because of a legacy # where the passed encoding is actually None) encoding = _ensure_encoding(encoding) if encoding is not None and len(data): itemsize = libwriters.max_len_string_array(ensure_object(data)) dtype = "U{0}".format(itemsize) if isinstance(data[0], bytes): data = Series(data).str.decode(encoding, errors=errors).values else: data = data.astype(dtype, copy=False).astype(object, copy=False) if nan_rep is None: nan_rep = 'nan' data = libwriters.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape)
python
def _unconvert_string_array(data, nan_rep=None, encoding=None, errors='strict'): """ inverse of _convert_string_array Parameters ---------- data : fixed length string dtyped array nan_rep : the storage repr of NaN, optional encoding : the encoding of the data, optional errors : handler for encoding errors, default 'strict' Returns ------- an object array of the decoded data """ shape = data.shape data = np.asarray(data.ravel(), dtype=object) # guard against a None encoding (because of a legacy # where the passed encoding is actually None) encoding = _ensure_encoding(encoding) if encoding is not None and len(data): itemsize = libwriters.max_len_string_array(ensure_object(data)) dtype = "U{0}".format(itemsize) if isinstance(data[0], bytes): data = Series(data).str.decode(encoding, errors=errors).values else: data = data.astype(dtype, copy=False).astype(object, copy=False) if nan_rep is None: nan_rep = 'nan' data = libwriters.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape)
[ "def", "_unconvert_string_array", "(", "data", ",", "nan_rep", "=", "None", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ")", ":", "shape", "=", "data", ".", "shape", "data", "=", "np", ".", "asarray", "(", "data", ".", "ravel", "(", ")", ",", "dtype", "=", "object", ")", "# guard against a None encoding (because of a legacy", "# where the passed encoding is actually None)", "encoding", "=", "_ensure_encoding", "(", "encoding", ")", "if", "encoding", "is", "not", "None", "and", "len", "(", "data", ")", ":", "itemsize", "=", "libwriters", ".", "max_len_string_array", "(", "ensure_object", "(", "data", ")", ")", "dtype", "=", "\"U{0}\"", ".", "format", "(", "itemsize", ")", "if", "isinstance", "(", "data", "[", "0", "]", ",", "bytes", ")", ":", "data", "=", "Series", "(", "data", ")", ".", "str", ".", "decode", "(", "encoding", ",", "errors", "=", "errors", ")", ".", "values", "else", ":", "data", "=", "data", ".", "astype", "(", "dtype", ",", "copy", "=", "False", ")", ".", "astype", "(", "object", ",", "copy", "=", "False", ")", "if", "nan_rep", "is", "None", ":", "nan_rep", "=", "'nan'", "data", "=", "libwriters", ".", "string_array_replace_from_nan_rep", "(", "data", ",", "nan_rep", ")", "return", "data", ".", "reshape", "(", "shape", ")" ]
inverse of _convert_string_array Parameters ---------- data : fixed length string dtyped array nan_rep : the storage repr of NaN, optional encoding : the encoding of the data, optional errors : handler for encoding errors, default 'strict' Returns ------- an object array of the decoded data
[ "inverse", "of", "_convert_string_array" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L4552-L4589
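An illustrative round trip with the previous helper, assuming the same private import path:

import numpy as np
from pandas.io.pytables import _convert_string_array, _unconvert_string_array  # private helpers

fixed = _convert_string_array(np.array(['a', 'bb'], dtype=object), 'utf-8', 'strict')
back = _unconvert_string_array(fixed, nan_rep='nan', encoding='utf-8')
# back is an object array of Python str; any stored nan_rep values are mapped back to NaN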
20,078
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.open
def open(self, mode='a', **kwargs): """ Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes """ tables = _tables() if self._mode != mode: # if we are changing a write mode to read, ok if self._mode in ['a', 'w'] and mode in ['r', 'r+']: pass elif mode in ['w']: # this would truncate, raise here if self.is_open: raise PossibleDataLossError( "Re-opening the file [{0}] with mode [{1}] " "will delete the current file!" .format(self._path, self._mode) ) self._mode = mode # close and reopen the handle if self.is_open: self.close() if self._complevel and self._complevel > 0: self._filters = _tables().Filters(self._complevel, self._complib, fletcher32=self._fletcher32) try: self._handle = tables.open_file(self._path, self._mode, **kwargs) except (IOError) as e: # pragma: no cover if 'can not be written' in str(e): print( 'Opening {path} in read-only mode'.format(path=self._path)) self._handle = tables.open_file(self._path, 'r', **kwargs) else: raise except (ValueError) as e: # trap PyTables >= 3.1 FILE_OPEN_POLICY exception # to provide an updated message if 'FILE_OPEN_POLICY' in str(e): e = ValueError( "PyTables [{version}] no longer supports opening multiple " "files\n" "even in read-only mode on this HDF5 version " "[{hdf_version}]. You can accept this\n" "and not open the same file multiple times at once,\n" "upgrade the HDF5 version, or downgrade to PyTables 3.0.0 " "which allows\n" "files to be opened multiple times at once\n" .format(version=tables.__version__, hdf_version=tables.get_hdf5_version())) raise e except (Exception) as e: # trying to read from a non-existent file causes an error which # is not part of IOError, make it one if self._mode == 'r' and 'Unable to open/create file' in str(e): raise IOError(str(e)) raise
python
def open(self, mode='a', **kwargs): """ Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes """ tables = _tables() if self._mode != mode: # if we are changing a write mode to read, ok if self._mode in ['a', 'w'] and mode in ['r', 'r+']: pass elif mode in ['w']: # this would truncate, raise here if self.is_open: raise PossibleDataLossError( "Re-opening the file [{0}] with mode [{1}] " "will delete the current file!" .format(self._path, self._mode) ) self._mode = mode # close and reopen the handle if self.is_open: self.close() if self._complevel and self._complevel > 0: self._filters = _tables().Filters(self._complevel, self._complib, fletcher32=self._fletcher32) try: self._handle = tables.open_file(self._path, self._mode, **kwargs) except (IOError) as e: # pragma: no cover if 'can not be written' in str(e): print( 'Opening {path} in read-only mode'.format(path=self._path)) self._handle = tables.open_file(self._path, 'r', **kwargs) else: raise except (ValueError) as e: # trap PyTables >= 3.1 FILE_OPEN_POLICY exception # to provide an updated message if 'FILE_OPEN_POLICY' in str(e): e = ValueError( "PyTables [{version}] no longer supports opening multiple " "files\n" "even in read-only mode on this HDF5 version " "[{hdf_version}]. You can accept this\n" "and not open the same file multiple times at once,\n" "upgrade the HDF5 version, or downgrade to PyTables 3.0.0 " "which allows\n" "files to be opened multiple times at once\n" .format(version=tables.__version__, hdf_version=tables.get_hdf5_version())) raise e except (Exception) as e: # trying to read from a non-existent file causes an error which # is not part of IOError, make it one if self._mode == 'r' and 'Unable to open/create file' in str(e): raise IOError(str(e)) raise
[ "def", "open", "(", "self", ",", "mode", "=", "'a'", ",", "*", "*", "kwargs", ")", ":", "tables", "=", "_tables", "(", ")", "if", "self", ".", "_mode", "!=", "mode", ":", "# if we are changing a write mode to read, ok", "if", "self", ".", "_mode", "in", "[", "'a'", ",", "'w'", "]", "and", "mode", "in", "[", "'r'", ",", "'r+'", "]", ":", "pass", "elif", "mode", "in", "[", "'w'", "]", ":", "# this would truncate, raise here", "if", "self", ".", "is_open", ":", "raise", "PossibleDataLossError", "(", "\"Re-opening the file [{0}] with mode [{1}] \"", "\"will delete the current file!\"", ".", "format", "(", "self", ".", "_path", ",", "self", ".", "_mode", ")", ")", "self", ".", "_mode", "=", "mode", "# close and reopen the handle", "if", "self", ".", "is_open", ":", "self", ".", "close", "(", ")", "if", "self", ".", "_complevel", "and", "self", ".", "_complevel", ">", "0", ":", "self", ".", "_filters", "=", "_tables", "(", ")", ".", "Filters", "(", "self", ".", "_complevel", ",", "self", ".", "_complib", ",", "fletcher32", "=", "self", ".", "_fletcher32", ")", "try", ":", "self", ".", "_handle", "=", "tables", ".", "open_file", "(", "self", ".", "_path", ",", "self", ".", "_mode", ",", "*", "*", "kwargs", ")", "except", "(", "IOError", ")", "as", "e", ":", "# pragma: no cover", "if", "'can not be written'", "in", "str", "(", "e", ")", ":", "print", "(", "'Opening {path} in read-only mode'", ".", "format", "(", "path", "=", "self", ".", "_path", ")", ")", "self", ".", "_handle", "=", "tables", ".", "open_file", "(", "self", ".", "_path", ",", "'r'", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "except", "(", "ValueError", ")", "as", "e", ":", "# trap PyTables >= 3.1 FILE_OPEN_POLICY exception", "# to provide an updated message", "if", "'FILE_OPEN_POLICY'", "in", "str", "(", "e", ")", ":", "e", "=", "ValueError", "(", "\"PyTables [{version}] no longer supports opening multiple \"", "\"files\\n\"", "\"even in read-only mode on this HDF5 version \"", "\"[{hdf_version}]. You can accept this\\n\"", "\"and not open the same file multiple times at once,\\n\"", "\"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 \"", "\"which allows\\n\"", "\"files to be opened multiple times at once\\n\"", ".", "format", "(", "version", "=", "tables", ".", "__version__", ",", "hdf_version", "=", "tables", ".", "get_hdf5_version", "(", ")", ")", ")", "raise", "e", "except", "(", "Exception", ")", "as", "e", ":", "# trying to read from a non-existent file causes an error which", "# is not part of IOError, make it one", "if", "self", ".", "_mode", "==", "'r'", "and", "'Unable to open/create file'", "in", "str", "(", "e", ")", ":", "raise", "IOError", "(", "str", "(", "e", ")", ")", "raise" ]
Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes
[ "Open", "the", "file", "in", "the", "specified", "mode" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L553-L624
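A hedged sketch of the mode-switching behaviour documented above; the file name 'store.h5' is illustrative:

import pandas as pd

store = pd.HDFStore('store.h5', mode='a')
store.close()
store.open(mode='r')   # reopen the same handle read-only
# asking for mode='w' while the store is still open would raise PossibleDataLossError
store.close()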
20,079
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.flush
def flush(self, fsync=False): """ Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere. """ if self._handle is not None: self._handle.flush() if fsync: try: os.fsync(self._handle.fileno()) except OSError: pass
python
def flush(self, fsync=False): """ Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere. """ if self._handle is not None: self._handle.flush() if fsync: try: os.fsync(self._handle.fileno()) except OSError: pass
[ "def", "flush", "(", "self", ",", "fsync", "=", "False", ")", ":", "if", "self", ".", "_handle", "is", "not", "None", ":", "self", ".", "_handle", ".", "flush", "(", ")", "if", "fsync", ":", "try", ":", "os", ".", "fsync", "(", "self", ".", "_handle", ".", "fileno", "(", ")", ")", "except", "OSError", ":", "pass" ]
Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere.
[ "Force", "all", "buffered", "modifications", "to", "be", "written", "to", "disk", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L643-L665
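A short usage sketch; the key 'df' and the file name are placeholders:

import pandas as pd

with pd.HDFStore('store.h5', mode='a') as store:
    store.put('df', pd.DataFrame({'A': [1, 2, 3]}))
    store.flush(fsync=True)  # flush PyTables buffers and ask the OS to sync; fsync errors are swallowed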
20,080
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.get
def get(self, key): """ Retrieve pandas object stored in file Parameters ---------- key : object Returns ------- obj : same type as object stored in file """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) return self._read_group(group)
python
def get(self, key): """ Retrieve pandas object stored in file Parameters ---------- key : object Returns ------- obj : same type as object stored in file """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) return self._read_group(group)
[ "def", "get", "(", "self", ",", "key", ")", ":", "group", "=", "self", ".", "get_node", "(", "key", ")", "if", "group", "is", "None", ":", "raise", "KeyError", "(", "'No object named {key} in the file'", ".", "format", "(", "key", "=", "key", ")", ")", "return", "self", ".", "_read_group", "(", "group", ")" ]
Retrieve pandas object stored in file Parameters ---------- key : object Returns ------- obj : same type as object stored in file
[ "Retrieve", "pandas", "object", "stored", "in", "file" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L667-L682
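Minimal usage, assuming a store file that already contains a key 'df':

import pandas as pd

with pd.HDFStore('store.h5', mode='r') as store:
    df = store.get('df')   # equivalent to store['df']; raises KeyError if the key is absent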
20,081
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.select
def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection columns : a list of columns that if not None, will limit the return columns iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : boolean, should automatically close the store when finished, default is False Returns ------- The selected object """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) # create the storer and axes where = _ensure_term(where, scope_level=1) s = self._create_storer(group) s.infer_axes() # function to call on iteration def func(_start, _stop, _where): return s.read(start=_start, stop=_stop, where=_where, columns=columns) # create the iterator it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result()
python
def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection columns : a list of columns that if not None, will limit the return columns iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : boolean, should automatically close the store when finished, default is False Returns ------- The selected object """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) # create the storer and axes where = _ensure_term(where, scope_level=1) s = self._create_storer(group) s.infer_axes() # function to call on iteration def func(_start, _stop, _where): return s.read(start=_start, stop=_stop, where=_where, columns=columns) # create the iterator it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result()
[ "def", "select", "(", "self", ",", "key", ",", "where", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "columns", "=", "None", ",", "iterator", "=", "False", ",", "chunksize", "=", "None", ",", "auto_close", "=", "False", ",", "*", "*", "kwargs", ")", ":", "group", "=", "self", ".", "get_node", "(", "key", ")", "if", "group", "is", "None", ":", "raise", "KeyError", "(", "'No object named {key} in the file'", ".", "format", "(", "key", "=", "key", ")", ")", "# create the storer and axes", "where", "=", "_ensure_term", "(", "where", ",", "scope_level", "=", "1", ")", "s", "=", "self", ".", "_create_storer", "(", "group", ")", "s", ".", "infer_axes", "(", ")", "# function to call on iteration", "def", "func", "(", "_start", ",", "_stop", ",", "_where", ")", ":", "return", "s", ".", "read", "(", "start", "=", "_start", ",", "stop", "=", "_stop", ",", "where", "=", "_where", ",", "columns", "=", "columns", ")", "# create the iterator", "it", "=", "TableIterator", "(", "self", ",", "s", ",", "func", ",", "where", "=", "where", ",", "nrows", "=", "s", ".", "nrows", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "iterator", "=", "iterator", ",", "chunksize", "=", "chunksize", ",", "auto_close", "=", "auto_close", ")", "return", "it", ".", "get_result", "(", ")" ]
Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection columns : a list of columns that if not None, will limit the return columns iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : boolean, should automatically close the store when finished, default is False Returns ------- The selected object
[ "Retrieve", "pandas", "object", "stored", "in", "file", "optionally", "based", "on", "where", "criteria" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L684-L727
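A sketch of a filtered read, assuming 'df' was written in table format with 'A' as a data column (all names are illustrative):

import pandas as pd

with pd.HDFStore('store.h5', mode='r') as store:
    sub = store.select('df', where='A > 0', columns=['A'])   # where/columns require table format
    total = 0
    for chunk in store.select('df', chunksize=100000):       # chunked read returns an iterator
        total += len(chunk)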
20,082
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.select_as_coordinates
def select_as_coordinates( self, key, where=None, start=None, stop=None, **kwargs): """ return the selection as an Index Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ where = _ensure_term(where, scope_level=1) return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs)
python
def select_as_coordinates( self, key, where=None, start=None, stop=None, **kwargs): """ return the selection as an Index Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ where = _ensure_term(where, scope_level=1) return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs)
[ "def", "select_as_coordinates", "(", "self", ",", "key", ",", "where", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "*", "*", "kwargs", ")", ":", "where", "=", "_ensure_term", "(", "where", ",", "scope_level", "=", "1", ")", "return", "self", ".", "get_storer", "(", "key", ")", ".", "read_coordinates", "(", "where", "=", "where", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "*", "*", "kwargs", ")" ]
return the selection as an Index Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection
[ "return", "the", "selection", "as", "an", "Index" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L729-L743
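Coordinates can be computed once and reused for the actual read; the key and column names are assumptions:

import pandas as pd

with pd.HDFStore('store.h5', mode='r') as store:
    coords = store.select_as_coordinates('df', 'A > 0')  # row numbers matching the criteria
    sub = store.select('df', where=coords)                # select exactly those rows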
20,083
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.select_column
def select_column(self, key, column, **kwargs): """ return a single column from the table. This is generally only useful to select an indexable Parameters ---------- key : object column: the column of interest Exceptions ---------- raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block) """ return self.get_storer(key).read_column(column=column, **kwargs)
python
def select_column(self, key, column, **kwargs): """ return a single column from the table. This is generally only useful to select an indexable Parameters ---------- key : object column: the column of interest Exceptions ---------- raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block) """ return self.get_storer(key).read_column(column=column, **kwargs)
[ "def", "select_column", "(", "self", ",", "key", ",", "column", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "get_storer", "(", "key", ")", ".", "read_column", "(", "column", "=", "column", ",", "*", "*", "kwargs", ")" ]
return a single column from the table. This is generally only useful to select an indexable Parameters ---------- key : object column: the column of interest Exceptions ---------- raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block)
[ "return", "a", "single", "column", "from", "the", "table", ".", "This", "is", "generally", "only", "useful", "to", "select", "an", "indexable" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L745-L763
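Minimal usage, assuming 'df' is a table and 'A' was stored as a data column:

import pandas as pd

with pd.HDFStore('store.h5', mode='r') as store:
    idx = store.select_column('df', 'index')  # the stored index is always an indexable
    col = store.select_column('df', 'A')      # raises ValueError if 'A' is not a data column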
20,084
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.select_as_multiple
def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas objects from multiple tables Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator Exceptions ---------- raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS """ # default to single select where = _ensure_term(where, scope_level=1) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, str): return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, **kwargs) if not isinstance(keys, (list, tuple)): raise TypeError("keys must be a list/tuple") if not len(keys): raise ValueError("keys must have a non-zero length") if selector is None: selector = keys[0] # collect the tables tbls = [self.get_storer(k) for k in keys] s = self.get_storer(selector) # validate rows nrows = None for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): if t is None: raise KeyError("Invalid table [{key}]".format(key=k)) if not t.is_table: raise TypeError( "object [{obj}] is not a table, and cannot be used in all " "select as multiple".format(obj=t.pathname) ) if nrows is None: nrows = t.nrows elif t.nrows != nrows: raise ValueError( "all tables must have exactly the same nrows!") # axis is the concentation axes axis = list({t.non_index_axes[0][0] for t in tbls})[0] def func(_start, _stop, _where): # retrieve the objs, _where is always passed as a set of # coordinates here objs = [t.read(where=_where, columns=columns, start=_start, stop=_stop, **kwargs) for t in tbls] # concat and return return concat(objs, axis=axis, verify_integrity=False)._consolidate() # create the iterator it = TableIterator(self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result(coordinates=True)
python
def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas objects from multiple tables Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator Exceptions ---------- raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS """ # default to single select where = _ensure_term(where, scope_level=1) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, str): return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, **kwargs) if not isinstance(keys, (list, tuple)): raise TypeError("keys must be a list/tuple") if not len(keys): raise ValueError("keys must have a non-zero length") if selector is None: selector = keys[0] # collect the tables tbls = [self.get_storer(k) for k in keys] s = self.get_storer(selector) # validate rows nrows = None for t, k in itertools.chain([(s, selector)], zip(tbls, keys)): if t is None: raise KeyError("Invalid table [{key}]".format(key=k)) if not t.is_table: raise TypeError( "object [{obj}] is not a table, and cannot be used in all " "select as multiple".format(obj=t.pathname) ) if nrows is None: nrows = t.nrows elif t.nrows != nrows: raise ValueError( "all tables must have exactly the same nrows!") # axis is the concentation axes axis = list({t.non_index_axes[0][0] for t in tbls})[0] def func(_start, _stop, _where): # retrieve the objs, _where is always passed as a set of # coordinates here objs = [t.read(where=_where, columns=columns, start=_start, stop=_stop, **kwargs) for t in tbls] # concat and return return concat(objs, axis=axis, verify_integrity=False)._consolidate() # create the iterator it = TableIterator(self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result(coordinates=True)
[ "def", "select_as_multiple", "(", "self", ",", "keys", ",", "where", "=", "None", ",", "selector", "=", "None", ",", "columns", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "iterator", "=", "False", ",", "chunksize", "=", "None", ",", "auto_close", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# default to single select", "where", "=", "_ensure_term", "(", "where", ",", "scope_level", "=", "1", ")", "if", "isinstance", "(", "keys", ",", "(", "list", ",", "tuple", ")", ")", "and", "len", "(", "keys", ")", "==", "1", ":", "keys", "=", "keys", "[", "0", "]", "if", "isinstance", "(", "keys", ",", "str", ")", ":", "return", "self", ".", "select", "(", "key", "=", "keys", ",", "where", "=", "where", ",", "columns", "=", "columns", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "iterator", "=", "iterator", ",", "chunksize", "=", "chunksize", ",", "*", "*", "kwargs", ")", "if", "not", "isinstance", "(", "keys", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "\"keys must be a list/tuple\"", ")", "if", "not", "len", "(", "keys", ")", ":", "raise", "ValueError", "(", "\"keys must have a non-zero length\"", ")", "if", "selector", "is", "None", ":", "selector", "=", "keys", "[", "0", "]", "# collect the tables", "tbls", "=", "[", "self", ".", "get_storer", "(", "k", ")", "for", "k", "in", "keys", "]", "s", "=", "self", ".", "get_storer", "(", "selector", ")", "# validate rows", "nrows", "=", "None", "for", "t", ",", "k", "in", "itertools", ".", "chain", "(", "[", "(", "s", ",", "selector", ")", "]", ",", "zip", "(", "tbls", ",", "keys", ")", ")", ":", "if", "t", "is", "None", ":", "raise", "KeyError", "(", "\"Invalid table [{key}]\"", ".", "format", "(", "key", "=", "k", ")", ")", "if", "not", "t", ".", "is_table", ":", "raise", "TypeError", "(", "\"object [{obj}] is not a table, and cannot be used in all \"", "\"select as multiple\"", ".", "format", "(", "obj", "=", "t", ".", "pathname", ")", ")", "if", "nrows", "is", "None", ":", "nrows", "=", "t", ".", "nrows", "elif", "t", ".", "nrows", "!=", "nrows", ":", "raise", "ValueError", "(", "\"all tables must have exactly the same nrows!\"", ")", "# axis is the concentation axes", "axis", "=", "list", "(", "{", "t", ".", "non_index_axes", "[", "0", "]", "[", "0", "]", "for", "t", "in", "tbls", "}", ")", "[", "0", "]", "def", "func", "(", "_start", ",", "_stop", ",", "_where", ")", ":", "# retrieve the objs, _where is always passed as a set of", "# coordinates here", "objs", "=", "[", "t", ".", "read", "(", "where", "=", "_where", ",", "columns", "=", "columns", ",", "start", "=", "_start", ",", "stop", "=", "_stop", ",", "*", "*", "kwargs", ")", "for", "t", "in", "tbls", "]", "# concat and return", "return", "concat", "(", "objs", ",", "axis", "=", "axis", ",", "verify_integrity", "=", "False", ")", ".", "_consolidate", "(", ")", "# create the iterator", "it", "=", "TableIterator", "(", "self", ",", "s", ",", "func", ",", "where", "=", "where", ",", "nrows", "=", "nrows", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "iterator", "=", "iterator", ",", "chunksize", "=", "chunksize", ",", "auto_close", "=", "auto_close", ")", "return", "it", ".", "get_result", "(", "coordinates", "=", "True", ")" ]
Retrieve pandas objects from multiple tables Parameters ---------- keys : a list of the tables selector : the table to apply the where criteria (defaults to keys[0] if not supplied) columns : the columns I want back start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator Exceptions ---------- raises KeyError if keys or selector is not found or keys is empty raises TypeError if keys is not a list or tuple raises ValueError if the tables are not ALL THE SAME DIMENSIONS
[ "Retrieve", "pandas", "objects", "from", "multiple", "tables" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L765-L846
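A sketch of a multi-table read; 'df1' and 'df2' are assumed to be tables with identical row counts:

import pandas as pd

with pd.HDFStore('store.h5', mode='r') as store:
    # the where clause is evaluated against the selector table, here 'df1'
    combined = store.select_as_multiple(['df1', 'df2'], where='A > 0', selector='df1')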
20,085
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.put
def put(self, key, value, format=None, append=False, **kwargs): """ Store object in HDFStore Parameters ---------- key : object value : {Series, DataFrame} format : 'fixed(f)|table(t)', default is 'fixed' fixed(f) : Fixed format Fast writing/reading. Not-appendable, nor searchable table(t) : Table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default False This will force Table format, append the input data to the existing. data_columns : list of columns to create as data columns, or True to use all columns. See `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' """ if format is None: format = get_option("io.hdf.default_format") or 'fixed' kwargs = self._validate_format(format, kwargs) self._write_to_group(key, value, append=append, **kwargs)
python
def put(self, key, value, format=None, append=False, **kwargs): """ Store object in HDFStore Parameters ---------- key : object value : {Series, DataFrame} format : 'fixed(f)|table(t)', default is 'fixed' fixed(f) : Fixed format Fast writing/reading. Not-appendable, nor searchable table(t) : Table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default False This will force Table format, append the input data to the existing. data_columns : list of columns to create as data columns, or True to use all columns. See `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' """ if format is None: format = get_option("io.hdf.default_format") or 'fixed' kwargs = self._validate_format(format, kwargs) self._write_to_group(key, value, append=append, **kwargs)
[ "def", "put", "(", "self", ",", "key", ",", "value", ",", "format", "=", "None", ",", "append", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "format", "is", "None", ":", "format", "=", "get_option", "(", "\"io.hdf.default_format\"", ")", "or", "'fixed'", "kwargs", "=", "self", ".", "_validate_format", "(", "format", ",", "kwargs", ")", "self", ".", "_write_to_group", "(", "key", ",", "value", ",", "append", "=", "append", ",", "*", "*", "kwargs", ")" ]
Store object in HDFStore Parameters ---------- key : object value : {Series, DataFrame} format : 'fixed(f)|table(t)', default is 'fixed' fixed(f) : Fixed format Fast writing/reading. Not-appendable, nor searchable table(t) : Table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default False This will force Table format, append the input data to the existing. data_columns : list of columns to create as data columns, or True to use all columns. See `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table'
[ "Store", "object", "in", "HDFStore" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L848-L876
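A short sketch contrasting the two formats; key names are placeholders:

import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'B': ['x', 'y']})
with pd.HDFStore('store.h5', mode='w') as store:
    store.put('fixed_df', df)                                       # fixed: fast, not queryable
    store.put('table_df', df, format='table', data_columns=['A'])   # table: supports where= queries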
20,086
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.remove
def remove(self, key, where=None, start=None, stop=None): """ Remove pandas object partially by specifying the where condition Parameters ---------- key : string Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Exceptions ---------- raises KeyError if key is not a valid store """ where = _ensure_term(where, scope_level=1) try: s = self.get_storer(key) except KeyError: # the key is not a valid store, re-raising KeyError raise except Exception: if where is not None: raise ValueError( "trying to remove a node with a non-None where clause!") # we are actually trying to remove a node (with children) s = self.get_node(key) if s is not None: s._f_remove(recursive=True) return None # remove the node if com._all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table else: if not s.is_table: raise ValueError( 'can only remove with where on objects written as tables') return s.delete(where=where, start=start, stop=stop)
python
def remove(self, key, where=None, start=None, stop=None): """ Remove pandas object partially by specifying the where condition Parameters ---------- key : string Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Exceptions ---------- raises KeyError if key is not a valid store """ where = _ensure_term(where, scope_level=1) try: s = self.get_storer(key) except KeyError: # the key is not a valid store, re-raising KeyError raise except Exception: if where is not None: raise ValueError( "trying to remove a node with a non-None where clause!") # we are actually trying to remove a node (with children) s = self.get_node(key) if s is not None: s._f_remove(recursive=True) return None # remove the node if com._all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table else: if not s.is_table: raise ValueError( 'can only remove with where on objects written as tables') return s.delete(where=where, start=start, stop=stop)
[ "def", "remove", "(", "self", ",", "key", ",", "where", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ")", ":", "where", "=", "_ensure_term", "(", "where", ",", "scope_level", "=", "1", ")", "try", ":", "s", "=", "self", ".", "get_storer", "(", "key", ")", "except", "KeyError", ":", "# the key is not a valid store, re-raising KeyError", "raise", "except", "Exception", ":", "if", "where", "is", "not", "None", ":", "raise", "ValueError", "(", "\"trying to remove a node with a non-None where clause!\"", ")", "# we are actually trying to remove a node (with children)", "s", "=", "self", ".", "get_node", "(", "key", ")", "if", "s", "is", "not", "None", ":", "s", ".", "_f_remove", "(", "recursive", "=", "True", ")", "return", "None", "# remove the node", "if", "com", ".", "_all_none", "(", "where", ",", "start", ",", "stop", ")", ":", "s", ".", "group", ".", "_f_remove", "(", "recursive", "=", "True", ")", "# delete from the table", "else", ":", "if", "not", "s", ".", "is_table", ":", "raise", "ValueError", "(", "'can only remove with where on objects written as tables'", ")", "return", "s", ".", "delete", "(", "where", "=", "where", ",", "start", "=", "start", ",", "stop", "=", "stop", ")" ]
Remove pandas object partially by specifying the where condition Parameters ---------- key : string Node to remove or delete rows from where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection Returns ------- number of rows removed (or None if not a Table) Exceptions ---------- raises KeyError if key is not a valid store
[ "Remove", "pandas", "object", "partially", "by", "specifying", "the", "where", "condition" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L878-L926
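Illustrative use of both paths described above, whole-node removal and row-wise deletion:

import pandas as pd

with pd.HDFStore('store.h5', mode='a') as store:
    n = store.remove('table_df', where='A > 1')  # table format only; returns the number of rows removed
    store.remove('fixed_df')                     # no where clause: the whole node is dropped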
20,087
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.append
def append(self, key, value, format=None, append=True, columns=None, dropna=None, **kwargs): """ Append to Table in file. Node must already exist and be Table format. Parameters ---------- key : object value : {Series, DataFrame} format : 'table' is the default table(t) : table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default True, append the input data to the existing data_columns : list of columns, or True, default None List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__. min_itemsize : dict of columns that specify minimum string sizes nan_rep : string to use as string nan represenation chunksize : size to chunk the writing expectedrows : expected TOTAL row size of this table encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' Notes ----- Does *not* check if data being appended overlaps with existing data in the table, so be careful """ if columns is not None: raise TypeError("columns is not a supported keyword in append, " "try data_columns") if dropna is None: dropna = get_option("io.hdf.dropna_table") if format is None: format = get_option("io.hdf.default_format") or 'table' kwargs = self._validate_format(format, kwargs) self._write_to_group(key, value, append=append, dropna=dropna, **kwargs)
python
def append(self, key, value, format=None, append=True, columns=None, dropna=None, **kwargs): """ Append to Table in file. Node must already exist and be Table format. Parameters ---------- key : object value : {Series, DataFrame} format : 'table' is the default table(t) : table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default True, append the input data to the existing data_columns : list of columns, or True, default None List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__. min_itemsize : dict of columns that specify minimum string sizes nan_rep : string to use as string nan represenation chunksize : size to chunk the writing expectedrows : expected TOTAL row size of this table encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' Notes ----- Does *not* check if data being appended overlaps with existing data in the table, so be careful """ if columns is not None: raise TypeError("columns is not a supported keyword in append, " "try data_columns") if dropna is None: dropna = get_option("io.hdf.dropna_table") if format is None: format = get_option("io.hdf.default_format") or 'table' kwargs = self._validate_format(format, kwargs) self._write_to_group(key, value, append=append, dropna=dropna, **kwargs)
[ "def", "append", "(", "self", ",", "key", ",", "value", ",", "format", "=", "None", ",", "append", "=", "True", ",", "columns", "=", "None", ",", "dropna", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "columns", "is", "not", "None", ":", "raise", "TypeError", "(", "\"columns is not a supported keyword in append, \"", "\"try data_columns\"", ")", "if", "dropna", "is", "None", ":", "dropna", "=", "get_option", "(", "\"io.hdf.dropna_table\"", ")", "if", "format", "is", "None", ":", "format", "=", "get_option", "(", "\"io.hdf.default_format\"", ")", "or", "'table'", "kwargs", "=", "self", ".", "_validate_format", "(", "format", ",", "kwargs", ")", "self", ".", "_write_to_group", "(", "key", ",", "value", ",", "append", "=", "append", ",", "dropna", "=", "dropna", ",", "*", "*", "kwargs", ")" ]
Append to Table in file. Node must already exist and be Table format. Parameters ---------- key : object value : {Series, DataFrame} format : 'table' is the default table(t) : table format Write as a PyTables Table structure which may perform worse but allow more flexible operations like searching / selecting subsets of the data append : boolean, default True, append the input data to the existing data_columns : list of columns, or True, default None List of columns to create as indexed data columns for on-disk queries, or True to use all columns. By default only the axes of the object are indexed. See `here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__. min_itemsize : dict of columns that specify minimum string sizes nan_rep : string to use as string nan representation chunksize : size to chunk the writing expectedrows : expected TOTAL row size of this table encoding : default None, provide an encoding for strings dropna : boolean, default False, do not write an ALL nan row to the store settable by the option 'io.hdf.dropna_table' Notes ----- Does *not* check if data being appended overlaps with existing data in the table, so be careful
[ "Append", "to", "Table", "in", "file", ".", "Node", "must", "already", "exist", "and", "be", "Table", "format", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L928-L973
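A minimal append, assuming 'table_df' already exists as a table with compatible columns:

import pandas as pd

df2 = pd.DataFrame({'A': [3, 4], 'B': ['zz', 'a longer string']})
with pd.HDFStore('store.h5', mode='a') as store:
    store.append('table_df', df2, data_columns=['A'], min_itemsize={'B': 30})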
20,088
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.append_to_multiple
def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, dropna=False, **kwargs): """ Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designed as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted """ if axes is not None: raise TypeError("axes is currently not accepted as a parameter to" " append_to_multiple; you can create the " "tables independently instead") if not isinstance(d, dict): raise ValueError( "append_to_multiple must have a dictionary specified as the " "way to split the value" ) if selector not in d: raise ValueError( "append_to_multiple requires a selector that is in passed dict" ) # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] # figure out how to split the value remain_key = None remain_values = [] for k, v in d.items(): if v is None: if remain_key is not None: raise ValueError( "append_to_multiple can only have one value in d that " "is None" ) remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) # data_columns if data_columns is None: data_columns = d[selector] # ensure rows are synchronized across the tables if dropna: idxs = (value[cols].dropna(how='all').index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.loc[valid_index] # append for k, v in d.items(): dc = data_columns if k == selector else None # compute the val val = value.reindex(v, axis=axis) self.append(k, val, data_columns=dc, **kwargs)
python
def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, dropna=False, **kwargs): """ Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designed as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted """ if axes is not None: raise TypeError("axes is currently not accepted as a parameter to" " append_to_multiple; you can create the " "tables independently instead") if not isinstance(d, dict): raise ValueError( "append_to_multiple must have a dictionary specified as the " "way to split the value" ) if selector not in d: raise ValueError( "append_to_multiple requires a selector that is in passed dict" ) # figure out the splitting axis (the non_index_axis) axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0] # figure out how to split the value remain_key = None remain_values = [] for k, v in d.items(): if v is None: if remain_key is not None: raise ValueError( "append_to_multiple can only have one value in d that " "is None" ) remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) # data_columns if data_columns is None: data_columns = d[selector] # ensure rows are synchronized across the tables if dropna: idxs = (value[cols].dropna(how='all').index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.loc[valid_index] # append for k, v in d.items(): dc = data_columns if k == selector else None # compute the val val = value.reindex(v, axis=axis) self.append(k, val, data_columns=dc, **kwargs)
[ "def", "append_to_multiple", "(", "self", ",", "d", ",", "value", ",", "selector", ",", "data_columns", "=", "None", ",", "axes", "=", "None", ",", "dropna", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "axes", "is", "not", "None", ":", "raise", "TypeError", "(", "\"axes is currently not accepted as a parameter to\"", "\" append_to_multiple; you can create the \"", "\"tables independently instead\"", ")", "if", "not", "isinstance", "(", "d", ",", "dict", ")", ":", "raise", "ValueError", "(", "\"append_to_multiple must have a dictionary specified as the \"", "\"way to split the value\"", ")", "if", "selector", "not", "in", "d", ":", "raise", "ValueError", "(", "\"append_to_multiple requires a selector that is in passed dict\"", ")", "# figure out the splitting axis (the non_index_axis)", "axis", "=", "list", "(", "set", "(", "range", "(", "value", ".", "ndim", ")", ")", "-", "set", "(", "_AXES_MAP", "[", "type", "(", "value", ")", "]", ")", ")", "[", "0", "]", "# figure out how to split the value", "remain_key", "=", "None", "remain_values", "=", "[", "]", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "if", "v", "is", "None", ":", "if", "remain_key", "is", "not", "None", ":", "raise", "ValueError", "(", "\"append_to_multiple can only have one value in d that \"", "\"is None\"", ")", "remain_key", "=", "k", "else", ":", "remain_values", ".", "extend", "(", "v", ")", "if", "remain_key", "is", "not", "None", ":", "ordered", "=", "value", ".", "axes", "[", "axis", "]", "ordd", "=", "ordered", ".", "difference", "(", "Index", "(", "remain_values", ")", ")", "ordd", "=", "sorted", "(", "ordered", ".", "get_indexer", "(", "ordd", ")", ")", "d", "[", "remain_key", "]", "=", "ordered", ".", "take", "(", "ordd", ")", "# data_columns", "if", "data_columns", "is", "None", ":", "data_columns", "=", "d", "[", "selector", "]", "# ensure rows are synchronized across the tables", "if", "dropna", ":", "idxs", "=", "(", "value", "[", "cols", "]", ".", "dropna", "(", "how", "=", "'all'", ")", ".", "index", "for", "cols", "in", "d", ".", "values", "(", ")", ")", "valid_index", "=", "next", "(", "idxs", ")", "for", "index", "in", "idxs", ":", "valid_index", "=", "valid_index", ".", "intersection", "(", "index", ")", "value", "=", "value", ".", "loc", "[", "valid_index", "]", "# append", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", ":", "dc", "=", "data_columns", "if", "k", "==", "selector", "else", "None", "# compute the val", "val", "=", "value", ".", "reindex", "(", "v", ",", "axis", "=", "axis", ")", "self", ".", "append", "(", "k", ",", "val", ",", "data_columns", "=", "dc", ",", "*", "*", "kwargs", ")" ]
Append to multiple tables Parameters ---------- d : a dict of table_name to table_columns, None is acceptable as the values of one node (this will get all the remaining columns) value : a pandas object selector : a string that designates the indexable table; all of its columns will be designated as data_columns, unless data_columns is passed, in which case these are used data_columns : list of columns to create as data columns, or True to use all columns dropna : if evaluates to True, drop rows from all tables if any single row in each table has all NaN. Default False. Notes ----- axes parameter is currently not accepted
[ "Append", "to", "multiple", "tables" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L975-L1055
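A sketch of splitting one frame across two tables; 'left' and 'right' are illustrative key names:

import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': ['x', 'y']})
with pd.HDFStore('store.h5', mode='w') as store:
    # 'left' keeps A and B; the None entry sends the remaining column C to 'right'
    store.append_to_multiple({'left': ['A', 'B'], 'right': None}, df, selector='left')
    joined = store.select_as_multiple(['left', 'right'], selector='left')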
20,089
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.walk
def walk(self, where="/"): """ Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path` """ _tables() self._check_if_open() for g in self._handle.walk_groups(where): if getattr(g._v_attrs, 'pandas_type', None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, 'pandas_type', None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip('/'), groups, leaves)
python
def walk(self, where="/"): """ Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path` """ _tables() self._check_if_open() for g in self._handle.walk_groups(where): if getattr(g._v_attrs, 'pandas_type', None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, 'pandas_type', None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip('/'), groups, leaves)
[ "def", "walk", "(", "self", ",", "where", "=", "\"/\"", ")", ":", "_tables", "(", ")", "self", ".", "_check_if_open", "(", ")", "for", "g", "in", "self", ".", "_handle", ".", "walk_groups", "(", "where", ")", ":", "if", "getattr", "(", "g", ".", "_v_attrs", ",", "'pandas_type'", ",", "None", ")", "is", "not", "None", ":", "continue", "groups", "=", "[", "]", "leaves", "=", "[", "]", "for", "child", "in", "g", ".", "_v_children", ".", "values", "(", ")", ":", "pandas_type", "=", "getattr", "(", "child", ".", "_v_attrs", ",", "'pandas_type'", ",", "None", ")", "if", "pandas_type", "is", "None", ":", "if", "isinstance", "(", "child", ",", "_table_mod", ".", "group", ".", "Group", ")", ":", "groups", ".", "append", "(", "child", ".", "_v_name", ")", "else", ":", "leaves", ".", "append", "(", "child", ".", "_v_name", ")", "yield", "(", "g", ".", "_v_pathname", ".", "rstrip", "(", "'/'", ")", ",", "groups", ",", "leaves", ")" ]
Walk the pytables group hierarchy for pandas objects This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The `where` group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. .. versionadded:: 0.24.0 Parameters ---------- where : str, optional Group where to start walking. If not supplied, the root group is used. Yields ------ path : str Full path to a group (without trailing '/') groups : list of str names of the groups contained in `path` leaves : list of str names of the pandas objects contained in `path`
[ "Walk", "the", "pytables", "group", "hierarchy", "for", "pandas", "objects" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1095-L1139
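A short sketch of HDFStore.walk; the store contents and file name are made up for illustration.

    import pandas as pd

    with pd.HDFStore('store.h5', mode='w') as store:
        store.put('foo/bar', pd.Series([1, 2, 3]))
        store.put('baz', pd.DataFrame({'A': [1, 2]}))
        # Each iteration yields (group path, subgroup names, pandas
        # object names), starting from the root group.
        for path, groups, leaves in store.walk():
            print(path, groups, leaves)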
20,090
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.get_node
def get_node(self, key): """ return the node with the key or None if it does not exist """ self._check_if_open() try: if not key.startswith('/'): key = '/' + key return self._handle.get_node(self.root, key) except _table_mod.exceptions.NoSuchNodeError: return None
python
def get_node(self, key): """ return the node with the key or None if it does not exist """ self._check_if_open() try: if not key.startswith('/'): key = '/' + key return self._handle.get_node(self.root, key) except _table_mod.exceptions.NoSuchNodeError: return None
[ "def", "get_node", "(", "self", ",", "key", ")", ":", "self", ".", "_check_if_open", "(", ")", "try", ":", "if", "not", "key", ".", "startswith", "(", "'/'", ")", ":", "key", "=", "'/'", "+", "key", "return", "self", ".", "_handle", ".", "get_node", "(", "self", ".", "root", ",", "key", ")", "except", "_table_mod", ".", "exceptions", ".", "NoSuchNodeError", ":", "return", "None" ]
return the node with the key or None if it does not exist
[ "return", "the", "node", "with", "the", "key", "or", "None", "if", "it", "does", "not", "exist" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1141-L1149
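A small sketch of get_node; the file name and keys are illustrative.

    import pandas as pd

    with pd.HDFStore('store.h5', mode='w') as store:
        store.put('df', pd.DataFrame({'A': [1, 2]}))
        node = store.get_node('df')       # PyTables group for the key
        missing = store.get_node('nope')  # None, no exception raised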
20,091
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.get_storer
def get_storer(self, key): """ return the storer object for a key, raise if not in the file """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) s = self._create_storer(group) s.infer_axes() return s
python
def get_storer(self, key): """ return the storer object for a key, raise if not in the file """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) s = self._create_storer(group) s.infer_axes() return s
[ "def", "get_storer", "(", "self", ",", "key", ")", ":", "group", "=", "self", ".", "get_node", "(", "key", ")", "if", "group", "is", "None", ":", "raise", "KeyError", "(", "'No object named {key} in the file'", ".", "format", "(", "key", "=", "key", ")", ")", "s", "=", "self", ".", "_create_storer", "(", "group", ")", "s", ".", "infer_axes", "(", ")", "return", "s" ]
return the storer object for a key, raise if not in the file
[ "return", "the", "storer", "object", "for", "a", "key", "raise", "if", "not", "in", "the", "file" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1151-L1159
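A small sketch of get_storer, which (unlike get_node) raises KeyError for a missing key; names are illustrative.

    import pandas as pd

    with pd.HDFStore('store.h5', mode='w') as store:
        store.put('df', pd.DataFrame({'A': [1, 2], 'B': [3, 4]}),
                  format='table')
        s = store.get_storer('df')
        # storer metadata is available without reading the data itself
        print(s.is_table, s.nrows)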
20,092
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.copy
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None, complevel=None, fletcher32=False, overwrite=True): """ copy the existing store to a new file, upgrading in place Parameters ---------- propindexes: restore indexes in copied file (defaults to True) keys : list of keys to include in the copy (defaults to all) overwrite : overwrite (remove and replace) existing nodes in the new store (default is True) mode, complib, complevel, fletcher32 same as in HDFStore.__init__ Returns ------- open file handle of the new store """ new_store = HDFStore( file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32) if keys is None: keys = list(self.keys()) if not isinstance(keys, (tuple, list)): keys = [keys] for k in keys: s = self.get_storer(k) if s is not None: if k in new_store: if overwrite: new_store.remove(k) data = self.select(k) if s.is_table: index = False if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append( k, data, index=index, data_columns=getattr(s, 'data_columns', None), encoding=s.encoding ) else: new_store.put(k, data, encoding=s.encoding) return new_store
python
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None, complevel=None, fletcher32=False, overwrite=True): """ copy the existing store to a new file, upgrading in place Parameters ---------- propindexes: restore indexes in copied file (defaults to True) keys : list of keys to include in the copy (defaults to all) overwrite : overwrite (remove and replace) existing nodes in the new store (default is True) mode, complib, complevel, fletcher32 same as in HDFStore.__init__ Returns ------- open file handle of the new store """ new_store = HDFStore( file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32) if keys is None: keys = list(self.keys()) if not isinstance(keys, (tuple, list)): keys = [keys] for k in keys: s = self.get_storer(k) if s is not None: if k in new_store: if overwrite: new_store.remove(k) data = self.select(k) if s.is_table: index = False if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append( k, data, index=index, data_columns=getattr(s, 'data_columns', None), encoding=s.encoding ) else: new_store.put(k, data, encoding=s.encoding) return new_store
[ "def", "copy", "(", "self", ",", "file", ",", "mode", "=", "'w'", ",", "propindexes", "=", "True", ",", "keys", "=", "None", ",", "complib", "=", "None", ",", "complevel", "=", "None", ",", "fletcher32", "=", "False", ",", "overwrite", "=", "True", ")", ":", "new_store", "=", "HDFStore", "(", "file", ",", "mode", "=", "mode", ",", "complib", "=", "complib", ",", "complevel", "=", "complevel", ",", "fletcher32", "=", "fletcher32", ")", "if", "keys", "is", "None", ":", "keys", "=", "list", "(", "self", ".", "keys", "(", ")", ")", "if", "not", "isinstance", "(", "keys", ",", "(", "tuple", ",", "list", ")", ")", ":", "keys", "=", "[", "keys", "]", "for", "k", "in", "keys", ":", "s", "=", "self", ".", "get_storer", "(", "k", ")", "if", "s", "is", "not", "None", ":", "if", "k", "in", "new_store", ":", "if", "overwrite", ":", "new_store", ".", "remove", "(", "k", ")", "data", "=", "self", ".", "select", "(", "k", ")", "if", "s", ".", "is_table", ":", "index", "=", "False", "if", "propindexes", ":", "index", "=", "[", "a", ".", "name", "for", "a", "in", "s", ".", "axes", "if", "a", ".", "is_indexed", "]", "new_store", ".", "append", "(", "k", ",", "data", ",", "index", "=", "index", ",", "data_columns", "=", "getattr", "(", "s", ",", "'data_columns'", ",", "None", ")", ",", "encoding", "=", "s", ".", "encoding", ")", "else", ":", "new_store", ".", "put", "(", "k", ",", "data", ",", "encoding", "=", "s", ".", "encoding", ")", "return", "new_store" ]
copy the existing store to a new file, upgrading in place Parameters ---------- propindexes: restore indexes in copied file (defaults to True) keys : list of keys to include in the copy (defaults to all) overwrite : overwrite (remove and replace) existing nodes in the new store (default is True) mode, complib, complevel, fletcher32 same as in HDFStore.__init__ Returns ------- open file handle of the new store
[ "copy", "the", "existing", "store", "to", "a", "new", "file", "upgrading", "in", "place" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1161-L1210
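A sketch of copy; the source/target file names and compression settings are illustrative assumptions.

    import pandas as pd

    with pd.HDFStore('store.h5', mode='w') as store:
        store.put('df', pd.DataFrame({'A': [1, 2]}), format='table')
        # Write every key into a new, compressed file, recreating any
        # indexes on table columns (propindexes=True).
        new_store = store.copy('store_copy.h5', mode='w', propindexes=True,
                               complib='blosc', complevel=9)
        new_store.close()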
20,093
pandas-dev/pandas
pandas/io/pytables.py
HDFStore.info
def info(self): """ Print detailed information on the store. .. versionadded:: 0.21.0 """ output = '{type}\nFile path: {path}\n'.format( type=type(self), path=pprint_thing(self._path)) if self.is_open: lkeys = sorted(list(self.keys())) if len(lkeys): keys = [] values = [] for k in lkeys: try: s = self.get_storer(k) if s is not None: keys.append(pprint_thing(s.pathname or k)) values.append( pprint_thing(s or 'invalid_HDFStore node')) except Exception as detail: keys.append(k) values.append( "[invalid_HDFStore node: {detail}]".format( detail=pprint_thing(detail))) output += adjoin(12, keys, values) else: output += 'Empty' else: output += "File is CLOSED" return output
python
def info(self): """ Print detailed information on the store. .. versionadded:: 0.21.0 """ output = '{type}\nFile path: {path}\n'.format( type=type(self), path=pprint_thing(self._path)) if self.is_open: lkeys = sorted(list(self.keys())) if len(lkeys): keys = [] values = [] for k in lkeys: try: s = self.get_storer(k) if s is not None: keys.append(pprint_thing(s.pathname or k)) values.append( pprint_thing(s or 'invalid_HDFStore node')) except Exception as detail: keys.append(k) values.append( "[invalid_HDFStore node: {detail}]".format( detail=pprint_thing(detail))) output += adjoin(12, keys, values) else: output += 'Empty' else: output += "File is CLOSED" return output
[ "def", "info", "(", "self", ")", ":", "output", "=", "'{type}\\nFile path: {path}\\n'", ".", "format", "(", "type", "=", "type", "(", "self", ")", ",", "path", "=", "pprint_thing", "(", "self", ".", "_path", ")", ")", "if", "self", ".", "is_open", ":", "lkeys", "=", "sorted", "(", "list", "(", "self", ".", "keys", "(", ")", ")", ")", "if", "len", "(", "lkeys", ")", ":", "keys", "=", "[", "]", "values", "=", "[", "]", "for", "k", "in", "lkeys", ":", "try", ":", "s", "=", "self", ".", "get_storer", "(", "k", ")", "if", "s", "is", "not", "None", ":", "keys", ".", "append", "(", "pprint_thing", "(", "s", ".", "pathname", "or", "k", ")", ")", "values", ".", "append", "(", "pprint_thing", "(", "s", "or", "'invalid_HDFStore node'", ")", ")", "except", "Exception", "as", "detail", ":", "keys", ".", "append", "(", "k", ")", "values", ".", "append", "(", "\"[invalid_HDFStore node: {detail}]\"", ".", "format", "(", "detail", "=", "pprint_thing", "(", "detail", ")", ")", ")", "output", "+=", "adjoin", "(", "12", ",", "keys", ",", "values", ")", "else", ":", "output", "+=", "'Empty'", "else", ":", "output", "+=", "\"File is CLOSED\"", "return", "output" ]
Print detailed information on the store. .. versionadded:: 0.21.0
[ "Print", "detailed", "information", "on", "the", "store", "." ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1212-L1245
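A sketch of info; the file name and key are illustrative.

    import pandas as pd

    store = pd.HDFStore('store.h5', mode='w')
    store.put('df', pd.DataFrame({'A': [1, 2]}))
    print(store.info())   # file path plus one line per stored object
    store.close()
    print(store.info())   # now reports that the file is CLOSED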
20,094
pandas-dev/pandas
pandas/io/pytables.py
HDFStore._create_storer
def _create_storer(self, group, format=None, value=None, append=False, **kwargs): """ return a suitable class to operate """ def error(t): raise TypeError( "cannot properly create the storer for: [{t}] [group->" "{group},value->{value},format->{format},append->{append}," "kwargs->{kwargs}]".format(t=t, group=group, value=type(value), format=format, append=append, kwargs=kwargs)) pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None)) tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None)) # infer the pt from the passed value if pt is None: if value is None: _tables() if (getattr(group, 'table', None) or isinstance(group, _table_mod.table.Table)): pt = 'frame_table' tt = 'generic_table' else: raise TypeError( "cannot create a storer if the object is not existing " "nor a value are passed") else: try: pt = _TYPE_MAP[type(value)] except KeyError: error('_TYPE_MAP') # we are actually a table if format == 'table': pt += '_table' # a storer node if 'table' not in pt: try: return globals()[_STORER_MAP[pt]](self, group, **kwargs) except KeyError: error('_STORER_MAP') # existing node (and must be a table) if tt is None: # if we are a writer, determine the tt if value is not None: if pt == 'series_table': index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = 'appendable_series' elif index.nlevels > 1: tt = 'appendable_multiseries' elif pt == 'frame_table': index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = 'appendable_frame' elif index.nlevels > 1: tt = 'appendable_multiframe' elif pt == 'wide_table': tt = 'appendable_panel' elif pt == 'ndim_table': tt = 'appendable_ndim' else: # distiguish between a frame/table tt = 'legacy_panel' try: fields = group.table._v_attrs.fields if len(fields) == 1 and fields[0] == 'value': tt = 'legacy_frame' except IndexError: pass try: return globals()[_TABLE_MAP[tt]](self, group, **kwargs) except KeyError: error('_TABLE_MAP')
python
def _create_storer(self, group, format=None, value=None, append=False, **kwargs): """ return a suitable class to operate """ def error(t): raise TypeError( "cannot properly create the storer for: [{t}] [group->" "{group},value->{value},format->{format},append->{append}," "kwargs->{kwargs}]".format(t=t, group=group, value=type(value), format=format, append=append, kwargs=kwargs)) pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None)) tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None)) # infer the pt from the passed value if pt is None: if value is None: _tables() if (getattr(group, 'table', None) or isinstance(group, _table_mod.table.Table)): pt = 'frame_table' tt = 'generic_table' else: raise TypeError( "cannot create a storer if the object is not existing " "nor a value are passed") else: try: pt = _TYPE_MAP[type(value)] except KeyError: error('_TYPE_MAP') # we are actually a table if format == 'table': pt += '_table' # a storer node if 'table' not in pt: try: return globals()[_STORER_MAP[pt]](self, group, **kwargs) except KeyError: error('_STORER_MAP') # existing node (and must be a table) if tt is None: # if we are a writer, determine the tt if value is not None: if pt == 'series_table': index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = 'appendable_series' elif index.nlevels > 1: tt = 'appendable_multiseries' elif pt == 'frame_table': index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = 'appendable_frame' elif index.nlevels > 1: tt = 'appendable_multiframe' elif pt == 'wide_table': tt = 'appendable_panel' elif pt == 'ndim_table': tt = 'appendable_ndim' else: # distiguish between a frame/table tt = 'legacy_panel' try: fields = group.table._v_attrs.fields if len(fields) == 1 and fields[0] == 'value': tt = 'legacy_frame' except IndexError: pass try: return globals()[_TABLE_MAP[tt]](self, group, **kwargs) except KeyError: error('_TABLE_MAP')
[ "def", "_create_storer", "(", "self", ",", "group", ",", "format", "=", "None", ",", "value", "=", "None", ",", "append", "=", "False", ",", "*", "*", "kwargs", ")", ":", "def", "error", "(", "t", ")", ":", "raise", "TypeError", "(", "\"cannot properly create the storer for: [{t}] [group->\"", "\"{group},value->{value},format->{format},append->{append},\"", "\"kwargs->{kwargs}]\"", ".", "format", "(", "t", "=", "t", ",", "group", "=", "group", ",", "value", "=", "type", "(", "value", ")", ",", "format", "=", "format", ",", "append", "=", "append", ",", "kwargs", "=", "kwargs", ")", ")", "pt", "=", "_ensure_decoded", "(", "getattr", "(", "group", ".", "_v_attrs", ",", "'pandas_type'", ",", "None", ")", ")", "tt", "=", "_ensure_decoded", "(", "getattr", "(", "group", ".", "_v_attrs", ",", "'table_type'", ",", "None", ")", ")", "# infer the pt from the passed value", "if", "pt", "is", "None", ":", "if", "value", "is", "None", ":", "_tables", "(", ")", "if", "(", "getattr", "(", "group", ",", "'table'", ",", "None", ")", "or", "isinstance", "(", "group", ",", "_table_mod", ".", "table", ".", "Table", ")", ")", ":", "pt", "=", "'frame_table'", "tt", "=", "'generic_table'", "else", ":", "raise", "TypeError", "(", "\"cannot create a storer if the object is not existing \"", "\"nor a value are passed\"", ")", "else", ":", "try", ":", "pt", "=", "_TYPE_MAP", "[", "type", "(", "value", ")", "]", "except", "KeyError", ":", "error", "(", "'_TYPE_MAP'", ")", "# we are actually a table", "if", "format", "==", "'table'", ":", "pt", "+=", "'_table'", "# a storer node", "if", "'table'", "not", "in", "pt", ":", "try", ":", "return", "globals", "(", ")", "[", "_STORER_MAP", "[", "pt", "]", "]", "(", "self", ",", "group", ",", "*", "*", "kwargs", ")", "except", "KeyError", ":", "error", "(", "'_STORER_MAP'", ")", "# existing node (and must be a table)", "if", "tt", "is", "None", ":", "# if we are a writer, determine the tt", "if", "value", "is", "not", "None", ":", "if", "pt", "==", "'series_table'", ":", "index", "=", "getattr", "(", "value", ",", "'index'", ",", "None", ")", "if", "index", "is", "not", "None", ":", "if", "index", ".", "nlevels", "==", "1", ":", "tt", "=", "'appendable_series'", "elif", "index", ".", "nlevels", ">", "1", ":", "tt", "=", "'appendable_multiseries'", "elif", "pt", "==", "'frame_table'", ":", "index", "=", "getattr", "(", "value", ",", "'index'", ",", "None", ")", "if", "index", "is", "not", "None", ":", "if", "index", ".", "nlevels", "==", "1", ":", "tt", "=", "'appendable_frame'", "elif", "index", ".", "nlevels", ">", "1", ":", "tt", "=", "'appendable_multiframe'", "elif", "pt", "==", "'wide_table'", ":", "tt", "=", "'appendable_panel'", "elif", "pt", "==", "'ndim_table'", ":", "tt", "=", "'appendable_ndim'", "else", ":", "# distiguish between a frame/table", "tt", "=", "'legacy_panel'", "try", ":", "fields", "=", "group", ".", "table", ".", "_v_attrs", ".", "fields", "if", "len", "(", "fields", ")", "==", "1", "and", "fields", "[", "0", "]", "==", "'value'", ":", "tt", "=", "'legacy_frame'", "except", "IndexError", ":", "pass", "try", ":", "return", "globals", "(", ")", "[", "_TABLE_MAP", "[", "tt", "]", "]", "(", "self", ",", "group", ",", "*", "*", "kwargs", ")", "except", "KeyError", ":", "error", "(", "'_TABLE_MAP'", ")" ]
return a suitable class to operate
[ "return", "a", "suitable", "class", "to", "operate" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1265-L1350
20,095
pandas-dev/pandas
pandas/io/pytables.py
IndexCol.set_name
def set_name(self, name, kind_attr=None): """ set the name of this indexer """ self.name = name self.kind_attr = kind_attr or "{name}_kind".format(name=name) if self.cname is None: self.cname = name return self
python
def set_name(self, name, kind_attr=None): """ set the name of this indexer """ self.name = name self.kind_attr = kind_attr or "{name}_kind".format(name=name) if self.cname is None: self.cname = name return self
[ "def", "set_name", "(", "self", ",", "name", ",", "kind_attr", "=", "None", ")", ":", "self", ".", "name", "=", "name", "self", ".", "kind_attr", "=", "kind_attr", "or", "\"{name}_kind\"", ".", "format", "(", "name", "=", "name", ")", "if", "self", ".", "cname", "is", "None", ":", "self", ".", "cname", "=", "name", "return", "self" ]
set the name of this indexer
[ "set", "the", "name", "of", "this", "indexer" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1552-L1559
20,096
pandas-dev/pandas
pandas/io/pytables.py
IndexCol.set_pos
def set_pos(self, pos): """ set the position of this column in the Table """ self.pos = pos if pos is not None and self.typ is not None: self.typ._v_pos = pos return self
python
def set_pos(self, pos): """ set the position of this column in the Table """ self.pos = pos if pos is not None and self.typ is not None: self.typ._v_pos = pos return self
[ "def", "set_pos", "(", "self", ",", "pos", ")", ":", "self", ".", "pos", "=", "pos", "if", "pos", "is", "not", "None", "and", "self", ".", "typ", "is", "not", "None", ":", "self", ".", "typ", ".", "_v_pos", "=", "pos", "return", "self" ]
set the position of this column in the Table
[ "set", "the", "position", "of", "this", "column", "in", "the", "Table" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1567-L1572
20,097
pandas-dev/pandas
pandas/io/pytables.py
IndexCol.is_indexed
def is_indexed(self):
    """ return whether I am an indexed column """
    try:
        return getattr(self.table.cols, self.cname).is_indexed
    except AttributeError:
        return False
python
def is_indexed(self):
    """ return whether I am an indexed column """
    try:
        return getattr(self.table.cols, self.cname).is_indexed
    except AttributeError:
        return False
[ "def", "is_indexed", "(", "self", ")", ":", "try", ":", "return", "getattr", "(", "self", ".", "table", ".", "cols", ",", "self", ".", "cname", ")", ".", "is_indexed", "except", "AttributeError", ":", "False" ]
return whether I am an indexed column
[ "return", "whether", "I", "am", "an", "indexed", "column" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1599-L1604
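A sketch of how a column ends up indexed, which is what is_indexed reports; file name, key and data are illustrative.

    import pandas as pd

    with pd.HDFStore('store.h5', mode='w') as store:
        df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
        # data columns get a PyTables index by default on append
        store.append('df', df, data_columns=['A'])
        s = store.get_storer('df')
        # list the column names whose on-disk columns carry an index
        print([a.cname for a in s.axes if a.is_indexed])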
20,098
pandas-dev/pandas
pandas/io/pytables.py
IndexCol.set_info
def set_info(self, info): """ set my state from the passed info """ idx = info.get(self.name) if idx is not None: self.__dict__.update(idx)
python
def set_info(self, info): """ set my state from the passed info """ idx = info.get(self.name) if idx is not None: self.__dict__.update(idx)
[ "def", "set_info", "(", "self", ",", "info", ")", ":", "idx", "=", "info", ".", "get", "(", "self", ".", "name", ")", "if", "idx", "is", "not", "None", ":", "self", ".", "__dict__", ".", "update", "(", "idx", ")" ]
set my state from the passed info
[ "set", "my", "state", "from", "the", "passed", "info" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1766-L1770
20,099
pandas-dev/pandas
pandas/io/pytables.py
IndexCol.validate_metadata
def validate_metadata(self, handler): """ validate that kind=category does not change the categories """ if self.meta == 'category': new_metadata = self.metadata cur_metadata = handler.read_metadata(self.cname) if (new_metadata is not None and cur_metadata is not None and not array_equivalent(new_metadata, cur_metadata)): raise ValueError("cannot append a categorical with " "different categories to the existing")
python
def validate_metadata(self, handler): """ validate that kind=category does not change the categories """ if self.meta == 'category': new_metadata = self.metadata cur_metadata = handler.read_metadata(self.cname) if (new_metadata is not None and cur_metadata is not None and not array_equivalent(new_metadata, cur_metadata)): raise ValueError("cannot append a categorical with " "different categories to the existing")
[ "def", "validate_metadata", "(", "self", ",", "handler", ")", ":", "if", "self", ".", "meta", "==", "'category'", ":", "new_metadata", "=", "self", ".", "metadata", "cur_metadata", "=", "handler", ".", "read_metadata", "(", "self", ".", "cname", ")", "if", "(", "new_metadata", "is", "not", "None", "and", "cur_metadata", "is", "not", "None", "and", "not", "array_equivalent", "(", "new_metadata", ",", "cur_metadata", ")", ")", ":", "raise", "ValueError", "(", "\"cannot append a categorical with \"", "\"different categories to the existing\"", ")" ]
validate that kind=category does not change the categories
[ "validate", "that", "kind", "=", "category", "does", "not", "change", "the", "categories" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1784-L1792
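A sketch of the user-visible behaviour this check enforces: appending a categorical column whose categories differ from what is already stored raises the ValueError above. File/key names and the sample data are illustrative.

    import pandas as pd

    df1 = pd.DataFrame({'x': pd.Categorical(['a', 'b', 'a'],
                                            categories=['a', 'b'])})
    df2 = pd.DataFrame({'x': pd.Categorical(['a'], categories=['a'])})

    with pd.HDFStore('store.h5', mode='w') as store:
        store.append('cats', df1)
        try:
            # same dtype kind, but the categories no longer match
            store.append('cats', df2)
        except ValueError as err:
            print(err)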