Return the appropriate class for a Series concat; input is either a dict or array-like.
def _get_series_result_type(result, objs=None):
    """
    return appropriate class of Series concat
    input is either dict or array-like
    """
    from pandas import SparseSeries, SparseDataFrame, DataFrame

    # concat Series with axis 1
    if isinstance(result, dict):
        if all(isinstance(c, (SparseSeries, SparseDataFrame))
               for c in result.values()):
            return SparseDataFrame
        else:
            return DataFrame

    # otherwise it is a SingleBlockManager (axis = 0)
    if result._block.is_sparse:
        return SparseSeries
    else:
        return objs[0]._constructor
Return the appropriate class for a DataFrame-like concat: if all blocks are sparse, return SparseDataFrame; otherwise, return the first obj.
def _get_frame_result_type(result, objs): """ return appropriate class of DataFrame-like concat if all blocks are sparse, return SparseDataFrame otherwise, return 1st obj """ if (result.blocks and ( any(isinstance(obj, ABCSparseDataFrame) for obj in objs))): from pandas.core.sparse.api import SparseDataFrame return SparseDataFrame else: return next(obj for obj in objs if not isinstance(obj, ABCSparseDataFrame))
Provide concatenation of an array of arrays, each of which is a single 'normalized' dtype (in that, for example, if it's object, then it is a non-datetimelike), and provide a combined dtype for the resulting array that preserves the overall dtype if possible.
def _concat_compat(to_concat, axis=0):
    """
    provide concatenation of an array of arrays each of which is a single
    'normalized' dtypes (in that for example, if it's object, then it is a
    non-datetimelike and provide a combined dtype for the resulting array
    that preserves the overall dtype if possible)

    Parameters
    ----------
    to_concat : array of arrays
    axis : axis to provide concatenation

    Returns
    -------
    a single array, preserving the combined dtypes
    """

    # filter empty arrays
    # 1-d dtypes always are included here
    def is_nonempty(x):
        try:
            return x.shape[axis] > 0
        except Exception:
            return True

    # If all arrays are empty, there's nothing to convert, just short-cut to
    # the concatenation, #3121.
    #
    # Creating an empty array directly is tempting, but the winnings would be
    # marginal given that it would still require shape & dtype calculation and
    # np.concatenate which has them both implemented is compiled.

    typs = get_dtype_kinds(to_concat)
    _contains_datetime = any(typ.startswith('datetime') for typ in typs)
    _contains_period = any(typ.startswith('period') for typ in typs)

    if 'category' in typs:
        # this must be prior to _concat_datetime,
        # to support Categorical + datetime-like
        return _concat_categorical(to_concat, axis=axis)

    elif _contains_datetime or 'timedelta' in typs or _contains_period:
        return _concat_datetime(to_concat, axis=axis, typs=typs)

    # these are mandated to handle empties as well
    elif 'sparse' in typs:
        return _concat_sparse(to_concat, axis=axis, typs=typs)

    all_empty = all(not is_nonempty(x) for x in to_concat)
    if any(is_extension_array_dtype(x) for x in to_concat) and axis == 1:
        to_concat = [np.atleast_2d(x.astype('object')) for x in to_concat]

    if all_empty:
        # we have all empties, but may need to coerce the result dtype to
        # object if we have non-numeric type operands (numpy would otherwise
        # cast this to float)
        typs = get_dtype_kinds(to_concat)
        if len(typs) != 1:
            if (not len(typs - {'i', 'u', 'f'}) or
                    not len(typs - {'bool', 'i', 'u'})):
                # let numpy coerce
                pass
            else:
                # coerce to object
                to_concat = [x.astype('object') for x in to_concat]

    return np.concatenate(to_concat, axis=axis)
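The empty-coercion branch is easiest to see in isolation. Below is a minimal sketch of the same rule using plain numpy dtype kinds rather than pandas' get_dtype_kinds (so 'b' stands in for 'bool' here); it is illustrative, not the pandas internals themselves.

    import numpy as np

    # Mixed empty operands: coerce to object unless the kinds form a purely
    # numeric or bool/int combination that numpy can promote sensibly itself.
    to_concat = [np.array([], dtype='i8'), np.array([], dtype=object)]
    typs = {arr.dtype.kind for arr in to_concat}
    if len(typs) != 1 and typs - {'i', 'u', 'f'} and typs - {'b', 'i', 'u'}:
        to_concat = [x.astype('object') for x in to_concat]

    print(np.concatenate(to_concat).dtype)  # object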
Concatenate an object/categorical array of arrays, each of which is a single dtype.
def _concat_categorical(to_concat, axis=0):
    """Concatenate an object/categorical array of arrays, each of which is a
    single dtype

    Parameters
    ----------
    to_concat : array of arrays
    axis : int
        Axis to provide concatenation in the current implementation this is
        always 0, e.g. we only have 1D categoricals

    Returns
    -------
    Categorical
        A single array, preserving the combined dtypes
    """

    # we could have object blocks and categoricals here
    # if we only have a single categorical then combine everything
    # else it's a non-compat categorical
    categoricals = [x for x in to_concat if is_categorical_dtype(x.dtype)]

    # validate the categories
    if len(categoricals) != len(to_concat):
        pass
    else:
        # when all categories are identical
        first = to_concat[0]
        if all(first.is_dtype_equal(other) for other in to_concat[1:]):
            return union_categoricals(categoricals)

    # extract the categoricals & coerce to object if needed
    to_concat = [x.get_values() if is_categorical_dtype(x.dtype)
                 else np.asarray(x).ravel() if not is_datetime64tz_dtype(x)
                 else np.asarray(x.astype(object)) for x in to_concat]
    result = _concat_compat(to_concat)
    if axis == 1:
        result = result.reshape(1, len(result))
    return result
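The fast path versus the object fallback is observable through the public pd.concat API; a small example of the behavior as of the pandas version this code comes from:

    import pandas as pd

    a = pd.Series(pd.Categorical(['x', 'y']))
    b = pd.Series(pd.Categorical(['x', 'y']))
    c = pd.Series(pd.Categorical(['z']))

    print(pd.concat([a, b]).dtype)  # category: identical dtypes, fast path
    print(pd.concat([a, c]).dtype)  # object: categories differ, coerced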
Combine a list-like of Categorical-likes, unioning categories. All categories must have the same dtype.
def union_categoricals(to_union, sort_categories=False, ignore_order=False):
    """
    Combine list-like of Categorical-like, unioning categories. All
    categories must have the same dtype.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    to_union : list-like of Categorical, CategoricalIndex,
               or Series with dtype='category'
    sort_categories : boolean, default False
        If true, resulting categories will be lexsorted, otherwise
        they will be ordered as they appear in the data.
    ignore_order : boolean, default False
        If true, the ordered attribute of the Categoricals will be ignored.
        Results in an unordered categorical.

        .. versionadded:: 0.20.0

    Returns
    -------
    result : Categorical

    Raises
    ------
    TypeError
        - all inputs do not have the same dtype
        - all inputs do not have the same ordered property
        - all inputs are ordered and their categories are not identical
        - sort_categories=True and Categoricals are ordered
    ValueError
        Empty list of categoricals passed

    Notes
    -----
    To learn more about categories, see `link
    <http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__

    Examples
    --------
    >>> from pandas.api.types import union_categoricals

    If you want to combine categoricals that do not necessarily have
    the same categories, `union_categoricals` will combine a list-like
    of categoricals. The new categories will be the union of the
    categories being combined.

    >>> a = pd.Categorical(["b", "c"])
    >>> b = pd.Categorical(["a", "b"])
    >>> union_categoricals([a, b])
    [b, c, a, b]
    Categories (3, object): [b, c, a]

    By default, the resulting categories will be ordered as they appear
    in the `categories` of the data. If you want the categories to be
    lexsorted, use `sort_categories=True` argument.

    >>> union_categoricals([a, b], sort_categories=True)
    [b, c, a, b]
    Categories (3, object): [a, b, c]

    `union_categoricals` also works with the case of combining two
    categoricals of the same categories and order information (e.g. what
    you could also `append` for).

    >>> a = pd.Categorical(["a", "b"], ordered=True)
    >>> b = pd.Categorical(["a", "b", "a"], ordered=True)
    >>> union_categoricals([a, b])
    [a, b, a, b, a]
    Categories (2, object): [a < b]

    Raises `TypeError` because the categories are ordered and not identical.

    >>> a = pd.Categorical(["a", "b"], ordered=True)
    >>> b = pd.Categorical(["a", "b", "c"], ordered=True)
    >>> union_categoricals([a, b])
    TypeError: to union ordered Categoricals, all categories must be the same

    .. versionadded:: 0.20.0

    Ordered categoricals with different categories or orderings can be
    combined by using the `ignore_order=True` argument.

    >>> a = pd.Categorical(["a", "b", "c"], ordered=True)
    >>> b = pd.Categorical(["c", "b", "a"], ordered=True)
    >>> union_categoricals([a, b], ignore_order=True)
    [a, b, c, c, b, a]
    Categories (3, object): [a, b, c]

    `union_categoricals` also works with a `CategoricalIndex`, or `Series`
    containing categorical data, but note that the resulting array will
    always be a plain `Categorical`

    >>> a = pd.Series(["b", "c"], dtype='category')
    >>> b = pd.Series(["a", "b"], dtype='category')
    >>> union_categoricals([a, b])
    [b, c, a, b]
    Categories (3, object): [b, c, a]
    """
    from pandas import Index, Categorical, CategoricalIndex, Series
    from pandas.core.arrays.categorical import _recode_for_categories

    if len(to_union) == 0:
        raise ValueError('No Categoricals to union')

    def _maybe_unwrap(x):
        if isinstance(x, (CategoricalIndex, Series)):
            return x.values
        elif isinstance(x, Categorical):
            return x
        else:
            raise TypeError("all components to combine must be Categorical")

    to_union = [_maybe_unwrap(x) for x in to_union]
    first = to_union[0]

    if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype)
               for other in to_union[1:]):
        raise TypeError("dtype of categories must be the same")

    ordered = False
    if all(first.is_dtype_equal(other) for other in to_union[1:]):
        # identical categories - fastpath
        categories = first.categories
        ordered = first.ordered

        if all(first.categories.equals(other.categories)
               for other in to_union[1:]):
            new_codes = np.concatenate([c.codes for c in to_union])
        else:
            codes = [first.codes] + [_recode_for_categories(other.codes,
                                                            other.categories,
                                                            first.categories)
                                     for other in to_union[1:]]
            new_codes = np.concatenate(codes)

        if sort_categories and not ignore_order and ordered:
            raise TypeError("Cannot use sort_categories=True with "
                            "ordered Categoricals")

        if sort_categories and not categories.is_monotonic_increasing:
            categories = categories.sort_values()
            indexer = categories.get_indexer(first.categories)

            from pandas.core.algorithms import take_1d
            new_codes = take_1d(indexer, new_codes, fill_value=-1)
    elif ignore_order or all(not c.ordered for c in to_union):
        # different categories - union and recode
        cats = first.categories.append([c.categories for c in to_union[1:]])
        categories = Index(cats.unique())
        if sort_categories:
            categories = categories.sort_values()

        new_codes = [_recode_for_categories(c.codes, c.categories, categories)
                     for c in to_union]
        new_codes = np.concatenate(new_codes)
    else:
        # ordered - to show a proper error message
        if all(c.ordered for c in to_union):
            msg = ("to union ordered Categoricals, "
                   "all categories must be the same")
            raise TypeError(msg)
        else:
            raise TypeError('Categorical.ordered must be the same')

    if ignore_order:
        ordered = False

    return Categorical(new_codes, categories=categories, ordered=ordered,
                       fastpath=True)
Provide concatenation of a datetimelike array of arrays, each of which is a single M8[ns], datetime64[ns, tz] or m8[ns] dtype.
def _concat_datetime(to_concat, axis=0, typs=None):
    """
    provide concatenation of a datetimelike array of arrays
    each of which is a single M8[ns], datetime64[ns, tz] or m8[ns] dtype

    Parameters
    ----------
    to_concat : array of arrays
    axis : axis to provide concatenation
    typs : set of to_concat dtypes

    Returns
    -------
    a single array, preserving the combined dtypes
    """

    if typs is None:
        typs = get_dtype_kinds(to_concat)

    # multiple types, need to coerce to object
    if len(typs) != 1:
        return _concatenate_2d([_convert_datetimelike_to_object(x)
                                for x in to_concat],
                               axis=axis)

    # must be single dtype
    if any(typ.startswith('datetime') for typ in typs):
        if 'datetime' in typs:
            to_concat = [x.astype(np.int64, copy=False) for x in to_concat]
            return _concatenate_2d(to_concat, axis=axis).view(_NS_DTYPE)
        else:
            # when to_concat has different tz, len(typs) > 1.
            # thus no need to care
            return _concat_datetimetz(to_concat)

    elif 'timedelta' in typs:
        return _concatenate_2d([x.view(np.int64) for x in to_concat],
                               axis=axis).view(_TD_DTYPE)

    elif any(typ.startswith('period') for typ in typs):
        assert len(typs) == 1
        cls = to_concat[0]
        new_values = cls._concat_same_type(to_concat)
        return new_values
Concat DatetimeIndex with the same tz; all inputs must be DatetimeIndex. It is used in DatetimeIndex.append also.
def _concat_datetimetz(to_concat, name=None): """ concat DatetimeIndex with the same tz all inputs must be DatetimeIndex it is used in DatetimeIndex.append also """ # Right now, internals will pass a List[DatetimeArray] here # for reductions like quantile. I would like to disentangle # all this before we get here. sample = to_concat[0] if isinstance(sample, ABCIndexClass): return sample._concat_same_dtype(to_concat, name=name) elif isinstance(sample, ABCDatetimeArray): return sample._concat_same_type(to_concat)
Concat all inputs as object. DatetimeIndex, TimedeltaIndex and PeriodIndex are converted to object dtype before concatenation.
def _concat_index_asobject(to_concat, name=None): """ concat all inputs as object. DatetimeIndex, TimedeltaIndex and PeriodIndex are converted to object dtype before concatenation """ from pandas import Index from pandas.core.arrays import ExtensionArray klasses = (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex, ExtensionArray) to_concat = [x.astype(object) if isinstance(x, klasses) else x for x in to_concat] self = to_concat[0] attribs = self._get_attributes_dict() attribs['name'] = name to_concat = [x._values if isinstance(x, Index) else x for x in to_concat] return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
Provide concatenation of a sparse/dense array of arrays, each of which is a single dtype.
def _concat_sparse(to_concat, axis=0, typs=None):
    """
    provide concatenation of a sparse/dense array of arrays
    each of which is a single dtype

    Parameters
    ----------
    to_concat : array of arrays
    axis : axis to provide concatenation
    typs : set of to_concat dtypes

    Returns
    -------
    a single array, preserving the combined dtypes
    """
    from pandas.core.arrays import SparseArray

    fill_values = [x.fill_value for x in to_concat
                   if isinstance(x, SparseArray)]
    fill_value = fill_values[0]

    # TODO: Fix join unit generation so we aren't passed this.
    to_concat = [x if isinstance(x, SparseArray)
                 else SparseArray(x.squeeze(), fill_value=fill_value)
                 for x in to_concat]

    return SparseArray._concat_same_type(to_concat)
Concatenates multiple RangeIndex instances. All members of 'indexes' must be of type RangeIndex; the result will be RangeIndex if possible, Int64Index otherwise. E.g.: indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6); indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0, 1, 2, 4, 5]).
def _concat_rangeindex_same_dtype(indexes): """ Concatenates multiple RangeIndex instances. All members of "indexes" must be of type RangeIndex; result will be RangeIndex if possible, Int64Index otherwise. E.g.: indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) """ from pandas import Int64Index, RangeIndex start = step = next = None # Filter the empty indexes non_empty_indexes = [obj for obj in indexes if len(obj)] for obj in non_empty_indexes: if start is None: # This is set by the first non-empty index start = obj._start if step is None and len(obj) > 1: step = obj._step elif step is None: # First non-empty index had only one element if obj._start == start: return _concat_index_same_dtype(indexes, klass=Int64Index) step = obj._start - start non_consecutive = ((step != obj._step and len(obj) > 1) or (next is not None and obj._start != next)) if non_consecutive: return _concat_index_same_dtype(indexes, klass=Int64Index) if step is not None: next = obj[-1] + step if non_empty_indexes: # Get the stop value from "next" or alternatively # from the last non-empty index stop = non_empty_indexes[-1]._stop if next is None else next return RangeIndex(start, stop, step) # Here all "indexes" had 0 length, i.e. were empty. # In this case return an empty range index. return RangeIndex(0, 0)
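Both outcomes are visible through Index.append, which dispatches to this helper for RangeIndex inputs in the pandas version this code is from:

    import pandas as pd

    # Consecutive ranges collapse back into a RangeIndex...
    print(pd.RangeIndex(3).append(pd.RangeIndex(3, 6)))
    # RangeIndex(start=0, stop=6, step=1)

    # ...while a gap forces the Int64Index fallback.
    print(pd.RangeIndex(3).append(pd.RangeIndex(4, 6)))
    # Int64Index([0, 1, 2, 4, 5], dtype='int64')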
Rewrite the message of an exception.
from contextlib import contextmanager


@contextmanager
def rewrite_exception(old_name, new_name):
    """Rewrite the message of an exception."""
    try:
        yield
    except Exception as e:
        msg = e.args[0]
        msg = msg.replace(old_name, new_name)
        args = (msg,)
        if len(e.args) > 1:
            args = args + e.args[1:]
        e.args = args
        raise
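A caller-side usage sketch (the names here are illustrative): the operation is wrapped so any exception text mentions the public-facing name instead of the internal one.

    try:
        with rewrite_exception('RangeIndex', 'Int64Index'):
            raise TypeError('RangeIndex does not support this operation')
    except TypeError as e:
        print(e)  # Int64Index does not support this operation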
Given an index, find the level length for each element.
def _get_level_lengths(index, hidden_elements=None):
    """
    Given an index, find the level length for each element.

    Optional argument is a list of index positions which
    should not be visible.

    Result is a dictionary of (level, initial_position): span
    """
    sentinel = object()
    levels = index.format(sparsify=sentinel, adjoin=False, names=False)

    if hidden_elements is None:
        hidden_elements = []

    lengths = {}
    if index.nlevels == 1:
        for i, value in enumerate(levels):
            if i not in hidden_elements:
                lengths[(0, i)] = 1
        return lengths

    for i, lvl in enumerate(levels):
        for j, row in enumerate(lvl):
            if not get_option('display.multi_sparse'):
                lengths[(i, j)] = 1
            elif (row != sentinel) and (j not in hidden_elements):
                last_label = j
                lengths[(i, last_label)] = 1
            elif (row != sentinel):
                # even if it's hidden, keep track of it in case
                # length > 1 and later elements are visible
                last_label = j
                lengths[(i, last_label)] = 0
            elif j not in hidden_elements:
                lengths[(i, last_label)] += 1

    non_zero_lengths = {
        element: length for element, length in lengths.items()
        if length >= 1}

    return non_zero_lengths
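A hypothetical illustration of the span bookkeeping (with display.multi_sparse enabled): repeated upper-level labels collapse into a single entry spanning several rows.

    import pandas as pd

    idx = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
    # _get_level_lengths(idx) should yield something like:
    # {(0, 0): 2, (0, 2): 1,              # level 0: 'a' spans 2 rows, 'b' one
    #  (1, 0): 1, (1, 1): 1, (1, 2): 1}   # level 1: every label its own row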
Convert the DataFrame in `self.data` and the attrs from `_build_styles` into a dictionary of {head, body, uuid, cellstyle}.
def _translate(self):
    """
    Convert the DataFrame in `self.data` and the attrs from `_build_styles`
    into a dictionary of {head, body, uuid, cellstyle}.
    """
    table_styles = self.table_styles or []
    caption = self.caption
    ctx = self.ctx
    precision = self.precision
    hidden_index = self.hidden_index
    hidden_columns = self.hidden_columns
    uuid = self.uuid or str(uuid1()).replace("-", "_")
    ROW_HEADING_CLASS = "row_heading"
    COL_HEADING_CLASS = "col_heading"
    INDEX_NAME_CLASS = "index_name"

    DATA_CLASS = "data"
    BLANK_CLASS = "blank"
    BLANK_VALUE = ""

    def format_attr(pair):
        return "{key}={value}".format(**pair)

    # for sparsifying a MultiIndex
    idx_lengths = _get_level_lengths(self.index)
    col_lengths = _get_level_lengths(self.columns, hidden_columns)

    cell_context = dict()

    n_rlvls = self.data.index.nlevels
    n_clvls = self.data.columns.nlevels
    rlabels = self.data.index.tolist()
    clabels = self.data.columns.tolist()

    if n_rlvls == 1:
        rlabels = [[x] for x in rlabels]
    if n_clvls == 1:
        clabels = [[x] for x in clabels]
    clabels = list(zip(*clabels))

    cellstyle = []
    head = []

    for r in range(n_clvls):
        # Blank for Index columns...
        row_es = [{"type": "th",
                   "value": BLANK_VALUE,
                   "display_value": BLANK_VALUE,
                   "is_visible": not hidden_index,
                   "class": " ".join([BLANK_CLASS])}] * (n_rlvls - 1)

        # ... except maybe the last for columns.names
        name = self.data.columns.names[r]
        cs = [BLANK_CLASS if name is None else INDEX_NAME_CLASS,
              "level{lvl}".format(lvl=r)]
        name = BLANK_VALUE if name is None else name
        row_es.append({"type": "th",
                       "value": name,
                       "display_value": name,
                       "class": " ".join(cs),
                       "is_visible": not hidden_index})

        if clabels:
            for c, value in enumerate(clabels[r]):
                cs = [COL_HEADING_CLASS, "level{lvl}".format(lvl=r),
                      "col{col}".format(col=c)]
                cs.extend(cell_context.get(
                    "col_headings", {}).get(r, {}).get(c, []))
                es = {
                    "type": "th",
                    "value": value,
                    "display_value": value,
                    "class": " ".join(cs),
                    "is_visible": _is_visible(c, r, col_lengths),
                }
                colspan = col_lengths.get((r, c), 0)
                if colspan > 1:
                    es["attributes"] = [
                        format_attr({"key": "colspan", "value": colspan})
                    ]
                row_es.append(es)
            head.append(row_es)

    if (self.data.index.names and
            com._any_not_none(*self.data.index.names) and
            not hidden_index):
        index_header_row = []

        for c, name in enumerate(self.data.index.names):
            cs = [INDEX_NAME_CLASS,
                  "level{lvl}".format(lvl=c)]
            name = '' if name is None else name
            index_header_row.append({"type": "th", "value": name,
                                     "class": " ".join(cs)})

        index_header_row.extend(
            [{"type": "th",
              "value": BLANK_VALUE,
              "class": " ".join([BLANK_CLASS])
              }] * (len(clabels[0]) - len(hidden_columns)))

        head.append(index_header_row)

    body = []
    for r, idx in enumerate(self.data.index):
        row_es = []
        for c, value in enumerate(rlabels[r]):
            rid = [ROW_HEADING_CLASS, "level{lvl}".format(lvl=c),
                   "row{row}".format(row=r)]
            es = {
                "type": "th",
                "is_visible": (_is_visible(r, c, idx_lengths) and
                               not hidden_index),
                "value": value,
                "display_value": value,
                "id": "_".join(rid[1:]),
                "class": " ".join(rid)
            }
            rowspan = idx_lengths.get((c, r), 0)
            if rowspan > 1:
                es["attributes"] = [
                    format_attr({"key": "rowspan", "value": rowspan})
                ]
            row_es.append(es)

        for c, col in enumerate(self.data.columns):
            cs = [DATA_CLASS, "row{row}".format(row=r),
                  "col{col}".format(col=c)]
            cs.extend(cell_context.get("data", {}).get(r, {}).get(c, []))
            formatter = self._display_funcs[(r, c)]
            value = self.data.iloc[r, c]
            row_dict = {"type": "td",
                        "value": value,
                        "class": " ".join(cs),
                        "display_value": formatter(value),
                        "is_visible": (c not in hidden_columns)}
            # only add an id if the cell has a style
            if (self.cell_ids or
                    not (len(ctx[r, c]) == 1 and ctx[r, c][0] == '')):
                row_dict["id"] = "_".join(cs[1:])
            row_es.append(row_dict)
            props = []
            for x in ctx[r, c]:
                # have to handle empty styles like ['']
                if x.count(":"):
                    props.append(x.split(":"))
                else:
                    props.append(['', ''])
            cellstyle.append({'props': props,
                              'selector': "row{row}_col{col}"
                              .format(row=r, col=c)})
        body.append(row_es)

    table_attr = self.table_attributes
    use_mathjax = get_option("display.html.use_mathjax")
    if not use_mathjax:
        table_attr = table_attr or ''
        if 'class="' in table_attr:
            table_attr = table_attr.replace('class="',
                                            'class="tex2jax_ignore ')
        else:
            table_attr += ' class="tex2jax_ignore"'

    return dict(head=head, cellstyle=cellstyle, body=body,
                uuid=uuid, precision=precision, table_styles=table_styles,
                caption=caption, table_attributes=table_attr)
Format the text display value of cells.
def format(self, formatter, subset=None): """ Format the text display value of cells. .. versionadded:: 0.18.0 Parameters ---------- formatter : str, callable, or dict subset : IndexSlice An argument to ``DataFrame.loc`` that restricts which elements ``formatter`` is applied to. Returns ------- self : Styler Notes ----- ``formatter`` is either an ``a`` or a dict ``{column name: a}`` where ``a`` is one of - str: this will be wrapped in: ``a.format(x)`` - callable: called with the value of an individual cell The default display value for numeric values is the "general" (``g``) format with ``pd.options.display.precision`` precision. Examples -------- >>> df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b']) >>> df.style.format("{:.2%}") >>> df['c'] = ['a', 'b', 'c', 'd'] >>> df.style.format({'c': str.upper}) """ if subset is None: row_locs = range(len(self.data)) col_locs = range(len(self.data.columns)) else: subset = _non_reducing_slice(subset) if len(subset) == 1: subset = subset, self.data.columns sub_df = self.data.loc[subset] row_locs = self.data.index.get_indexer_for(sub_df.index) col_locs = self.data.columns.get_indexer_for(sub_df.columns) if is_dict_like(formatter): for col, col_formatter in formatter.items(): # formatter must be callable, so '{}' are converted to lambdas col_formatter = _maybe_wrap_formatter(col_formatter) col_num = self.data.columns.get_indexer_for([col])[0] for row_num in row_locs: self._display_funcs[(row_num, col_num)] = col_formatter else: # single scalar to format all cells with locs = product(*(row_locs, col_locs)) for i, j in locs: formatter = _maybe_wrap_formatter(formatter) self._display_funcs[(i, j)] = formatter return self
Render the built up styles to HTML.
def render(self, **kwargs):
    """
    Render the built up styles to HTML.

    Parameters
    ----------
    **kwargs
        Any additional keyword arguments are passed through to
        ``self.template.render``. This is useful when you need to provide
        additional variables for a custom template.

        .. versionadded:: 0.20

    Returns
    -------
    rendered : str
        The rendered HTML.

    Notes
    -----
    ``Styler`` objects have defined the ``_repr_html_`` method
    which automatically calls ``self.render()`` when it's the
    last item in a Notebook cell. When calling ``Styler.render()``
    directly, wrap the result in ``IPython.display.HTML`` to view
    the rendered HTML in the notebook.

    Pandas uses the following keys in render. Arguments passed
    in ``**kwargs`` take precedence, so think carefully if you want
    to override them:

    * head
    * cellstyle
    * body
    * uuid
    * precision
    * table_styles
    * caption
    * table_attributes
    """
    self._compute()
    # TODO: namespace all the pandas keys
    d = self._translate()
    # filter out empty styles, every cell will have a class
    # but the list of props may just be [['', '']].
    # so we have the nested anys below
    trimmed = [x for x in d['cellstyle']
               if any(any(y) for y in x['props'])]
    d['cellstyle'] = trimmed
    d.update(kwargs)
    return self.template.render(**d)
Update the state of the Styler.
def _update_ctx(self, attrs): """ Update the state of the Styler. Collects a mapping of {index_label: ['<property>: <value>']}. attrs : Series or DataFrame should contain strings of '<property>: <value>;<prop2>: <val2>' Whitespace shouldn't matter and the final trailing ';' shouldn't matter. """ for row_label, v in attrs.iterrows(): for col_label, col in v.iteritems(): i = self.index.get_indexer([row_label])[0] j = self.columns.get_indexer([col_label])[0] for pair in col.rstrip(";").split(";"): self.ctx[(i, j)].append(pair)
Execute the style functions built up in `self._todo`.
def _compute(self):
    """
    Execute the style functions built up in `self._todo`.

    Relies on the conventions that all style functions go through
    .apply or .applymap. They append styles to apply as tuples of

    (application method, *args, **kwargs)
    """
    r = self
    for func, args, kwargs in self._todo:
        r = func(self)(*args, **kwargs)
    return r
Apply a function column-wise, row-wise, or table-wise, updating the HTML representation with the result.
def apply(self, func, axis=0, subset=None, **kwargs): """ Apply a function column-wise, row-wise, or table-wise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a Series or DataFrame (depending on ``axis``), and return an object with the same shape. Must return a DataFrame with identical index and column labels when ``axis=None`` axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler Notes ----- The output shape of ``func`` should match the input, i.e. if ``x`` is the input row, column, or table (depending on ``axis``), then ``func(x).shape == x.shape`` should be true. This is similar to ``DataFrame.apply``, except that ``axis=None`` applies the function to the entire DataFrame at once, rather than column-wise or row-wise. Examples -------- >>> def highlight_max(x): ... return ['background-color: yellow' if v == x.max() else '' for v in x] ... >>> df = pd.DataFrame(np.random.randn(5, 2)) >>> df.style.apply(highlight_max) """ self._todo.append((lambda instance: getattr(instance, '_apply'), (func, axis, subset), kwargs)) return self
Apply a function elementwise, updating the HTML representation with the result.
def applymap(self, func, subset=None, **kwargs): """ Apply a function elementwise, updating the HTML representation with the result. Parameters ---------- func : function ``func`` should take a scalar and return a scalar subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``func`` Returns ------- self : Styler See Also -------- Styler.where """ self._todo.append((lambda instance: getattr(instance, '_applymap'), (func, subset), kwargs)) return self
Apply a function elementwise, updating the HTML representation with a style which is selected in accordance with the return value of a function.
def where(self, cond, value, other=None, subset=None, **kwargs): """ Apply a function elementwise, updating the HTML representation with a style which is selected in accordance with the return value of a function. .. versionadded:: 0.21.0 Parameters ---------- cond : callable ``cond`` should take a scalar and return a boolean value : str applied when ``cond`` returns true other : str applied when ``cond`` returns false subset : IndexSlice a valid indexer to limit ``data`` to *before* applying the function. Consider using a pandas.IndexSlice kwargs : dict pass along to ``cond`` Returns ------- self : Styler See Also -------- Styler.applymap """ if other is None: other = '' return self.applymap(lambda val: value if cond(val) else other, subset=subset, **kwargs)
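A short usage example (the frame and styles here are illustrative, not from the source):

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(np.random.randn(4, 2), columns=['a', 'b'])
    # Red text where the predicate holds, gray otherwise.
    styled = df.style.where(lambda v: v < 0, 'color: red',
                            other='color: gray')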
Hide columns from rendering.
def hide_columns(self, subset): """ Hide columns from rendering. .. versionadded:: 0.23.0 Parameters ---------- subset : IndexSlice An argument to ``DataFrame.loc`` that identifies which columns are hidden. Returns ------- self : Styler """ subset = _non_reducing_slice(subset) hidden_df = self.data.loc[subset] self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns) return self
Shade the background null_color for missing values.
def highlight_null(self, null_color='red'): """ Shade the background ``null_color`` for missing values. Parameters ---------- null_color : str Returns ------- self : Styler """ self.applymap(self._highlight_null, null_color=null_color) return self
Color the background in a gradient according to the data in each column (optionally row).
def background_gradient(self, cmap='PuBu', low=0, high=0, axis=0, subset=None, text_color_threshold=0.408): """ Color the background in a gradient according to the data in each column (optionally row). Requires matplotlib. Parameters ---------- cmap : str or colormap matplotlib colormap low, high : float compress the range by these values. axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. subset : IndexSlice a valid slice for ``data`` to limit the style application to. text_color_threshold : float or int luminance threshold for determining text color. Facilitates text visibility across varying background colors. From 0 to 1. 0 = all text is dark colored, 1 = all text is light colored. .. versionadded:: 0.24.0 Returns ------- self : Styler Raises ------ ValueError If ``text_color_threshold`` is not a value from 0 to 1. Notes ----- Set ``text_color_threshold`` or tune ``low`` and ``high`` to keep the text legible by not using the entire range of the color map. The range of the data is extended by ``low * (x.max() - x.min())`` and ``high * (x.max() - x.min())`` before normalizing. """ subset = _maybe_numeric_slice(self.data, subset) subset = _non_reducing_slice(subset) self.apply(self._background_gradient, cmap=cmap, subset=subset, axis=axis, low=low, high=high, text_color_threshold=text_color_threshold) return self
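For example (requires matplotlib; the frame is illustrative):

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(np.random.randn(5, 3))
    # Shade each column on its own scale and raise the threshold so more
    # cells get light-colored text.
    styled = df.style.background_gradient(cmap='viridis',
                                          text_color_threshold=0.5)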
Color background in a range according to the data.
def _background_gradient(s, cmap='PuBu', low=0, high=0,
                         text_color_threshold=0.408):
    """
    Color background in a range according to the data.
    """
    if (not isinstance(text_color_threshold, (float, int)) or
            not 0 <= text_color_threshold <= 1):
        msg = "`text_color_threshold` must be a value from 0 to 1."
        raise ValueError(msg)

    with _mpl(Styler.background_gradient) as (plt, colors):
        smin = s.values.min()
        smax = s.values.max()
        rng = smax - smin
        # extend lower / upper bounds, compresses color range
        norm = colors.Normalize(smin - (rng * low), smax + (rng * high))
        # matplotlib colors.Normalize modifies inplace?
        # https://github.com/matplotlib/matplotlib/issues/5427
        rgbas = plt.cm.get_cmap(cmap)(norm(s.values))

        def relative_luminance(rgba):
            """
            Calculate relative luminance of a color.

            The calculation adheres to the W3C standards
            (https://www.w3.org/WAI/GL/wiki/Relative_luminance)

            Parameters
            ----------
            color : rgb or rgba tuple

            Returns
            -------
            float
                The relative luminance as a value from 0 to 1
            """
            r, g, b = (
                x / 12.92 if x <= 0.03928 else ((x + 0.055) / 1.055) ** 2.4
                for x in rgba[:3]
            )
            return 0.2126 * r + 0.7152 * g + 0.0722 * b

        def css(rgba):
            dark = relative_luminance(rgba) < text_color_threshold
            text_color = '#f1f1f1' if dark else '#000000'
            return 'background-color: {b};color: {c};'.format(
                b=colors.rgb2hex(rgba), c=text_color
            )

        if s.ndim == 1:
            return [css(rgba) for rgba in rgbas]
        else:
            return pd.DataFrame(
                [[css(rgba) for rgba in row] for row in rgbas],
                index=s.index, columns=s.columns
            )
Convenience method for setting one or more non-data dependent properties of each cell.
def set_properties(self, subset=None, **kwargs):
    """
    Convenience method for setting one or more non-data dependent
    properties of each cell.

    Parameters
    ----------
    subset : IndexSlice
        a valid slice for ``data`` to limit the style application to
    kwargs : dict
        property: value pairs to be set for each cell

    Returns
    -------
    self : Styler

    Examples
    --------
    >>> df = pd.DataFrame(np.random.randn(10, 4))
    >>> df.style.set_properties(color="white", align="right")
    >>> df.style.set_properties(**{'background-color': 'yellow'})
    """
    values = ';'.join('{p}: {v}'.format(p=p, v=v)
                      for p, v in kwargs.items())
    f = lambda x: values
    return self.applymap(f, subset=subset)
Draw bar chart in dataframe cells.
def _bar(s, align, colors, width=100, vmin=None, vmax=None): """ Draw bar chart in dataframe cells. """ # Get input value range. smin = s.min() if vmin is None else vmin if isinstance(smin, ABCSeries): smin = smin.min() smax = s.max() if vmax is None else vmax if isinstance(smax, ABCSeries): smax = smax.max() if align == 'mid': smin = min(0, smin) smax = max(0, smax) elif align == 'zero': # For "zero" mode, we want the range to be symmetrical around zero. smax = max(abs(smin), abs(smax)) smin = -smax # Transform to percent-range of linear-gradient normed = width * (s.values - smin) / (smax - smin + 1e-12) zero = -width * smin / (smax - smin + 1e-12) def css_bar(start, end, color): """ Generate CSS code to draw a bar from start to end. """ css = 'width: 10em; height: 80%;' if end > start: css += 'background: linear-gradient(90deg,' if start > 0: css += ' transparent {s:.1f}%, {c} {s:.1f}%, '.format( s=start, c=color ) css += '{c} {e:.1f}%, transparent {e:.1f}%)'.format( e=min(end, width), c=color, ) return css def css(x): if pd.isna(x): return '' # avoid deprecated indexing `colors[x > zero]` color = colors[1] if x > zero else colors[0] if align == 'left': return css_bar(0, x, color) else: return css_bar(min(x, zero), max(x, zero), color) if s.ndim == 1: return [css(x) for x in normed] else: return pd.DataFrame( [[css(x) for x in row] for row in normed], index=s.index, columns=s.columns )
Draw bar chart in the cell backgrounds.
def bar(self, subset=None, axis=0, color='#d65f5f', width=100,
        align='left', vmin=None, vmax=None):
    """
    Draw bar chart in the cell backgrounds.

    Parameters
    ----------
    subset : IndexSlice, optional
        A valid slice for `data` to limit the style application to.
    axis : {0 or 'index', 1 or 'columns', None}, default 0
        apply to each column (``axis=0`` or ``'index'``), to each row
        (``axis=1`` or ``'columns'``), or to the entire DataFrame at once
        with ``axis=None``.
    color : str or 2-tuple/list
        If a str is passed, the color is the same for both
        negative and positive numbers. If 2-tuple/list is used, the
        first element is the color_negative and the second is the
        color_positive (eg: ['#d65f5f', '#5fba7d']).
    width : float, default 100
        A number between 0 and 100. The largest value will cover `width`
        percent of the cell's width.
    align : {'left', 'zero', 'mid'}, default 'left'
        How to align the bars with the cells.

        - 'left' : the min value starts at the left of the cell.
        - 'zero' : a value of zero is located at the center of the cell.
        - 'mid' : the center of the cell is at (max-min)/2, or
          if values are all negative (positive) the zero is aligned
          at the right (left) of the cell.

        .. versionadded:: 0.20.0

    vmin : float, optional
        Minimum bar value, defining the left hand limit
        of the bar drawing range, lower values are clipped to `vmin`.
        When None (default): the minimum value of the data will be used.

        .. versionadded:: 0.24.0

    vmax : float, optional
        Maximum bar value, defining the right hand limit
        of the bar drawing range, higher values are clipped to `vmax`.
        When None (default): the maximum value of the data will be used.

        .. versionadded:: 0.24.0

    Returns
    -------
    self : Styler
    """
    if align not in ('left', 'zero', 'mid'):
        raise ValueError("`align` must be one of {'left', 'zero', 'mid'}")

    if not (is_list_like(color)):
        color = [color, color]
    elif len(color) == 1:
        color = [color[0], color[0]]
    elif len(color) > 2:
        raise ValueError("`color` must be string or a list-like"
                         " of length 2: [`color_neg`, `color_pos`]"
                         " (eg: color=['#d65f5f', '#5fba7d'])")

    subset = _maybe_numeric_slice(self.data, subset)
    subset = _non_reducing_slice(subset)
    self.apply(self._bar, subset=subset, axis=axis,
               align=align, colors=color, width=width,
               vmin=vmin, vmax=vmax)

    return self
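For example, two-color bars centered on zero (the frame is illustrative):

    import pandas as pd

    df = pd.DataFrame({'x': [-3, -1, 2, 4]})
    styled = df.style.bar(align='zero', color=['#d65f5f', '#5fba7d'])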
Highlight the maximum by shading the background.
def highlight_max(self, subset=None, color='yellow', axis=0): """ Highlight the maximum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler """ return self._highlight_handler(subset=subset, color=color, axis=axis, max_=True)
Highlight the minimum by shading the background.
def highlight_min(self, subset=None, color='yellow', axis=0): """ Highlight the minimum by shading the background. Parameters ---------- subset : IndexSlice, default None a valid slice for ``data`` to limit the style application to. color : str, default 'yellow' axis : {0 or 'index', 1 or 'columns', None}, default 0 apply to each column (``axis=0`` or ``'index'``), to each row (``axis=1`` or ``'columns'``), or to the entire DataFrame at once with ``axis=None``. Returns ------- self : Styler """ return self._highlight_handler(subset=subset, color=color, axis=axis, max_=False)
Highlight the min or max in a Series or DataFrame.
def _highlight_extrema(data, color='yellow', max_=True):
    """
    Highlight the min or max in a Series or DataFrame.
    """
    attr = 'background-color: {0}'.format(color)

    if data.ndim == 1:  # Series from .apply
        if max_:
            extrema = data == data.max()
        else:
            extrema = data == data.min()
        return [attr if v else '' for v in extrema]
    else:  # DataFrame from .apply(axis=None)
        if max_:
            extrema = data == data.max().max()
        else:
            extrema = data == data.min().min()
        return pd.DataFrame(np.where(extrema, attr, ''),
                            index=data.index, columns=data.columns)
Factory function for creating a subclass of Styler with a custom template and Jinja environment.
def from_custom_template(cls, searchpath, name): """ Factory function for creating a subclass of ``Styler`` with a custom template and Jinja environment. Parameters ---------- searchpath : str or list Path or paths of directories containing the templates name : str Name of your custom template to use for rendering Returns ------- MyStyler : subclass of Styler Has the correct ``env`` and ``template`` class attributes set. """ loader = ChoiceLoader([ FileSystemLoader(searchpath), cls.loader, ]) class MyStyler(cls): env = Environment(loader=loader) template = env.get_template(name) return MyStyler
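A usage sketch; the search path and template name below are hypothetical:

    # 'templates/myhtml.tpl' would extend the default template shipped with
    # pandas and override whichever blocks need customizing.
    MyStyler = Styler.from_custom_template('templates', 'myhtml.tpl')
    html = MyStyler(df).render()  # df: any DataFrame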
Parameters ---------- dtype: ExtensionDtype
def register(self, dtype): """ Parameters ---------- dtype : ExtensionDtype """ if not issubclass(dtype, (PandasExtensionDtype, ExtensionDtype)): raise ValueError("can only register pandas extension dtypes") self.dtypes.append(dtype)
Parameters ---------- dtype: PandasExtensionDtype or string
def find(self, dtype): """ Parameters ---------- dtype : PandasExtensionDtype or string Returns ------- return the first matching dtype, otherwise return None """ if not isinstance(dtype, str): dtype_type = dtype if not isinstance(dtype, type): dtype_type = type(dtype) if issubclass(dtype_type, ExtensionDtype): return dtype return None for dtype_type in self.dtypes: try: return dtype_type.construct_from_string(dtype) except TypeError: pass return None
Provide compat for constructing numpy datetime64's from strings: tz changes in numpy 1.11 make '2015-01-01 09:00:00Z' show a deprecation warning, in which case we need to pass '2015-01-01 09:00:00'.
def np_datetime64_compat(s, *args, **kwargs): """ provide compat for construction of strings to numpy datetime64's with tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ s = tz_replacer(s) return np.datetime64(s, *args, **kwargs)
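For example:

    import numpy as np

    # The trailing 'Z' is stripped before handing the string to numpy, so no
    # timezone deprecation warning is raised on numpy >= 1.11.
    val = np_datetime64_compat('2015-01-01 09:00:00Z', 'ns')
    print(val)  # 2015-01-01T09:00:00.000000000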
Provide compat for constructing a np.array(..., dtype=np.datetime64(..)) from an array of strings: tz changes in numpy 1.11 make '2015-01-01 09:00:00Z' show a deprecation warning, in which case we need to pass '2015-01-01 09:00:00'.
def np_array_datetime64_compat(arr, *args, **kwargs): """ provide compat for construction of an array of strings to a np.array(..., dtype=np.datetime64(..)) tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ # is_list_like if (hasattr(arr, '__iter__') and not isinstance(arr, (str, bytes))): arr = [tz_replacer(s) for s in arr] else: arr = tz_replacer(arr) return np.array(arr, *args, **kwargs)
Ensure incoming data can be represented as ints.
def _assert_safe_casting(cls, data, subarr): """ Ensure incoming data can be represented as ints. """ if not issubclass(data.dtype.type, np.signedinteger): if not np.array_equal(data, subarr): raise TypeError('Unsafe NumPy casting, you must ' 'explicitly cast')
We always want to get an index value, never a value.
def get_value(self, series, key): """ we always want to get an index value, never a value """ if not is_scalar(key): raise InvalidIndexError k = com.values_from_object(key) loc = self.get_loc(k) new_values = com.values_from_object(series)[loc] return new_values
Determines if two Index objects contain the same elements.
def equals(self, other): """ Determines if two Index objects contain the same elements. """ if self is other: return True if not isinstance(other, Index): return False # need to compare nans locations and make sure that they are the same # since nans don't compare equal this is a bit tricky try: if not isinstance(other, Float64Index): other = self._constructor(other) if (not is_dtype_equal(self.dtype, other.dtype) or self.shape != other.shape): return False left, right = self._ndarray_values, other._ndarray_values return ((left == right) | (self._isnan & other._isnan)).all() except (TypeError, ValueError): return False
If we have bytes, decode them to unicode.
def _ensure_decoded(s): """ if we have bytes, decode them to unicode """ if isinstance(s, np.bytes_): s = s.decode('UTF-8') return s
Ensure that the where is a Term or a list of Terms; this makes sure that we are capturing the scope of variables that are passed. Create the terms here with a frame_level=2 (we are 2 levels down).
def _ensure_term(where, scope_level): """ ensure that the where is a Term or a list of Term this makes sure that we are capturing the scope of variables that are passed create the terms here with a frame_level=2 (we are 2 levels down) """ # only consider list/tuple here as an ndarray is automatically a coordinate # list level = scope_level + 1 if isinstance(where, (list, tuple)): wlist = [] for w in filter(lambda x: x is not None, where): if not maybe_expression(w): wlist.append(w) else: wlist.append(Term(w, scope_level=level)) where = wlist elif maybe_expression(where): where = Term(where, scope_level=level) return where
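In practice this is what lets HDFStore queries reference local variables; a sketch where the file name and key are hypothetical:

    import pandas as pd

    cutoff = pd.Timestamp('2019-01-01')
    with pd.HDFStore('store.h5') as store:
        # The string is wrapped in a Term that resolves `cutoff` from the
        # caller's scope, two frames up from where the Term is created.
        subset = store.select('df', where='index > cutoff')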
Store this object, close it if we opened it.
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, append=None, **kwargs): """ store this object, close it if we opened it """ if append: f = lambda store: store.append(key, value, **kwargs) else: f = lambda store: store.put(key, value, **kwargs) path_or_buf = _stringify_path(path_or_buf) if isinstance(path_or_buf, str): with HDFStore(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: f(store) else: f(path_or_buf)
Read from the store, close it if we opened it.
def read_hdf(path_or_buf, key=None, mode='r', **kwargs): """ Read from the store, close it if we opened it. Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- path_or_buf : string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. Supports any object implementing the ``__fspath__`` protocol. This includes :class:`pathlib.Path` and py._path.local.LocalPath objects. .. versionadded:: 0.19.0 support for pathlib, py.path. .. versionadded:: 0.21.0 support for __fspath__ protocol. key : object, optional The group identifier in the store. Can be omitted if the HDF file contains a single pandas object. mode : {'r', 'r+', 'a'}, optional Mode to use when opening the file. Ignored if path_or_buf is a :class:`pandas.HDFStore`. Default is 'r'. where : list, optional A list of Term (or convertible) objects. start : int, optional Row number to start selection. stop : int, optional Row number to stop selection. columns : list, optional A list of columns names to return. iterator : bool, optional Return an iterator object. chunksize : int, optional Number of rows to include in an iteration when using an iterator. errors : str, default 'strict' Specifies how encoding and decoding errors are to be handled. See the errors argument for :func:`open` for a full list of options. **kwargs Additional keyword arguments passed to HDFStore. Returns ------- item : object The selected object. Return type depends on the object stored. See Also -------- DataFrame.to_hdf : Write a HDF file from a DataFrame. HDFStore : Low-level access to HDF files. Examples -------- >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z']) >>> df.to_hdf('./store.h5', 'data') >>> reread = pd.read_hdf('./store.h5') """ if mode not in ['r', 'r+', 'a']: raise ValueError('mode {0} is not allowed while performing a read. ' 'Allowed modes are r, r+ and a.'.format(mode)) # grab the scope if 'where' in kwargs: kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1) if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise IOError('The HDFStore must be open for reading.') store = path_or_buf auto_close = False else: path_or_buf = _stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError('Support for generic buffers has not ' 'been implemented.') try: exists = os.path.exists(path_or_buf) # if filepath is too long except (TypeError, ValueError): exists = False if not exists: raise FileNotFoundError( 'File {path} does not exist'.format(path=path_or_buf)) store = HDFStore(path_or_buf, mode=mode, **kwargs) # can't auto open/close if we are using an iterator # so delegate to the iterator auto_close = True try: if key is None: groups = store.groups() if len(groups) == 0: raise ValueError('No dataset in HDF5 file.') candidate_only_group = groups[0] # For the HDF file to have only one dataset, all other groups # should then be metadata groups for that candidate group. (This # assumes that the groups() method enumerates parent groups # before their children.) for group_to_check in groups[1:]: if not _is_metadata_of(group_to_check, candidate_only_group): raise ValueError('key must be provided when HDF5 file ' 'contains multiple datasets.') key = candidate_only_group._v_pathname return store.select(key, auto_close=auto_close, **kwargs) except (ValueError, TypeError, KeyError): # if there is an error, close the store try: store.close() except AttributeError: pass raise
Check if a given group is a metadata group for a given parent_group.
def _is_metadata_of(group, parent_group): """Check if a given group is a metadata group for a given parent_group.""" if group._v_depth <= parent_group._v_depth: return False current = group while current._v_depth > 1: parent = current._v_parent if parent == parent_group and current._v_name == 'meta': return True current = current._v_parent return False
Get/create the info for this name.
def _get_info(info, name): """ get/create the info for this name """ try: idx = info[name] except KeyError: idx = info[name] = dict() return idx
For a tz-aware type, return an encoded zone.
def _get_tz(tz): """ for a tz-aware type, return an encoded zone """ zone = timezones.get_timezone(tz) if zone is None: zone = tz.utcoffset().total_seconds() return zone
Coerce the values to a DatetimeIndex if tz is set; preserve the input shape if possible.
def _set_tz(values, tz, preserve_UTC=False, coerce=False): """ coerce the values to a DatetimeIndex if tz is set preserve the input shape if possible Parameters ---------- values : ndarray tz : string/pickled tz object preserve_UTC : boolean, preserve the UTC of the result coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray """ if tz is not None: name = getattr(values, 'name', None) values = values.ravel() tz = timezones.get_timezone(_ensure_decoded(tz)) values = DatetimeIndex(values, name=name) if values.tz is None: values = values.tz_localize('UTC').tz_convert(tz) if preserve_UTC: if tz == 'UTC': values = list(values) elif coerce: values = np.asarray(values, dtype='M8[ns]') return values
We take a string-like that is object dtype and coerce to a fixed size string type.
def _convert_string_array(data, encoding, errors, itemsize=None): """ we take a string-like that is object dtype and coerce to a fixed size string type Parameters ---------- data : a numpy array of object dtype encoding : None or string-encoding errors : handler for encoding errors itemsize : integer, optional, defaults to the max length of the strings Returns ------- data in a fixed-length string dtype, encoded to bytes if needed """ # encode if needed if encoding is not None and len(data): data = Series(data.ravel()).str.encode( encoding, errors).values.reshape(data.shape) # create the sized dtype if itemsize is None: ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype="S{size}".format(size=itemsize)) return data
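A round-trip of the sizing rule in isolation (illustrative, plain numpy):

    import numpy as np

    data = np.array(['a', 'ccc', 'bb'], dtype=object)
    # itemsize defaults to the longest encoded string; shorter values are
    # zero-padded by numpy's fixed-width bytes dtype.
    itemsize = max(len(s.encode('utf-8')) for s in data)
    fixed = np.asarray(data, dtype='S{size}'.format(size=itemsize))
    print(fixed.dtype)  # |S3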
inverse of _convert_string_array
def _unconvert_string_array(data, nan_rep=None, encoding=None, errors='strict'): """ inverse of _convert_string_array Parameters ---------- data : fixed length string dtyped array nan_rep : the storage repr of NaN, optional encoding : the encoding of the data, optional errors : handler for encoding errors, default 'strict' Returns ------- an object array of the decoded data """ shape = data.shape data = np.asarray(data.ravel(), dtype=object) # guard against a None encoding (because of a legacy # where the passed encoding is actually None) encoding = _ensure_encoding(encoding) if encoding is not None and len(data): itemsize = libwriters.max_len_string_array(ensure_object(data)) dtype = "U{0}".format(itemsize) if isinstance(data[0], bytes): data = Series(data).str.decode(encoding, errors=errors).values else: data = data.astype(dtype, copy=False).astype(object, copy=False) if nan_rep is None: nan_rep = 'nan' data = libwriters.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape)
Open the file in the specified mode
def open(self, mode='a', **kwargs): """ Open the file in the specified mode Parameters ---------- mode : {'a', 'w', 'r', 'r+'}, default 'a' See HDFStore docstring or tables.open_file for info about modes """ tables = _tables() if self._mode != mode: # if we are changing a write mode to read, ok if self._mode in ['a', 'w'] and mode in ['r', 'r+']: pass elif mode in ['w']: # this would truncate, raise here if self.is_open: raise PossibleDataLossError( "Re-opening the file [{0}] with mode [{1}] " "will delete the current file!" .format(self._path, self._mode) ) self._mode = mode # close and reopen the handle if self.is_open: self.close() if self._complevel and self._complevel > 0: self._filters = _tables().Filters(self._complevel, self._complib, fletcher32=self._fletcher32) try: self._handle = tables.open_file(self._path, self._mode, **kwargs) except (IOError) as e: # pragma: no cover if 'can not be written' in str(e): print( 'Opening {path} in read-only mode'.format(path=self._path)) self._handle = tables.open_file(self._path, 'r', **kwargs) else: raise except (ValueError) as e: # trap PyTables >= 3.1 FILE_OPEN_POLICY exception # to provide an updated message if 'FILE_OPEN_POLICY' in str(e): e = ValueError( "PyTables [{version}] no longer supports opening multiple " "files\n" "even in read-only mode on this HDF5 version " "[{hdf_version}]. You can accept this\n" "and not open the same file multiple times at once,\n" "upgrade the HDF5 version, or downgrade to PyTables 3.0.0 " "which allows\n" "files to be opened multiple times at once\n" .format(version=tables.__version__, hdf_version=tables.get_hdf5_version())) raise e except (Exception) as e: # trying to read from a non-existent file causes an error which # is not part of IOError, make it one if self._mode == 'r' and 'Unable to open/create file' in str(e): raise IOError(str(e)) raise
Force all buffered modifications to be written to disk.
def flush(self, fsync=False): """ Force all buffered modifications to be written to disk. Parameters ---------- fsync : bool (default False) call ``os.fsync()`` on the file handle to force writing to disk. Notes ----- Without ``fsync=True``, flushing may not guarantee that the OS writes to disk. With fsync, the operation will block until the OS claims the file has been written; however, other caching layers may still interfere. """ if self._handle is not None: self._handle.flush() if fsync: try: os.fsync(self._handle.fileno()) except OSError: pass
Retrieve pandas object stored in file
def get(self, key): """ Retrieve pandas object stored in file Parameters ---------- key : object Returns ------- obj : same type as object stored in file """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) return self._read_group(group)
Retrieve pandas object stored in file, optionally based on where criteria.
def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas object stored in file, optionally based on where criteria Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection columns : a list of columns that if not None, will limit the return columns iterator : boolean, return an iterator, default False chunksize : nrows to include in iteration, return an iterator auto_close : boolean, should automatically close the store when finished, default is False Returns ------- The selected object """ group = self.get_node(key) if group is None: raise KeyError('No object named {key} in the file'.format(key=key)) # create the storer and axes where = _ensure_term(where, scope_level=1) s = self._create_storer(group) s.infer_axes() # function to call on iteration def func(_start, _stop, _where): return s.read(start=_start, stop=_stop, where=_where, columns=columns) # create the iterator it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result()
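For example, streaming a stored table in chunks rather than materializing it all at once (the file name and key are hypothetical):

    import pandas as pd

    with pd.HDFStore('store.h5', mode='r') as store:
        for chunk in store.select('df', columns=['A'], chunksize=10000):
            ...  # process each DataFrame chunk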
return the selection as an Index
def select_as_coordinates( self, key, where=None, start=None, stop=None, **kwargs): """ return the selection as an Index Parameters ---------- key : object where : list of Term (or convertible) objects, optional start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ where = _ensure_term(where, scope_level=1) return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, **kwargs)
return a single column from the table. This is generally only useful to select an indexable
def select_column(self, key, column, **kwargs): """ return a single column from the table. This is generally only useful to select an indexable Parameters ---------- key : object column: the column of interest Exceptions ---------- raises KeyError if the column is not found (or key is not a valid store) raises ValueError if the column can not be extracted individually (it is part of a data block) """ return self.get_storer(key).read_column(column=column, **kwargs)
Retrieve pandas objects from multiple tables
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
                       start=None, stop=None, iterator=False,
                       chunksize=None, auto_close=False, **kwargs):
    """ Retrieve pandas objects from multiple tables

    Parameters
    ----------
    keys : a list of the tables
    selector : the table to apply the where criteria (defaults to keys[0]
        if not supplied)
    columns : the columns I want back
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection
    iterator : boolean, return an iterator, default False
    chunksize : nrows to include in iteration, return an iterator

    Exceptions
    ----------
    raises KeyError if keys or selector is not found or keys is empty
    raises TypeError if keys is not a list or tuple
    raises ValueError if the tables are not ALL THE SAME DIMENSIONS
    """

    # default to single select
    where = _ensure_term(where, scope_level=1)
    if isinstance(keys, (list, tuple)) and len(keys) == 1:
        keys = keys[0]
    if isinstance(keys, str):
        return self.select(key=keys, where=where, columns=columns,
                           start=start, stop=stop, iterator=iterator,
                           chunksize=chunksize, **kwargs)

    if not isinstance(keys, (list, tuple)):
        raise TypeError("keys must be a list/tuple")

    if not len(keys):
        raise ValueError("keys must have a non-zero length")

    if selector is None:
        selector = keys[0]

    # collect the tables
    tbls = [self.get_storer(k) for k in keys]
    s = self.get_storer(selector)

    # validate rows
    nrows = None
    for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
        if t is None:
            raise KeyError("Invalid table [{key}]".format(key=k))
        if not t.is_table:
            raise TypeError(
                "object [{obj}] is not a table, and cannot be used in all "
                "select as multiple".format(obj=t.pathname)
            )

        if nrows is None:
            nrows = t.nrows
        elif t.nrows != nrows:
            raise ValueError(
                "all tables must have exactly the same nrows!")

    # axis is the concatenation axes
    axis = list({t.non_index_axes[0][0] for t in tbls})[0]

    def func(_start, _stop, _where):

        # retrieve the objs, _where is always passed as a set of
        # coordinates here
        objs = [t.read(where=_where, columns=columns, start=_start,
                       stop=_stop, **kwargs) for t in tbls]

        # concat and return
        return concat(objs, axis=axis,
                      verify_integrity=False)._consolidate()

    # create the iterator
    it = TableIterator(self, s, func, where=where, nrows=nrows,
                       start=start, stop=stop, iterator=iterator,
                       chunksize=chunksize, auto_close=auto_close)

    return it.get_result(coordinates=True)
Store object in HDFStore
def put(self, key, value, format=None, append=False, **kwargs):
    """
    Store object in HDFStore

    Parameters
    ----------
    key : object
    value : {Series, DataFrame}
    format : 'fixed(f)|table(t)', default is 'fixed'
        fixed(f) : Fixed format
            Fast writing/reading. Not-appendable, nor searchable
        table(t) : Table format
            Write as a PyTables Table structure which may perform
            worse but allow more flexible operations like searching /
            selecting subsets of the data
    append : boolean, default False
        This will force Table format, append the input data to the
        existing.
    data_columns : list of columns to create as data columns, or True to
        use all columns. See `here
        <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__  # noqa
    encoding : default None, provide an encoding for strings
    dropna : boolean, default False, do not write an ALL nan row to
        the store settable by the option 'io.hdf.dropna_table'
    """
    if format is None:
        format = get_option("io.hdf.default_format") or 'fixed'
    kwargs = self._validate_format(format, kwargs)
    self._write_to_group(key, value, append=append, **kwargs)
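A short sketch of the fixed/table trade-off (path hypothetical):

import pandas as pd

s = pd.Series(range(3))
with pd.HDFStore('put_demo.h5', mode='w') as store:  # hypothetical path
    store.put('s_fixed', s)                  # fast, but not queryable
    store.put('s_table', s, format='table')  # appendable / searchable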
Remove pandas object partially by specifying the where condition
def remove(self, key, where=None, start=None, stop=None):
    """
    Remove pandas object partially by specifying the where condition

    Parameters
    ----------
    key : string
        Node to remove or delete rows from
    where : list of Term (or convertible) objects, optional
    start : integer (defaults to None), row number to start selection
    stop : integer (defaults to None), row number to stop selection

    Returns
    -------
    number of rows removed (or None if not a Table)

    Exceptions
    ----------
    raises KeyError if key is not a valid store
    """
    where = _ensure_term(where, scope_level=1)
    try:
        s = self.get_storer(key)
    except KeyError:
        # the key is not a valid store, re-raising KeyError
        raise
    except Exception:

        if where is not None:
            raise ValueError(
                "trying to remove a node with a non-None where clause!")

        # we are actually trying to remove a node (with children)
        s = self.get_node(key)
        if s is not None:
            s._f_remove(recursive=True)
            return None

    # remove the node
    if com._all_none(where, start, stop):
        s.group._f_remove(recursive=True)

    # delete from the table
    else:
        if not s.is_table:
            raise ValueError(
                'can only remove with where on objects written as tables')
        return s.delete(where=where, start=start, stop=stop)
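A sketch of partial versus full removal (names hypothetical); row-wise deletion only works on table-format nodes:

import pandas as pd

df = pd.DataFrame({'A': range(10)})
with pd.HDFStore('rm_demo.h5', mode='w') as store:  # hypothetical path
    store.append('df', df, data_columns=True)
    n = store.remove('df', where='A < 3')  # deletes rows, returns count
    store.remove('df')                     # no where: drops the node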
Append to Table in file. Node must already exist and be Table format.
def append(self, key, value, format=None, append=True, columns=None,
           dropna=None, **kwargs):
    """
    Append to Table in file. Node must already exist and be Table
    format.

    Parameters
    ----------
    key : object
    value : {Series, DataFrame}
    format : 'table' is the default
        table(t) : table format
            Write as a PyTables Table structure which may perform
            worse but allow more flexible operations like searching /
            selecting subsets of the data
    append : boolean, default True, append the input data to the
        existing
    data_columns : list of columns, or True, default None
        List of columns to create as indexed data columns for on-disk
        queries, or True to use all columns. By default only the axes
        of the object are indexed. See `here
        <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
    min_itemsize : dict of columns that specify minimum string sizes
    nan_rep : string to use as string nan representation
    chunksize : size to chunk the writing
    expectedrows : expected TOTAL row size of this table
    encoding : default None, provide an encoding for strings
    dropna : boolean, default False, do not write an ALL nan row to
        the store settable by the option 'io.hdf.dropna_table'

    Notes
    -----
    Does *not* check if data being appended overlaps with existing
    data in the table, so be careful
    """
    if columns is not None:
        raise TypeError("columns is not a supported keyword in append, "
                        "try data_columns")

    if dropna is None:
        dropna = get_option("io.hdf.dropna_table")
    if format is None:
        format = get_option("io.hdf.default_format") or 'table'
    kwargs = self._validate_format(format, kwargs)
    self._write_to_group(key, value, append=append, dropna=dropna,
                         **kwargs)
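A sketch showing rows accumulating across appends (names hypothetical); note the docstring's caveat that overlap is not checked:

import pandas as pd

df1 = pd.DataFrame({'A': range(3)})
df2 = pd.DataFrame({'A': range(3, 6)})
with pd.HDFStore('app_demo.h5', mode='w') as store:  # hypothetical path
    store.append('df', df1, data_columns=True)
    store.append('df', df2)  # rows accumulate; no overlap checking
    assert len(store.select('df')) == 6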
Append to multiple tables
def append_to_multiple(self, d, value, selector, data_columns=None,
                       axes=None, dropna=False, **kwargs):
    """
    Append to multiple tables

    Parameters
    ----------
    d : a dict of table_name to table_columns, None is acceptable as the
        values of one node (this will get all the remaining columns)
    value : a pandas object
    selector : a string that designates the indexable table; all of its
        columns will be designated as data_columns, unless data_columns
        is passed, in which case these are used
    data_columns : list of columns to create as data columns, or True to
        use all columns
    dropna : if evaluates to True, drop rows from all tables if any single
        row in each table has all NaN. Default False.

    Notes
    -----
    axes parameter is currently not accepted
    """
    if axes is not None:
        raise TypeError("axes is currently not accepted as a parameter to"
                        " append_to_multiple; you can create the "
                        "tables independently instead")

    if not isinstance(d, dict):
        raise ValueError(
            "append_to_multiple must have a dictionary specified as the "
            "way to split the value"
        )

    if selector not in d:
        raise ValueError(
            "append_to_multiple requires a selector that is in passed dict"
        )

    # figure out the splitting axis (the non_index_axis)
    axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]

    # figure out how to split the value
    remain_key = None
    remain_values = []
    for k, v in d.items():
        if v is None:
            if remain_key is not None:
                raise ValueError(
                    "append_to_multiple can only have one value in d that "
                    "is None"
                )
            remain_key = k
        else:
            remain_values.extend(v)
    if remain_key is not None:
        ordered = value.axes[axis]
        ordd = ordered.difference(Index(remain_values))
        ordd = sorted(ordered.get_indexer(ordd))
        d[remain_key] = ordered.take(ordd)

    # data_columns
    if data_columns is None:
        data_columns = d[selector]

    # ensure rows are synchronized across the tables
    if dropna:
        idxs = (value[cols].dropna(how='all').index for cols in d.values())
        valid_index = next(idxs)
        for index in idxs:
            valid_index = valid_index.intersection(index)
        value = value.loc[valid_index]

    # append
    for k, v in d.items():
        dc = data_columns if k == selector else None

        # compute the val
        val = value.reindex(v, axis=axis)

        self.append(k, val, data_columns=dc, **kwargs)
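A sketch of the dict-splitting convention (names hypothetical); the None value collects all columns not claimed elsewhere:

import pandas as pd
import numpy as np

df = pd.DataFrame(np.random.randn(6, 3), columns=['x', 'y', 'z'])
with pd.HDFStore('split_demo.h5', mode='w') as store:  # hypothetical path
    # 'sel' gets x; 'rest' receives the remaining columns y and z
    store.append_to_multiple({'sel': ['x'], 'rest': None}, df,
                             selector='sel')
    back = store.select_as_multiple(['sel', 'rest'], selector='sel')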
Create a pytables index on the table
def create_table_index(self, key, **kwargs):
    """
    Create a pytables index on the table

    Parameters
    ----------
    key : object (the node to index)

    Exceptions
    ----------
    raises if the node is not a table
    """

    # version requirements
    _tables()
    s = self.get_storer(key)
    if s is None:
        return

    if not s.is_table:
        raise TypeError(
            "cannot create table index on a Fixed format store")
    s.create_index(**kwargs)
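A sketch of the bulk-load pattern this enables (names hypothetical): write with index=False, then build the PyTables index once at the end:

import pandas as pd

df = pd.DataFrame({'A': range(5)})
with pd.HDFStore('idx_demo.h5', mode='w') as store:  # hypothetical path
    # defer index creation during the (possibly repeated) writes
    store.append('df', df, data_columns=['A'], index=False)
    # build the on-disk index once, after the bulk load
    store.create_table_index('df', columns=['A'], optlevel=9, kind='full')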
return a list of all the top-level nodes (that are not themselves a pandas storage object)
def groups(self):
    """return a list of all the top-level nodes (that are not themselves a
    pandas storage object)
    """
    _tables()
    self._check_if_open()
    return [
        g for g in self._handle.walk_groups()
        if (not isinstance(g, _table_mod.link.Link) and
            (getattr(g._v_attrs, 'pandas_type', None) or
             getattr(g, 'table', None) or
             (isinstance(g, _table_mod.table.Table) and
              g._v_name != 'table')))
    ]
Walk the pytables group hierarchy for pandas objects
def walk(self, where="/"):
    """ Walk the pytables group hierarchy for pandas objects

    This generator will yield the group path, subgroups and pandas object
    names for each group.
    Any non-pandas PyTables objects that are not a group will be ignored.
    The `where` group itself is listed first (preorder), then each of its
    child groups (following an alphanumerical order) is also traversed,
    following the same procedure.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    where : str, optional
        Group where to start walking.
        If not supplied, the root group is used.

    Yields
    ------
    path : str
        Full path to a group (without trailing '/')
    groups : list of str
        names of the groups contained in `path`
    leaves : list of str
        names of the pandas objects contained in `path`
    """
    _tables()
    self._check_if_open()
    for g in self._handle.walk_groups(where):
        if getattr(g._v_attrs, 'pandas_type', None) is not None:
            continue

        groups = []
        leaves = []
        for child in g._v_children.values():
            pandas_type = getattr(child._v_attrs, 'pandas_type', None)
            if pandas_type is None:
                if isinstance(child, _table_mod.group.Group):
                    groups.append(child._v_name)
            else:
                leaves.append(child._v_name)

        yield (g._v_pathname.rstrip('/'), groups, leaves)
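A sketch of the traversal order (names hypothetical):

import pandas as pd

with pd.HDFStore('walk_demo.h5', mode='w') as store:  # hypothetical path
    store.put('foo/bar', pd.Series(range(3)))
    store.put('foo/baz', pd.Series(range(3)))
    for path, groups, leaves in store.walk():
        # yields '' ['foo'] [], then '/foo' [] ['bar', 'baz']
        print(path, groups, leaves)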
return the node with the key or None if it does not exist
def get_node(self, key):
    """ return the node with the key or None if it does not exist """
    self._check_if_open()
    try:
        if not key.startswith('/'):
            key = '/' + key
        return self._handle.get_node(self.root, key)
    except _table_mod.exceptions.NoSuchNodeError:
        return None
return the storer object for a key, raise if not in the file
def get_storer(self, key):
    """ return the storer object for a key, raise if not in the file """
    group = self.get_node(key)
    if group is None:
        raise KeyError('No object named {key} in the file'.format(key=key))
    s = self._create_storer(group)
    s.infer_axes()
    return s
copy the existing store to a new file, upgrading in place
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
         complevel=None, fletcher32=False, overwrite=True):
    """ copy the existing store to a new file, upgrading in place

    Parameters
    ----------
    propindexes : restore indexes in copied file (defaults to True)
    keys : list of keys to include in the copy (defaults to all)
    overwrite : overwrite (remove and replace) existing nodes in the new
        store (default is True)
    mode, complib, complevel, fletcher32 same as in HDFStore.__init__

    Returns
    -------
    open file handle of the new store
    """
    new_store = HDFStore(
        file,
        mode=mode,
        complib=complib,
        complevel=complevel,
        fletcher32=fletcher32)
    if keys is None:
        keys = list(self.keys())
    if not isinstance(keys, (tuple, list)):
        keys = [keys]
    for k in keys:
        s = self.get_storer(k)
        if s is not None:

            if k in new_store:
                if overwrite:
                    new_store.remove(k)

            data = self.select(k)
            if s.is_table:

                index = False
                if propindexes:
                    index = [a.name for a in s.axes if a.is_indexed]
                new_store.append(
                    k, data, index=index,
                    data_columns=getattr(s, 'data_columns', None),
                    encoding=s.encoding
                )
            else:
                new_store.put(k, data, encoding=s.encoding)

    return new_store
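A sketch of rewriting a store into a compressed copy (paths hypothetical):

import pandas as pd

df = pd.DataFrame({'A': range(4)})
with pd.HDFStore('src_demo.h5', mode='w') as store:  # hypothetical paths
    store.append('df', df, data_columns=True)
    new = store.copy('dst_demo.h5', complib='zlib', complevel=9)
    new.close()  # copy() returns the new store open; close it when done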
Print detailed information on the store.
def info(self):
    """
    Print detailed information on the store.

    .. versionadded:: 0.21.0
    """
    output = '{type}\nFile path: {path}\n'.format(
        type=type(self), path=pprint_thing(self._path))
    if self.is_open:
        lkeys = sorted(list(self.keys()))
        if len(lkeys):
            keys = []
            values = []

            for k in lkeys:
                try:
                    s = self.get_storer(k)
                    if s is not None:
                        keys.append(pprint_thing(s.pathname or k))
                        values.append(
                            pprint_thing(s or 'invalid_HDFStore node'))
                except Exception as detail:
                    keys.append(k)
                    values.append(
                        "[invalid_HDFStore node: {detail}]".format(
                            detail=pprint_thing(detail)))

            output += adjoin(12, keys, values)
        else:
            output += 'Empty'
    else:
        output += "File is CLOSED"

    return output
validate / deprecate formats; return the new kwargs
def _validate_format(self, format, kwargs):
    """ validate / deprecate formats; return the new kwargs """
    kwargs = kwargs.copy()

    # validate
    try:
        kwargs['format'] = _FORMAT_MAP[format.lower()]
    except KeyError:
        raise TypeError("invalid HDFStore format specified [{0}]"
                        .format(format))

    return kwargs
return a suitable class to operate
def _create_storer(self, group, format=None, value=None, append=False,
                   **kwargs):
    """ return a suitable class to operate """

    def error(t):
        raise TypeError(
            "cannot properly create the storer for: [{t}] [group->"
            "{group},value->{value},format->{format},append->{append},"
            "kwargs->{kwargs}]".format(t=t, group=group,
                                       value=type(value), format=format,
                                       append=append, kwargs=kwargs))

    pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
    tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))

    # infer the pt from the passed value
    if pt is None:
        if value is None:

            _tables()
            if (getattr(group, 'table', None) or
                    isinstance(group, _table_mod.table.Table)):
                pt = 'frame_table'
                tt = 'generic_table'
            else:
                raise TypeError(
                    "cannot create a storer if the object does not exist "
                    "and no value is passed")
        else:

            try:
                pt = _TYPE_MAP[type(value)]
            except KeyError:
                error('_TYPE_MAP')

            # we are actually a table
            if format == 'table':
                pt += '_table'

    # a storer node
    if 'table' not in pt:
        try:
            return globals()[_STORER_MAP[pt]](self, group, **kwargs)
        except KeyError:
            error('_STORER_MAP')

    # existing node (and must be a table)
    if tt is None:

        # if we are a writer, determine the tt
        if value is not None:

            if pt == 'series_table':
                index = getattr(value, 'index', None)
                if index is not None:
                    if index.nlevels == 1:
                        tt = 'appendable_series'
                    elif index.nlevels > 1:
                        tt = 'appendable_multiseries'
            elif pt == 'frame_table':
                index = getattr(value, 'index', None)
                if index is not None:
                    if index.nlevels == 1:
                        tt = 'appendable_frame'
                    elif index.nlevels > 1:
                        tt = 'appendable_multiframe'
            elif pt == 'wide_table':
                tt = 'appendable_panel'
            elif pt == 'ndim_table':
                tt = 'appendable_ndim'

        else:

            # distinguish between a frame/table
            tt = 'legacy_panel'
            try:
                fields = group.table._v_attrs.fields
                if len(fields) == 1 and fields[0] == 'value':
                    tt = 'legacy_frame'
            except IndexError:
                pass

    try:
        return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
    except KeyError:
        error('_TABLE_MAP')
set the name of this indexer
def set_name(self, name, kind_attr=None):
    """ set the name of this indexer """
    self.name = name
    self.kind_attr = kind_attr or "{name}_kind".format(name=name)
    if self.cname is None:
        self.cname = name

    return self
set the position of this column in the Table
def set_pos(self, pos):
    """ set the position of this column in the Table """
    self.pos = pos
    if pos is not None and self.typ is not None:
        self.typ._v_pos = pos
    return self
return whether I am an indexed column
def is_indexed(self):
    """ return whether I am an indexed column """
    try:
        return getattr(self.table.cols, self.cname).is_indexed
    except AttributeError:
        # the table is not yet set, so the column cannot be indexed;
        # the original fell through here without returning
        return False
infer this column from the table: create and return a new object
def infer(self, handler):
    """infer this column from the table: create and return a new object"""
    table = handler.table
    new_self = self.copy()
    new_self.set_table(table)
    new_self.get_attr()
    new_self.read_metadata(handler)
    return new_self
set the values from this selection: take = take ownership
def convert(self, values, nan_rep, encoding, errors):
    """ set the values from this selection: take = take ownership """

    # values is a recarray
    if values.dtype.fields is not None:
        values = values[self.cname]

    values = _maybe_convert(values, self.kind, encoding, errors)

    kwargs = dict()
    if self.freq is not None:
        kwargs['freq'] = _ensure_decoded(self.freq)
    if self.index_name is not None:
        kwargs['name'] = _ensure_decoded(self.index_name)
    # making an Index instance could throw a number of different errors
    try:
        self.values = Index(values, **kwargs)
    except Exception:  # noqa: E722

        # if the output freq is different than what we recorded,
        # it should be None (see also 'doc example part 2')
        if 'freq' in kwargs:
            kwargs['freq'] = None
        self.values = Index(values, **kwargs)

    self.values = _set_tz(self.values, self.tz)

    return self
maybe set a string col itemsize: min_itemsize can be an integer or a dict mapping this column's name to an integer size
def maybe_set_size(self, min_itemsize=None):
    """ maybe set a string col itemsize:
        min_itemsize can be an integer or a dict mapping this column's
        name to an integer size """
    if _ensure_decoded(self.kind) == 'string':

        if isinstance(min_itemsize, dict):
            min_itemsize = min_itemsize.get(self.name)

        if min_itemsize is not None and self.typ.itemsize < min_itemsize:
            self.typ = _tables(
            ).StringCol(itemsize=min_itemsize, pos=self.pos)
validate this column: return the itemsize of the existing column that was compared against
def validate_col(self, itemsize=None):
    """ validate this column: return the itemsize of the existing
        column that was compared against """

    # validate this column for string truncation (or reset to the max
    # size)
    if _ensure_decoded(self.kind) == 'string':
        c = self.col
        if c is not None:
            if itemsize is None:
                itemsize = self.itemsize
            if c.itemsize < itemsize:
                raise ValueError(
                    "Trying to store a string with len [{itemsize}] in "
                    "[{cname}] column but\nthis column has a limit of "
                    "[{c_itemsize}]!\nConsider using min_itemsize to "
                    "preset the sizes on these columns".format(
                        itemsize=itemsize, cname=self.cname,
                        c_itemsize=c.itemsize))
            return c.itemsize

    return None
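A sketch of how min_itemsize interacts with this validation (names hypothetical): reserving space up front avoids the truncation ValueError on later appends:

import pandas as pd

df = pd.DataFrame({'s': ['ab', 'cd']})
with pd.HDFStore('str_demo.h5', mode='w') as store:  # hypothetical path
    # reserve 20 bytes for 's'; without this, appending a longer value
    # later would raise the ValueError shown above
    store.append('df', df, min_itemsize={'s': 20})
    store.append('df', pd.DataFrame({'s': ['a longer string here']}))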
set/update the info for this indexable with the key/value; if there is a conflict, raise/warn as needed
def update_info(self, info):
    """ set/update the info for this indexable with the key/value
        if there is a conflict raise/warn as needed """

    for key in self._info_fields:

        value = getattr(self, key, None)
        idx = _get_info(info, self.name)

        existing_value = idx.get(key)
        if key in idx and value is not None and existing_value != value:

            # frequency/name just warn
            if key in ['freq', 'index_name']:
                ws = attribute_conflict_doc % (key, existing_value, value)
                warnings.warn(ws, AttributeConflictWarning, stacklevel=6)

                # reset
                idx[key] = None
                setattr(self, key, None)

            else:
                raise ValueError(
                    "invalid info for [{name}] for [{key}], "
                    "existing_value [{existing_value}] conflicts with "
                    "new value [{value}]".format(
                        name=self.name, key=key,
                        existing_value=existing_value, value=value))
        else:
            if value is not None or existing_value is not None:
                idx[key] = value

    return self
set my state from the passed info
def set_info(self, info):
    """ set my state from the passed info """
    idx = info.get(self.name)
    if idx is not None:
        self.__dict__.update(idx)
validate that kind=category does not change the categories
def validate_metadata(self, handler):
    """ validate that kind=category does not change the categories """
    if self.meta == 'category':
        new_metadata = self.metadata
        cur_metadata = handler.read_metadata(self.cname)
        if (new_metadata is not None and cur_metadata is not None and
                not array_equivalent(new_metadata, cur_metadata)):
            raise ValueError("cannot append a categorical with "
                             "different categories to the existing")
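A sketch of the category check (names hypothetical): appending a frame whose categorical column has different categories raises:

import pandas as pd

df = pd.DataFrame({'c': pd.Categorical(['a', 'b'])})
with pd.HDFStore('cat_demo.h5', mode='w') as store:  # hypothetical path
    store.append('df', df)
    store.append('df', df)  # same categories: fine
    # appending {'c': pd.Categorical(['x', 'y'])} would raise the
    # ValueError above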
set the meta data
def write_metadata(self, handler):
    """ set the meta data """
    if self.metadata is not None:
        handler.write_metadata(self.cname, self.metadata)
set the values from this selection: take = take ownership
def convert(self, values, nan_rep, encoding, errors):
    """ set the values from this selection: take = take ownership """

    self.values = Int64Index(np.arange(self.table.nrows))
    return self
return a new datacol with the block i
def create_for_block(
        cls, i=None, name=None, cname=None, version=None, **kwargs):
    """ return a new datacol with the block i """

    if cname is None:
        cname = name or 'values_block_{idx}'.format(idx=i)
    if name is None:
        name = cname

    # prior to 0.10.1, we named values blocks like: values_block_0 and
    # the name values_0
    try:
        if version[0] == 0 and version[1] <= 10 and version[2] == 0:
            m = re.search(r"values_block_(\d+)", name)
            if m:
                name = "values_{group}".format(group=m.groups()[0])
    except IndexError:
        pass

    return cls(name=name, cname=cname, **kwargs)
record the metadata
def set_metadata(self, metadata):
    """ record the metadata """
    if metadata is not None:
        metadata = np.array(metadata, copy=False).ravel()
    self.metadata = metadata
create and set up my atom from the block
def set_atom(self, block, block_items, existing_col, min_itemsize,
             nan_rep, info, encoding=None, errors='strict'):
    """ create and set up my atom from the block """

    self.values = list(block_items)

    # short-cut certain block types
    if block.is_categorical:
        return self.set_atom_categorical(block, items=block_items,
                                         info=info)
    elif block.is_datetimetz:
        return self.set_atom_datetime64tz(block, info=info)
    elif block.is_datetime:
        return self.set_atom_datetime64(block)
    elif block.is_timedelta:
        return self.set_atom_timedelta64(block)
    elif block.is_complex:
        return self.set_atom_complex(block)

    dtype = block.dtype.name
    inferred_type = lib.infer_dtype(block.values, skipna=False)

    if inferred_type == 'date':
        raise TypeError(
            "[date] is not implemented as a table column")
    elif inferred_type == 'datetime':
        # after GH#8260
        # this only would be hit for a multi-timezone dtype
        # which is an error
        raise TypeError(
            "too many timezones in this block, create separate "
            "data columns"
        )
    elif inferred_type == 'unicode':
        raise TypeError(
            "[unicode] is not implemented as a table column")

    # this is basically a catchall; if say a datetime64 has nans then
    # will end up here
    elif inferred_type == 'string' or dtype == 'object':
        self.set_atom_string(
            block, block_items,
            existing_col,
            min_itemsize,
            nan_rep,
            encoding,
            errors)

    # set as a data block
    else:
        self.set_atom_data(block)
return the PyTables column class for this column
def get_atom_coltype(self, kind=None):
    """ return the PyTables column class for this column """
    if kind is None:
        kind = self.kind
    if self.kind.startswith('uint'):
        col_name = "UInt{name}Col".format(name=kind[4:])
    else:
        col_name = "{name}Col".format(name=kind.capitalize())

    return getattr(_tables(), col_name)
validate that we have the same order as the existing & same dtype
def validate_attr(self, append):
    """validate that we have the same order as the existing & same dtype"""
    if append:
        existing_fields = getattr(self.attrs, self.kind_attr, None)
        if (existing_fields is not None and
                existing_fields != list(self.values)):
            raise ValueError("appended items do not match existing items"
                             " in table!")

        existing_dtype = getattr(self.attrs, self.dtype_attr, None)
        if (existing_dtype is not None and
                existing_dtype != self.dtype):
            raise ValueError("appended items dtype does not match "
                             "existing items dtype in table!")
set the data from this selection ( and convert to the correct dtype if we can )
def convert(self, values, nan_rep, encoding, errors):
    """set the data from this selection (and convert to the correct dtype
    if we can)
    """

    # values is a recarray
    if values.dtype.fields is not None:
        values = values[self.cname]

    self.set_data(values)

    # use the meta if needed
    meta = _ensure_decoded(self.meta)

    # convert to the correct dtype
    if self.dtype is not None:
        dtype = _ensure_decoded(self.dtype)

        # reverse converts
        if dtype == 'datetime64':

            # recreate with tz if indicated
            self.data = _set_tz(self.data, self.tz, coerce=True)

        elif dtype == 'timedelta64':
            self.data = np.asarray(self.data, dtype='m8[ns]')
        elif dtype == 'date':
            try:
                self.data = np.asarray(
                    [date.fromordinal(v) for v in self.data], dtype=object)
            except ValueError:
                self.data = np.asarray(
                    [date.fromtimestamp(v) for v in self.data],
                    dtype=object)
        elif dtype == 'datetime':
            self.data = np.asarray(
                [datetime.fromtimestamp(v) for v in self.data],
                dtype=object)

        elif meta == 'category':

            # we have a categorical
            categories = self.metadata
            codes = self.data.ravel()

            # if we have stored a NaN in the categories
            # then strip it; in theory we could have BOTH
            # -1s in the codes and nulls :<
            if categories is None:
                # Handle case of NaN-only categorical columns in which
                # case the categories are an empty array; when this is
                # stored, pytables cannot write a zero-len array, so on
                # readback the categories would be None and `read_hdf()`
                # would fail.
                categories = Index([], dtype=np.float64)
            else:
                mask = isna(categories)
                if mask.any():
                    categories = categories[~mask]
                    codes[codes != -1] -= mask.astype(int).cumsum().values

            self.data = Categorical.from_codes(codes,
                                               categories=categories,
                                               ordered=self.ordered)

        else:

            try:
                self.data = self.data.astype(dtype, copy=False)
            except TypeError:
                self.data = self.data.astype('O', copy=False)

    # convert nans / decode
    if _ensure_decoded(self.kind) == 'string':
        self.data = _unconvert_string_array(
            self.data, nan_rep=nan_rep, encoding=encoding, errors=errors)

    return self
get the data for this column
def get_attr(self):
    """ get the data for this column """
    self.values = getattr(self.attrs, self.kind_attr, None)
    self.dtype = getattr(self.attrs, self.dtype_attr, None)
    self.meta = getattr(self.attrs, self.meta_attr, None)
    self.set_kind()
set the data for this column
def set_attr(self):
    """ set the data for this column """
    setattr(self.attrs, self.kind_attr, self.values)
    setattr(self.attrs, self.meta_attr, self.meta)
    if self.dtype is not None:
        setattr(self.attrs, self.dtype_attr, self.dtype)
compute and set our version
def set_version(self):
    """ compute and set our version """
    version = _ensure_decoded(
        getattr(self.group._v_attrs, 'pandas_version', None))
    try:
        self.version = tuple(int(x) for x in version.split('.'))
        if len(self.version) == 2:
            self.version = self.version + (0,)
    except AttributeError:
        self.version = (0, 0, 0)
set my pandas type & version
def set_object_info(self):
    """ set my pandas type & version """
    self.attrs.pandas_type = str(self.pandas_kind)
    self.attrs.pandas_version = str(_version)
    self.set_version()
infer the axes of my storer; return a boolean indicating if we have a valid storer or not
def infer_axes(self):
    """ infer the axes of my storer
        return a boolean indicating if we have a valid storer or not """

    s = self.storable
    if s is None:
        return False
    self.get_attrs()
    return True
support fully deleting the node in its entirety (only) - where specification must be None
def delete(self, where=None, start=None, stop=None, **kwargs):
    """
    support fully deleting the node in its entirety (only) - where
    specification must be None
    """
    if com._all_none(where, start, stop):
        self._handle.remove_node(self.group, recursive=True)
        return None

    raise TypeError("cannot delete on an abstract storer")
remove table keywords from kwargs and return; raise if any keywords are passed which are not None
def validate_read(self, kwargs):
    """
    remove table keywords from kwargs and return
    raise if any keywords are passed which are not None
    """
    kwargs = copy.copy(kwargs)

    columns = kwargs.pop('columns', None)
    if columns is not None:
        raise TypeError("cannot pass a column specification when reading "
                        "a Fixed format store. this store must be "
                        "selected in its entirety")
    where = kwargs.pop('where', None)
    if where is not None:
        raise TypeError("cannot pass a where specification when reading "
                        "from a Fixed format store. this store must be "
                        "selected in its entirety")
    return kwargs
set our object attributes
def set_attrs(self):
    """ set our object attributes """
    self.attrs.encoding = self.encoding
    self.attrs.errors = self.errors
retrieve our attributes
def get_attrs(self):
    """ retrieve our attributes """
    self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
    self.errors = _ensure_decoded(getattr(self.attrs, 'errors', 'strict'))
    for n in self.attributes:
        setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
read an array for the specified node (off of group)
def read_array(self, key, start=None, stop=None):
    """ read an array for the specified node (off of group) """
    import tables
    node = getattr(self.group, key)
    attrs = node._v_attrs

    transposed = getattr(attrs, 'transposed', False)

    if isinstance(node, tables.VLArray):
        ret = node[0][start:stop]
    else:
        dtype = getattr(attrs, 'value_type', None)
        shape = getattr(attrs, 'shape', None)

        if shape is not None:
            # length 0 axis
            ret = np.empty(shape, dtype=dtype)
        else:
            ret = node[start:stop]

        if dtype == 'datetime64':

            # reconstruct a timezone if indicated
            ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)

        elif dtype == 'timedelta64':
            ret = np.asarray(ret, dtype='m8[ns]')

    if transposed:
        return ret.T
    else:
        return ret
write a 0-len array
def write_array_empty(self, key, value):
    """ write a 0-len array """

    # ugly hack for length 0 axes
    arr = np.empty((1,) * value.ndim)
    self._handle.create_array(self.group, key, arr)
    getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
    getattr(self.group, key)._v_attrs.shape = value.shape
we don't support start, stop kwds in Sparse
def validate_read(self, kwargs):
    """
    we don't support start, stop kwds in Sparse
    """
    kwargs = super().validate_read(kwargs)
    if 'start' in kwargs or 'stop' in kwargs:
        raise NotImplementedError("start and/or stop are not supported "
                                  "in fixed Sparse reading")
    return kwargs
write it as a collection of individual sparse series
def write(self, obj, **kwargs):
    """ write it as a collection of individual sparse series """
    super().write(obj, **kwargs)
    for name, ss in obj.items():
        key = 'sparse_series_{name}'.format(name=name)
        if key not in self.group._v_children:
            node = self._handle.create_group(self.group, key)
        else:
            node = getattr(self.group, key)
        s = SparseSeriesFixed(self.parent, node)
        s.write(ss)
    self.attrs.default_fill_value = obj.default_fill_value
    self.attrs.default_kind = obj.default_kind
    self.write_index('columns', obj.columns)
validate against an existing table
def validate(self, other):
    """ validate against an existing table """
    if other is None:
        return

    if other.table_type != self.table_type:
        raise TypeError(
            "incompatible table_type with existing "
            "[{other} - {self}]".format(
                other=other.table_type, self=self.table_type))

    for c in ['index_axes', 'non_index_axes', 'values_axes']:
        sv = getattr(self, c, None)
        ov = getattr(other, c, None)
        if sv != ov:

            # show the error for the specific axes
            for i, sax in enumerate(sv):
                oax = ov[i]
                if sax != oax:
                    raise ValueError(
                        "invalid combination of [{c}] on appending data "
                        "[{sax}] vs current table [{oax}]".format(
                            c=c, sax=sax, oax=oax))

            # should never get here
            raise Exception(
                "invalid combination of [{c}] on appending data [{sv}] vs "
                "current table [{ov}]".format(c=c, sv=sv, ov=ov))