Dataset schema (CodeSearchNet-style, one row per extracted function):

| column | type | notes |
|---|---|---|
| id | int32 | 0 – 252k |
| repo | string | length 7 – 55 |
| path | string | length 4 – 127 |
| func_name | string | length 1 – 88 |
| original_string | string | length 75 – 19.8k |
| language | string | 1 class (python) |
| code | string | length 75 – 19.8k, identical to original_string |
| code_tokens | list | tokenization of code |
| docstring | string | length 3 – 17.3k |
| docstring_tokens | list | tokenization of docstring |
| sha | string | length 40 |
| url | string | length 87 – 242 |

Rows 20,100–20,130 below all come from pandas-dev/pandas, pandas/io/pytables.py, at commit 9feb3ad. Each row is listed as a labeled record with its code shown once, since the code column duplicates original_string and the *_tokens columns are mechanical tokenizations of code and docstring.
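As a quick way to work with rows in this schema, here is a minimal sketch, assuming the rows are exported as JSON Lines with the column names above; the file name and helper function are hypothetical.

```python
import json

# Hypothetical export path; the dump shown here does not name a file.
DUMP_PATH = "code_search_net_python.jsonl"

def iter_functions(path=DUMP_PATH, repo="pandas-dev/pandas"):
    """Yield (func_name, code, docstring) for rows from the given repo."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            if row["repo"] == repo:
                yield row["func_name"], row["code"], row["docstring"]

# Usage sketch: print each function name with its docstring summary line.
# for name, code, doc in iter_functions():
#     print(name, "-", doc.splitlines()[0])
```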

id: 20,100
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: IndexCol.write_metadata
language: python
code:

    def write_metadata(self, handler):
        """ set the meta data """
        if self.metadata is not None:
            handler.write_metadata(self.cname, self.metadata)
docstring: set the meta data
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1794-L1797

id: 20,101
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: DataCol.create_for_block
language: python
code:

    def create_for_block(
            cls, i=None, name=None, cname=None, version=None, **kwargs):
        """ return a new datacol with the block i """
        if cname is None:
            cname = name or 'values_block_{idx}'.format(idx=i)
        if name is None:
            name = cname
        # prior to 0.10.1, we named values blocks like: values_block_0 and the
        # name values_0
        try:
            if version[0] == 0 and version[1] <= 10 and version[2] == 0:
                m = re.search(r"values_block_(\d+)", name)
                if m:
                    name = "values_{group}".format(group=m.groups()[0])
        except IndexError:
            pass
        return cls(name=name, cname=cname, **kwargs)
docstring: return a new datacol with the block i
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1839-L1858
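The pre-0.10.1 renaming rule in create_for_block is easy to check in isolation; a small sketch, with an illustrative helper name:

```python
import re

# Standalone sketch of the legacy rule above: a column stored as
# 'values_block_N' is renamed to 'values_N'; other names pass through.
def legacy_block_name(name):
    m = re.search(r"values_block_(\d+)", name)
    if m:
        return "values_{group}".format(group=m.groups()[0])
    return name

assert legacy_block_name("values_block_0") == "values_0"
assert legacy_block_name("values_7") == "values_7"  # unchanged
```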

id: 20,102
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: DataCol.set_metadata
language: python
code:

    def set_metadata(self, metadata):
        """ record the metadata """
        if metadata is not None:
            metadata = np.array(metadata, copy=False).ravel()
        self.metadata = metadata
docstring: record the metadata
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1904-L1908

id: 20,103
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: DataCol.set_atom
language: python
code:

    def set_atom(self, block, block_items, existing_col, min_itemsize,
                 nan_rep, info, encoding=None, errors='strict'):
        """ create and setup my atom from the block b """
        self.values = list(block_items)
        # short-cut certain block types
        if block.is_categorical:
            return self.set_atom_categorical(block, items=block_items,
                                             info=info)
        elif block.is_datetimetz:
            return self.set_atom_datetime64tz(block, info=info)
        elif block.is_datetime:
            return self.set_atom_datetime64(block)
        elif block.is_timedelta:
            return self.set_atom_timedelta64(block)
        elif block.is_complex:
            return self.set_atom_complex(block)
        dtype = block.dtype.name
        inferred_type = lib.infer_dtype(block.values, skipna=False)
        if inferred_type == 'date':
            raise TypeError(
                "[date] is not implemented as a table column")
        elif inferred_type == 'datetime':
            # after 8260
            # this only would be hit for a multi-timezone dtype
            # which is an error
            raise TypeError(
                "too many timezones in this block, create separate "
                "data columns"
            )
        elif inferred_type == 'unicode':
            raise TypeError(
                "[unicode] is not implemented as a table column")
        # this is basically a catchall; if say a datetime64 has nans then will
        # end up here ###
        elif inferred_type == 'string' or dtype == 'object':
            self.set_atom_string(
                block, block_items,
                existing_col,
                min_itemsize,
                nan_rep,
                encoding,
                errors)
        # set as a data block
        else:
            self.set_atom_data(block)
docstring: create and setup my atom from the block b
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L1939-L1990

id: 20,104
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: DataCol.get_atom_coltype
language: python
code:

    def get_atom_coltype(self, kind=None):
        """ return the PyTables column class for this column """
        if kind is None:
            kind = self.kind
        if self.kind.startswith('uint'):
            col_name = "UInt{name}Col".format(name=kind[4:])
        else:
            col_name = "{name}Col".format(name=kind.capitalize())
        return getattr(_tables(), col_name)
docstring: return the PyTables column class for this column
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2043-L2052

id: 20,105
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: DataCol.validate_attr
language: python
code:

    def validate_attr(self, append):
        """validate that we have the same order as the existing & same dtype"""
        if append:
            existing_fields = getattr(self.attrs, self.kind_attr, None)
            if (existing_fields is not None and
                    existing_fields != list(self.values)):
                raise ValueError("appended items do not match existing items"
                                 " in table!")
            existing_dtype = getattr(self.attrs, self.dtype_attr, None)
            if (existing_dtype is not None and
                    existing_dtype != self.dtype):
                raise ValueError("appended items dtype do not match existing "
                                 "items dtype in table!")
docstring: validate that we have the same order as the existing & same dtype
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2139-L2152

id: 20,106
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: DataCol.get_attr
language: python
code:

    def get_attr(self):
        """ get the data for this column """
        self.values = getattr(self.attrs, self.kind_attr, None)
        self.dtype = getattr(self.attrs, self.dtype_attr, None)
        self.meta = getattr(self.attrs, self.meta_attr, None)
        self.set_kind()
docstring: get the data for this column
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2232-L2237

id: 20,107
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: DataCol.set_attr
language: python
code:

    def set_attr(self):
        """ set the data for this column """
        setattr(self.attrs, self.kind_attr, self.values)
        setattr(self.attrs, self.meta_attr, self.meta)
        if self.dtype is not None:
            setattr(self.attrs, self.dtype_attr, self.dtype)
docstring: set the data for this column
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2239-L2244

id: 20,108
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Fixed.set_version
language: python
code:

    def set_version(self):
        """ compute and set our version """
        version = _ensure_decoded(
            getattr(self.group._v_attrs, 'pandas_version', None))
        try:
            self.version = tuple(int(x) for x in version.split('.'))
            if len(self.version) == 2:
                self.version = self.version + (0,)
        except AttributeError:
            self.version = (0, 0, 0)
docstring: compute and set our version
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2307-L2316
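The version normalization in set_version can be exercised standalone; a sketch with an illustrative helper name:

```python
# Sketch of the normalization above: a 'pandas_version' string such as
# '0.10.1' becomes (0, 10, 1), a two-part version is padded with a
# trailing zero, and a missing attribute (None) yields (0, 0, 0).
def parse_version(version):
    try:
        parts = tuple(int(x) for x in version.split('.'))
        if len(parts) == 2:
            parts = parts + (0,)
        return parts
    except AttributeError:  # version is None: the attribute was absent
        return (0, 0, 0)

assert parse_version("0.10.1") == (0, 10, 1)
assert parse_version("0.25") == (0, 25, 0)
assert parse_version(None) == (0, 0, 0)
```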

id: 20,109
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Fixed.set_object_info
language: python
code:

    def set_object_info(self):
        """ set my pandas type & version """
        self.attrs.pandas_type = str(self.pandas_kind)
        self.attrs.pandas_version = str(_version)
        self.set_version()
docstring: set my pandas type & version
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2339-L2343

id: 20,110
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Fixed.infer_axes
language: python
code:

    def infer_axes(self):
        """ infer the axes of my storer
        return a boolean indicating if we have a valid storer or not """
        s = self.storable
        if s is None:
            return False
        self.get_attrs()
        return True
docstring: infer the axes of my storer; return a boolean indicating if we have a valid storer or not
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2416-L2424

id: 20,111
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: GenericFixed.validate_read
language: python
code:

    def validate_read(self, kwargs):
        """
        remove table keywords from kwargs and return
        raise if any keywords are passed which are not-None
        """
        kwargs = copy.copy(kwargs)
        columns = kwargs.pop('columns', None)
        if columns is not None:
            raise TypeError("cannot pass a column specification when reading "
                            "a Fixed format store. this store must be "
                            "selected in its entirety")
        where = kwargs.pop('where', None)
        if where is not None:
            raise TypeError("cannot pass a where specification when reading "
                            "from a Fixed format store. this store must be "
                            "selected in its entirety")
        return kwargs
docstring: remove table keywords from kwargs and return; raise if any keywords are passed which are not-None
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2480-L2497
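The copy-then-pop validation pattern used by validate_read, shown as a self-contained sketch; the function and parameter names are illustrative:

```python
import copy

# The pattern above in isolation: pop known-but-unsupported keywords
# from a copied kwargs dict and raise only if any were actually set.
def reject_keywords(kwargs, unsupported=('columns', 'where')):
    kwargs = copy.copy(kwargs)  # avoid mutating the caller's dict
    for kw in unsupported:
        if kwargs.pop(kw, None) is not None:
            raise TypeError(
                "cannot pass a {kw} specification when reading "
                "a Fixed format store".format(kw=kw))
    return kwargs

# reject_keywords({'columns': ['a']})  -> raises TypeError
# reject_keywords({'start': 0})        -> {'start': 0}
```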

id: 20,112
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: GenericFixed.set_attrs
language: python
code:

    def set_attrs(self):
        """ set our object attributes """
        self.attrs.encoding = self.encoding
        self.attrs.errors = self.errors
docstring: set our object attributes
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2503-L2506

id: 20,113
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: GenericFixed.read_array
language: python
code:

    def read_array(self, key, start=None, stop=None):
        """ read an array for the specified node (off of group) """
        import tables
        node = getattr(self.group, key)
        attrs = node._v_attrs
        transposed = getattr(attrs, 'transposed', False)
        if isinstance(node, tables.VLArray):
            ret = node[0][start:stop]
        else:
            dtype = getattr(attrs, 'value_type', None)
            shape = getattr(attrs, 'shape', None)
            if shape is not None:
                # length 0 axis
                ret = np.empty(shape, dtype=dtype)
            else:
                ret = node[start:stop]
            if dtype == 'datetime64':
                # reconstruct a timezone if indicated
                ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
            elif dtype == 'timedelta64':
                ret = np.asarray(ret, dtype='m8[ns]')
        if transposed:
            return ret.T
        else:
            return ret
docstring: read an array for the specified node (off of group)
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2518-L2549

id: 20,114
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: GenericFixed.write_array_empty
language: python
code:

    def write_array_empty(self, key, value):
        """ write a 0-len array """
        # ugly hack for length 0 axes
        arr = np.empty((1,) * value.ndim)
        self._handle.create_array(self.group, key, arr)
        getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
        getattr(self.group, key)._v_attrs.shape = value.shape
docstring: write a 0-len array
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2702-L2709

id: 20,115
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: SparseFixed.validate_read
language: python
code:

    def validate_read(self, kwargs):
        """
        we don't support start, stop kwds in Sparse
        """
        kwargs = super().validate_read(kwargs)
        if 'start' in kwargs or 'stop' in kwargs:
            raise NotImplementedError("start and/or stop are not supported "
                                      "in fixed Sparse reading")
        return kwargs
docstring: we don't support start, stop kwds in Sparse
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2859-L2867

id: 20,116
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: SparseFrameFixed.write
language: python
code:

    def write(self, obj, **kwargs):
        """ write it as a collection of individual sparse series """
        super().write(obj, **kwargs)
        for name, ss in obj.items():
            key = 'sparse_series_{name}'.format(name=name)
            if key not in self.group._v_children:
                node = self._handle.create_group(self.group, key)
            else:
                node = getattr(self.group, key)
            s = SparseSeriesFixed(self.parent, node)
            s.write(ss)
        self.attrs.default_fill_value = obj.default_fill_value
        self.attrs.default_kind = obj.default_kind
        self.write_index('columns', obj.columns)
docstring: write it as a collection of individual sparse series
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2911-L2924

id: 20,117
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.validate
language: python
code:

    def validate(self, other):
        """ validate against an existing table """
        if other is None:
            return
        if other.table_type != self.table_type:
            raise TypeError(
                "incompatible table_type with existing "
                "[{other} - {self}]".format(
                    other=other.table_type, self=self.table_type))
        for c in ['index_axes', 'non_index_axes', 'values_axes']:
            sv = getattr(self, c, None)
            ov = getattr(other, c, None)
            if sv != ov:
                # show the error for the specific axes
                for i, sax in enumerate(sv):
                    oax = ov[i]
                    if sax != oax:
                        raise ValueError(
                            "invalid combinate of [{c}] on appending data "
                            "[{sax}] vs current table [{oax}]".format(
                                c=c, sax=sax, oax=oax))
                # should never get here
                raise Exception(
                    "invalid combinate of [{c}] on appending data [{sv}] vs "
                    "current table [{ov}]".format(c=c, sv=sv, ov=ov))
docstring: validate against an existing table
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3095-L3123

id: 20,118
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.validate_multiindex
language: python
code:

    def validate_multiindex(self, obj):
        """validate that we can store the multi-index; reset and return the
        new object
        """
        levels = [l if l is not None else "level_{0}".format(i)
                  for i, l in enumerate(obj.index.names)]
        try:
            return obj.reset_index(), levels
        except ValueError:
            raise ValueError("duplicate names/columns in the multi-index when "
                             "storing as a table")
docstring: validate that we can store the multi-index; reset and return the new object
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3135-L3145
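The level-naming fallback in validate_multiindex, demonstrated on a sample list of index names:

```python
# Unnamed MultiIndex levels fall back to positional names 'level_<i>'.
names = ["date", None, "ticker"]
levels = [l if l is not None else "level_{0}".format(i)
          for i, l in enumerate(names)]
assert levels == ["date", "level_1", "ticker"]
```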

id: 20,119
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.nrows_expected
language: python
code:

    def nrows_expected(self):
        """ based on our axes, compute the expected nrows """
        return np.prod([i.cvalues.shape[0] for i in self.index_axes])
docstring: based on our axes, compute the expected nrows
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3148-L3150
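One subtlety worth noting about nrows_expected: np.prod of an empty list is 1.0 (a float), not 0, so a storer with no index axes would report one expected row; a quick check:

```python
import numpy as np

# nrows_expected multiplies the lengths of all index axes.
assert np.prod([3, 4]) == 12
assert np.prod([]) == 1.0  # empty product is 1.0, not 0
```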

id: 20,120
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.data_orientation
language: python
code:

    def data_orientation(self):
        """return a tuple of my permutated axes, non_indexable at the front"""
        return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
                                     [int(a.axis) for a in self.index_axes]))
docstring: return a tuple of my permutated axes, non_indexable at the front
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3188-L3191

id: 20,121
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.queryables
language: python
code:

    def queryables(self):
        """ return a dict of the kinds allowable columns for this object """
        # compute the values_axes queryables
        return dict(
            [(a.cname, a) for a in self.index_axes] +
            [(self.storage_obj_type._AXIS_NAMES[axis], None)
             for axis, values in self.non_index_axes] +
            [(v.cname, v) for v in self.values_axes
             if v.name in set(self.data_columns)]
        )
docstring: return a dict of the kinds allowable columns for this object
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3193-L3203

id: 20,122
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table._get_metadata_path
language: python
code:

    def _get_metadata_path(self, key):
        """ return the metadata pathname for this key """
        return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
                                                key=key)
docstring: return the metadata pathname for this key
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3213-L3216

id: 20,123
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.write_metadata
language: python
code:

    def write_metadata(self, key, values):
        """
        write out a meta data array to the key as a fixed-format Series

        Parameters
        ----------
        key : string
        values : ndarray
        """
        values = Series(values)
        self.parent.put(self._get_metadata_path(key), values, format='table',
                        encoding=self.encoding, errors=self.errors,
                        nan_rep=self.nan_rep)
docstring: write out a meta data array to the key as a fixed-format Series (Parameters: key : string, values : ndarray)
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3218-L3231

id: 20,124
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.read_metadata
language: python
code:

    def read_metadata(self, key):
        """ return the meta data array for this key """
        if getattr(getattr(self.group, 'meta', None), key, None) is not None:
            return self.parent.select(self._get_metadata_path(key))
        return None
docstring: return the meta data array for this key
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3233-L3237

id: 20,125
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.set_attrs
language: python
code:

    def set_attrs(self):
        """ set our table type & indexables """
        self.attrs.table_type = str(self.table_type)
        self.attrs.index_cols = self.index_cols()
        self.attrs.values_cols = self.values_cols()
        self.attrs.non_index_axes = self.non_index_axes
        self.attrs.data_columns = self.data_columns
        self.attrs.nan_rep = self.nan_rep
        self.attrs.encoding = self.encoding
        self.attrs.errors = self.errors
        self.attrs.levels = self.levels
        self.attrs.metadata = self.metadata
        self.set_info()
docstring: set our table type & indexables
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3243-L3255

id: 20,126
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.validate_version
language: python
code:

    def validate_version(self, where=None):
        """ are we trying to operate on an old version? """
        if where is not None:
            if (self.version[0] <= 0 and self.version[1] <= 10 and
                    self.version[2] < 1):
                ws = incompatibility_doc % '.'.join(
                    [str(x) for x in self.version])
                warnings.warn(ws, IncompatibilityWarning)
docstring: are we trying to operate on an old version?
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3280-L3287
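The old-version predicate in validate_version compares components separately rather than doing a lexicographic tuple comparison; a standalone sketch with an illustrative helper name:

```python
# Each component is tested independently, so only versions with
# major 0, minor <= 10, and patch 0 are flagged as old.
def is_old_version(version):
    return version[0] <= 0 and version[1] <= 10 and version[2] < 1

assert is_old_version((0, 10, 0))
assert not is_old_version((0, 10, 1))
assert not is_old_version((0, 25, 0))  # minor 25 > 10, so not flagged
```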

id: 20,127
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.validate_min_itemsize
language: python
code:

    def validate_min_itemsize(self, min_itemsize):
        """validate the min_itemsize doesn't contain items that are not in the
        axes; this needs data_columns to be defined
        """
        if min_itemsize is None:
            return
        if not isinstance(min_itemsize, dict):
            return
        q = self.queryables()
        for k, v in min_itemsize.items():
            # ok, apply generally
            if k == 'values':
                continue
            if k not in q:
                raise ValueError(
                    "min_itemsize has the key [{key}] which is not an axis or "
                    "data_column".format(key=k))
docstring: validate the min_itemsize doesn't contain items that are not in the axes; this needs data_columns to be defined
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3289-L3307

id: 20,128
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.validate_data_columns
language: python
code:

    def validate_data_columns(self, data_columns, min_itemsize):
        """take the input data_columns and min_itemsize and create a data
        columns spec
        """
        if not len(self.non_index_axes):
            return []
        axis, axis_labels = self.non_index_axes[0]
        info = self.info.get(axis, dict())
        if info.get('type') == 'MultiIndex' and data_columns:
            raise ValueError("cannot use a multi-index on axis [{0}] with "
                             "data_columns {1}".format(axis, data_columns))
        # evaluate the passed data_columns, True == use all columns
        # take only valid axis labels
        if data_columns is True:
            data_columns = list(axis_labels)
        elif data_columns is None:
            data_columns = []
        # if min_itemsize is a dict, add the keys (exclude 'values')
        if isinstance(min_itemsize, dict):
            existing_data_columns = set(data_columns)
            data_columns.extend([
                k for k in min_itemsize.keys()
                if k != 'values' and k not in existing_data_columns
            ])
        # return valid columns in the order of our axis
        return [c for c in data_columns if c in axis_labels]
docstring: take the input data_columns and min_itemsize and create a data columns spec
sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3435-L3466
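The tail of validate_data_columns, shown in isolation: min_itemsize keys other than 'values' are appended as data columns, then the list is filtered to labels actually present on the axis, preserving the order of data_columns; the sample values are illustrative:

```python
data_columns = ["b"]
min_itemsize = {"a": 30, "values": 50}
axis_labels = ["a", "b", "c"]

# Append min_itemsize keys (except 'values') not already listed.
existing_data_columns = set(data_columns)
data_columns.extend(k for k in min_itemsize
                    if k != 'values' and k not in existing_data_columns)

# Keep only labels on the axis, in data_columns order.
result = [c for c in data_columns if c in axis_labels]
assert result == ["b", "a"]
```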

id: 20,129
repo: pandas-dev/pandas
path: pandas/io/pytables.py
func_name: Table.process_axes
language: python
code:

    def process_axes(self, obj, columns=None):
        """ process axes filters """
        # make a copy to avoid side effects
        if columns is not None:
            columns = list(columns)
        # make sure to include levels if we have them
        if columns is not None and self.is_multi_index:
            for n in self.levels:
                if n not in columns:
                    columns.insert(0, n)
        # reorder by any non_index_axes & limit to the select columns
        for axis, labels in self.non_index_axes:
            obj = _reindex_axis(obj, axis, labels, columns)
        # apply the selection filters (but keep in the same order)
        if self.selection.filter is not None:
            for field, op, filt in self.selection.filter.format():
                def process_filter(field, filt):
                    for axis_name in obj._AXIS_NAMES.values():
                        axis_number = obj._get_axis_number(axis_name)
                        axis_values = obj._get_axis(axis_name)
                        # see if the field is the name of an axis
                        if field == axis_name:
                            # if we have a multi-index, then need to include
                            # the levels
                            if self.is_multi_index:
                                filt = filt.union(Index(self.levels))
                            takers = op(axis_values, filt)
                            return obj.loc._getitem_axis(takers,
                                                         axis=axis_number)
                        # this might be the name of a field IN an axis
                        elif field in axis_values:
                            # we need to filter on this dimension
                            values = ensure_index(getattr(obj, field).values)
                            filt = ensure_index(filt)
                            # hack until we support reversed dim flags
                            if isinstance(obj, DataFrame):
                                axis_number = 1 - axis_number
                            takers = op(values, filt)
                            return obj.loc._getitem_axis(takers,
                                                         axis=axis_number)
                    raise ValueError("cannot find the field [{field}] for "
                                     "filtering!".format(field=field))
                obj = process_filter(field, filt)
        return obj
|
python
|
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
# this might be the name of a file IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError("cannot find the field [{field}] for "
"filtering!".format(field=field))
obj = process_filter(field, filt)
return obj
|
[
"def",
"process_axes",
"(",
"self",
",",
"obj",
",",
"columns",
"=",
"None",
")",
":",
"# make a copy to avoid side effects",
"if",
"columns",
"is",
"not",
"None",
":",
"columns",
"=",
"list",
"(",
"columns",
")",
"# make sure to include levels if we have them",
"if",
"columns",
"is",
"not",
"None",
"and",
"self",
".",
"is_multi_index",
":",
"for",
"n",
"in",
"self",
".",
"levels",
":",
"if",
"n",
"not",
"in",
"columns",
":",
"columns",
".",
"insert",
"(",
"0",
",",
"n",
")",
"# reorder by any non_index_axes & limit to the select columns",
"for",
"axis",
",",
"labels",
"in",
"self",
".",
"non_index_axes",
":",
"obj",
"=",
"_reindex_axis",
"(",
"obj",
",",
"axis",
",",
"labels",
",",
"columns",
")",
"# apply the selection filters (but keep in the same order)",
"if",
"self",
".",
"selection",
".",
"filter",
"is",
"not",
"None",
":",
"for",
"field",
",",
"op",
",",
"filt",
"in",
"self",
".",
"selection",
".",
"filter",
".",
"format",
"(",
")",
":",
"def",
"process_filter",
"(",
"field",
",",
"filt",
")",
":",
"for",
"axis_name",
"in",
"obj",
".",
"_AXIS_NAMES",
".",
"values",
"(",
")",
":",
"axis_number",
"=",
"obj",
".",
"_get_axis_number",
"(",
"axis_name",
")",
"axis_values",
"=",
"obj",
".",
"_get_axis",
"(",
"axis_name",
")",
"# see if the field is the name of an axis",
"if",
"field",
"==",
"axis_name",
":",
"# if we have a multi-index, then need to include",
"# the levels",
"if",
"self",
".",
"is_multi_index",
":",
"filt",
"=",
"filt",
".",
"union",
"(",
"Index",
"(",
"self",
".",
"levels",
")",
")",
"takers",
"=",
"op",
"(",
"axis_values",
",",
"filt",
")",
"return",
"obj",
".",
"loc",
".",
"_getitem_axis",
"(",
"takers",
",",
"axis",
"=",
"axis_number",
")",
"# this might be the name of a file IN an axis",
"elif",
"field",
"in",
"axis_values",
":",
"# we need to filter on this dimension",
"values",
"=",
"ensure_index",
"(",
"getattr",
"(",
"obj",
",",
"field",
")",
".",
"values",
")",
"filt",
"=",
"ensure_index",
"(",
"filt",
")",
"# hack until we support reversed dim flags",
"if",
"isinstance",
"(",
"obj",
",",
"DataFrame",
")",
":",
"axis_number",
"=",
"1",
"-",
"axis_number",
"takers",
"=",
"op",
"(",
"values",
",",
"filt",
")",
"return",
"obj",
".",
"loc",
".",
"_getitem_axis",
"(",
"takers",
",",
"axis",
"=",
"axis_number",
")",
"raise",
"ValueError",
"(",
"\"cannot find the field [{field}] for \"",
"\"filtering!\"",
".",
"format",
"(",
"field",
"=",
"field",
")",
")",
"obj",
"=",
"process_filter",
"(",
"field",
",",
"filt",
")",
"return",
"obj"
] |
process axes filters
|
[
"process",
"axes",
"filters"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3683-L3741
|
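The column trimming and filter handling in process_axes is what backs HDFStore.select; a minimal usage sketch (hypothetical file name, requires PyTables; data_columns=True so the where clause can reference 'A'):

import numpy as np
import pandas as pd

with pd.HDFStore('demo.h5', mode='w') as store:
    df = pd.DataFrame(np.random.randn(8, 3), columns=list('ABC'))
    store.append('df', df, data_columns=True)
    # the where clause selects rows; process_axes then trims to `columns`
    subset = store.select('df', where='A > 0', columns=['A', 'B'])
print(subset.columns.tolist())  # ['A', 'B']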
20,130
|
pandas-dev/pandas
|
pandas/io/pytables.py
|
Table.create_description
|
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
        # use the provided expected rows if passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
|
python
|
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
        # use the provided expected rows if passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
|
[
"def",
"create_description",
"(",
"self",
",",
"complib",
"=",
"None",
",",
"complevel",
"=",
"None",
",",
"fletcher32",
"=",
"False",
",",
"expectedrows",
"=",
"None",
")",
":",
"# provided expected rows if its passed",
"if",
"expectedrows",
"is",
"None",
":",
"expectedrows",
"=",
"max",
"(",
"self",
".",
"nrows_expected",
",",
"10000",
")",
"d",
"=",
"dict",
"(",
"name",
"=",
"'table'",
",",
"expectedrows",
"=",
"expectedrows",
")",
"# description from the axes & values",
"d",
"[",
"'description'",
"]",
"=",
"{",
"a",
".",
"cname",
":",
"a",
".",
"typ",
"for",
"a",
"in",
"self",
".",
"axes",
"}",
"if",
"complib",
":",
"if",
"complevel",
"is",
"None",
":",
"complevel",
"=",
"self",
".",
"_complevel",
"or",
"9",
"filters",
"=",
"_tables",
"(",
")",
".",
"Filters",
"(",
"complevel",
"=",
"complevel",
",",
"complib",
"=",
"complib",
",",
"fletcher32",
"=",
"fletcher32",
"or",
"self",
".",
"_fletcher32",
")",
"d",
"[",
"'filters'",
"]",
"=",
"filters",
"elif",
"self",
".",
"_filters",
"is",
"not",
"None",
":",
"d",
"[",
"'filters'",
"]",
"=",
"self",
".",
"_filters",
"return",
"d"
] |
create the description of the table from the axes & values
|
[
"create",
"the",
"description",
"of",
"the",
"table",
"from",
"the",
"axes",
"&",
"values"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3743-L3766
|
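create_description is where complib/complevel become a PyTables Filters object; a hedged sketch of how those options are usually supplied from the public API (hypothetical path; 'blosc' assumes that codec is installed):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(1000, 2), columns=['x', 'y'])
# complevel/complib flow through to the Filters built in create_description
df.to_hdf('compressed.h5', key='df', format='table',
          complib='blosc', complevel=5)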
20,131
|
pandas-dev/pandas
|
pandas/io/pytables.py
|
Table.read_column
|
def read_column(self, column, where=None, start=None, stop=None):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [{column}] can not be extracted individually; "
"it is not data indexable".format(column=column))
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors
).take_data(),
a.tz, True), name=column)
raise KeyError(
"column [{column}] not found in the table".format(column=column))
|
python
|
def read_column(self, column, where=None, start=None, stop=None):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [{column}] can not be extracted individually; "
"it is not data indexable".format(column=column))
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors
).take_data(),
a.tz, True), name=column)
raise KeyError(
"column [{column}] not found in the table".format(column=column))
|
[
"def",
"read_column",
"(",
"self",
",",
"column",
",",
"where",
"=",
"None",
",",
"start",
"=",
"None",
",",
"stop",
"=",
"None",
")",
":",
"# validate the version",
"self",
".",
"validate_version",
"(",
")",
"# infer the data kind",
"if",
"not",
"self",
".",
"infer_axes",
"(",
")",
":",
"return",
"False",
"if",
"where",
"is",
"not",
"None",
":",
"raise",
"TypeError",
"(",
"\"read_column does not currently accept a where \"",
"\"clause\"",
")",
"# find the axes",
"for",
"a",
"in",
"self",
".",
"axes",
":",
"if",
"column",
"==",
"a",
".",
"name",
":",
"if",
"not",
"a",
".",
"is_data_indexable",
":",
"raise",
"ValueError",
"(",
"\"column [{column}] can not be extracted individually; \"",
"\"it is not data indexable\"",
".",
"format",
"(",
"column",
"=",
"column",
")",
")",
"# column must be an indexable or a data column",
"c",
"=",
"getattr",
"(",
"self",
".",
"table",
".",
"cols",
",",
"column",
")",
"a",
".",
"set_info",
"(",
"self",
".",
"info",
")",
"return",
"Series",
"(",
"_set_tz",
"(",
"a",
".",
"convert",
"(",
"c",
"[",
"start",
":",
"stop",
"]",
",",
"nan_rep",
"=",
"self",
".",
"nan_rep",
",",
"encoding",
"=",
"self",
".",
"encoding",
",",
"errors",
"=",
"self",
".",
"errors",
")",
".",
"take_data",
"(",
")",
",",
"a",
".",
"tz",
",",
"True",
")",
",",
"name",
"=",
"column",
")",
"raise",
"KeyError",
"(",
"\"column [{column}] not found in the table\"",
".",
"format",
"(",
"column",
"=",
"column",
")",
")"
] |
return a single column from the table, generally only indexables
are interesting
|
[
"return",
"a",
"single",
"column",
"from",
"the",
"table",
"generally",
"only",
"indexables",
"are",
"interesting"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3793-L3829
|
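read_column backs HDFStore.select_column; a minimal sketch (hypothetical path, requires PyTables). Only indexables and data columns can be read this way, which is what the is_data_indexable check enforces:

import numpy as np
import pandas as pd

with pd.HDFStore('cols.h5', mode='w') as store:
    df = pd.DataFrame({'a': np.arange(5), 'b': np.arange(5.0)},
                      index=pd.date_range('2019-01-01', periods=5))
    store.append('df', df, data_columns=['a'])
    idx = store.select_column('df', 'index')             # always indexable
    a = store.select_column('df', 'a', start=1, stop=3)  # honors start/stop
print(a.tolist())  # [1, 2]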
20,132
|
pandas-dev/pandas
|
pandas/io/pytables.py
|
LegacyTable.read
|
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
raise NotImplementedError("Panel is removed in pandas 0.25.0")
|
python
|
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
raise NotImplementedError("Panel is removed in pandas 0.25.0")
|
[
"def",
"read",
"(",
"self",
",",
"where",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"read_axes",
"(",
"where",
"=",
"where",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"None",
"raise",
"NotImplementedError",
"(",
"\"Panel is removed in pandas 0.25.0\"",
")"
] |
we have n indexable columns, with an arbitrary number of data
axes
|
[
"we",
"have",
"n",
"indexable",
"columns",
"with",
"an",
"arbitrary",
"number",
"of",
"data",
"axes"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3872-L3880
|
20,133
|
pandas-dev/pandas
|
pandas/io/pytables.py
|
AppendableTable.write_data
|
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
|
python
|
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
|
[
"def",
"write_data",
"(",
"self",
",",
"chunksize",
",",
"dropna",
"=",
"False",
")",
":",
"names",
"=",
"self",
".",
"dtype",
".",
"names",
"nrows",
"=",
"self",
".",
"nrows_expected",
"# if dropna==True, then drop ALL nan rows",
"masks",
"=",
"[",
"]",
"if",
"dropna",
":",
"for",
"a",
"in",
"self",
".",
"values_axes",
":",
"# figure the mask: only do if we can successfully process this",
"# column, otherwise ignore the mask",
"mask",
"=",
"isna",
"(",
"a",
".",
"data",
")",
".",
"all",
"(",
"axis",
"=",
"0",
")",
"if",
"isinstance",
"(",
"mask",
",",
"np",
".",
"ndarray",
")",
":",
"masks",
".",
"append",
"(",
"mask",
".",
"astype",
"(",
"'u1'",
",",
"copy",
"=",
"False",
")",
")",
"# consolidate masks",
"if",
"len",
"(",
"masks",
")",
":",
"mask",
"=",
"masks",
"[",
"0",
"]",
"for",
"m",
"in",
"masks",
"[",
"1",
":",
"]",
":",
"mask",
"=",
"mask",
"&",
"m",
"mask",
"=",
"mask",
".",
"ravel",
"(",
")",
"else",
":",
"mask",
"=",
"None",
"# broadcast the indexes if needed",
"indexes",
"=",
"[",
"a",
".",
"cvalues",
"for",
"a",
"in",
"self",
".",
"index_axes",
"]",
"nindexes",
"=",
"len",
"(",
"indexes",
")",
"bindexes",
"=",
"[",
"]",
"for",
"i",
",",
"idx",
"in",
"enumerate",
"(",
"indexes",
")",
":",
"# broadcast to all other indexes except myself",
"if",
"i",
">",
"0",
"and",
"i",
"<",
"nindexes",
":",
"repeater",
"=",
"np",
".",
"prod",
"(",
"[",
"indexes",
"[",
"bi",
"]",
".",
"shape",
"[",
"0",
"]",
"for",
"bi",
"in",
"range",
"(",
"0",
",",
"i",
")",
"]",
")",
"idx",
"=",
"np",
".",
"tile",
"(",
"idx",
",",
"repeater",
")",
"if",
"i",
"<",
"nindexes",
"-",
"1",
":",
"repeater",
"=",
"np",
".",
"prod",
"(",
"[",
"indexes",
"[",
"bi",
"]",
".",
"shape",
"[",
"0",
"]",
"for",
"bi",
"in",
"range",
"(",
"i",
"+",
"1",
",",
"nindexes",
")",
"]",
")",
"idx",
"=",
"np",
".",
"repeat",
"(",
"idx",
",",
"repeater",
")",
"bindexes",
".",
"append",
"(",
"idx",
")",
"# transpose the values so first dimension is last",
"# reshape the values if needed",
"values",
"=",
"[",
"a",
".",
"take_data",
"(",
")",
"for",
"a",
"in",
"self",
".",
"values_axes",
"]",
"values",
"=",
"[",
"v",
".",
"transpose",
"(",
"np",
".",
"roll",
"(",
"np",
".",
"arange",
"(",
"v",
".",
"ndim",
")",
",",
"v",
".",
"ndim",
"-",
"1",
")",
")",
"for",
"v",
"in",
"values",
"]",
"bvalues",
"=",
"[",
"]",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"values",
")",
":",
"new_shape",
"=",
"(",
"nrows",
",",
")",
"+",
"self",
".",
"dtype",
"[",
"names",
"[",
"nindexes",
"+",
"i",
"]",
"]",
".",
"shape",
"bvalues",
".",
"append",
"(",
"values",
"[",
"i",
"]",
".",
"reshape",
"(",
"new_shape",
")",
")",
"# write the chunks",
"if",
"chunksize",
"is",
"None",
":",
"chunksize",
"=",
"100000",
"rows",
"=",
"np",
".",
"empty",
"(",
"min",
"(",
"chunksize",
",",
"nrows",
")",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"chunks",
"=",
"int",
"(",
"nrows",
"/",
"chunksize",
")",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"chunks",
")",
":",
"start_i",
"=",
"i",
"*",
"chunksize",
"end_i",
"=",
"min",
"(",
"(",
"i",
"+",
"1",
")",
"*",
"chunksize",
",",
"nrows",
")",
"if",
"start_i",
">=",
"end_i",
":",
"break",
"self",
".",
"write_data_chunk",
"(",
"rows",
",",
"indexes",
"=",
"[",
"a",
"[",
"start_i",
":",
"end_i",
"]",
"for",
"a",
"in",
"bindexes",
"]",
",",
"mask",
"=",
"mask",
"[",
"start_i",
":",
"end_i",
"]",
"if",
"mask",
"is",
"not",
"None",
"else",
"None",
",",
"values",
"=",
"[",
"v",
"[",
"start_i",
":",
"end_i",
"]",
"for",
"v",
"in",
"bvalues",
"]",
")"
] |
we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk
|
[
"we",
"form",
"the",
"data",
"into",
"a",
"2",
"-",
"d",
"including",
"indexes",
"values",
"mask",
"write",
"chunk",
"-",
"by",
"-",
"chunk"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L3930-L4003
|
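The chunk loop at the end of write_data is self-contained arithmetic; a standalone sketch of the same pattern, showing how the start_i >= end_i guard trims the extra chunk when nrows divides evenly into chunksize:

nrows, chunksize = 250_000, 100_000
chunks = int(nrows / chunksize) + 1  # one extra chunk, trimmed by the guard
for i in range(chunks):
    start_i = i * chunksize
    end_i = min((i + 1) * chunksize, nrows)
    if start_i >= end_i:
        break
    print(start_i, end_i)  # (0, 100000), (100000, 200000), (200000, 250000)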
20,134
|
pandas-dev/pandas
|
pandas/io/pytables.py
|
GenericTable.indexables
|
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
            # the index column is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
|
python
|
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
            # the index column is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
|
[
"def",
"indexables",
"(",
"self",
")",
":",
"if",
"self",
".",
"_indexables",
"is",
"None",
":",
"d",
"=",
"self",
".",
"description",
"# the index columns is just a simple index",
"self",
".",
"_indexables",
"=",
"[",
"GenericIndexCol",
"(",
"name",
"=",
"'index'",
",",
"axis",
"=",
"0",
")",
"]",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"d",
".",
"_v_names",
")",
":",
"dc",
"=",
"GenericDataIndexableCol",
"(",
"name",
"=",
"n",
",",
"pos",
"=",
"i",
",",
"values",
"=",
"[",
"n",
"]",
",",
"version",
"=",
"self",
".",
"version",
")",
"self",
".",
"_indexables",
".",
"append",
"(",
"dc",
")",
"return",
"self",
".",
"_indexables"
] |
create the indexables from the table description
|
[
"create",
"the",
"indexables",
"from",
"the",
"table",
"description"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L4267-L4282
|
20,135
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray.astype
|
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
return np.array(self, dtype=dtype, copy=copy)
|
python
|
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
"""
return np.array(self, dtype=dtype, copy=copy)
|
[
"def",
"astype",
"(",
"self",
",",
"dtype",
",",
"copy",
"=",
"True",
")",
":",
"return",
"np",
".",
"array",
"(",
"self",
",",
"dtype",
"=",
"dtype",
",",
"copy",
"=",
"copy",
")"
] |
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
|
[
"Cast",
"to",
"a",
"NumPy",
"array",
"with",
"dtype",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L322-L340
|
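A quick demonstration of the astype contract through a concrete ExtensionArray (pandas' nullable integer array, available since 0.24 via pd.array; IntegerArray overrides this method, but the observable behavior matches the base implementation):

import pandas as pd

arr = pd.array([1, 2, None], dtype='Int64')
print(arr.astype(object))     # object ndarray; the missing entry comes back as NaN
print(arr.astype('float64'))  # float ndarray with NaN for the missing entry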
20,136
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray.argsort
|
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
"""
Return the indices that would sort this array.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
index_array : ndarray
Array of indices that sort ``self``.
See Also
--------
numpy.argsort : Sorting implementation used internally.
"""
# Implementor note: You have two places to override the behavior of
# argsort.
# 1. _values_for_argsort : construct the values passed to np.argsort
# 2. argsort : total control over sorting.
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
values = self._values_for_argsort()
result = np.argsort(values, kind=kind, **kwargs)
if not ascending:
result = result[::-1]
return result
|
python
|
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
"""
Return the indices that would sort this array.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
index_array : ndarray
Array of indices that sort ``self``.
See Also
--------
numpy.argsort : Sorting implementation used internally.
"""
# Implementor note: You have two places to override the behavior of
# argsort.
# 1. _values_for_argsort : construct the values passed to np.argsort
# 2. argsort : total control over sorting.
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
values = self._values_for_argsort()
result = np.argsort(values, kind=kind, **kwargs)
if not ascending:
result = result[::-1]
return result
|
[
"def",
"argsort",
"(",
"self",
",",
"ascending",
"=",
"True",
",",
"kind",
"=",
"'quicksort'",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# Implementor note: You have two places to override the behavior of",
"# argsort.",
"# 1. _values_for_argsort : construct the values passed to np.argsort",
"# 2. argsort : total control over sorting.",
"ascending",
"=",
"nv",
".",
"validate_argsort_with_ascending",
"(",
"ascending",
",",
"args",
",",
"kwargs",
")",
"values",
"=",
"self",
".",
"_values_for_argsort",
"(",
")",
"result",
"=",
"np",
".",
"argsort",
"(",
"values",
",",
"kind",
"=",
"kind",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"ascending",
":",
"result",
"=",
"result",
"[",
":",
":",
"-",
"1",
"]",
"return",
"result"
] |
Return the indices that would sort this array.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
index_array : ndarray
Array of indices that sort ``self``.
See Also
--------
numpy.argsort : Sorting implementation used internally.
|
[
"Return",
"the",
"indices",
"that",
"would",
"sort",
"this",
"array",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L381-L413
|
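A short usage sketch; the descending case is just the reversed ascending result, as the implementation above shows:

import pandas as pd

arr = pd.array([3, 1, 2], dtype='Int64')
print(arr.argsort())                 # [1 2 0]
print(arr.argsort(ascending=False))  # [0 2 1] -- reversed ascending order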
20,137
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray.shift
|
def shift(
self,
periods: int = 1,
fill_value: object = None,
) -> ABCExtensionArray:
"""
Shift values by desired number.
Newly introduced missing values are filled with
``self.dtype.na_value``.
.. versionadded:: 0.24.0
Parameters
----------
periods : int, default 1
The number of periods to shift. Negative values are allowed
for shifting backwards.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
            The default is ``self.dtype.na_value``.
.. versionadded:: 0.24.0
Returns
-------
shifted : ExtensionArray
Notes
-----
If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
returned.
If ``periods > len(self)``, then an array of size
len(self) is returned, with all values filled with
``self.dtype.na_value``.
"""
# Note: this implementation assumes that `self.dtype.na_value` can be
# stored in an instance of your ExtensionArray with `self.dtype`.
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)),
dtype=self.dtype
)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods):]
b = empty
return self._concat_same_type([a, b])
|
python
|
def shift(
self,
periods: int = 1,
fill_value: object = None,
) -> ABCExtensionArray:
"""
Shift values by desired number.
Newly introduced missing values are filled with
``self.dtype.na_value``.
.. versionadded:: 0.24.0
Parameters
----------
periods : int, default 1
The number of periods to shift. Negative values are allowed
for shifting backwards.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
            The default is ``self.dtype.na_value``.
.. versionadded:: 0.24.0
Returns
-------
shifted : ExtensionArray
Notes
-----
If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
returned.
If ``periods > len(self)``, then an array of size
len(self) is returned, with all values filled with
``self.dtype.na_value``.
"""
# Note: this implementation assumes that `self.dtype.na_value` can be
# stored in an instance of your ExtensionArray with `self.dtype`.
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)),
dtype=self.dtype
)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods):]
b = empty
return self._concat_same_type([a, b])
|
[
"def",
"shift",
"(",
"self",
",",
"periods",
":",
"int",
"=",
"1",
",",
"fill_value",
":",
"object",
"=",
"None",
",",
")",
"->",
"ABCExtensionArray",
":",
"# Note: this implementation assumes that `self.dtype.na_value` can be",
"# stored in an instance of your ExtensionArray with `self.dtype`.",
"if",
"not",
"len",
"(",
"self",
")",
"or",
"periods",
"==",
"0",
":",
"return",
"self",
".",
"copy",
"(",
")",
"if",
"isna",
"(",
"fill_value",
")",
":",
"fill_value",
"=",
"self",
".",
"dtype",
".",
"na_value",
"empty",
"=",
"self",
".",
"_from_sequence",
"(",
"[",
"fill_value",
"]",
"*",
"min",
"(",
"abs",
"(",
"periods",
")",
",",
"len",
"(",
"self",
")",
")",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
"if",
"periods",
">",
"0",
":",
"a",
"=",
"empty",
"b",
"=",
"self",
"[",
":",
"-",
"periods",
"]",
"else",
":",
"a",
"=",
"self",
"[",
"abs",
"(",
"periods",
")",
":",
"]",
"b",
"=",
"empty",
"return",
"self",
".",
"_concat_same_type",
"(",
"[",
"a",
",",
"b",
"]",
")"
] |
Shift values by desired number.
Newly introduced missing values are filled with
``self.dtype.na_value``.
.. versionadded:: 0.24.0
Parameters
----------
periods : int, default 1
The number of periods to shift. Negative values are allowed
for shifting backwards.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default is ``self.dtype.na_value``.
.. versionadded:: 0.24.0
Returns
-------
shifted : ExtensionArray
Notes
-----
If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
returned.
If ``periods > len(self)``, then an array of size
len(self) is returned, with all values filled with
``self.dtype.na_value``.
|
[
"Shift",
"values",
"by",
"desired",
"number",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L479-L535
|
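A minimal demonstration of the documented shift semantics, using the nullable integer array as a concrete ExtensionArray:

import pandas as pd

arr = pd.array([1, 2, 3], dtype='Int64')
print(arr.shift(1))   # [NaN, 1, 2] -- filled with dtype.na_value
print(arr.shift(-1))  # [2, 3, NaN]
print(arr.shift(5))   # all-NA array of the same length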
20,138
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray.unique
|
def unique(self):
"""
Compute the ExtensionArray of unique values.
Returns
-------
uniques : ExtensionArray
"""
from pandas import unique
uniques = unique(self.astype(object))
return self._from_sequence(uniques, dtype=self.dtype)
|
python
|
def unique(self):
"""
Compute the ExtensionArray of unique values.
Returns
-------
uniques : ExtensionArray
"""
from pandas import unique
uniques = unique(self.astype(object))
return self._from_sequence(uniques, dtype=self.dtype)
|
[
"def",
"unique",
"(",
"self",
")",
":",
"from",
"pandas",
"import",
"unique",
"uniques",
"=",
"unique",
"(",
"self",
".",
"astype",
"(",
"object",
")",
")",
"return",
"self",
".",
"_from_sequence",
"(",
"uniques",
",",
"dtype",
"=",
"self",
".",
"dtype",
")"
] |
Compute the ExtensionArray of unique values.
Returns
-------
uniques : ExtensionArray
|
[
"Compute",
"the",
"ExtensionArray",
"of",
"unique",
"values",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L537-L548
|
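A quick sketch of the round-trip through pandas.unique described above:

import pandas as pd

arr = pd.array([1, 2, 1, None], dtype='Int64')
print(arr.unique())  # [1, 2, NaN] in order of first appearance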
20,139
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray._values_for_factorize
|
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
"""
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
            `na_sentinel` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
"""
return self.astype(object), np.nan
|
python
|
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
"""
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
            `na_sentinel` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
"""
return self.astype(object), np.nan
|
[
"def",
"_values_for_factorize",
"(",
"self",
")",
"->",
"Tuple",
"[",
"np",
".",
"ndarray",
",",
"Any",
"]",
":",
"return",
"self",
".",
"astype",
"(",
"object",
")",
",",
"np",
".",
"nan"
] |
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
`na_sentinel` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
|
[
"Return",
"an",
"array",
"and",
"missing",
"value",
"suitable",
"for",
"factorization",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L598-L620
|
20,140
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray.factorize
|
def factorize(
self,
na_sentinel: int = -1,
) -> Tuple[np.ndarray, ABCExtensionArray]:
"""
Encode the extension array as an enumerated type.
Parameters
----------
na_sentinel : int, default -1
Value to use in the `labels` array to indicate missing values.
Returns
-------
labels : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
pandas.factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
"""
        # Implementor note: There are two ways to override the behavior of
        # pandas.factorize
        # 1. _values_for_factorize and _from_factorized.
# Specify the values passed to pandas' internal factorization
# routines, and how to convert from those values back to the
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
from pandas.core.algorithms import _factorize_array
arr, na_value = self._values_for_factorize()
labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,
na_value=na_value)
uniques = self._from_factorized(uniques, self)
return labels, uniques
|
python
|
def factorize(
self,
na_sentinel: int = -1,
) -> Tuple[np.ndarray, ABCExtensionArray]:
"""
Encode the extension array as an enumerated type.
Parameters
----------
na_sentinel : int, default -1
Value to use in the `labels` array to indicate missing values.
Returns
-------
labels : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
pandas.factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
"""
        # Implementor note: There are two ways to override the behavior of
        # pandas.factorize
        # 1. _values_for_factorize and _from_factorized.
# Specify the values passed to pandas' internal factorization
# routines, and how to convert from those values back to the
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
from pandas.core.algorithms import _factorize_array
arr, na_value = self._values_for_factorize()
labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,
na_value=na_value)
uniques = self._from_factorized(uniques, self)
return labels, uniques
|
[
"def",
"factorize",
"(",
"self",
",",
"na_sentinel",
":",
"int",
"=",
"-",
"1",
",",
")",
"->",
"Tuple",
"[",
"np",
".",
"ndarray",
",",
"ABCExtensionArray",
"]",
":",
"# Impelmentor note: There are two ways to override the behavior of",
"# pandas.factorize",
"# 1. _values_for_factorize and _from_factorize.",
"# Specify the values passed to pandas' internal factorization",
"# routines, and how to convert from those values back to the",
"# original ExtensionArray.",
"# 2. ExtensionArray.factorize.",
"# Complete control over factorization.",
"from",
"pandas",
".",
"core",
".",
"algorithms",
"import",
"_factorize_array",
"arr",
",",
"na_value",
"=",
"self",
".",
"_values_for_factorize",
"(",
")",
"labels",
",",
"uniques",
"=",
"_factorize_array",
"(",
"arr",
",",
"na_sentinel",
"=",
"na_sentinel",
",",
"na_value",
"=",
"na_value",
")",
"uniques",
"=",
"self",
".",
"_from_factorized",
"(",
"uniques",
",",
"self",
")",
"return",
"labels",
",",
"uniques"
] |
Encode the extension array as an enumerated type.
Parameters
----------
na_sentinel : int, default -1
Value to use in the `labels` array to indicate missing values.
Returns
-------
labels : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
pandas.factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
|
[
"Encode",
"the",
"extension",
"array",
"as",
"an",
"enumerated",
"type",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L622-L672
|
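The na_sentinel contract in practice, sketched with the nullable integer array:

import pandas as pd

arr = pd.array([1, 1, 2, None], dtype='Int64')
labels, uniques = arr.factorize()
print(labels)   # [ 0  0  1 -1] -- missing values coded as na_sentinel
print(uniques)  # [1, 2] -- NA is not included in uniques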
20,141
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray._formatter
|
def _formatter(
self,
boxed: bool = False,
) -> Callable[[Any], Optional[str]]:
"""Formatting function for scalar values.
This is used in the default '__repr__'. The returned formatting
function receives instances of your scalar type.
Parameters
----------
boxed: bool, default False
            An indicator for whether or not your array is being printed
within a Series, DataFrame, or Index (True), or just by
itself (False). This may be useful if you want scalar values
to appear differently within a Series versus on its own (e.g.
quoted or not).
Returns
-------
Callable[[Any], str]
A callable that gets instances of the scalar type and
returns a string. By default, :func:`repr` is used
when ``boxed=False`` and :func:`str` is used when
``boxed=True``.
"""
if boxed:
return str
return repr
|
python
|
def _formatter(
self,
boxed: bool = False,
) -> Callable[[Any], Optional[str]]:
"""Formatting function for scalar values.
This is used in the default '__repr__'. The returned formatting
function receives instances of your scalar type.
Parameters
----------
boxed: bool, default False
            An indicator for whether or not your array is being printed
within a Series, DataFrame, or Index (True), or just by
itself (False). This may be useful if you want scalar values
to appear differently within a Series versus on its own (e.g.
quoted or not).
Returns
-------
Callable[[Any], str]
A callable that gets instances of the scalar type and
returns a string. By default, :func:`repr` is used
when ``boxed=False`` and :func:`str` is used when
``boxed=True``.
"""
if boxed:
return str
return repr
|
[
"def",
"_formatter",
"(",
"self",
",",
"boxed",
":",
"bool",
"=",
"False",
",",
")",
"->",
"Callable",
"[",
"[",
"Any",
"]",
",",
"Optional",
"[",
"str",
"]",
"]",
":",
"if",
"boxed",
":",
"return",
"str",
"return",
"repr"
] |
Formatting function for scalar values.
This is used in the default '__repr__'. The returned formatting
function receives instances of your scalar type.
Parameters
----------
boxed: bool, default False
An indicator for whether or not your array is being printed
within a Series, DataFrame, or Index (True), or just by
itself (False). This may be useful if you want scalar values
to appear differently within a Series versus on its own (e.g.
quoted or not).
Returns
-------
Callable[[Any], str]
A callable that gets instances of the scalar type and
returns a string. By default, :func:`repr` is used
when ``boxed=False`` and :func:`str` is used when
``boxed=True``.
|
[
"Formatting",
"function",
"for",
"scalar",
"values",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L857-L885
|
20,142
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionArray._reduce
|
def _reduce(self, name, skipna=True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype))
|
python
|
def _reduce(self, name, skipna=True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
"""
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype))
|
[
"def",
"_reduce",
"(",
"self",
",",
"name",
",",
"skipna",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"TypeError",
"(",
"\"cannot perform {name} with type {dtype}\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
")"
] |
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
|
[
"Return",
"a",
"scalar",
"result",
"of",
"performing",
"the",
"reduction",
"operation",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L939-L964
|
20,143
|
pandas-dev/pandas
|
pandas/core/arrays/base.py
|
ExtensionScalarOpsMixin._create_method
|
def _create_method(cls, op, coerce_to_dtype=True):
"""
A class method that returns a method that will correspond to an
operator for an ExtensionArray subclass, by dispatching to the
relevant operator defined on the individual elements of the
ExtensionArray.
Parameters
----------
op : function
An operator that takes arguments op(a, b)
coerce_to_dtype : bool, default True
boolean indicating whether to attempt to convert
the result to the underlying ExtensionArray dtype.
If it's not possible to create a new ExtensionArray with the
values, an ndarray is returned instead.
Returns
-------
Callable[[Any, Any], Union[ndarray, ExtensionArray]]
A method that can be bound to a class. When used, the method
receives the two arguments, one of which is the instance of
this class, and should return an ExtensionArray or an ndarray.
Returning an ndarray may be necessary when the result of the
`op` cannot be stored in the ExtensionArray. The dtype of the
ndarray uses NumPy's normal inference rules.
        Examples
        --------
        Given an ExtensionArray subclass called MyExtensionArray, use
        >>> __add__ = cls._create_method(operator.add)
        in the class definition of MyExtensionArray to create the operator
        for addition, which will be based on the operator implementation
        of the underlying elements of the ExtensionArray
"""
def _binop(self, other):
def convert_values(param):
if isinstance(param, ExtensionArray) or is_list_like(param):
ovalues = param
                else:  # Assume it's an object
ovalues = [param] * len(self)
return ovalues
if isinstance(other, (ABCSeries, ABCIndexClass)):
# rely on pandas to unbox and dispatch to us
return NotImplemented
lvalues = self
rvalues = convert_values(other)
# If the operator is not defined for the underlying objects,
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
def _maybe_convert(arr):
if coerce_to_dtype:
# https://github.com/pandas-dev/pandas/issues/22850
# We catch all regular exceptions here, and fall back
# to an ndarray.
try:
res = self._from_sequence(arr)
except Exception:
res = np.asarray(arr)
else:
res = np.asarray(arr)
return res
if op.__name__ in {'divmod', 'rdivmod'}:
a, b = zip(*res)
res = _maybe_convert(a), _maybe_convert(b)
else:
res = _maybe_convert(res)
return res
op_name = ops._get_op_name(op, True)
return set_function_name(_binop, op_name, cls)
|
python
|
def _create_method(cls, op, coerce_to_dtype=True):
"""
A class method that returns a method that will correspond to an
operator for an ExtensionArray subclass, by dispatching to the
relevant operator defined on the individual elements of the
ExtensionArray.
Parameters
----------
op : function
An operator that takes arguments op(a, b)
coerce_to_dtype : bool, default True
boolean indicating whether to attempt to convert
the result to the underlying ExtensionArray dtype.
If it's not possible to create a new ExtensionArray with the
values, an ndarray is returned instead.
Returns
-------
Callable[[Any, Any], Union[ndarray, ExtensionArray]]
A method that can be bound to a class. When used, the method
receives the two arguments, one of which is the instance of
this class, and should return an ExtensionArray or an ndarray.
Returning an ndarray may be necessary when the result of the
`op` cannot be stored in the ExtensionArray. The dtype of the
ndarray uses NumPy's normal inference rules.
        Examples
        --------
        Given an ExtensionArray subclass called MyExtensionArray, use
        >>> __add__ = cls._create_method(operator.add)
        in the class definition of MyExtensionArray to create the operator
        for addition, which will be based on the operator implementation
        of the underlying elements of the ExtensionArray
"""
def _binop(self, other):
def convert_values(param):
if isinstance(param, ExtensionArray) or is_list_like(param):
ovalues = param
                else:  # Assume it's an object
ovalues = [param] * len(self)
return ovalues
if isinstance(other, (ABCSeries, ABCIndexClass)):
# rely on pandas to unbox and dispatch to us
return NotImplemented
lvalues = self
rvalues = convert_values(other)
# If the operator is not defined for the underlying objects,
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
def _maybe_convert(arr):
if coerce_to_dtype:
# https://github.com/pandas-dev/pandas/issues/22850
# We catch all regular exceptions here, and fall back
# to an ndarray.
try:
res = self._from_sequence(arr)
except Exception:
res = np.asarray(arr)
else:
res = np.asarray(arr)
return res
if op.__name__ in {'divmod', 'rdivmod'}:
a, b = zip(*res)
res = _maybe_convert(a), _maybe_convert(b)
else:
res = _maybe_convert(res)
return res
op_name = ops._get_op_name(op, True)
return set_function_name(_binop, op_name, cls)
|
[
"def",
"_create_method",
"(",
"cls",
",",
"op",
",",
"coerce_to_dtype",
"=",
"True",
")",
":",
"def",
"_binop",
"(",
"self",
",",
"other",
")",
":",
"def",
"convert_values",
"(",
"param",
")",
":",
"if",
"isinstance",
"(",
"param",
",",
"ExtensionArray",
")",
"or",
"is_list_like",
"(",
"param",
")",
":",
"ovalues",
"=",
"param",
"else",
":",
"# Assume its an object",
"ovalues",
"=",
"[",
"param",
"]",
"*",
"len",
"(",
"self",
")",
"return",
"ovalues",
"if",
"isinstance",
"(",
"other",
",",
"(",
"ABCSeries",
",",
"ABCIndexClass",
")",
")",
":",
"# rely on pandas to unbox and dispatch to us",
"return",
"NotImplemented",
"lvalues",
"=",
"self",
"rvalues",
"=",
"convert_values",
"(",
"other",
")",
"# If the operator is not defined for the underlying objects,",
"# a TypeError should be raised",
"res",
"=",
"[",
"op",
"(",
"a",
",",
"b",
")",
"for",
"(",
"a",
",",
"b",
")",
"in",
"zip",
"(",
"lvalues",
",",
"rvalues",
")",
"]",
"def",
"_maybe_convert",
"(",
"arr",
")",
":",
"if",
"coerce_to_dtype",
":",
"# https://github.com/pandas-dev/pandas/issues/22850",
"# We catch all regular exceptions here, and fall back",
"# to an ndarray.",
"try",
":",
"res",
"=",
"self",
".",
"_from_sequence",
"(",
"arr",
")",
"except",
"Exception",
":",
"res",
"=",
"np",
".",
"asarray",
"(",
"arr",
")",
"else",
":",
"res",
"=",
"np",
".",
"asarray",
"(",
"arr",
")",
"return",
"res",
"if",
"op",
".",
"__name__",
"in",
"{",
"'divmod'",
",",
"'rdivmod'",
"}",
":",
"a",
",",
"b",
"=",
"zip",
"(",
"*",
"res",
")",
"res",
"=",
"_maybe_convert",
"(",
"a",
")",
",",
"_maybe_convert",
"(",
"b",
")",
"else",
":",
"res",
"=",
"_maybe_convert",
"(",
"res",
")",
"return",
"res",
"op_name",
"=",
"ops",
".",
"_get_op_name",
"(",
"op",
",",
"True",
")",
"return",
"set_function_name",
"(",
"_binop",
",",
"op_name",
",",
"cls",
")"
] |
A class method that returns a method that will correspond to an
operator for an ExtensionArray subclass, by dispatching to the
relevant operator defined on the individual elements of the
ExtensionArray.
Parameters
----------
op : function
An operator that takes arguments op(a, b)
coerce_to_dtype : bool, default True
boolean indicating whether to attempt to convert
the result to the underlying ExtensionArray dtype.
If it's not possible to create a new ExtensionArray with the
values, an ndarray is returned instead.
Returns
-------
Callable[[Any, Any], Union[ndarray, ExtensionArray]]
A method that can be bound to a class. When used, the method
receives the two arguments, one of which is the instance of
this class, and should return an ExtensionArray or an ndarray.
Returning an ndarray may be necessary when the result of the
`op` cannot be stored in the ExtensionArray. The dtype of the
ndarray uses NumPy's normal inference rules.
Examples
--------
Given an ExtensionArray subclass called MyExtensionArray, use
>>> __add__ = cls._create_method(operator.add)
in the class definition of MyExtensionArray to create the operator
for addition, which will be based on the operator implementation
of the underlying elements of the ExtensionArray
|
[
"A",
"class",
"method",
"that",
"returns",
"a",
"method",
"that",
"will",
"correspond",
"to",
"an",
"operator",
"for",
"an",
"ExtensionArray",
"subclass",
"by",
"dispatching",
"to",
"the",
"relevant",
"operator",
"defined",
"on",
"the",
"individual",
"elements",
"of",
"the",
"ExtensionArray",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/base.py#L1034-L1113
|
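A minimal sketch of wiring this up on a subclass. The class below is hypothetical and deliberately skeletal; a real ExtensionArray must implement the full interface (_from_sequence, __getitem__, __len__, ...) before the installed operators can actually run:

from pandas.api.extensions import ExtensionArray, ExtensionScalarOpsMixin

class MyExtensionArray(ExtensionArray, ExtensionScalarOpsMixin):
    pass

# _add_arithmetic_ops installs __add__, __sub__, ... via _create_method
MyExtensionArray._add_arithmetic_ops()
print(MyExtensionArray.__add__.__name__)  # '__add__'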
20,144
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
ea_passthrough
|
def ea_passthrough(array_method):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
array_method : method on an Array class
Returns
-------
method
"""
def method(self, *args, **kwargs):
return array_method(self._data, *args, **kwargs)
method.__name__ = array_method.__name__
method.__doc__ = array_method.__doc__
return method
|
python
|
def ea_passthrough(array_method):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
array_method : method on an Array class
Returns
-------
method
"""
def method(self, *args, **kwargs):
return array_method(self._data, *args, **kwargs)
method.__name__ = array_method.__name__
method.__doc__ = array_method.__doc__
return method
|
[
"def",
"ea_passthrough",
"(",
"array_method",
")",
":",
"def",
"method",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"array_method",
"(",
"self",
".",
"_data",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"method",
".",
"__name__",
"=",
"array_method",
".",
"__name__",
"method",
".",
"__doc__",
"=",
"array_method",
".",
"__doc__",
"return",
"method"
] |
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
array_method : method on an Array class
Returns
-------
method
|
[
"Make",
"an",
"alias",
"for",
"a",
"method",
"of",
"the",
"underlying",
"ExtensionArray",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L34-L52
|
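The same aliasing pattern, reduced to a self-contained toy outside pandas (the names here are illustrative only):

def passthrough(func):
    def method(self, *args, **kwargs):
        return func(self._data, *args, **kwargs)
    method.__name__ = func.__name__
    method.__doc__ = func.__doc__
    return method

class Wrapper:
    def __init__(self, data):
        self._data = data
    upper = passthrough(str.upper)

print(Wrapper('abc').upper())  # 'ABC'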
20,145
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin._create_comparison_method
|
def _create_comparison_method(cls, op):
"""
Create a comparison method that dispatches to ``cls.values``.
"""
def wrapper(self, other):
if isinstance(other, ABCSeries):
# the arrays defer to Series for comparison ops but the indexes
# don't, so we have to unwrap here.
other = other._values
result = op(self._data, maybe_unwrap_index(other))
return result
wrapper.__doc__ = op.__doc__
wrapper.__name__ = '__{}__'.format(op.__name__)
return wrapper
|
python
|
def _create_comparison_method(cls, op):
"""
Create a comparison method that dispatches to ``cls.values``.
"""
def wrapper(self, other):
if isinstance(other, ABCSeries):
# the arrays defer to Series for comparison ops but the indexes
# don't, so we have to unwrap here.
other = other._values
result = op(self._data, maybe_unwrap_index(other))
return result
wrapper.__doc__ = op.__doc__
wrapper.__name__ = '__{}__'.format(op.__name__)
return wrapper
|
[
"def",
"_create_comparison_method",
"(",
"cls",
",",
"op",
")",
":",
"def",
"wrapper",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"ABCSeries",
")",
":",
"# the arrays defer to Series for comparison ops but the indexes",
"# don't, so we have to unwrap here.",
"other",
"=",
"other",
".",
"_values",
"result",
"=",
"op",
"(",
"self",
".",
"_data",
",",
"maybe_unwrap_index",
"(",
"other",
")",
")",
"return",
"result",
"wrapper",
".",
"__doc__",
"=",
"op",
".",
"__doc__",
"wrapper",
".",
"__name__",
"=",
"'__{}__'",
".",
"format",
"(",
"op",
".",
"__name__",
")",
"return",
"wrapper"
] |
Create a comparison method that dispatches to ``cls.values``.
|
[
"Create",
"a",
"comparison",
"method",
"that",
"dispatches",
"to",
"cls",
".",
"values",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L107-L122
|
20,146
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin._join_i8_wrapper
|
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
"""
Create the join wrapper methods.
"""
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
|
python
|
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
"""
Create the join wrapper methods.
"""
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
|
[
"def",
"_join_i8_wrapper",
"(",
"joinf",
",",
"dtype",
",",
"with_indexers",
"=",
"True",
")",
":",
"from",
"pandas",
".",
"core",
".",
"arrays",
".",
"datetimelike",
"import",
"DatetimeLikeArrayMixin",
"@",
"staticmethod",
"def",
"wrapper",
"(",
"left",
",",
"right",
")",
":",
"if",
"isinstance",
"(",
"left",
",",
"(",
"np",
".",
"ndarray",
",",
"ABCIndex",
",",
"ABCSeries",
",",
"DatetimeLikeArrayMixin",
")",
")",
":",
"left",
"=",
"left",
".",
"view",
"(",
"'i8'",
")",
"if",
"isinstance",
"(",
"right",
",",
"(",
"np",
".",
"ndarray",
",",
"ABCIndex",
",",
"ABCSeries",
",",
"DatetimeLikeArrayMixin",
")",
")",
":",
"right",
"=",
"right",
".",
"view",
"(",
"'i8'",
")",
"results",
"=",
"joinf",
"(",
"left",
",",
"right",
")",
"if",
"with_indexers",
":",
"join_index",
",",
"left_indexer",
",",
"right_indexer",
"=",
"results",
"join_index",
"=",
"join_index",
".",
"view",
"(",
"dtype",
")",
"return",
"join_index",
",",
"left_indexer",
",",
"right_indexer",
"return",
"results",
"return",
"wrapper"
] |
Create the join wrapper methods.
|
[
"Create",
"the",
"join",
"wrapper",
"methods",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L171-L192
|
20,147
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin.sort_values
|
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index.
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._ndarray_values)
attribs = self._get_attributes_dict()
freq = attribs['freq']
if freq is not None and not is_period_dtype(self):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs['freq'] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
|
python
|
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index.
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._ndarray_values)
attribs = self._get_attributes_dict()
freq = attribs['freq']
if freq is not None and not is_period_dtype(self):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs['freq'] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
|
[
"def",
"sort_values",
"(",
"self",
",",
"return_indexer",
"=",
"False",
",",
"ascending",
"=",
"True",
")",
":",
"if",
"return_indexer",
":",
"_as",
"=",
"self",
".",
"argsort",
"(",
")",
"if",
"not",
"ascending",
":",
"_as",
"=",
"_as",
"[",
":",
":",
"-",
"1",
"]",
"sorted_index",
"=",
"self",
".",
"take",
"(",
"_as",
")",
"return",
"sorted_index",
",",
"_as",
"else",
":",
"sorted_values",
"=",
"np",
".",
"sort",
"(",
"self",
".",
"_ndarray_values",
")",
"attribs",
"=",
"self",
".",
"_get_attributes_dict",
"(",
")",
"freq",
"=",
"attribs",
"[",
"'freq'",
"]",
"if",
"freq",
"is",
"not",
"None",
"and",
"not",
"is_period_dtype",
"(",
"self",
")",
":",
"if",
"freq",
".",
"n",
">",
"0",
"and",
"not",
"ascending",
":",
"freq",
"=",
"freq",
"*",
"-",
"1",
"elif",
"freq",
".",
"n",
"<",
"0",
"and",
"ascending",
":",
"freq",
"=",
"freq",
"*",
"-",
"1",
"attribs",
"[",
"'freq'",
"]",
"=",
"freq",
"if",
"not",
"ascending",
":",
"sorted_values",
"=",
"sorted_values",
"[",
":",
":",
"-",
"1",
"]",
"return",
"self",
".",
"_simple_new",
"(",
"sorted_values",
",",
"*",
"*",
"attribs",
")"
] |
Return sorted copy of Index.
|
[
"Return",
"sorted",
"copy",
"of",
"Index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L236-L261
|
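A hedged usage sketch for the sort_values entry above: when a fixed-frequency index is sorted descending, the code negates freq so the result still describes an evenly spaced index.

import pandas as pd

idx = pd.date_range('2019-01-01', periods=3, freq='D')
desc = idx.sort_values(ascending=False)
print(desc.freq)   # the daily offset with its sign flipped, e.g. <-1 * Day>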
20,148
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin.min
|
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
|
python
|
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
|
[
"def",
"min",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_min",
"(",
"args",
",",
"kwargs",
")",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"if",
"not",
"len",
"(",
"self",
")",
":",
"return",
"self",
".",
"_na_value",
"i8",
"=",
"self",
".",
"asi8",
"try",
":",
"# quick check",
"if",
"len",
"(",
"i8",
")",
"and",
"self",
".",
"is_monotonic",
":",
"if",
"i8",
"[",
"0",
"]",
"!=",
"iNaT",
":",
"return",
"self",
".",
"_box_func",
"(",
"i8",
"[",
"0",
"]",
")",
"if",
"self",
".",
"hasnans",
":",
"if",
"skipna",
":",
"min_stamp",
"=",
"self",
"[",
"~",
"self",
".",
"_isnan",
"]",
".",
"asi8",
".",
"min",
"(",
")",
"else",
":",
"return",
"self",
".",
"_na_value",
"else",
":",
"min_stamp",
"=",
"i8",
".",
"min",
"(",
")",
"return",
"self",
".",
"_box_func",
"(",
"min_stamp",
")",
"except",
"ValueError",
":",
"return",
"self",
".",
"_na_value"
] |
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
|
[
"Return",
"the",
"minimum",
"value",
"of",
"the",
"Index",
"or",
"minimum",
"along",
"an",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L315-L347
|
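A short usage sketch for the min entry above, exercising the skipna branch:

import pandas as pd

idx = pd.DatetimeIndex(['2019-01-02', pd.NaT, '2019-01-01'])
print(idx.min())               # Timestamp('2019-01-01 00:00:00'); NaT is skipped
print(idx.min(skipna=False))   # NaT, because missing values are kept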
20,149
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin.argmin
|
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
|
python
|
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
|
[
"def",
"argmin",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_argmin",
"(",
"args",
",",
"kwargs",
")",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"i8",
"=",
"self",
".",
"asi8",
"if",
"self",
".",
"hasnans",
":",
"mask",
"=",
"self",
".",
"_isnan",
"if",
"mask",
".",
"all",
"(",
")",
"or",
"not",
"skipna",
":",
"return",
"-",
"1",
"i8",
"=",
"i8",
".",
"copy",
"(",
")",
"i8",
"[",
"mask",
"]",
"=",
"np",
".",
"iinfo",
"(",
"'int64'",
")",
".",
"max",
"return",
"i8",
".",
"argmin",
"(",
")"
] |
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
|
[
"Returns",
"the",
"indices",
"of",
"the",
"minimum",
"values",
"along",
"an",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L349-L370
|
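A sketch for the argmin entry above: NaT slots are overwritten with the largest int64 so they can never win, and -1 is the sentinel when nothing valid remains.

import pandas as pd

idx = pd.DatetimeIndex(['2019-01-02', pd.NaT, '2019-01-01'])
print(idx.argmin())                         # 2
print(pd.DatetimeIndex([pd.NaT]).argmin())  # -1 (all values missing)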
20,150
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin.max
|
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
if skipna:
max_stamp = self[~self._isnan].asi8.max()
else:
return self._na_value
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
|
python
|
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
if skipna:
max_stamp = self[~self._isnan].asi8.max()
else:
return self._na_value
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
|
[
"def",
"max",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_max",
"(",
"args",
",",
"kwargs",
")",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"if",
"not",
"len",
"(",
"self",
")",
":",
"return",
"self",
".",
"_na_value",
"i8",
"=",
"self",
".",
"asi8",
"try",
":",
"# quick check",
"if",
"len",
"(",
"i8",
")",
"and",
"self",
".",
"is_monotonic",
":",
"if",
"i8",
"[",
"-",
"1",
"]",
"!=",
"iNaT",
":",
"return",
"self",
".",
"_box_func",
"(",
"i8",
"[",
"-",
"1",
"]",
")",
"if",
"self",
".",
"hasnans",
":",
"if",
"skipna",
":",
"max_stamp",
"=",
"self",
"[",
"~",
"self",
".",
"_isnan",
"]",
".",
"asi8",
".",
"max",
"(",
")",
"else",
":",
"return",
"self",
".",
"_na_value",
"else",
":",
"max_stamp",
"=",
"i8",
".",
"max",
"(",
")",
"return",
"self",
".",
"_box_func",
"(",
"max_stamp",
")",
"except",
"ValueError",
":",
"return",
"self",
".",
"_na_value"
] |
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
|
[
"Return",
"the",
"maximum",
"value",
"of",
"the",
"Index",
"or",
"maximum",
"along",
"an",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L372-L404
|
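max mirrors min above; a quick sketch of the same skipna behavior:

import pandas as pd

idx = pd.DatetimeIndex(['2019-01-02', pd.NaT, '2019-01-01'])
print(idx.max())               # Timestamp('2019-01-02 00:00:00')
print(idx.max(skipna=False))   # NaT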
20,151
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin.argmax
|
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
|
python
|
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
|
[
"def",
"argmax",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_argmax",
"(",
"args",
",",
"kwargs",
")",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"i8",
"=",
"self",
".",
"asi8",
"if",
"self",
".",
"hasnans",
":",
"mask",
"=",
"self",
".",
"_isnan",
"if",
"mask",
".",
"all",
"(",
")",
"or",
"not",
"skipna",
":",
"return",
"-",
"1",
"i8",
"=",
"i8",
".",
"copy",
"(",
")",
"i8",
"[",
"mask",
"]",
"=",
"0",
"return",
"i8",
".",
"argmax",
"(",
")"
] |
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
|
[
"Returns",
"the",
"indices",
"of",
"the",
"maximum",
"values",
"along",
"an",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L406-L427
|
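A sketch for the argmax entry above: here the NaT slots are replaced with 0 before taking the argmax, so they cannot win for post-epoch data:

import pandas as pd

idx = pd.DatetimeIndex(['2019-01-02', pd.NaT, '2019-01-01'])
print(idx.argmax())   # 0 -- the NaT slot was masked with 0 first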
20,152
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin._convert_scalar_indexer
|
def _convert_scalar_indexer(self, key, kind=None):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return super()._convert_scalar_indexer(key, kind=kind)
|
python
|
def _convert_scalar_indexer(self, key, kind=None):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return super()._convert_scalar_indexer(key, kind=kind)
|
[
"def",
"_convert_scalar_indexer",
"(",
"self",
",",
"key",
",",
"kind",
"=",
"None",
")",
":",
"assert",
"kind",
"in",
"[",
"'ix'",
",",
"'loc'",
",",
"'getitem'",
",",
"'iloc'",
",",
"None",
"]",
"# we don't allow integer/float indexing for loc",
"# we don't allow float indexing for ix/getitem",
"if",
"is_scalar",
"(",
"key",
")",
":",
"is_int",
"=",
"is_integer",
"(",
"key",
")",
"is_flt",
"=",
"is_float",
"(",
"key",
")",
"if",
"kind",
"in",
"[",
"'loc'",
"]",
"and",
"(",
"is_int",
"or",
"is_flt",
")",
":",
"self",
".",
"_invalid_indexer",
"(",
"'index'",
",",
"key",
")",
"elif",
"kind",
"in",
"[",
"'ix'",
",",
"'getitem'",
"]",
"and",
"is_flt",
":",
"self",
".",
"_invalid_indexer",
"(",
"'index'",
",",
"key",
")",
"return",
"super",
"(",
")",
".",
"_convert_scalar_indexer",
"(",
"key",
",",
"kind",
"=",
"kind",
")"
] |
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
|
[
"We",
"don",
"t",
"allow",
"integer",
"or",
"float",
"indexing",
"on",
"datetime",
"-",
"like",
"when",
"using",
"loc",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L454-L477
|
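What the _convert_scalar_indexer entry above means in practice, as a hedged sketch (at this commit _invalid_indexer raises TypeError): positional integers still work through iloc, but .loc rejects them on a datetime-like index.

import pandas as pd

s = pd.Series([10, 20], index=pd.date_range('2019-01-01', periods=2))
print(s.iloc[0])   # 10: positional indexing is unaffected
try:
    s.loc[0]       # integer label is rejected on a DatetimeIndex
except TypeError as err:
    print(err)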
20,153
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin.isin
|
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
|
python
|
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
|
[
"def",
"isin",
"(",
"self",
",",
"values",
")",
":",
"if",
"not",
"isinstance",
"(",
"values",
",",
"type",
"(",
"self",
")",
")",
":",
"try",
":",
"values",
"=",
"type",
"(",
"self",
")",
"(",
"values",
")",
"except",
"ValueError",
":",
"return",
"self",
".",
"astype",
"(",
"object",
")",
".",
"isin",
"(",
"values",
")",
"return",
"algorithms",
".",
"isin",
"(",
"self",
".",
"asi8",
",",
"values",
".",
"asi8",
")"
] |
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
|
[
"Compute",
"boolean",
"array",
"of",
"whether",
"each",
"index",
"value",
"is",
"found",
"in",
"the",
"passed",
"set",
"of",
"values",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L511-L530
|
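A usage sketch for the isin entry above, including the object fallback taken when the values cannot be parsed into the same index type:

import pandas as pd

idx = pd.date_range('2019-01-01', periods=3)
print(idx.isin(['2019-01-02']))   # [False  True False]
print(idx.isin(['not a date']))   # all False, via the astype(object) fallback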
20,154
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin._concat_same_dtype
|
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
attribs = self._get_attributes_dict()
attribs['name'] = name
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError('to_concat must have the same tz')
new_data = type(self._values)._concat_same_type(to_concat).asi8
# GH 3232: If the concat result is evenly spaced, we can retain the
# original frequency
is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
if not is_period_dtype(self) and not is_diff_evenly_spaced:
# reset freq
attribs['freq'] = None
return self._simple_new(new_data, **attribs)
|
python
|
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
attribs = self._get_attributes_dict()
attribs['name'] = name
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError('to_concat must have the same tz')
new_data = type(self._values)._concat_same_type(to_concat).asi8
# GH 3232: If the concat result is evenly spaced, we can retain the
# original frequency
is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
if not is_period_dtype(self) and not is_diff_evenly_spaced:
# reset freq
attribs['freq'] = None
return self._simple_new(new_data, **attribs)
|
[
"def",
"_concat_same_dtype",
"(",
"self",
",",
"to_concat",
",",
"name",
")",
":",
"attribs",
"=",
"self",
".",
"_get_attributes_dict",
"(",
")",
"attribs",
"[",
"'name'",
"]",
"=",
"name",
"# do not pass tz to set because tzlocal cannot be hashed",
"if",
"len",
"(",
"{",
"str",
"(",
"x",
".",
"dtype",
")",
"for",
"x",
"in",
"to_concat",
"}",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'to_concat must have the same tz'",
")",
"new_data",
"=",
"type",
"(",
"self",
".",
"_values",
")",
".",
"_concat_same_type",
"(",
"to_concat",
")",
".",
"asi8",
"# GH 3232: If the concat result is evenly spaced, we can retain the",
"# original frequency",
"is_diff_evenly_spaced",
"=",
"len",
"(",
"unique_deltas",
"(",
"new_data",
")",
")",
"==",
"1",
"if",
"not",
"is_period_dtype",
"(",
"self",
")",
"and",
"not",
"is_diff_evenly_spaced",
":",
"# reset freq",
"attribs",
"[",
"'freq'",
"]",
"=",
"None",
"return",
"self",
".",
"_simple_new",
"(",
"new_data",
",",
"*",
"*",
"attribs",
")"
] |
Concatenate to_concat which has the same class.
|
[
"Concatenate",
"to_concat",
"which",
"has",
"the",
"same",
"class",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L578-L597
|
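A sketch of the GH 3232 behavior above, reached through the public append API: the frequency survives only when the concatenated values are still evenly spaced.

import pandas as pd

a = pd.date_range('2019-01-01', periods=2, freq='D')
b = pd.date_range('2019-01-03', periods=2, freq='D')
print(a.append(b).freq)   # <Day>: the deltas are still uniform
print(a.append(a).freq)   # None: the result is no longer evenly spaced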
20,155
|
pandas-dev/pandas
|
pandas/core/indexes/datetimelike.py
|
DatetimeIndexOpsMixin.shift
|
def shift(self, periods, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
result = self._data._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
|
python
|
def shift(self, periods, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
result = self._data._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
|
[
"def",
"shift",
"(",
"self",
",",
"periods",
",",
"freq",
"=",
"None",
")",
":",
"result",
"=",
"self",
".",
"_data",
".",
"_time_shift",
"(",
"periods",
",",
"freq",
"=",
"freq",
")",
"return",
"type",
"(",
"self",
")",
"(",
"result",
",",
"name",
"=",
"self",
".",
"name",
")"
] |
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
|
[
"Shift",
"index",
"by",
"desired",
"number",
"of",
"time",
"frequency",
"increments",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/datetimelike.py#L613-L644
|
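A usage sketch for the shift entry above:

import pandas as pd

idx = pd.date_range('2019-01-01', periods=3, freq='D')
print(idx.shift(2))             # each timestamp moved forward by 2 * 'D'
print(idx.shift(3, freq='H'))   # moved by 3 hours instead of the index's own freq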
20,156
|
pandas-dev/pandas
|
pandas/core/generic.py
|
_single_replace
|
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
|
python
|
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
|
[
"def",
"_single_replace",
"(",
"self",
",",
"to_replace",
",",
"method",
",",
"inplace",
",",
"limit",
")",
":",
"if",
"self",
".",
"ndim",
"!=",
"1",
":",
"raise",
"TypeError",
"(",
"'cannot replace {0} with method {1} on a {2}'",
".",
"format",
"(",
"to_replace",
",",
"method",
",",
"type",
"(",
"self",
")",
".",
"__name__",
")",
")",
"orig_dtype",
"=",
"self",
".",
"dtype",
"result",
"=",
"self",
"if",
"inplace",
"else",
"self",
".",
"copy",
"(",
")",
"fill_f",
"=",
"missing",
".",
"get_fill_func",
"(",
"method",
")",
"mask",
"=",
"missing",
".",
"mask_missing",
"(",
"result",
".",
"values",
",",
"to_replace",
")",
"values",
"=",
"fill_f",
"(",
"result",
".",
"values",
",",
"limit",
"=",
"limit",
",",
"mask",
"=",
"mask",
")",
"if",
"values",
".",
"dtype",
"==",
"orig_dtype",
"and",
"inplace",
":",
"return",
"result",
"=",
"pd",
".",
"Series",
"(",
"values",
",",
"index",
"=",
"self",
".",
"index",
",",
"dtype",
"=",
"self",
".",
"dtype",
")",
".",
"__finalize__",
"(",
"self",
")",
"if",
"inplace",
":",
"self",
".",
"_update_inplace",
"(",
"result",
".",
"_data",
")",
"return",
"return",
"result"
] |
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
|
[
"Replaces",
"values",
"in",
"a",
"Series",
"using",
"the",
"fill",
"method",
"specified",
"when",
"no",
"replacement",
"value",
"is",
"given",
"in",
"the",
"replace",
"method"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L69-L95
|
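_single_replace above backs the public Series.replace call when a fill method is given instead of a replacement value; a hedged sketch:

import pandas as pd

s = pd.Series([1, 2, 3, 2])
# matches of 2 are filled from the previous value (pad/ffill)
print(s.replace(to_replace=2, method='pad'))   # 1, 1, 3, 3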
20,157
|
pandas-dev/pandas
|
pandas/core/generic.py
|
_doc_parms
|
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ', '.join("{0} ({1})".format(a, i)
for i, a in enumerate(cls._AXIS_ORDERS))
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
name2 = cls.__name__
return axis_descr, name, name2
|
python
|
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ', '.join("{0} ({1})".format(a, i)
for i, a in enumerate(cls._AXIS_ORDERS))
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
name2 = cls.__name__
return axis_descr, name, name2
|
[
"def",
"_doc_parms",
"(",
"cls",
")",
":",
"axis_descr",
"=",
"\"{%s}\"",
"%",
"', '",
".",
"join",
"(",
"\"{0} ({1})\"",
".",
"format",
"(",
"a",
",",
"i",
")",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"cls",
".",
"_AXIS_ORDERS",
")",
")",
"name",
"=",
"(",
"cls",
".",
"_constructor_sliced",
".",
"__name__",
"if",
"cls",
".",
"_AXIS_LEN",
">",
"1",
"else",
"'scalar'",
")",
"name2",
"=",
"cls",
".",
"__name__",
"return",
"axis_descr",
",",
"name",
",",
"name2"
] |
Return a tuple of the doc parms.
|
[
"Return",
"a",
"tuple",
"of",
"the",
"doc",
"parms",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L10299-L10306
|
20,158
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._init_mgr
|
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(axe,
axis=self._get_block_manager_axis(a),
copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
|
python
|
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(axe,
axis=self._get_block_manager_axis(a),
copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
|
[
"def",
"_init_mgr",
"(",
"self",
",",
"mgr",
",",
"axes",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"for",
"a",
",",
"axe",
"in",
"axes",
".",
"items",
"(",
")",
":",
"if",
"axe",
"is",
"not",
"None",
":",
"mgr",
"=",
"mgr",
".",
"reindex_axis",
"(",
"axe",
",",
"axis",
"=",
"self",
".",
"_get_block_manager_axis",
"(",
"a",
")",
",",
"copy",
"=",
"False",
")",
"# make a copy if explicitly requested",
"if",
"copy",
":",
"mgr",
"=",
"mgr",
".",
"copy",
"(",
")",
"if",
"dtype",
"is",
"not",
"None",
":",
"# avoid further copies if we can",
"if",
"len",
"(",
"mgr",
".",
"blocks",
")",
">",
"1",
"or",
"mgr",
".",
"blocks",
"[",
"0",
"]",
".",
"values",
".",
"dtype",
"!=",
"dtype",
":",
"mgr",
"=",
"mgr",
".",
"astype",
"(",
"dtype",
"=",
"dtype",
")",
"return",
"mgr"
] |
passed a manager and an axes dict
|
[
"passed",
"a",
"manager",
"and",
"a",
"axes",
"dict"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L141-L156
|
20,159
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._validate_dtype
|
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
raise NotImplementedError("compound dtypes are not implemented"
" in the {0} constructor"
.format(self.__class__.__name__))
return dtype
|
python
|
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
raise NotImplementedError("compound dtypes are not implemented"
" in the {0} constructor"
.format(self.__class__.__name__))
return dtype
|
[
"def",
"_validate_dtype",
"(",
"self",
",",
"dtype",
")",
":",
"if",
"dtype",
"is",
"not",
"None",
":",
"dtype",
"=",
"pandas_dtype",
"(",
"dtype",
")",
"# a compound dtype",
"if",
"dtype",
".",
"kind",
"==",
"'V'",
":",
"raise",
"NotImplementedError",
"(",
"\"compound dtypes are not implemented\"",
"\" in the {0} constructor\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"return",
"dtype"
] |
validate the passed dtype
|
[
"validate",
"the",
"passed",
"dtype"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L175-L187
|
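A sketch for the _validate_dtype entry above: compound (structured) NumPy dtypes have kind 'V' and are rejected by the constructors.

import pandas as pd

try:
    pd.Series([1], dtype='i4, f4')   # 'i4, f4' parses to a structured dtype
except NotImplementedError as err:
    print(err)   # compound dtypes are not implemented in the Series constructor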
20,160
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._setup_axes
|
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict)
|
python
|
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict)
|
[
"def",
"_setup_axes",
"(",
"cls",
",",
"axes",
",",
"info_axis",
"=",
"None",
",",
"stat_axis",
"=",
"None",
",",
"aliases",
"=",
"None",
",",
"slicers",
"=",
"None",
",",
"axes_are_reversed",
"=",
"False",
",",
"build_axes",
"=",
"True",
",",
"ns",
"=",
"None",
",",
"docs",
"=",
"None",
")",
":",
"cls",
".",
"_AXIS_ORDERS",
"=",
"axes",
"cls",
".",
"_AXIS_NUMBERS",
"=",
"{",
"a",
":",
"i",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"axes",
")",
"}",
"cls",
".",
"_AXIS_LEN",
"=",
"len",
"(",
"axes",
")",
"cls",
".",
"_AXIS_ALIASES",
"=",
"aliases",
"or",
"dict",
"(",
")",
"cls",
".",
"_AXIS_IALIASES",
"=",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"cls",
".",
"_AXIS_ALIASES",
".",
"items",
"(",
")",
"}",
"cls",
".",
"_AXIS_NAMES",
"=",
"dict",
"(",
"enumerate",
"(",
"axes",
")",
")",
"cls",
".",
"_AXIS_SLICEMAP",
"=",
"slicers",
"or",
"None",
"cls",
".",
"_AXIS_REVERSED",
"=",
"axes_are_reversed",
"# typ",
"setattr",
"(",
"cls",
",",
"'_typ'",
",",
"cls",
".",
"__name__",
".",
"lower",
"(",
")",
")",
"# indexing support",
"cls",
".",
"_ix",
"=",
"None",
"if",
"info_axis",
"is",
"not",
"None",
":",
"cls",
".",
"_info_axis_number",
"=",
"info_axis",
"cls",
".",
"_info_axis_name",
"=",
"axes",
"[",
"info_axis",
"]",
"if",
"stat_axis",
"is",
"not",
"None",
":",
"cls",
".",
"_stat_axis_number",
"=",
"stat_axis",
"cls",
".",
"_stat_axis_name",
"=",
"axes",
"[",
"stat_axis",
"]",
"# setup the actual axis",
"if",
"build_axes",
":",
"def",
"set_axis",
"(",
"a",
",",
"i",
")",
":",
"setattr",
"(",
"cls",
",",
"a",
",",
"properties",
".",
"AxisProperty",
"(",
"i",
",",
"docs",
".",
"get",
"(",
"a",
",",
"a",
")",
")",
")",
"cls",
".",
"_internal_names_set",
".",
"add",
"(",
"a",
")",
"if",
"axes_are_reversed",
":",
"m",
"=",
"cls",
".",
"_AXIS_LEN",
"-",
"1",
"for",
"i",
",",
"a",
"in",
"cls",
".",
"_AXIS_NAMES",
".",
"items",
"(",
")",
":",
"set_axis",
"(",
"a",
",",
"m",
"-",
"i",
")",
"else",
":",
"for",
"i",
",",
"a",
"in",
"cls",
".",
"_AXIS_NAMES",
".",
"items",
"(",
")",
":",
"set_axis",
"(",
"a",
",",
"i",
")",
"assert",
"not",
"isinstance",
"(",
"ns",
",",
"dict",
")"
] |
Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
|
[
"Provide",
"axes",
"setup",
"for",
"the",
"major",
"PandasObjects",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L217-L272
|
20,161
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._construct_axes_dict_from
|
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
|
python
|
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
|
[
"def",
"_construct_axes_dict_from",
"(",
"self",
",",
"axes",
",",
"*",
"*",
"kwargs",
")",
":",
"d",
"=",
"{",
"a",
":",
"ax",
"for",
"a",
",",
"ax",
"in",
"zip",
"(",
"self",
".",
"_AXIS_ORDERS",
",",
"axes",
")",
"}",
"d",
".",
"update",
"(",
"kwargs",
")",
"return",
"d"
] |
Return an axes dictionary for the passed axes.
|
[
"Return",
"an",
"axes",
"dictionary",
"for",
"the",
"passed",
"axes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L281-L285
|
20,162
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._get_block_manager_axis
|
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
|
python
|
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
|
[
"def",
"_get_block_manager_axis",
"(",
"cls",
",",
"axis",
")",
":",
"axis",
"=",
"cls",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"cls",
".",
"_AXIS_REVERSED",
":",
"m",
"=",
"cls",
".",
"_AXIS_LEN",
"-",
"1",
"return",
"m",
"-",
"axis",
"return",
"axis"
] |
Map the axis to the block_manager axis.
|
[
"Map",
"the",
"axis",
"to",
"the",
"block_manager",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L379-L385
|
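A hedged illustration of the mapping above, calling the private method purely for demonstration: DataFrame stores its axes reversed inside the BlockManager, so the user-facing row axis 0 maps to internal axis 1 and vice versa.

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
print(df._get_block_manager_axis(0))   # 1
print(df._get_block_manager_axis(1))   # 0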
20,163
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._get_space_character_free_column_resolvers
|
def _get_space_character_free_column_resolvers(self):
"""Return the space character free column resolvers of a dataframe.
Column names with spaces are 'cleaned up' so that they can be referred
to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.common import _remove_spaces_column_name
return {_remove_spaces_column_name(k): v for k, v
in self.iteritems()}
|
python
|
def _get_space_character_free_column_resolvers(self):
"""Return the space character free column resolvers of a dataframe.
Column names with spaces are 'cleaned up' so that they can be referred
to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.common import _remove_spaces_column_name
return {_remove_spaces_column_name(k): v for k, v
in self.iteritems()}
|
[
"def",
"_get_space_character_free_column_resolvers",
"(",
"self",
")",
":",
"from",
"pandas",
".",
"core",
".",
"computation",
".",
"common",
"import",
"_remove_spaces_column_name",
"return",
"{",
"_remove_spaces_column_name",
"(",
"k",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"iteritems",
"(",
")",
"}"
] |
Return the space character free column resolvers of a dataframe.
Column names with spaces are 'cleaned up' so that they can be referred
to by backtick quoting.
Used in :meth:`DataFrame.eval`.
|
[
"Return",
"the",
"space",
"character",
"free",
"column",
"resolvers",
"of",
"a",
"dataframe",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L423-L433
|
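The resolver above is what lets eval/query reference columns whose names contain spaces, via backtick quoting (available around this commit); a sketch:

import pandas as pd

df = pd.DataFrame({'max speed': [1.0, 2.0]})
print(df.eval('`max speed` * 2'))   # the backticked name resolves to the column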
20,164
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.shape
|
def shape(self):
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
|
python
|
def shape(self):
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
|
[
"def",
"shape",
"(",
"self",
")",
":",
"return",
"tuple",
"(",
"len",
"(",
"self",
".",
"_get_axis",
"(",
"a",
")",
")",
"for",
"a",
"in",
"self",
".",
"_AXIS_ORDERS",
")"
] |
Return a tuple of axis dimensions
|
[
"Return",
"a",
"tuple",
"of",
"axis",
"dimensions"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L444-L448
|
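A trivial but concrete sketch for the shape entry above: one length per axis, in _AXIS_ORDERS order.

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
print(df.shape)   # (3, 2)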
20,165
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.swapaxes
|
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
|
python
|
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
|
[
"def",
"swapaxes",
"(",
"self",
",",
"axis1",
",",
"axis2",
",",
"copy",
"=",
"True",
")",
":",
"i",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis1",
")",
"j",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis2",
")",
"if",
"i",
"==",
"j",
":",
"if",
"copy",
":",
"return",
"self",
".",
"copy",
"(",
")",
"return",
"self",
"mapping",
"=",
"{",
"i",
":",
"j",
",",
"j",
":",
"i",
"}",
"new_axes",
"=",
"(",
"self",
".",
"_get_axis",
"(",
"mapping",
".",
"get",
"(",
"k",
",",
"k",
")",
")",
"for",
"k",
"in",
"range",
"(",
"self",
".",
"_AXIS_LEN",
")",
")",
"new_values",
"=",
"self",
".",
"values",
".",
"swapaxes",
"(",
"i",
",",
"j",
")",
"if",
"copy",
":",
"new_values",
"=",
"new_values",
".",
"copy",
"(",
")",
"return",
"self",
".",
"_constructor",
"(",
"new_values",
",",
"*",
"new_axes",
")",
".",
"__finalize__",
"(",
"self",
")"
] |
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
|
[
"Interchange",
"axes",
"and",
"swap",
"values",
"axes",
"appropriately",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L694-L718
|
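A usage sketch for the swapaxes entry above, including the same-axis short circuit:

import pandas as pd

df = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
print(df.swapaxes(0, 1))                     # equivalent to df.T for a DataFrame
print(df.swapaxes(0, 0, copy=False) is df)   # True: a no-op when no copy is asked for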
20,166
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.pop
|
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
|
python
|
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
|
[
"def",
"pop",
"(",
"self",
",",
"item",
")",
":",
"result",
"=",
"self",
"[",
"item",
"]",
"del",
"self",
"[",
"item",
"]",
"try",
":",
"result",
".",
"_reset_cacher",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"return",
"result"
] |
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
|
[
"Return",
"item",
"and",
"drop",
"from",
"frame",
".",
"Raise",
"KeyError",
"if",
"not",
"found",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L779-L827
|
20,167
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.squeeze
|
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = (self._AXIS_NAMES if axis is None else
(self._get_axis_number(axis),))
try:
return self.iloc[
tuple(0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes))]
except Exception:
return self
|
python
|
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = (self._AXIS_NAMES if axis is None else
(self._get_axis_number(axis),))
try:
return self.iloc[
tuple(0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes))]
except Exception:
return self
|
[
"def",
"squeeze",
"(",
"self",
",",
"axis",
"=",
"None",
")",
":",
"axis",
"=",
"(",
"self",
".",
"_AXIS_NAMES",
"if",
"axis",
"is",
"None",
"else",
"(",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
",",
")",
")",
"try",
":",
"return",
"self",
".",
"iloc",
"[",
"tuple",
"(",
"0",
"if",
"i",
"in",
"axis",
"and",
"len",
"(",
"a",
")",
"==",
"1",
"else",
"slice",
"(",
"None",
")",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"self",
".",
"axes",
")",
")",
"]",
"except",
"Exception",
":",
"return",
"self"
] |
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
|
[
"Squeeze",
"1",
"dimensional",
"axis",
"objects",
"into",
"scalars",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L829-L941
|
20,168
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.swaplevel
|
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
|
python
|
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
|
[
"def",
"swaplevel",
"(",
"self",
",",
"i",
"=",
"-",
"2",
",",
"j",
"=",
"-",
"1",
",",
"axis",
"=",
"0",
")",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"result",
"=",
"self",
".",
"copy",
"(",
")",
"labels",
"=",
"result",
".",
"_data",
".",
"axes",
"[",
"axis",
"]",
"result",
".",
"_data",
".",
"set_axis",
"(",
"axis",
",",
"labels",
".",
"swaplevel",
"(",
"i",
",",
"j",
")",
")",
"return",
"result"
] |
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
|
[
"Swap",
"levels",
"i",
"and",
"j",
"in",
"a",
"MultiIndex",
"on",
"a",
"particular",
"axis"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L943-L965
|
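A usage sketch for the swaplevel entry above; with the defaults it swaps the two innermost levels:

import pandas as pd

idx = pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['outer', 'inner'])
df = pd.DataFrame({'v': range(4)}, index=idx)
print(df.swaplevel().index.names)                  # ['inner', 'outer']
print(df.swaplevel('outer', 'inner').index.names)  # level names also work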
20,169
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.rename_axis
|
def rename_axis(self, mapper=sentinel, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of
the corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=sentinel)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
axis = kwargs.pop('axis', 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError('rename_axis() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
inplace = validate_bool_kwarg(inplace, 'inplace')
if (mapper is not sentinel):
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
# Deprecated (v0.21) behavior is if mapper is specified,
# and not a list or scalar, then call rename
msg = ("Using 'rename_axis' to alter labels is deprecated. "
"Use '.rename' instead")
warnings.warn(msg, FutureWarning, stacklevel=3)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is sentinel:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not
is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com._get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis,
inplace=True)
if not inplace:
return result
|
python
|
def rename_axis(self, mapper=sentinel, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or function transformation to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of
the corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=sentinel)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
axis = kwargs.pop('axis', 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError('rename_axis() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
inplace = validate_bool_kwarg(inplace, 'inplace')
if (mapper is not sentinel):
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
# Deprecated (v0.21) behavior is if mapper is specified,
# and not a list or scalar, then call rename
msg = ("Using 'rename_axis' to alter labels is deprecated. "
"Use '.rename' instead")
warnings.warn(msg, FutureWarning, stacklevel=3)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is sentinel:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not
is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com._get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis,
inplace=True)
if not inplace:
return result
|
[
"def",
"rename_axis",
"(",
"self",
",",
"mapper",
"=",
"sentinel",
",",
"*",
"*",
"kwargs",
")",
":",
"axes",
",",
"kwargs",
"=",
"self",
".",
"_construct_axes_from_arguments",
"(",
"(",
")",
",",
"kwargs",
",",
"sentinel",
"=",
"sentinel",
")",
"copy",
"=",
"kwargs",
".",
"pop",
"(",
"'copy'",
",",
"True",
")",
"inplace",
"=",
"kwargs",
".",
"pop",
"(",
"'inplace'",
",",
"False",
")",
"axis",
"=",
"kwargs",
".",
"pop",
"(",
"'axis'",
",",
"0",
")",
"if",
"axis",
"is",
"not",
"None",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"'rename_axis() got an unexpected keyword '",
"'argument \"{0}\"'",
".",
"format",
"(",
"list",
"(",
"kwargs",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
")",
")",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"if",
"(",
"mapper",
"is",
"not",
"sentinel",
")",
":",
"# Use v0.23 behavior if a scalar or list",
"non_mapper",
"=",
"is_scalar",
"(",
"mapper",
")",
"or",
"(",
"is_list_like",
"(",
"mapper",
")",
"and",
"not",
"is_dict_like",
"(",
"mapper",
")",
")",
"if",
"non_mapper",
":",
"return",
"self",
".",
"_set_axis_name",
"(",
"mapper",
",",
"axis",
"=",
"axis",
",",
"inplace",
"=",
"inplace",
")",
"else",
":",
"# Deprecated (v0.21) behavior is if mapper is specified,",
"# and not a list or scalar, then call rename",
"msg",
"=",
"(",
"\"Using 'rename_axis' to alter labels is deprecated. \"",
"\"Use '.rename' instead\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"3",
")",
"axis",
"=",
"self",
".",
"_get_axis_name",
"(",
"axis",
")",
"d",
"=",
"{",
"'copy'",
":",
"copy",
",",
"'inplace'",
":",
"inplace",
"}",
"d",
"[",
"axis",
"]",
"=",
"mapper",
"return",
"self",
".",
"rename",
"(",
"*",
"*",
"d",
")",
"else",
":",
"# Use new behavior. Means that index and/or columns",
"# is specified",
"result",
"=",
"self",
"if",
"inplace",
"else",
"self",
".",
"copy",
"(",
"deep",
"=",
"copy",
")",
"for",
"axis",
"in",
"lrange",
"(",
"self",
".",
"_AXIS_LEN",
")",
":",
"v",
"=",
"axes",
".",
"get",
"(",
"self",
".",
"_AXIS_NAMES",
"[",
"axis",
"]",
")",
"if",
"v",
"is",
"sentinel",
":",
"continue",
"non_mapper",
"=",
"is_scalar",
"(",
"v",
")",
"or",
"(",
"is_list_like",
"(",
"v",
")",
"and",
"not",
"is_dict_like",
"(",
"v",
")",
")",
"if",
"non_mapper",
":",
"newnames",
"=",
"v",
"else",
":",
"f",
"=",
"com",
".",
"_get_rename_function",
"(",
"v",
")",
"curnames",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
".",
"names",
"newnames",
"=",
"[",
"f",
"(",
"name",
")",
"for",
"name",
"in",
"curnames",
"]",
"result",
".",
"_set_axis_name",
"(",
"newnames",
",",
"axis",
"=",
"axis",
",",
"inplace",
"=",
"True",
")",
"if",
"not",
"inplace",
":",
"return",
"result"
] |
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or function transformation to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of
the corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
|
[
"Set",
"the",
"name",
"of",
"the",
"axis",
"for",
"the",
"index",
"or",
"columns",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1133-L1312
|
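A short sketch contrasting the two rename_axis calling conventions described above; the frame and the name 'animal' are illustrative assumptions.

import pandas as pd

df = pd.DataFrame({'num_legs': [4, 2]}, index=['dog', 'monkey'])

# Keyword convention: set the index name directly.
a = df.rename_axis(index='animal')

# Positional convention: mapper plus axis, same effect for a scalar mapper.
b = df.rename_axis('animal', axis='index')

assert a.index.name == b.index.name == 'animal'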
20,170
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.equals
|
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Return True if left and right Series are equal,
False otherwise.
assert_frame_equal : Return True if left and right DataFrames are
equal, False otherwise.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have the same values but
different types for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
|
python
|
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Return True if left and right Series are equal,
False otherwise.
assert_frame_equal : Return True if left and right DataFrames are
equal, False otherwise.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have the same values but
different types for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
|
[
"def",
"equals",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"self",
".",
"_constructor",
")",
":",
"return",
"False",
"return",
"self",
".",
"_data",
".",
"equals",
"(",
"other",
".",
"_data",
")"
] |
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Return True if left and right Series are equal,
False otherwise.
assert_frame_equal : Return True if left and right DataFrames are
equal, False otherwise.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have the same values but
different types for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
|
[
"Test",
"whether",
"two",
"objects",
"contain",
"the",
"same",
"elements",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1382-L1466
|
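A brief sketch of the NaN and dtype behavior documented above; the frames are illustrative assumptions.

import numpy as np
import pandas as pd

left = pd.DataFrame({'a': [1.0, np.nan]})
right = pd.DataFrame({'a': [1.0, np.nan]})

# NaNs in matching positions compare equal under equals().
print(left.equals(right))  # True

# Element dtypes must match: float64 versus object fails.
print(left.equals(right.astype(object)))  # False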
20,171
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.bool
|
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__()
|
python
|
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__()
|
[
"def",
"bool",
"(",
"self",
")",
":",
"v",
"=",
"self",
".",
"squeeze",
"(",
")",
"if",
"isinstance",
"(",
"v",
",",
"(",
"bool",
",",
"np",
".",
"bool_",
")",
")",
":",
"return",
"bool",
"(",
"v",
")",
"elif",
"is_scalar",
"(",
"v",
")",
":",
"raise",
"ValueError",
"(",
"\"bool cannot act on a non-boolean single element \"",
"\"{0}\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
")",
"self",
".",
"__nonzero__",
"(",
")"
] |
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean.
|
[
"Return",
"the",
"bool",
"of",
"a",
"single",
"element",
"PandasObject",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1514-L1529
|
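A quick sketch of the documented behavior; the Series contents are illustrative, and bool() here is the NDFrame method at this pinned revision, not the builtin.

import pandas as pd

print(pd.Series([True]).bool())   # True
print(pd.Series([False]).bool())  # False

# A single non-boolean element raises ValueError.
try:
    pd.Series([1]).bool()
except ValueError as exc:
    print(exc)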
20,172
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._is_level_reference
|
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_is_level_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
not self._is_label_reference(key, axis=axis))
|
python
|
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_is_level_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
not self._is_label_reference(key, axis=axis))
|
[
"def",
"_is_level_reference",
"(",
"self",
",",
"key",
",",
"axis",
"=",
"0",
")",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"self",
".",
"ndim",
">",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"_is_level_reference is not implemented for {type}\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"self",
")",
")",
")",
"return",
"(",
"key",
"is",
"not",
"None",
"and",
"is_hashable",
"(",
"key",
")",
"and",
"key",
"in",
"self",
".",
"axes",
"[",
"axis",
"]",
".",
"names",
"and",
"not",
"self",
".",
"_is_label_reference",
"(",
"key",
",",
"axis",
"=",
"axis",
")",
")"
] |
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
|
[
"Test",
"whether",
"a",
"key",
"is",
"a",
"level",
"reference",
"for",
"a",
"given",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1545-L1576
|
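A sketch exercising this helper at the pinned revision; it is a private method, so the calls below are illustrative rather than public API, and the level names are assumptions.

import pandas as pd

idx = pd.MultiIndex.from_product([['x', 'y'], [1, 2]], names=['L1', 'L2'])
df = pd.DataFrame({'A': range(4)}, index=idx)

# 'L1' names an index level and does not collide with a column label.
print(df._is_level_reference('L1'))  # True

# 'A' is a column label, not an index level, so it is not a level reference.
print(df._is_level_reference('A'))   # False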
20,173
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._is_label_reference
|
def _is_label_reference(self, key, axis=0):
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
if self.ndim > 2:
raise NotImplementedError(
"_is_label_reference is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (key is not None and
is_hashable(key) and
any(key in self.axes[ax] for ax in other_axes))
|
python
|
def _is_label_reference(self, key, axis=0):
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
if self.ndim > 2:
raise NotImplementedError(
"_is_label_reference is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (key is not None and
is_hashable(key) and
any(key in self.axes[ax] for ax in other_axes))
|
[
"def",
"_is_label_reference",
"(",
"self",
",",
"key",
",",
"axis",
"=",
"0",
")",
":",
"if",
"self",
".",
"ndim",
">",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"_is_label_reference is not implemented for {type}\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"self",
")",
")",
")",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"other_axes",
"=",
"(",
"ax",
"for",
"ax",
"in",
"range",
"(",
"self",
".",
"_AXIS_LEN",
")",
"if",
"ax",
"!=",
"axis",
")",
"return",
"(",
"key",
"is",
"not",
"None",
"and",
"is_hashable",
"(",
"key",
")",
"and",
"any",
"(",
"key",
"in",
"self",
".",
"axes",
"[",
"ax",
"]",
"for",
"ax",
"in",
"other_axes",
")",
")"
] |
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
|
[
"Test",
"whether",
"a",
"key",
"is",
"a",
"label",
"reference",
"for",
"a",
"given",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1578-L1608
|
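A companion sketch for the label-reference check; again a private method at the pinned revision, with an illustrative frame.

import pandas as pd

idx = pd.Index(['r1', 'r2'], name='lvl')
df = pd.DataFrame({'A': [1, 2]}, index=idx)

# 'A' matches a column label, so it is a label reference on axis 0.
print(df._is_label_reference('A'))    # True

# 'lvl' only names an index level, so it is not a label reference.
print(df._is_label_reference('lvl'))  # False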
20,174
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._is_label_or_level_reference
|
def _is_label_or_level_reference(self, key, axis=0):
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
if self.ndim > 2:
raise NotImplementedError(
"_is_label_or_level_reference is not implemented for {type}"
.format(type=type(self)))
return (self._is_level_reference(key, axis=axis) or
self._is_label_reference(key, axis=axis))
|
python
|
def _is_label_or_level_reference(self, key, axis=0):
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
if self.ndim > 2:
raise NotImplementedError(
"_is_label_or_level_reference is not implemented for {type}"
.format(type=type(self)))
return (self._is_level_reference(key, axis=axis) or
self._is_label_reference(key, axis=axis))
|
[
"def",
"_is_label_or_level_reference",
"(",
"self",
",",
"key",
",",
"axis",
"=",
"0",
")",
":",
"if",
"self",
".",
"ndim",
">",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"_is_label_or_level_reference is not implemented for {type}\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"self",
")",
")",
")",
"return",
"(",
"self",
".",
"_is_level_reference",
"(",
"key",
",",
"axis",
"=",
"axis",
")",
"or",
"self",
".",
"_is_label_reference",
"(",
"key",
",",
"axis",
"=",
"axis",
")",
")"
] |
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
|
[
"Test",
"whether",
"a",
"key",
"is",
"a",
"label",
"or",
"level",
"reference",
"for",
"a",
"given",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1610-L1637
|
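A sketch combining the two previous checks, as this private helper does at the pinned revision; the frame is illustrative.

import pandas as pd

df = pd.DataFrame({'A': [1, 2]},
                  index=pd.Index(['r1', 'r2'], name='lvl'))

print(df._is_label_or_level_reference('A'))      # True (column label)
print(df._is_label_or_level_reference('lvl'))    # True (index level)
print(df._is_label_or_level_reference('other'))  # False (neither)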
20,175
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._check_label_or_level_ambiguity
|
def _check_label_or_level_ambiguity(self, key, axis=0):
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
"""
if self.ndim > 2:
raise NotImplementedError(
"_check_label_or_level_ambiguity is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
any(key in self.axes[ax] for ax in other_axes)):
# Build an informative and grammatical error message
level_article, level_type = (('an', 'index')
if axis == 0 else
('a', 'column'))
label_article, label_type = (('a', 'column')
if axis == 0 else
('an', 'index'))
msg = ("'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label, which is ambiguous."
).format(key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type)
raise ValueError(msg)
|
python
|
def _check_label_or_level_ambiguity(self, key, axis=0):
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
"""
if self.ndim > 2:
raise NotImplementedError(
"_check_label_or_level_ambiguity is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
any(key in self.axes[ax] for ax in other_axes)):
# Build an informative and grammatical error message
level_article, level_type = (('an', 'index')
if axis == 0 else
('a', 'column'))
label_article, label_type = (('a', 'column')
if axis == 0 else
('an', 'index'))
msg = ("'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label, which is ambiguous."
).format(key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type)
raise ValueError(msg)
|
[
"def",
"_check_label_or_level_ambiguity",
"(",
"self",
",",
"key",
",",
"axis",
"=",
"0",
")",
":",
"if",
"self",
".",
"ndim",
">",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"_check_label_or_level_ambiguity is not implemented for {type}\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"self",
")",
")",
")",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"other_axes",
"=",
"(",
"ax",
"for",
"ax",
"in",
"range",
"(",
"self",
".",
"_AXIS_LEN",
")",
"if",
"ax",
"!=",
"axis",
")",
"if",
"(",
"key",
"is",
"not",
"None",
"and",
"is_hashable",
"(",
"key",
")",
"and",
"key",
"in",
"self",
".",
"axes",
"[",
"axis",
"]",
".",
"names",
"and",
"any",
"(",
"key",
"in",
"self",
".",
"axes",
"[",
"ax",
"]",
"for",
"ax",
"in",
"other_axes",
")",
")",
":",
"# Build an informative and grammatical warning",
"level_article",
",",
"level_type",
"=",
"(",
"(",
"'an'",
",",
"'index'",
")",
"if",
"axis",
"==",
"0",
"else",
"(",
"'a'",
",",
"'column'",
")",
")",
"label_article",
",",
"label_type",
"=",
"(",
"(",
"'a'",
",",
"'column'",
")",
"if",
"axis",
"==",
"0",
"else",
"(",
"'an'",
",",
"'index'",
")",
")",
"msg",
"=",
"(",
"\"'{key}' is both {level_article} {level_type} level and \"",
"\"{label_article} {label_type} label, which is ambiguous.\"",
")",
".",
"format",
"(",
"key",
"=",
"key",
",",
"level_article",
"=",
"level_article",
",",
"level_type",
"=",
"level_type",
",",
"label_article",
"=",
"label_article",
",",
"label_type",
"=",
"label_type",
")",
"raise",
"ValueError",
"(",
"msg",
")"
] |
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
|
[
"Check",
"whether",
"key",
"is",
"ambiguous",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1639-L1686
|
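A sketch that manufactures the ambiguity this private helper guards against; the frame and the shared name 'a' are illustrative assumptions.

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
df.index.name = 'a'  # 'a' is now both an index level name and a column label

try:
    df._check_label_or_level_ambiguity('a')
except ValueError as exc:
    print(exc)  # "'a' is both an index level and a column label, ..."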
20,176
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._get_label_or_level_values
|
def _get_label_or_level_values(self, key, axis=0):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if `key` matches a column
level.
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
if self.ndim > 2:
raise NotImplementedError(
"_get_label_or_level_values is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(
self._get_axis(other_axes[0]), MultiIndex):
multi_message = ('\n'
'For a multi-index, the label must be a '
'tuple with elements corresponding to '
'each level.')
else:
multi_message = ''
label_axis_name = 'column' if axis == 0 else 'index'
raise ValueError(("The {label_axis_name} label '{key}' "
"is not unique.{multi_message}")
.format(key=key,
label_axis_name=label_axis_name,
multi_message=multi_message))
return values
|
python
|
def _get_label_or_level_values(self, key, axis=0):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if `key` matches a column
level.
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
if self.ndim > 2:
raise NotImplementedError(
"_get_label_or_level_values is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(
self._get_axis(other_axes[0]), MultiIndex):
multi_message = ('\n'
'For a multi-index, the label must be a '
'tuple with elements corresponding to '
'each level.')
else:
multi_message = ''
label_axis_name = 'column' if axis == 0 else 'index'
raise ValueError(("The {label_axis_name} label '{key}' "
"is not unique.{multi_message}")
.format(key=key,
label_axis_name=label_axis_name,
multi_message=multi_message))
return values
|
[
"def",
"_get_label_or_level_values",
"(",
"self",
",",
"key",
",",
"axis",
"=",
"0",
")",
":",
"if",
"self",
".",
"ndim",
">",
"2",
":",
"raise",
"NotImplementedError",
"(",
"\"_get_label_or_level_values is not implemented for {type}\"",
".",
"format",
"(",
"type",
"=",
"type",
"(",
"self",
")",
")",
")",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"other_axes",
"=",
"[",
"ax",
"for",
"ax",
"in",
"range",
"(",
"self",
".",
"_AXIS_LEN",
")",
"if",
"ax",
"!=",
"axis",
"]",
"if",
"self",
".",
"_is_label_reference",
"(",
"key",
",",
"axis",
"=",
"axis",
")",
":",
"self",
".",
"_check_label_or_level_ambiguity",
"(",
"key",
",",
"axis",
"=",
"axis",
")",
"values",
"=",
"self",
".",
"xs",
"(",
"key",
",",
"axis",
"=",
"other_axes",
"[",
"0",
"]",
")",
".",
"_values",
"elif",
"self",
".",
"_is_level_reference",
"(",
"key",
",",
"axis",
"=",
"axis",
")",
":",
"values",
"=",
"self",
".",
"axes",
"[",
"axis",
"]",
".",
"get_level_values",
"(",
"key",
")",
".",
"_values",
"else",
":",
"raise",
"KeyError",
"(",
"key",
")",
"# Check for duplicates",
"if",
"values",
".",
"ndim",
">",
"1",
":",
"if",
"other_axes",
"and",
"isinstance",
"(",
"self",
".",
"_get_axis",
"(",
"other_axes",
"[",
"0",
"]",
")",
",",
"MultiIndex",
")",
":",
"multi_message",
"=",
"(",
"'\\n'",
"'For a multi-index, the label must be a '",
"'tuple with elements corresponding to '",
"'each level.'",
")",
"else",
":",
"multi_message",
"=",
"''",
"label_axis_name",
"=",
"'column'",
"if",
"axis",
"==",
"0",
"else",
"'index'",
"raise",
"ValueError",
"(",
"(",
"\"The {label_axis_name} label '{key}' \"",
"\"is not unique.{multi_message}\"",
")",
".",
"format",
"(",
"key",
"=",
"key",
",",
"label_axis_name",
"=",
"label_axis_name",
",",
"multi_message",
"=",
"multi_message",
")",
")",
"return",
"values"
] |
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if `key` matches a column
level.
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
|
[
"Return",
"a",
"1",
"-",
"D",
"array",
"of",
"values",
"associated",
"with",
"key",
"a",
"label",
"or",
"level",
"from",
"the",
"given",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1688-L1757
|
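A sketch of the retrieval logic at the pinned revision; the column and level names are illustrative, and the method is private.

import pandas as pd

df = pd.DataFrame({'col': [10, 20]},
                  index=pd.Index(['r1', 'r2'], name='lvl'))

# A column label resolves to that column's values.
print(df._get_label_or_level_values('col'))  # [10 20]

# An index level name resolves to that level's values.
print(df._get_label_or_level_values('lvl'))  # ['r1' 'r2']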
20,177
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.empty
|
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True; if not, return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
|
python
|
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True; if not, return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
|
[
"def",
"empty",
"(",
"self",
")",
":",
"return",
"any",
"(",
"len",
"(",
"self",
".",
"_get_axis",
"(",
"a",
")",
")",
"==",
"0",
"for",
"a",
"in",
"self",
".",
"_AXIS_ORDERS",
")"
] |
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True; if not, return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
|
[
"Indicator",
"whether",
"DataFrame",
"is",
"empty",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L1878-L1924
|
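A compact sketch of the all-NaN caveat noted above; the frame is illustrative.

import numpy as np
import pandas as pd

print(pd.DataFrame().empty)   # True: no rows and no columns
df = pd.DataFrame({'A': [np.nan]})
print(df.empty)               # False: one (all-NaN) row exists
print(df.dropna().empty)      # True once the NaN row is dropped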
20,178
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._repr_data_resource_
|
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option('display.max_rows'))
payload = json.loads(data.to_json(orient='table'),
object_pairs_hook=collections.OrderedDict)
return payload
|
python
|
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option('display.max_rows'))
payload = json.loads(data.to_json(orient='table'),
object_pairs_hook=collections.OrderedDict)
return payload
|
[
"def",
"_repr_data_resource_",
"(",
"self",
")",
":",
"if",
"config",
".",
"get_option",
"(",
"\"display.html.table_schema\"",
")",
":",
"data",
"=",
"self",
".",
"head",
"(",
"config",
".",
"get_option",
"(",
"'display.max_rows'",
")",
")",
"payload",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"to_json",
"(",
"orient",
"=",
"'table'",
")",
",",
"object_pairs_hook",
"=",
"collections",
".",
"OrderedDict",
")",
"return",
"payload"
] |
Not a real Jupyter special repr method, but we use the same
naming convention.
|
[
"Not",
"a",
"real",
"Jupyter",
"special",
"repr",
"method",
"but",
"we",
"use",
"the",
"same",
"naming",
"convention",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2021-L2030
|
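A sketch of the option-gated behavior; toggling display.html.table_schema is shown explicitly because this private method returns None when the option is off.

import pandas as pd

pd.set_option('display.html.table_schema', True)
payload = pd.DataFrame({'A': [1, 2]})._repr_data_resource_()

# The payload is a Table Schema mapping with 'schema' and 'data' keys.
print(sorted(payload))  # ['data', 'schema']

pd.set_option('display.html.table_schema', False)  # restore the default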
20,179
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.to_json
|
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression='infer',
index=True):
"""
Convert the object to a JSON string.
Note NaNs and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : string or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : string
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records', write out line-delimited JSON format.
Raises ValueError for any other 'orient', since those formats are
not list-like.
.. versionadded:: 0.19.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
See Also
--------
read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == 'table':
date_format = 'iso'
elif date_format is None:
date_format = 'epoch'
return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
lines=lines, compression=compression,
index=index)
|
python
|
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression='infer',
index=True):
"""
Convert the object to a JSON string.
Note NaNs and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : string or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : string
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records', write out line-delimited JSON format.
Raises ValueError for any other 'orient', since those formats are
not list-like.
.. versionadded:: 0.19.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
See Also
--------
read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == 'table':
date_format = 'iso'
elif date_format is None:
date_format = 'epoch'
return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
lines=lines, compression=compression,
index=index)
|
[
"def",
"to_json",
"(",
"self",
",",
"path_or_buf",
"=",
"None",
",",
"orient",
"=",
"None",
",",
"date_format",
"=",
"None",
",",
"double_precision",
"=",
"10",
",",
"force_ascii",
"=",
"True",
",",
"date_unit",
"=",
"'ms'",
",",
"default_handler",
"=",
"None",
",",
"lines",
"=",
"False",
",",
"compression",
"=",
"'infer'",
",",
"index",
"=",
"True",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"json",
"if",
"date_format",
"is",
"None",
"and",
"orient",
"==",
"'table'",
":",
"date_format",
"=",
"'iso'",
"elif",
"date_format",
"is",
"None",
":",
"date_format",
"=",
"'epoch'",
"return",
"json",
".",
"to_json",
"(",
"path_or_buf",
"=",
"path_or_buf",
",",
"obj",
"=",
"self",
",",
"orient",
"=",
"orient",
",",
"date_format",
"=",
"date_format",
",",
"double_precision",
"=",
"double_precision",
",",
"force_ascii",
"=",
"force_ascii",
",",
"date_unit",
"=",
"date_unit",
",",
"default_handler",
"=",
"default_handler",
",",
"lines",
"=",
"lines",
",",
"compression",
"=",
"compression",
",",
"index",
"=",
"index",
")"
] |
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : string or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : string
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
    If 'orient' is 'records', write out line-delimited JSON format.
    Raises ValueError for any other 'orient', since the others are
    not list-like.
.. versionadded:: 0.19.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
See Also
--------
read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
|
[
"Convert",
"the",
"object",
"to",
"a",
"JSON",
"string",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2158-L2302
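A minimal usage sketch of the line-delimited mode described in the docstring above; it is only valid together with ``orient='records'``:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
# lines=True emits one JSON object per row, newline separated
print(df.to_json(orient='records', lines=True))
# {"a":1,"b":3}
# {"a":2,"b":4}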
|
20,180
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.to_hdf
|
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If true, ALL nan rows will not be written to store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
        read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
|
python
|
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If true, ALL nan rows will not be written to store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
        read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
|
[
"def",
"to_hdf",
"(",
"self",
",",
"path_or_buf",
",",
"key",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"pytables",
"return",
"pytables",
".",
"to_hdf",
"(",
"path_or_buf",
",",
"key",
",",
"self",
",",
"*",
"*",
"kwargs",
")"
] |
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If true, ALL nan rows will not be written to store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
|
[
"Write",
"the",
"contained",
"data",
"to",
"an",
"HDF5",
"file",
"using",
"HDFStore",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2304-L2406
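A minimal sketch of the append workflow described above (assumes PyTables is installed; 'data.h5' and key 'df' are illustrative names). Appending requires the table format, since fixed-format nodes are not appendable:

import pandas as pd

df1 = pd.DataFrame({'A': [1, 2]})
df2 = pd.DataFrame({'A': [3, 4]})
df1.to_hdf('data.h5', key='df', mode='w', format='table')
df2.to_hdf('data.h5', key='df', append=True)  # appends to the table node
print(pd.read_hdf('data.h5', 'df'))  # four rows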
|
20,181
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.to_msgpack
|
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
"""
Serialize object to input file path using msgpack format.
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path : string File path, buffer-like, or None
if None, return generated string
append : bool whether to append to an existing msgpack
(default is False)
        compress : type of compressor (zlib or blosc), defaults to None
            (no compression)
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding,
**kwargs)
|
python
|
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
"""
Serialize object to input file path using msgpack format.
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path : string File path, buffer-like, or None
if None, return generated string
append : bool whether to append to an existing msgpack
(default is False)
        compress : type of compressor (zlib or blosc), defaults to None
            (no compression)
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding,
**kwargs)
|
[
"def",
"to_msgpack",
"(",
"self",
",",
"path_or_buf",
"=",
"None",
",",
"encoding",
"=",
"'utf-8'",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"packers",
"return",
"packers",
".",
"to_msgpack",
"(",
"path_or_buf",
",",
"self",
",",
"encoding",
"=",
"encoding",
",",
"*",
"*",
"kwargs",
")"
] |
Serialize object to input file path using msgpack format.
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path : string File path, buffer-like, or None
if None, return generated string
append : bool whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), defaults to None
    (no compression)
|
[
"Serialize",
"object",
"to",
"input",
"file",
"path",
"using",
"msgpack",
"format",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2408-L2427
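A minimal round-trip sketch; ``pd.read_msgpack`` is the matching reader at this pandas version (the format is experimental, as the docstring warns):

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
packed = df.to_msgpack()  # path_or_buf=None returns the packed bytes
restored = pd.read_msgpack(packed)
print(restored.equals(df))  # True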
|
20,182
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.to_clipboard
|
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
        it to False.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
|
python
|
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
        it to False.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
|
[
"def",
"to_clipboard",
"(",
"self",
",",
"excel",
"=",
"True",
",",
"sep",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"clipboards",
"clipboards",
".",
"to_clipboard",
"(",
"self",
",",
"excel",
"=",
"excel",
",",
"sep",
"=",
"sep",
",",
"*",
"*",
"kwargs",
")"
] |
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to False.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
|
[
"r",
"Copy",
"object",
"to",
"the",
"system",
"clipboard",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2621-L2676
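A minimal sketch of the ``excel=False`` branch, which copies the plain string representation instead of CSV text (requires a clipboard backend, e.g. xclip or xsel on Linux):

import pandas as pd

df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
df.to_clipboard(excel=False)
# clipboard now holds df's string repr rather than delimited text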
|
20,183
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.to_xarray
|
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}).set_index(['date',
... 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
try:
import xarray
except ImportError:
# Give a nice error message
raise ImportError("the xarray library is not installed\n"
"you can install via conda\n"
"conda install xarray\n"
"or via pip\n"
"pip install xarray\n")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
elif self.ndim == 2:
return xarray.Dataset.from_dataframe(self)
# > 2 dims
coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]
return xarray.DataArray(self,
coords=coords,
)
|
python
|
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}).set_index(['date',
... 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
try:
import xarray
except ImportError:
# Give a nice error message
raise ImportError("the xarray library is not installed\n"
"you can install via conda\n"
"conda install xarray\n"
"or via pip\n"
"pip install xarray\n")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
elif self.ndim == 2:
return xarray.Dataset.from_dataframe(self)
# > 2 dims
coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]
return xarray.DataArray(self,
coords=coords,
)
|
[
"def",
"to_xarray",
"(",
"self",
")",
":",
"try",
":",
"import",
"xarray",
"except",
"ImportError",
":",
"# Give a nice error message",
"raise",
"ImportError",
"(",
"\"the xarray library is not installed\\n\"",
"\"you can install via conda\\n\"",
"\"conda install xarray\\n\"",
"\"or via pip\\n\"",
"\"pip install xarray\\n\"",
")",
"if",
"self",
".",
"ndim",
"==",
"1",
":",
"return",
"xarray",
".",
"DataArray",
".",
"from_series",
"(",
"self",
")",
"elif",
"self",
".",
"ndim",
"==",
"2",
":",
"return",
"xarray",
".",
"Dataset",
".",
"from_dataframe",
"(",
"self",
")",
"# > 2 dims",
"coords",
"=",
"[",
"(",
"a",
",",
"self",
".",
"_get_axis",
"(",
"a",
")",
")",
"for",
"a",
"in",
"self",
".",
"_AXIS_ORDERS",
"]",
"return",
"xarray",
".",
"DataArray",
"(",
"self",
",",
"coords",
"=",
"coords",
",",
")"
] |
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}).set_index(['date',
... 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
|
[
"Return",
"an",
"xarray",
"object",
"from",
"the",
"pandas",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2678-L2773
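A minimal round-trip sketch, assuming xarray is installed; for a simple frame with a named index, ``Dataset.to_dataframe`` recovers the original:

import pandas as pd

df = pd.DataFrame({'x': [1, 2]},
                  index=pd.Index(['a', 'b'], name='key'))
ds = df.to_xarray()  # xarray.Dataset with dimension 'key'
print(ds.to_dataframe().equals(df))  # True for this simple case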
|
20,184
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.to_latex
|
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
            module. When set to False, prevents escaping of LaTeX special
            characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
.. versionadded:: 0.20.0
Returns
-------
str or None
            If buf is None, returns the resulting LaTeX format as a
string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option(
"display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
|
python
|
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
            module. When set to False, prevents escaping of LaTeX special
            characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
.. versionadded:: 0.20.0
Returns
-------
str or None
            If buf is None, returns the resulting LaTeX format as a
string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option(
"display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
|
[
"def",
"to_latex",
"(",
"self",
",",
"buf",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"col_space",
"=",
"None",
",",
"header",
"=",
"True",
",",
"index",
"=",
"True",
",",
"na_rep",
"=",
"'NaN'",
",",
"formatters",
"=",
"None",
",",
"float_format",
"=",
"None",
",",
"sparsify",
"=",
"None",
",",
"index_names",
"=",
"True",
",",
"bold_rows",
"=",
"False",
",",
"column_format",
"=",
"None",
",",
"longtable",
"=",
"None",
",",
"escape",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"decimal",
"=",
"'.'",
",",
"multicolumn",
"=",
"None",
",",
"multicolumn_format",
"=",
"None",
",",
"multirow",
"=",
"None",
")",
":",
"# Get defaults from the pandas config",
"if",
"self",
".",
"ndim",
"==",
"1",
":",
"self",
"=",
"self",
".",
"to_frame",
"(",
")",
"if",
"longtable",
"is",
"None",
":",
"longtable",
"=",
"config",
".",
"get_option",
"(",
"\"display.latex.longtable\"",
")",
"if",
"escape",
"is",
"None",
":",
"escape",
"=",
"config",
".",
"get_option",
"(",
"\"display.latex.escape\"",
")",
"if",
"multicolumn",
"is",
"None",
":",
"multicolumn",
"=",
"config",
".",
"get_option",
"(",
"\"display.latex.multicolumn\"",
")",
"if",
"multicolumn_format",
"is",
"None",
":",
"multicolumn_format",
"=",
"config",
".",
"get_option",
"(",
"\"display.latex.multicolumn_format\"",
")",
"if",
"multirow",
"is",
"None",
":",
"multirow",
"=",
"config",
".",
"get_option",
"(",
"\"display.latex.multirow\"",
")",
"formatter",
"=",
"DataFrameFormatter",
"(",
"self",
",",
"buf",
"=",
"buf",
",",
"columns",
"=",
"columns",
",",
"col_space",
"=",
"col_space",
",",
"na_rep",
"=",
"na_rep",
",",
"header",
"=",
"header",
",",
"index",
"=",
"index",
",",
"formatters",
"=",
"formatters",
",",
"float_format",
"=",
"float_format",
",",
"bold_rows",
"=",
"bold_rows",
",",
"sparsify",
"=",
"sparsify",
",",
"index_names",
"=",
"index_names",
",",
"escape",
"=",
"escape",
",",
"decimal",
"=",
"decimal",
")",
"formatter",
".",
"to_latex",
"(",
"column_format",
"=",
"column_format",
",",
"longtable",
"=",
"longtable",
",",
"encoding",
"=",
"encoding",
",",
"multicolumn",
"=",
"multicolumn",
",",
"multicolumn_format",
"=",
"multicolumn_format",
",",
"multirow",
"=",
"multirow",
")",
"if",
"buf",
"is",
"None",
":",
"return",
"formatter",
".",
"buf",
".",
"getvalue",
"(",
")"
] |
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
    module. When set to False, prevents escaping of LaTeX special
    characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
.. versionadded:: 0.20.0
Returns
-------
str or None
    If buf is None, returns the resulting LaTeX format as a
string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
|
[
"r",
"Render",
"an",
"object",
"to",
"a",
"LaTeX",
"tabular",
"environment",
"table",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L2775-L2910
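A minimal sketch of writing to a file handle instead of returning a string ('table.tex' is an illustrative name); with ``buf`` given, the method returns None:

import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'value': [1.5, 2.25]})
with open('table.tex', 'w') as fh:
    df.to_latex(buf=fh, index=False, float_format='%.2f')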
|
20,185
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._create_indexer
|
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
|
python
|
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
|
[
"def",
"_create_indexer",
"(",
"cls",
",",
"name",
",",
"indexer",
")",
":",
"if",
"getattr",
"(",
"cls",
",",
"name",
",",
"None",
")",
"is",
"None",
":",
"_indexer",
"=",
"functools",
".",
"partial",
"(",
"indexer",
",",
"name",
")",
"setattr",
"(",
"cls",
",",
"name",
",",
"property",
"(",
"_indexer",
",",
"doc",
"=",
"indexer",
".",
"__doc__",
")",
")"
] |
Create an indexer like _name in the class.
|
[
"Create",
"an",
"indexer",
"like",
"_name",
"in",
"the",
"class",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3059-L3063
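A standalone sketch of the pattern (illustrative names, not pandas internals verbatim): ``functools.partial`` binds the indexer's name, and a property exposes it on the class only when the attribute is not already taken:

import functools

def _demo_indexer(name, obj):
    """Stand-in for the real indexer constructor."""
    return (name, type(obj).__name__)

class Frame:
    pass

# mirrors _create_indexer: install the property only if the name is unused
if getattr(Frame, 'demo', None) is None:
    _indexer = functools.partial(_demo_indexer, 'demo')
    setattr(Frame, 'demo', property(_indexer, doc=_demo_indexer.__doc__))

print(Frame().demo)  # ('demo', 'Frame')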
|
20,186
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._get_item_cache
|
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
|
python
|
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
|
[
"def",
"_get_item_cache",
"(",
"self",
",",
"item",
")",
":",
"cache",
"=",
"self",
".",
"_item_cache",
"res",
"=",
"cache",
".",
"get",
"(",
"item",
")",
"if",
"res",
"is",
"None",
":",
"values",
"=",
"self",
".",
"_data",
".",
"get",
"(",
"item",
")",
"res",
"=",
"self",
".",
"_box_item_values",
"(",
"item",
",",
"values",
")",
"cache",
"[",
"item",
"]",
"=",
"res",
"res",
".",
"_set_as_cached",
"(",
"item",
",",
"self",
")",
"# for a chain",
"res",
".",
"_is_copy",
"=",
"self",
".",
"_is_copy",
"return",
"res"
] |
Return the cached item, item represents a label indexer.
|
[
"Return",
"the",
"cached",
"item",
"item",
"represents",
"a",
"label",
"indexer",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3086-L3098
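The observable effect of the item cache, as a minimal sketch under this pandas version: repeating the same label lookup returns the very same Series object:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
s1 = df['a']  # populates _item_cache['a']
s2 = df['a']  # served from the cache
print(s1 is s2)  # True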
|
20,187
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._set_as_cached
|
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
|
python
|
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
|
[
"def",
"_set_as_cached",
"(",
"self",
",",
"item",
",",
"cacher",
")",
":",
"self",
".",
"_cacher",
"=",
"(",
"item",
",",
"weakref",
".",
"ref",
"(",
"cacher",
")",
")"
] |
Set the _cacher attribute on the calling object with a weakref to
cacher.
|
[
"Set",
"the",
"_cacher",
"attribute",
"on",
"the",
"calling",
"object",
"with",
"a",
"weakref",
"to",
"cacher",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3100-L3104
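A minimal sketch of the weakref pattern (illustrative classes): the child stores ``(label, weakref)`` so the parent stays reachable while alive but is not kept alive by the cache link:

import weakref

class Parent:
    pass

class Child:
    def _set_as_cached(self, item, cacher):
        self._cacher = (item, weakref.ref(cacher))

p, c = Parent(), Child()
c._set_as_cached('a', p)
print(c._cacher[1]() is p)  # True while p is alive
del p
print(c._cacher[1]())  # None once the referent is collected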
|
20,188
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._iget_item_cache
|
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take(item, axis=self._info_axis_number)
return lower
|
python
|
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take(item, axis=self._info_axis_number)
return lower
|
[
"def",
"_iget_item_cache",
"(",
"self",
",",
"item",
")",
":",
"ax",
"=",
"self",
".",
"_info_axis",
"if",
"ax",
".",
"is_unique",
":",
"lower",
"=",
"self",
".",
"_get_item_cache",
"(",
"ax",
"[",
"item",
"]",
")",
"else",
":",
"lower",
"=",
"self",
".",
"_take",
"(",
"item",
",",
"axis",
"=",
"self",
".",
"_info_axis_number",
")",
"return",
"lower"
] |
Return the cached item, item represents a positional indexer.
|
[
"Return",
"the",
"cached",
"item",
"item",
"represents",
"a",
"positional",
"indexer",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3111-L3118
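A minimal sketch of the dispatch described above: with a unique info axis, a positional lookup is translated to a label and can reuse the label cache; duplicate labels fall back to a positional take:

import pandas as pd

df = pd.DataFrame({'a': [1], 'b': [2]})
print(df.columns.is_unique)  # True -> label-cache path
print(df.iloc[:, 0].name)    # 'a', resolved via ax[item]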
|
20,189
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._maybe_update_cacher
|
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
        # we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except Exception:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
|
python
|
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
        # we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except Exception:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
|
[
"def",
"_maybe_update_cacher",
"(",
"self",
",",
"clear",
"=",
"False",
",",
"verify_is_copy",
"=",
"True",
")",
":",
"cacher",
"=",
"getattr",
"(",
"self",
",",
"'_cacher'",
",",
"None",
")",
"if",
"cacher",
"is",
"not",
"None",
":",
"ref",
"=",
"cacher",
"[",
"1",
"]",
"(",
")",
"# we are trying to reference a dead referant, hence",
"# a copy",
"if",
"ref",
"is",
"None",
":",
"del",
"self",
".",
"_cacher",
"else",
":",
"try",
":",
"ref",
".",
"_maybe_cache_changed",
"(",
"cacher",
"[",
"0",
"]",
",",
"self",
")",
"except",
"Exception",
":",
"pass",
"if",
"verify_is_copy",
":",
"self",
".",
"_check_setitem_copy",
"(",
"stacklevel",
"=",
"5",
",",
"t",
"=",
"'referant'",
")",
"if",
"clear",
":",
"self",
".",
"_clear_item_cache",
"(",
")"
] |
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
|
[
"See",
"if",
"we",
"need",
"to",
"update",
"our",
"parent",
"cacher",
"if",
"clear",
"then",
"clear",
"our",
"cache",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3145-L3177
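The behaviour this enables, as a minimal sketch under this (pre-copy-on-write) pandas: an in-place write to a cached child Series is propagated back to the parent frame:

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
s = df['a']      # cached child holding a weakref to df
s.iloc[0] = 99   # triggers the cacher update on write
print(df.loc[0, 'a'])  # 99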
|
20,190
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._slice
|
def _slice(self, slobj, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
    # but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
|
python
|
def _slice(self, slobj, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
    # but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
|
[
"def",
"_slice",
"(",
"self",
",",
"slobj",
",",
"axis",
"=",
"0",
",",
"kind",
"=",
"None",
")",
":",
"axis",
"=",
"self",
".",
"_get_block_manager_axis",
"(",
"axis",
")",
"result",
"=",
"self",
".",
"_constructor",
"(",
"self",
".",
"_data",
".",
"get_slice",
"(",
"slobj",
",",
"axis",
"=",
"axis",
")",
")",
"result",
"=",
"result",
".",
"__finalize__",
"(",
"self",
")",
"# this could be a view",
"# but only in a single-dtyped view slicable case",
"is_copy",
"=",
"axis",
"!=",
"0",
"or",
"result",
".",
"_is_view",
"result",
".",
"_set_is_copy",
"(",
"self",
",",
"copy",
"=",
"is_copy",
")",
"return",
"result"
] |
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
|
[
"Construct",
"a",
"slice",
"of",
"this",
"container",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3185-L3199
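A minimal sketch of the view bookkeeping: a row slice of a single-dtype frame is a view onto the same data, and the result is flagged so later writes can be checked:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
part = df[1:]          # routed through _slice
print(part._is_view)   # True for this single-dtype case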
|
20,191
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._check_is_chained_assignment_possible
|
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
    Should be called just prior to setting a value.
    Will return a boolean indicating whether we are a view and are
    cached, but of a single dtype, meaning that the cacher should be
    updated following the setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant',
force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False
|
python
|
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Will return a boolean if we are a view and are cached, but of a
single dtype, meaning that the cacher should be updated following
setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant',
force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False
|
[
"def",
"_check_is_chained_assignment_possible",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_view",
"and",
"self",
".",
"_is_cached",
":",
"ref",
"=",
"self",
".",
"_get_cacher",
"(",
")",
"if",
"ref",
"is",
"not",
"None",
"and",
"ref",
".",
"_is_mixed_type",
":",
"self",
".",
"_check_setitem_copy",
"(",
"stacklevel",
"=",
"4",
",",
"t",
"=",
"'referant'",
",",
"force",
"=",
"True",
")",
"return",
"True",
"elif",
"self",
".",
"_is_copy",
":",
"self",
".",
"_check_setitem_copy",
"(",
"stacklevel",
"=",
"4",
",",
"t",
"=",
"'referant'",
")",
"return",
"False"
] |
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Will return a boolean if we are a view and are cached, but of a
single dtype, meaning that the cacher should be updated following
setting.
|
[
"Check",
"if",
"we",
"are",
"a",
"view",
"have",
"a",
"cacher",
"and",
"are",
"of",
"mixed",
"type",
".",
"If",
"so",
"then",
"force",
"a",
"setitem_copy",
"check",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3214-L3233
|
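A hedged example of the chained assignment this check guards (column names and values are illustrative):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})  # mixed-type frame
df['a'][0] = 10      # chained assignment: the cached 'a' Series is a view on
                     # a mixed-type cacher, so a forced setitem_copy check fires
df.loc[0, 'a'] = 10  # the recommended single-step assignment instead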
20,192
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.select
|
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria.
.. deprecated:: 0.21.0
Use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : same type as caller
"""
warnings.warn("'select' is deprecated and will be removed in a "
"future release. You can use "
".loc[labels.map(crit)] as a replacement",
FutureWarning, stacklevel=2)
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis})
|
python
|
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria.
.. deprecated:: 0.21.0
Use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : same type as caller
"""
warnings.warn("'select' is deprecated and will be removed in a "
"future release. You can use "
".loc[labels.map(crit)] as a replacement",
FutureWarning, stacklevel=2)
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis})
|
[
"def",
"select",
"(",
"self",
",",
"crit",
",",
"axis",
"=",
"0",
")",
":",
"warnings",
".",
"warn",
"(",
"\"'select' is deprecated and will be removed in a \"",
"\"future release. You can use \"",
"\".loc[labels.map(crit)] as a replacement\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"axis_name",
"=",
"self",
".",
"_get_axis_name",
"(",
"axis",
")",
"axis_values",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
"if",
"len",
"(",
"axis_values",
")",
">",
"0",
":",
"new_axis",
"=",
"axis_values",
"[",
"np",
".",
"asarray",
"(",
"[",
"bool",
"(",
"crit",
"(",
"label",
")",
")",
"for",
"label",
"in",
"axis_values",
"]",
")",
"]",
"else",
":",
"new_axis",
"=",
"axis_values",
"return",
"self",
".",
"reindex",
"(",
"*",
"*",
"{",
"axis_name",
":",
"new_axis",
"}",
")"
] |
Return data corresponding to axis labels matching criteria.
.. deprecated:: 0.21.0
Use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : same type as caller
|
[
"Return",
"data",
"corresponding",
"to",
"axis",
"labels",
"matching",
"criteria",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3653-L3685
|
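A hedged sketch of the replacement recommended by the deprecation warning above (data is illustrative):

import pandas as pd

df = pd.DataFrame({'v': [1, 2, 3]}, index=['ant', 'bee', 'cat'])
# deprecated: df.select(lambda label: label.startswith('b'))
out = df.loc[df.index.map(lambda label: label.startswith('b'))]
print(out)  # keeps only the 'bee' row, matching the old crit semantics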
20,193
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.reindex_like
|
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d)
|
python
|
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d)
|
[
"def",
"reindex_like",
"(",
"self",
",",
"other",
",",
"method",
"=",
"None",
",",
"copy",
"=",
"True",
",",
"limit",
"=",
"None",
",",
"tolerance",
"=",
"None",
")",
":",
"d",
"=",
"other",
".",
"_construct_axes_dict",
"(",
"axes",
"=",
"self",
".",
"_AXIS_ORDERS",
",",
"method",
"=",
"method",
",",
"copy",
"=",
"copy",
",",
"limit",
"=",
"limit",
",",
"tolerance",
"=",
"tolerance",
")",
"return",
"self",
".",
"reindex",
"(",
"*",
"*",
"d",
")"
] |
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
|
[
"Return",
"an",
"object",
"with",
"matching",
"indices",
"as",
"other",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3687-L3787
|
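A minimal sketch of the equivalence stated in the Notes section above (frames are illustrative):

import pandas as pd

df1 = pd.DataFrame({'a': [1, 2]}, index=['x', 'y'])
df2 = pd.DataFrame({'a': [3.0], 'b': [4.0]}, index=['x'])
left = df2.reindex_like(df1)
right = df2.reindex(index=df1.index, columns=df1.columns)
assert left.equals(right)  # reindex_like forwards the other object's axes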
20,194
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._drop_axis
|
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress the error and drop only the labels that exist.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == 'raise' and labels_missing:
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
|
python
|
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress the error and drop only the labels that exist.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == 'raise' and labels_missing:
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
|
[
"def",
"_drop_axis",
"(",
"self",
",",
"labels",
",",
"axis",
",",
"level",
"=",
"None",
",",
"errors",
"=",
"'raise'",
")",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"axis_name",
"=",
"self",
".",
"_get_axis_name",
"(",
"axis",
")",
"axis",
"=",
"self",
".",
"_get_axis",
"(",
"axis",
")",
"if",
"axis",
".",
"is_unique",
":",
"if",
"level",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"axis",
",",
"MultiIndex",
")",
":",
"raise",
"AssertionError",
"(",
"'axis must be a MultiIndex'",
")",
"new_axis",
"=",
"axis",
".",
"drop",
"(",
"labels",
",",
"level",
"=",
"level",
",",
"errors",
"=",
"errors",
")",
"else",
":",
"new_axis",
"=",
"axis",
".",
"drop",
"(",
"labels",
",",
"errors",
"=",
"errors",
")",
"result",
"=",
"self",
".",
"reindex",
"(",
"*",
"*",
"{",
"axis_name",
":",
"new_axis",
"}",
")",
"# Case for non-unique axis",
"else",
":",
"labels",
"=",
"ensure_object",
"(",
"com",
".",
"index_labels_to_array",
"(",
"labels",
")",
")",
"if",
"level",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"axis",
",",
"MultiIndex",
")",
":",
"raise",
"AssertionError",
"(",
"'axis must be a MultiIndex'",
")",
"indexer",
"=",
"~",
"axis",
".",
"get_level_values",
"(",
"level",
")",
".",
"isin",
"(",
"labels",
")",
"# GH 18561 MultiIndex.drop should raise if label is absent",
"if",
"errors",
"==",
"'raise'",
"and",
"indexer",
".",
"all",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'{} not found in axis'",
".",
"format",
"(",
"labels",
")",
")",
"else",
":",
"indexer",
"=",
"~",
"axis",
".",
"isin",
"(",
"labels",
")",
"# Check if label doesn't exist along axis",
"labels_missing",
"=",
"(",
"axis",
".",
"get_indexer_for",
"(",
"labels",
")",
"==",
"-",
"1",
")",
".",
"any",
"(",
")",
"if",
"errors",
"==",
"'raise'",
"and",
"labels_missing",
":",
"raise",
"KeyError",
"(",
"'{} not found in axis'",
".",
"format",
"(",
"labels",
")",
")",
"slicer",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"self",
".",
"ndim",
"slicer",
"[",
"self",
".",
"_get_axis_number",
"(",
"axis_name",
")",
"]",
"=",
"indexer",
"result",
"=",
"self",
".",
"loc",
"[",
"tuple",
"(",
"slicer",
")",
"]",
"return",
"result"
] |
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress the error and drop only the labels that exist.
|
[
"Drop",
"labels",
"from",
"specified",
"axis",
".",
"Used",
"in",
"the",
"drop",
"method",
"internally",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3817-L3868
|
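A hedged sketch of the errors handling above, exercised through the public drop method (labels and values are illustrative):

import pandas as pd

df = pd.DataFrame({'a': [1, 2]}, index=['x', 'x'])  # non-unique index
df.drop('x')                   # non-unique path: a boolean mask drops both rows
df.drop('z', errors='ignore')  # missing label is silently skipped
# df.drop('z')                 # would raise KeyError: label not found in axis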
20,195
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._update_inplace
|
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
|
python
|
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
|
[
"def",
"_update_inplace",
"(",
"self",
",",
"result",
",",
"verify_is_copy",
"=",
"True",
")",
":",
"# NOTE: This does *not* call __finalize__ and that's an explicit",
"# decision that we may revisit in the future.",
"self",
".",
"_reset_cache",
"(",
")",
"self",
".",
"_clear_item_cache",
"(",
")",
"self",
".",
"_data",
"=",
"getattr",
"(",
"result",
",",
"'_data'",
",",
"result",
")",
"self",
".",
"_maybe_update_cacher",
"(",
"verify_is_copy",
"=",
"verify_is_copy",
")"
] |
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
|
[
"Replace",
"self",
"internals",
"with",
"result",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3870-L3886
|
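A brief, hedged illustration of the inplace pattern this helper backs (the frame is illustrative):

import pandas as pd

df = pd.DataFrame({'a': [2, 1]})
out = df.sort_values('a', inplace=True)  # result written back via _update_inplace
print(out)  # None -- inplace operations replace self internals and return None
print(df)   # df itself now holds the sorted data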
20,196
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.add_prefix
|
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial('{prefix}{}'.format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
|
python
|
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial('{prefix}{}'.format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
|
[
"def",
"add_prefix",
"(",
"self",
",",
"prefix",
")",
":",
"f",
"=",
"functools",
".",
"partial",
"(",
"'{prefix}{}'",
".",
"format",
",",
"prefix",
"=",
"prefix",
")",
"mapper",
"=",
"{",
"self",
".",
"_info_axis_name",
":",
"f",
"}",
"return",
"self",
".",
"rename",
"(",
"*",
"*",
"mapper",
")"
] |
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
|
[
"Prefix",
"labels",
"with",
"string",
"prefix",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3888-L3945
|
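A hedged sketch showing that add_prefix behaves like a rename over the info axis (data is illustrative):

import pandas as pd

df = pd.DataFrame({'A': [1], 'B': [2]})
via_prefix = df.add_prefix('col_')
via_rename = df.rename(columns=lambda label: 'col_' + label)
assert via_prefix.equals(via_rename)  # same labels: col_A, col_B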
20,197
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.add_suffix
|
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial('{}{suffix}'.format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
|
python
|
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial('{}{suffix}'.format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
|
[
"def",
"add_suffix",
"(",
"self",
",",
"suffix",
")",
":",
"f",
"=",
"functools",
".",
"partial",
"(",
"'{}{suffix}'",
".",
"format",
",",
"suffix",
"=",
"suffix",
")",
"mapper",
"=",
"{",
"self",
".",
"_info_axis_name",
":",
"f",
"}",
"return",
"self",
".",
"rename",
"(",
"*",
"*",
"mapper",
")"
] |
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
|
[
"Suffix",
"labels",
"with",
"string",
"suffix",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3947-L4004
|
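A small hedged sketch combining the two labeling helpers (data is illustrative):

import pandas as pd

s = pd.Series([1, 2])
labeled = s.add_prefix('row_').add_suffix('_v1')
print(list(labeled.index))  # ['row_0_v1', 'row_1_v1']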
20,198
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame.sort_values
|
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also numpy.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.")
|
python
|
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also numpy.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.")
|
[
"def",
"sort_values",
"(",
"self",
",",
"by",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"ascending",
"=",
"True",
",",
"inplace",
"=",
"False",
",",
"kind",
"=",
"'quicksort'",
",",
"na_position",
"=",
"'last'",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"sort_values has not been implemented \"",
"\"on Panel or Panel4D objects.\"",
")"
] |
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also numpy.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
|
[
"Sort",
"by",
"the",
"values",
"along",
"either",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4006-L4096
|
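The base-class stub above raises for Panel-like objects; Series and DataFrame override it. A hedged sketch of the stability note on the kind parameter (data is illustrative):

import pandas as pd

df = pd.DataFrame({'k': ['b', 'a', 'b'], 'v': [1, 2, 3]})
out = df.sort_values(by='k', kind='mergesort')  # mergesort is the stable choice
print(out['v'].tolist())  # [2, 1, 3] -- tied 'b' rows keep their input order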
20,199
|
pandas-dev/pandas
|
pandas/core/generic.py
|
NDFrame._reindex_axes
|
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(labels, level=level, limit=limit,
tolerance=tolerance, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy, allow_dups=False)
return obj
|
python
|
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(labels, level=level, limit=limit,
tolerance=tolerance, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy, allow_dups=False)
return obj
|
[
"def",
"_reindex_axes",
"(",
"self",
",",
"axes",
",",
"level",
",",
"limit",
",",
"tolerance",
",",
"method",
",",
"fill_value",
",",
"copy",
")",
":",
"obj",
"=",
"self",
"for",
"a",
"in",
"self",
".",
"_AXIS_ORDERS",
":",
"labels",
"=",
"axes",
"[",
"a",
"]",
"if",
"labels",
"is",
"None",
":",
"continue",
"ax",
"=",
"self",
".",
"_get_axis",
"(",
"a",
")",
"new_index",
",",
"indexer",
"=",
"ax",
".",
"reindex",
"(",
"labels",
",",
"level",
"=",
"level",
",",
"limit",
"=",
"limit",
",",
"tolerance",
"=",
"tolerance",
",",
"method",
"=",
"method",
")",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"a",
")",
"obj",
"=",
"obj",
".",
"_reindex_with_indexers",
"(",
"{",
"axis",
":",
"[",
"new_index",
",",
"indexer",
"]",
"}",
",",
"fill_value",
"=",
"fill_value",
",",
"copy",
"=",
"copy",
",",
"allow_dups",
"=",
"False",
")",
"return",
"obj"
] |
Perform the reindex for all the axes.
|
[
"Perform",
"the",
"reindex",
"for",
"all",
"the",
"axes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L4393-L4411
|
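A hedged sketch of what the loop above amounts to for a 2-D caller, via the public reindex (data is illustrative):

import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0]}, index=['x', 'y'])
out = df.reindex(index=['y', 'z'], columns=['a', 'b'], fill_value=0)
print(out)  # each axis in _AXIS_ORDERS is conformed in turn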