| id (int32, 0–252k) | repo (string, 7–55 chars) | path (string, 4–127 chars) | func_name (string, 1–88 chars) | original_string (string, 75–19.8k chars) | language (1 class) | code (string, 75–19.8k chars) | code_tokens (list) | docstring (string, 3–17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87–242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
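Each row below follows this schema. As a minimal sketch (assuming the records are exported as JSON lines with these field names; the file name is hypothetical), they can be iterated like this:

```python
import json

def iter_records(path):
    """Yield one record dict per line of a JSON-lines export with the schema above."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            yield json.loads(line)

# Print the function name and the first docstring line of each record.
# for rec in iter_records("records.jsonl"):  # hypothetical file name
#     print(rec["func_name"], "-", rec["docstring"].splitlines()[0])
```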
19,900
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_validate_names
|
def _validate_names(names):
"""
Check if the `names` parameter contains duplicates.
If duplicates are found, we issue a warning before returning.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Returns
-------
names : array-like or None
The original `names` parameter.
"""
if names is not None:
if len(names) != len(set(names)):
msg = ("Duplicate names specified. This "
"will raise an error in the future.")
warnings.warn(msg, UserWarning, stacklevel=3)
return names
|
python
|
def _validate_names(names):
"""
Check if the `names` parameter contains duplicates.
If duplicates are found, we issue a warning before returning.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Returns
-------
names : array-like or None
The original `names` parameter.
"""
if names is not None:
if len(names) != len(set(names)):
msg = ("Duplicate names specified. This "
"will raise an error in the future.")
warnings.warn(msg, UserWarning, stacklevel=3)
return names
|
[
"def",
"_validate_names",
"(",
"names",
")",
":",
"if",
"names",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"names",
")",
"!=",
"len",
"(",
"set",
"(",
"names",
")",
")",
":",
"msg",
"=",
"(",
"\"Duplicate names specified. This \"",
"\"will raise an error in the future.\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"UserWarning",
",",
"stacklevel",
"=",
"3",
")",
"return",
"names"
] |
Check if the `names` parameter contains duplicates.
If duplicates are found, we issue a warning before returning.
Parameters
----------
names : array-like or None
An array containing a list of the names used for the output DataFrame.
Returns
-------
names : array-like or None
The original `names` parameter.
|
[
"Check",
"if",
"the",
"names",
"parameter",
"contains",
"duplicates",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L379-L402
|
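Illustrative note (not part of the dataset record): a standalone rendition of the duplicate check that `_validate_names` performs, showing when the warning fires.

```python
import warnings

def validate_names(names):
    """Standalone rendition of the duplicate check done by _validate_names."""
    if names is not None and len(names) != len(set(names)):
        warnings.warn("Duplicate names specified. This will raise an error in the future.",
                      UserWarning, stacklevel=2)
    return names

validate_names(["a", "b"])   # silent
validate_names(["a", "a"])   # emits a UserWarning
```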
19,901
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_read
|
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
encoding = kwds.get('encoding', None)
if encoding is not None:
encoding = re.sub('_', '-', encoding).lower()
kwds['encoding'] = encoding
compression = kwds.get('compression', 'infer')
compression = _infer_compression(filepath_or_buffer, compression)
# TODO: get_filepath_or_buffer could return
# Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
# though mypy handling of conditional imports is difficult.
# See https://github.com/python/mypy/issues/1297
fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
filepath_or_buffer, encoding, compression)
kwds['compression'] = compression
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
kwds['parse_dates'] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
nrows = kwds.get('nrows', None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(fp_or_buf, **kwds)
if chunksize or iterator:
return parser
try:
data = parser.read(nrows)
finally:
parser.close()
if should_close:
try:
fp_or_buf.close()
except ValueError:
pass
return data
|
python
|
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
"""Generic reader of line files."""
encoding = kwds.get('encoding', None)
if encoding is not None:
encoding = re.sub('_', '-', encoding).lower()
kwds['encoding'] = encoding
compression = kwds.get('compression', 'infer')
compression = _infer_compression(filepath_or_buffer, compression)
# TODO: get_filepath_or_buffer could return
# Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
# though mypy handling of conditional imports is difficult.
# See https://github.com/python/mypy/issues/1297
fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
filepath_or_buffer, encoding, compression)
kwds['compression'] = compression
if kwds.get('date_parser', None) is not None:
if isinstance(kwds['parse_dates'], bool):
kwds['parse_dates'] = True
# Extract some of the arguments (pass chunksize on).
iterator = kwds.get('iterator', False)
chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
nrows = kwds.get('nrows', None)
# Check for duplicates in names.
_validate_names(kwds.get("names", None))
# Create the parser.
parser = TextFileReader(fp_or_buf, **kwds)
if chunksize or iterator:
return parser
try:
data = parser.read(nrows)
finally:
parser.close()
if should_close:
try:
fp_or_buf.close()
except ValueError:
pass
return data
|
[
"def",
"_read",
"(",
"filepath_or_buffer",
":",
"FilePathOrBuffer",
",",
"kwds",
")",
":",
"encoding",
"=",
"kwds",
".",
"get",
"(",
"'encoding'",
",",
"None",
")",
"if",
"encoding",
"is",
"not",
"None",
":",
"encoding",
"=",
"re",
".",
"sub",
"(",
"'_'",
",",
"'-'",
",",
"encoding",
")",
".",
"lower",
"(",
")",
"kwds",
"[",
"'encoding'",
"]",
"=",
"encoding",
"compression",
"=",
"kwds",
".",
"get",
"(",
"'compression'",
",",
"'infer'",
")",
"compression",
"=",
"_infer_compression",
"(",
"filepath_or_buffer",
",",
"compression",
")",
"# TODO: get_filepath_or_buffer could return",
"# Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]",
"# though mypy handling of conditional imports is difficult.",
"# See https://github.com/python/mypy/issues/1297",
"fp_or_buf",
",",
"_",
",",
"compression",
",",
"should_close",
"=",
"get_filepath_or_buffer",
"(",
"filepath_or_buffer",
",",
"encoding",
",",
"compression",
")",
"kwds",
"[",
"'compression'",
"]",
"=",
"compression",
"if",
"kwds",
".",
"get",
"(",
"'date_parser'",
",",
"None",
")",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"kwds",
"[",
"'parse_dates'",
"]",
",",
"bool",
")",
":",
"kwds",
"[",
"'parse_dates'",
"]",
"=",
"True",
"# Extract some of the arguments (pass chunksize on).",
"iterator",
"=",
"kwds",
".",
"get",
"(",
"'iterator'",
",",
"False",
")",
"chunksize",
"=",
"_validate_integer",
"(",
"'chunksize'",
",",
"kwds",
".",
"get",
"(",
"'chunksize'",
",",
"None",
")",
",",
"1",
")",
"nrows",
"=",
"kwds",
".",
"get",
"(",
"'nrows'",
",",
"None",
")",
"# Check for duplicates in names.",
"_validate_names",
"(",
"kwds",
".",
"get",
"(",
"\"names\"",
",",
"None",
")",
")",
"# Create the parser.",
"parser",
"=",
"TextFileReader",
"(",
"fp_or_buf",
",",
"*",
"*",
"kwds",
")",
"if",
"chunksize",
"or",
"iterator",
":",
"return",
"parser",
"try",
":",
"data",
"=",
"parser",
".",
"read",
"(",
"nrows",
")",
"finally",
":",
"parser",
".",
"close",
"(",
")",
"if",
"should_close",
":",
"try",
":",
"fp_or_buf",
".",
"close",
"(",
")",
"except",
"ValueError",
":",
"pass",
"return",
"data"
] |
Generic reader of line files.
|
[
"Generic",
"reader",
"of",
"line",
"files",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L405-L452
|
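Illustrative note (not part of the dataset record): `_read` is the shared backend of `read_csv`/`read_table`/`read_fwf`; when `chunksize` or `iterator` is passed it returns the `TextFileReader` instead of reading eagerly. A small sketch:

```python
from io import StringIO
import pandas as pd

csv = "a,b\n1,2\n3,4\n5,6\n"

# Eager path: _read calls parser.read(nrows) and closes the parser.
df = pd.read_csv(StringIO(csv), nrows=2)

# Lazy path: with chunksize, the TextFileReader itself is returned.
reader = pd.read_csv(StringIO(csv), chunksize=2)
for chunk in reader:
    print(chunk.shape)  # (2, 2) then (1, 2)
```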
19,902
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
read_fwf
|
def read_fwf(filepath_or_buffer: FilePathOrBuffer,
colspecs='infer',
widths=None,
infer_nrows=100,
**kwds):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts either
``pathlib.Path`` or ``py._path.local.LocalPath``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, 'infer') and widths is not None:
raise ValueError("You must specify only one of 'widths' and "
"'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds['colspecs'] = colspecs
kwds['infer_nrows'] = infer_nrows
kwds['engine'] = 'python-fwf'
return _read(filepath_or_buffer, kwds)
|
python
|
def read_fwf(filepath_or_buffer: FilePathOrBuffer,
colspecs='infer',
widths=None,
infer_nrows=100,
**kwds):
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts either
``pathlib.Path`` or ``py._path.local.LocalPath``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
"""
# Check input arguments.
if colspecs is None and widths is None:
raise ValueError("Must specify either colspecs or widths")
elif colspecs not in (None, 'infer') and widths is not None:
raise ValueError("You must specify only one of 'widths' and "
"'colspecs'")
# Compute 'colspecs' from 'widths', if specified.
if widths is not None:
colspecs, col = [], 0
for w in widths:
colspecs.append((col, col + w))
col += w
kwds['colspecs'] = colspecs
kwds['infer_nrows'] = infer_nrows
kwds['engine'] = 'python-fwf'
return _read(filepath_or_buffer, kwds)
|
[
"def",
"read_fwf",
"(",
"filepath_or_buffer",
":",
"FilePathOrBuffer",
",",
"colspecs",
"=",
"'infer'",
",",
"widths",
"=",
"None",
",",
"infer_nrows",
"=",
"100",
",",
"*",
"*",
"kwds",
")",
":",
"# Check input arguments.",
"if",
"colspecs",
"is",
"None",
"and",
"widths",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must specify either colspecs or widths\"",
")",
"elif",
"colspecs",
"not",
"in",
"(",
"None",
",",
"'infer'",
")",
"and",
"widths",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You must specify only one of 'widths' and \"",
"\"'colspecs'\"",
")",
"# Compute 'colspecs' from 'widths', if specified.",
"if",
"widths",
"is",
"not",
"None",
":",
"colspecs",
",",
"col",
"=",
"[",
"]",
",",
"0",
"for",
"w",
"in",
"widths",
":",
"colspecs",
".",
"append",
"(",
"(",
"col",
",",
"col",
"+",
"w",
")",
")",
"col",
"+=",
"w",
"kwds",
"[",
"'colspecs'",
"]",
"=",
"colspecs",
"kwds",
"[",
"'infer_nrows'",
"]",
"=",
"infer_nrows",
"kwds",
"[",
"'engine'",
"]",
"=",
"'python-fwf'",
"return",
"_read",
"(",
"filepath_or_buffer",
",",
"kwds",
")"
] |
r"""
Read a table of fixed-width formatted lines into DataFrame.
Also supports optionally iterating or breaking of the file
into chunks.
Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.
Parameters
----------
filepath_or_buffer : str, path object, or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: file://localhost/path/to/table.csv.
If you want to pass in a path object, pandas accepts either
``pathlib.Path`` or ``py._path.local.LocalPath``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
A list of tuples giving the extents of the fixed-width
fields of each line as half-open intervals (i.e., [from, to[ ).
String value 'infer' can be used to instruct the parser to try
detecting the column specifications from the first 100 rows of
the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
A list of field widths which can be used instead of 'colspecs' if
the intervals are contiguous.
infer_nrows : int, default 100
The number of rows to consider when letting the parser determine the
`colspecs`.
.. versionadded:: 0.24.0
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or TextParser
A comma-separated values (csv) file is returned as two-dimensional
data structure with labeled axes.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
>>> pd.read_fwf('data.csv') # doctest: +SKIP
|
[
"r",
"Read",
"a",
"table",
"of",
"fixed",
"-",
"width",
"formatted",
"lines",
"into",
"DataFrame",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L735-L813
|
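Illustrative note (not part of the dataset record): a small `read_fwf` sketch showing the `widths` shortcut, which the function converts into contiguous `colspecs` before delegating to `_read`.

```python
from io import StringIO
import pandas as pd

fixed = ("id  name  \n"
         "1   alice \n"
         "2   bob   \n")

# widths=[4, 6] is converted internally to colspecs=[(0, 4), (4, 10)].
df = pd.read_fwf(StringIO(fixed), widths=[4, 6])
print(df)
```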
19,903
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_is_potential_multi_index
|
def _is_potential_multi_index(columns):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
return (len(columns) and not isinstance(columns, MultiIndex) and
all(isinstance(c, tuple) for c in columns))
|
python
|
def _is_potential_multi_index(columns):
"""
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
Returns
-------
boolean : Whether or not columns could become a MultiIndex
"""
return (len(columns) and not isinstance(columns, MultiIndex) and
all(isinstance(c, tuple) for c in columns))
|
[
"def",
"_is_potential_multi_index",
"(",
"columns",
")",
":",
"return",
"(",
"len",
"(",
"columns",
")",
"and",
"not",
"isinstance",
"(",
"columns",
",",
"MultiIndex",
")",
"and",
"all",
"(",
"isinstance",
"(",
"c",
",",
"tuple",
")",
"for",
"c",
"in",
"columns",
")",
")"
] |
Check whether or not the `columns` parameter
could be converted into a MultiIndex.
Parameters
----------
columns : array-like
Object which may or may not be convertible into a MultiIndex
Returns
-------
boolean : Whether or not columns could become a MultiIndex
|
[
"Check",
"whether",
"or",
"not",
"the",
"columns",
"parameter",
"could",
"be",
"converted",
"into",
"a",
"MultiIndex",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1190-L1205
|
19,904
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_evaluate_usecols
|
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
|
python
|
def _evaluate_usecols(usecols, names):
"""
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
"""
if callable(usecols):
return {i for i, name in enumerate(names) if usecols(name)}
return usecols
|
[
"def",
"_evaluate_usecols",
"(",
"usecols",
",",
"names",
")",
":",
"if",
"callable",
"(",
"usecols",
")",
":",
"return",
"{",
"i",
"for",
"i",
",",
"name",
"in",
"enumerate",
"(",
"names",
")",
"if",
"usecols",
"(",
"name",
")",
"}",
"return",
"usecols"
] |
Check whether or not the 'usecols' parameter
is a callable. If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
|
[
"Check",
"whether",
"or",
"not",
"the",
"usecols",
"parameter",
"is",
"a",
"callable",
".",
"If",
"so",
"enumerates",
"the",
"names",
"parameter",
"and",
"returns",
"a",
"set",
"of",
"indices",
"for",
"each",
"entry",
"in",
"names",
"that",
"evaluates",
"to",
"True",
".",
"If",
"not",
"a",
"callable",
"returns",
"usecols",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1208-L1218
|
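Illustrative note (not part of the dataset record): when `usecols` is a callable it is evaluated against the column names, which is the case `_evaluate_usecols` handles. Sketch:

```python
from io import StringIO
import pandas as pd

csv = "name,age,address\nalice,30,x\nbob,25,y\n"

# Keep only the columns whose name starts with 'a'.
df = pd.read_csv(StringIO(csv), usecols=lambda col: col.startswith("a"))
print(list(df.columns))  # ['age', 'address']
```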
19,905
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_validate_usecols_names
|
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, "
"columns expected but not found: {missing}".format(missing=missing)
)
return usecols
|
python
|
def _validate_usecols_names(usecols, names):
"""
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
"""
missing = [c for c in usecols if c not in names]
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, "
"columns expected but not found: {missing}".format(missing=missing)
)
return usecols
|
[
"def",
"_validate_usecols_names",
"(",
"usecols",
",",
"names",
")",
":",
"missing",
"=",
"[",
"c",
"for",
"c",
"in",
"usecols",
"if",
"c",
"not",
"in",
"names",
"]",
"if",
"len",
"(",
"missing",
")",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Usecols do not match columns, \"",
"\"columns expected but not found: {missing}\"",
".",
"format",
"(",
"missing",
"=",
"missing",
")",
")",
"return",
"usecols"
] |
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.
Parameters
----------
usecols : iterable of usecols
The columns to validate are present in names.
names : iterable of names
The column names to check against.
Returns
-------
usecols : iterable of usecols
The `usecols` parameter if the validation succeeds.
Raises
------
ValueError : Columns were missing. Error message will list them.
|
[
"Validates",
"that",
"all",
"usecols",
"are",
"present",
"in",
"a",
"given",
"list",
"of",
"names",
".",
"If",
"not",
"raise",
"a",
"ValueError",
"that",
"shows",
"what",
"usecols",
"are",
"missing",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1221-L1250
|
19,906
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_validate_usecols_arg
|
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = ("'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable.")
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer",
"string", "unicode"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
|
python
|
def _validate_usecols_arg(usecols):
"""
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
"""
msg = ("'usecols' must either be list-like of all strings, all unicode, "
"all integers or a callable.")
if usecols is not None:
if callable(usecols):
return usecols, None
if not is_list_like(usecols):
# see gh-20529
#
# Ensure it is iterable container but not string.
raise ValueError(msg)
usecols_dtype = lib.infer_dtype(usecols, skipna=False)
if usecols_dtype not in ("empty", "integer",
"string", "unicode"):
raise ValueError(msg)
usecols = set(usecols)
return usecols, usecols_dtype
return usecols, None
|
[
"def",
"_validate_usecols_arg",
"(",
"usecols",
")",
":",
"msg",
"=",
"(",
"\"'usecols' must either be list-like of all strings, all unicode, \"",
"\"all integers or a callable.\"",
")",
"if",
"usecols",
"is",
"not",
"None",
":",
"if",
"callable",
"(",
"usecols",
")",
":",
"return",
"usecols",
",",
"None",
"if",
"not",
"is_list_like",
"(",
"usecols",
")",
":",
"# see gh-20529",
"#",
"# Ensure it is iterable container but not string.",
"raise",
"ValueError",
"(",
"msg",
")",
"usecols_dtype",
"=",
"lib",
".",
"infer_dtype",
"(",
"usecols",
",",
"skipna",
"=",
"False",
")",
"if",
"usecols_dtype",
"not",
"in",
"(",
"\"empty\"",
",",
"\"integer\"",
",",
"\"string\"",
",",
"\"unicode\"",
")",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"usecols",
"=",
"set",
"(",
"usecols",
")",
"return",
"usecols",
",",
"usecols_dtype",
"return",
"usecols",
",",
"None"
] |
Validate the 'usecols' parameter.
Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.
Parameters
----------
usecols : list-like, callable, or None
List of columns to use when parsing or a callable that can be used
to filter a list of table columns.
Returns
-------
usecols_tuple : tuple
A tuple of (verified_usecols, usecols_dtype).
'verified_usecols' is either a set if an array-like is passed in or
'usecols' if a callable or None is passed in.
'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
is passed in or None if a callable or None is passed in.
|
[
"Validate",
"the",
"usecols",
"parameter",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1284-L1330
|
19,907
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_validate_parse_dates_arg
|
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
|
python
|
def _validate_parse_dates_arg(parse_dates):
"""
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
"""
msg = ("Only booleans, lists, and "
"dictionaries are accepted "
"for the 'parse_dates' parameter")
if parse_dates is not None:
if is_scalar(parse_dates):
if not lib.is_bool(parse_dates):
raise TypeError(msg)
elif not isinstance(parse_dates, (list, dict)):
raise TypeError(msg)
return parse_dates
|
[
"def",
"_validate_parse_dates_arg",
"(",
"parse_dates",
")",
":",
"msg",
"=",
"(",
"\"Only booleans, lists, and \"",
"\"dictionaries are accepted \"",
"\"for the 'parse_dates' parameter\"",
")",
"if",
"parse_dates",
"is",
"not",
"None",
":",
"if",
"is_scalar",
"(",
"parse_dates",
")",
":",
"if",
"not",
"lib",
".",
"is_bool",
"(",
"parse_dates",
")",
":",
"raise",
"TypeError",
"(",
"msg",
")",
"elif",
"not",
"isinstance",
"(",
"parse_dates",
",",
"(",
"list",
",",
"dict",
")",
")",
":",
"raise",
"TypeError",
"(",
"msg",
")",
"return",
"parse_dates"
] |
Check whether or not the 'parse_dates' parameter
is a non-boolean scalar. Raises a ValueError if
that is the case.
|
[
"Check",
"whether",
"or",
"not",
"the",
"parse_dates",
"parameter",
"is",
"a",
"non",
"-",
"boolean",
"scalar",
".",
"Raises",
"a",
"ValueError",
"if",
"that",
"is",
"the",
"case",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1333-L1351
|
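Illustrative note (not part of the dataset record): `parse_dates` accepts booleans, lists, or dicts; any other scalar is rejected by this validator with a `TypeError`. Sketch of the list form:

```python
from io import StringIO
import pandas as pd

csv = "when,value\n2019-01-01,1\n2019-01-02,2\n"

# List form: parse the named column as datetimes.
df = pd.read_csv(StringIO(csv), parse_dates=["when"])
print(df["when"].dtype)  # datetime64[ns]

# A non-boolean scalar such as parse_dates="when" is rejected with a TypeError.
```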
19,908
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_stringify_na_values
|
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
# we are like 999 here
if v == int(v):
v = int(v)
result.append("{value}.0".format(value=v))
result.append(str(v))
result.append(v)
except (TypeError, ValueError, OverflowError):
pass
try:
result.append(int(x))
except (TypeError, ValueError, OverflowError):
pass
return set(result)
|
python
|
def _stringify_na_values(na_values):
""" return a stringified and numeric for these values """
result = []
for x in na_values:
result.append(str(x))
result.append(x)
try:
v = float(x)
# we are like 999 here
if v == int(v):
v = int(v)
result.append("{value}.0".format(value=v))
result.append(str(v))
result.append(v)
except (TypeError, ValueError, OverflowError):
pass
try:
result.append(int(x))
except (TypeError, ValueError, OverflowError):
pass
return set(result)
|
[
"def",
"_stringify_na_values",
"(",
"na_values",
")",
":",
"result",
"=",
"[",
"]",
"for",
"x",
"in",
"na_values",
":",
"result",
".",
"append",
"(",
"str",
"(",
"x",
")",
")",
"result",
".",
"append",
"(",
"x",
")",
"try",
":",
"v",
"=",
"float",
"(",
"x",
")",
"# we are like 999 here",
"if",
"v",
"==",
"int",
"(",
"v",
")",
":",
"v",
"=",
"int",
"(",
"v",
")",
"result",
".",
"append",
"(",
"\"{value}.0\"",
".",
"format",
"(",
"value",
"=",
"v",
")",
")",
"result",
".",
"append",
"(",
"str",
"(",
"v",
")",
")",
"result",
".",
"append",
"(",
"v",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"OverflowError",
")",
":",
"pass",
"try",
":",
"result",
".",
"append",
"(",
"int",
"(",
"x",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"OverflowError",
")",
":",
"pass",
"return",
"set",
"(",
"result",
")"
] |
return a stringified and numeric for these values
|
[
"return",
"a",
"stringified",
"and",
"numeric",
"for",
"these",
"values"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L3425-L3447
|
19,909
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
_get_na_values
|
def _get_na_values(col, na_values, na_fvalues, keep_default_na):
"""
Get the NaN values for a given column.
Parameters
----------
col : str
The name of the column.
na_values : array-like, dict
The object listing the NaN values as strings.
na_fvalues : array-like, dict
The object listing the NaN values as floats.
keep_default_na : bool
If `na_values` is a dict, and the column is not mapped in the
dictionary, whether to return the default NaN values or the empty set.
Returns
-------
nan_tuple : A length-two tuple composed of
1) na_values : the string NaN values for that column.
2) na_fvalues : the float NaN values for that column.
"""
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
if keep_default_na:
return _NA_VALUES, set()
return set(), set()
else:
return na_values, na_fvalues
|
python
|
def _get_na_values(col, na_values, na_fvalues, keep_default_na):
"""
Get the NaN values for a given column.
Parameters
----------
col : str
The name of the column.
na_values : array-like, dict
The object listing the NaN values as strings.
na_fvalues : array-like, dict
The object listing the NaN values as floats.
keep_default_na : bool
If `na_values` is a dict, and the column is not mapped in the
dictionary, whether to return the default NaN values or the empty set.
Returns
-------
nan_tuple : A length-two tuple composed of
1) na_values : the string NaN values for that column.
2) na_fvalues : the float NaN values for that column.
"""
if isinstance(na_values, dict):
if col in na_values:
return na_values[col], na_fvalues[col]
else:
if keep_default_na:
return _NA_VALUES, set()
return set(), set()
else:
return na_values, na_fvalues
|
[
"def",
"_get_na_values",
"(",
"col",
",",
"na_values",
",",
"na_fvalues",
",",
"keep_default_na",
")",
":",
"if",
"isinstance",
"(",
"na_values",
",",
"dict",
")",
":",
"if",
"col",
"in",
"na_values",
":",
"return",
"na_values",
"[",
"col",
"]",
",",
"na_fvalues",
"[",
"col",
"]",
"else",
":",
"if",
"keep_default_na",
":",
"return",
"_NA_VALUES",
",",
"set",
"(",
")",
"return",
"set",
"(",
")",
",",
"set",
"(",
")",
"else",
":",
"return",
"na_values",
",",
"na_fvalues"
] |
Get the NaN values for a given column.
Parameters
----------
col : str
The name of the column.
na_values : array-like, dict
The object listing the NaN values as strings.
na_fvalues : array-like, dict
The object listing the NaN values as floats.
keep_default_na : bool
If `na_values` is a dict, and the column is not mapped in the
dictionary, whether to return the default NaN values or the empty set.
Returns
-------
nan_tuple : A length-two tuple composed of
1) na_values : the string NaN values for that column.
2) na_fvalues : the float NaN values for that column.
|
[
"Get",
"the",
"NaN",
"values",
"for",
"a",
"given",
"column",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L3450-L3483
|
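Illustrative note (not part of the dataset record): passing `na_values` as a dict gives per-column NaN markers, which is the case `_get_na_values` resolves. Sketch:

```python
from io import StringIO
import pandas as pd

csv = "a,b\n-1,-1\n2,3\n"

# Only column 'a' treats -1 as missing; column 'b' keeps it as a number.
df = pd.read_csv(StringIO(csv), na_values={"a": [-1]})
print(df.isna().sum().to_dict())  # {'a': 1, 'b': 0}
```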
19,910
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
ParserBase._extract_multi_indexer_columns
|
def _extract_multi_indexer_columns(self, header, index_names, col_names,
passed_names=False):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(index_names,
self.index_col,
self.unnamed_cols)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = lzip(*[extract(r) for r in header])
names = ic + columns
# If we find unnamed columns all in a single
# level, then our header was too long.
for n in range(len(columns[0])):
if all(compat.to_str(c[n]) in self.unnamed_cols for c in columns):
raise ParserError(
"Passed header=[{header}] are too many rows for this "
"multi_index of columns"
.format(header=','.join(str(x) for x in self.header))
)
# Clean the column names (if we have an index_col).
if len(ic):
col_names = [r[0] if (len(r[0]) and
r[0] not in self.unnamed_cols) else None
for r in header]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
|
python
|
def _extract_multi_indexer_columns(self, header, index_names, col_names,
passed_names=False):
""" extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers """
if len(header) < 2:
return header[0], index_names, col_names, passed_names
# the names are the tuples of the header that are not the index cols
# 0 is the name of the index, assuming index_col is a list of column
# numbers
ic = self.index_col
if ic is None:
ic = []
if not isinstance(ic, (list, tuple, np.ndarray)):
ic = [ic]
sic = set(ic)
# clean the index_names
index_names = header.pop(-1)
index_names, names, index_col = _clean_index_names(index_names,
self.index_col,
self.unnamed_cols)
# extract the columns
field_count = len(header[0])
def extract(r):
return tuple(r[i] for i in range(field_count) if i not in sic)
columns = lzip(*[extract(r) for r in header])
names = ic + columns
# If we find unnamed columns all in a single
# level, then our header was too long.
for n in range(len(columns[0])):
if all(compat.to_str(c[n]) in self.unnamed_cols for c in columns):
raise ParserError(
"Passed header=[{header}] are too many rows for this "
"multi_index of columns"
.format(header=','.join(str(x) for x in self.header))
)
# Clean the column names (if we have an index_col).
if len(ic):
col_names = [r[0] if (len(r[0]) and
r[0] not in self.unnamed_cols) else None
for r in header]
else:
col_names = [None] * len(header)
passed_names = True
return names, index_names, col_names, passed_names
|
[
"def",
"_extract_multi_indexer_columns",
"(",
"self",
",",
"header",
",",
"index_names",
",",
"col_names",
",",
"passed_names",
"=",
"False",
")",
":",
"if",
"len",
"(",
"header",
")",
"<",
"2",
":",
"return",
"header",
"[",
"0",
"]",
",",
"index_names",
",",
"col_names",
",",
"passed_names",
"# the names are the tuples of the header that are not the index cols",
"# 0 is the name of the index, assuming index_col is a list of column",
"# numbers",
"ic",
"=",
"self",
".",
"index_col",
"if",
"ic",
"is",
"None",
":",
"ic",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"ic",
",",
"(",
"list",
",",
"tuple",
",",
"np",
".",
"ndarray",
")",
")",
":",
"ic",
"=",
"[",
"ic",
"]",
"sic",
"=",
"set",
"(",
"ic",
")",
"# clean the index_names",
"index_names",
"=",
"header",
".",
"pop",
"(",
"-",
"1",
")",
"index_names",
",",
"names",
",",
"index_col",
"=",
"_clean_index_names",
"(",
"index_names",
",",
"self",
".",
"index_col",
",",
"self",
".",
"unnamed_cols",
")",
"# extract the columns",
"field_count",
"=",
"len",
"(",
"header",
"[",
"0",
"]",
")",
"def",
"extract",
"(",
"r",
")",
":",
"return",
"tuple",
"(",
"r",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"field_count",
")",
"if",
"i",
"not",
"in",
"sic",
")",
"columns",
"=",
"lzip",
"(",
"*",
"[",
"extract",
"(",
"r",
")",
"for",
"r",
"in",
"header",
"]",
")",
"names",
"=",
"ic",
"+",
"columns",
"# If we find unnamed columns all in a single",
"# level, then our header was too long.",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"columns",
"[",
"0",
"]",
")",
")",
":",
"if",
"all",
"(",
"compat",
".",
"to_str",
"(",
"c",
"[",
"n",
"]",
")",
"in",
"self",
".",
"unnamed_cols",
"for",
"c",
"in",
"columns",
")",
":",
"raise",
"ParserError",
"(",
"\"Passed header=[{header}] are too many rows for this \"",
"\"multi_index of columns\"",
".",
"format",
"(",
"header",
"=",
"','",
".",
"join",
"(",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"header",
")",
")",
")",
"# Clean the column names (if we have an index_col).",
"if",
"len",
"(",
"ic",
")",
":",
"col_names",
"=",
"[",
"r",
"[",
"0",
"]",
"if",
"(",
"len",
"(",
"r",
"[",
"0",
"]",
")",
"and",
"r",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"unnamed_cols",
")",
"else",
"None",
"for",
"r",
"in",
"header",
"]",
"else",
":",
"col_names",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"header",
")",
"passed_names",
"=",
"True",
"return",
"names",
",",
"index_names",
",",
"col_names",
",",
"passed_names"
] |
extract and return the names, index_names, col_names
header is a list-of-lists returned from the parsers
|
[
"extract",
"and",
"return",
"the",
"names",
"index_names",
"col_names",
"header",
"is",
"a",
"list",
"-",
"of",
"-",
"lists",
"returned",
"from",
"the",
"parsers"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1451-L1504
|
19,911
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
ParserBase._infer_types
|
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default try
try to cast values to numeric (first preference) or boolean
Returns:
--------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool:
try:
result = lib.maybe_convert_numeric(values, na_values, False)
na_count = isna(result).sum()
except Exception:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(result,
na_values, False)
else:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = libops.maybe_convert_bool(np.asarray(values),
true_values=self.true_values,
false_values=self.false_values)
return result, na_count
|
python
|
def _infer_types(self, values, na_values, try_num_bool=True):
"""
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default try
try to cast values to numeric (first preference) or boolean
Returns:
--------
converted : ndarray
na_count : int
"""
na_count = 0
if issubclass(values.dtype.type, (np.number, np.bool_)):
mask = algorithms.isin(values, list(na_values))
na_count = mask.sum()
if na_count > 0:
if is_integer_dtype(values):
values = values.astype(np.float64)
np.putmask(values, mask, np.nan)
return values, na_count
if try_num_bool:
try:
result = lib.maybe_convert_numeric(values, na_values, False)
na_count = isna(result).sum()
except Exception:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(result,
na_values, False)
else:
result = values
if values.dtype == np.object_:
na_count = parsers.sanitize_objects(values, na_values, False)
if result.dtype == np.object_ and try_num_bool:
result = libops.maybe_convert_bool(np.asarray(values),
true_values=self.true_values,
false_values=self.false_values)
return result, na_count
|
[
"def",
"_infer_types",
"(",
"self",
",",
"values",
",",
"na_values",
",",
"try_num_bool",
"=",
"True",
")",
":",
"na_count",
"=",
"0",
"if",
"issubclass",
"(",
"values",
".",
"dtype",
".",
"type",
",",
"(",
"np",
".",
"number",
",",
"np",
".",
"bool_",
")",
")",
":",
"mask",
"=",
"algorithms",
".",
"isin",
"(",
"values",
",",
"list",
"(",
"na_values",
")",
")",
"na_count",
"=",
"mask",
".",
"sum",
"(",
")",
"if",
"na_count",
">",
"0",
":",
"if",
"is_integer_dtype",
"(",
"values",
")",
":",
"values",
"=",
"values",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"np",
".",
"putmask",
"(",
"values",
",",
"mask",
",",
"np",
".",
"nan",
")",
"return",
"values",
",",
"na_count",
"if",
"try_num_bool",
":",
"try",
":",
"result",
"=",
"lib",
".",
"maybe_convert_numeric",
"(",
"values",
",",
"na_values",
",",
"False",
")",
"na_count",
"=",
"isna",
"(",
"result",
")",
".",
"sum",
"(",
")",
"except",
"Exception",
":",
"result",
"=",
"values",
"if",
"values",
".",
"dtype",
"==",
"np",
".",
"object_",
":",
"na_count",
"=",
"parsers",
".",
"sanitize_objects",
"(",
"result",
",",
"na_values",
",",
"False",
")",
"else",
":",
"result",
"=",
"values",
"if",
"values",
".",
"dtype",
"==",
"np",
".",
"object_",
":",
"na_count",
"=",
"parsers",
".",
"sanitize_objects",
"(",
"values",
",",
"na_values",
",",
"False",
")",
"if",
"result",
".",
"dtype",
"==",
"np",
".",
"object_",
"and",
"try_num_bool",
":",
"result",
"=",
"libops",
".",
"maybe_convert_bool",
"(",
"np",
".",
"asarray",
"(",
"values",
")",
",",
"true_values",
"=",
"self",
".",
"true_values",
",",
"false_values",
"=",
"self",
".",
"false_values",
")",
"return",
"result",
",",
"na_count"
] |
Infer types of values, possibly casting
Parameters
----------
values : ndarray
na_values : set
try_num_bool : bool, default try
try to cast values to numeric (first preference) or boolean
Returns:
--------
converted : ndarray
na_count : int
|
[
"Infer",
"types",
"of",
"values",
"possibly",
"casting"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1719-L1764
|
19,912
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
ParserBase._cast_types
|
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
known_cats = (isinstance(cast_type, CategoricalDtype) and
cast_type.categories is not None)
if not is_object_dtype(values) and not known_cats:
# XXX this is for consistency with
# c-parser which parses all categories
# as strings
values = astype_nansafe(values, str)
cats = Index(values).unique().dropna()
values = Categorical._from_inferred_categories(
cats, cats.get_indexer(values), cast_type,
true_values=self.true_values)
# use the EA's implementation of casting
elif is_extension_array_dtype(cast_type):
# ensure cast_type is an actual dtype and not a string
cast_type = pandas_dtype(cast_type)
array_type = cast_type.construct_array_type()
try:
return array_type._from_sequence_of_strings(values,
dtype=cast_type)
except NotImplementedError:
raise NotImplementedError(
"Extension Array: {ea} must implement "
"_from_sequence_of_strings in order "
"to be used in parser methods".format(ea=array_type))
else:
try:
values = astype_nansafe(values, cast_type,
copy=True, skipna=True)
except ValueError:
raise ValueError(
"Unable to convert column {column} to type "
"{cast_type}".format(
column=column, cast_type=cast_type))
return values
|
python
|
def _cast_types(self, values, cast_type, column):
"""
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
"""
if is_categorical_dtype(cast_type):
known_cats = (isinstance(cast_type, CategoricalDtype) and
cast_type.categories is not None)
if not is_object_dtype(values) and not known_cats:
# XXX this is for consistency with
# c-parser which parses all categories
# as strings
values = astype_nansafe(values, str)
cats = Index(values).unique().dropna()
values = Categorical._from_inferred_categories(
cats, cats.get_indexer(values), cast_type,
true_values=self.true_values)
# use the EA's implementation of casting
elif is_extension_array_dtype(cast_type):
# ensure cast_type is an actual dtype and not a string
cast_type = pandas_dtype(cast_type)
array_type = cast_type.construct_array_type()
try:
return array_type._from_sequence_of_strings(values,
dtype=cast_type)
except NotImplementedError:
raise NotImplementedError(
"Extension Array: {ea} must implement "
"_from_sequence_of_strings in order "
"to be used in parser methods".format(ea=array_type))
else:
try:
values = astype_nansafe(values, cast_type,
copy=True, skipna=True)
except ValueError:
raise ValueError(
"Unable to convert column {column} to type "
"{cast_type}".format(
column=column, cast_type=cast_type))
return values
|
[
"def",
"_cast_types",
"(",
"self",
",",
"values",
",",
"cast_type",
",",
"column",
")",
":",
"if",
"is_categorical_dtype",
"(",
"cast_type",
")",
":",
"known_cats",
"=",
"(",
"isinstance",
"(",
"cast_type",
",",
"CategoricalDtype",
")",
"and",
"cast_type",
".",
"categories",
"is",
"not",
"None",
")",
"if",
"not",
"is_object_dtype",
"(",
"values",
")",
"and",
"not",
"known_cats",
":",
"# XXX this is for consistency with",
"# c-parser which parses all categories",
"# as strings",
"values",
"=",
"astype_nansafe",
"(",
"values",
",",
"str",
")",
"cats",
"=",
"Index",
"(",
"values",
")",
".",
"unique",
"(",
")",
".",
"dropna",
"(",
")",
"values",
"=",
"Categorical",
".",
"_from_inferred_categories",
"(",
"cats",
",",
"cats",
".",
"get_indexer",
"(",
"values",
")",
",",
"cast_type",
",",
"true_values",
"=",
"self",
".",
"true_values",
")",
"# use the EA's implementation of casting",
"elif",
"is_extension_array_dtype",
"(",
"cast_type",
")",
":",
"# ensure cast_type is an actual dtype and not a string",
"cast_type",
"=",
"pandas_dtype",
"(",
"cast_type",
")",
"array_type",
"=",
"cast_type",
".",
"construct_array_type",
"(",
")",
"try",
":",
"return",
"array_type",
".",
"_from_sequence_of_strings",
"(",
"values",
",",
"dtype",
"=",
"cast_type",
")",
"except",
"NotImplementedError",
":",
"raise",
"NotImplementedError",
"(",
"\"Extension Array: {ea} must implement \"",
"\"_from_sequence_of_strings in order \"",
"\"to be used in parser methods\"",
".",
"format",
"(",
"ea",
"=",
"array_type",
")",
")",
"else",
":",
"try",
":",
"values",
"=",
"astype_nansafe",
"(",
"values",
",",
"cast_type",
",",
"copy",
"=",
"True",
",",
"skipna",
"=",
"True",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Unable to convert column {column} to type \"",
"\"{cast_type}\"",
".",
"format",
"(",
"column",
"=",
"column",
",",
"cast_type",
"=",
"cast_type",
")",
")",
"return",
"values"
] |
Cast values to specified type
Parameters
----------
values : ndarray
cast_type : string or np.dtype
dtype to cast values to
column : string
column name - used only for error reporting
Returns
-------
converted : ndarray
|
[
"Cast",
"values",
"to",
"specified",
"type"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1766-L1821
|
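Illustrative note (not part of the dataset record): the categorical branch of `_cast_types` is exercised by requesting a category dtype from the reader. Sketch:

```python
from io import StringIO
import pandas as pd

csv = "fruit,count\napple,1\npear,2\napple,3\n"

# Requesting a categorical dtype routes the column through the parsers' category-casting path.
df = pd.read_csv(StringIO(csv), dtype={"fruit": "category"})
print(df["fruit"].dtype)  # category
```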
19,913
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
CParserWrapper._set_noconvert_columns
|
def _set_noconvert_columns(self):
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
"""
names = self.orig_names
if self.usecols_dtype == 'integer':
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
usecols.sort()
elif (callable(self.usecols) or
self.usecols_dtype not in ('empty', None)):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = self.names[:]
else:
# Usecols is empty.
usecols = None
def _set(x):
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = names.index(x)
self._reader.set_noconvert(x)
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
|
python
|
def _set_noconvert_columns(self):
"""
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
"""
names = self.orig_names
if self.usecols_dtype == 'integer':
# A set of integers will be converted to a list in
# the correct order every single time.
usecols = list(self.usecols)
usecols.sort()
elif (callable(self.usecols) or
self.usecols_dtype not in ('empty', None)):
# The names attribute should have the correct columns
# in the proper order for indexing with parse_dates.
usecols = self.names[:]
else:
# Usecols is empty.
usecols = None
def _set(x):
if usecols is not None and is_integer(x):
x = usecols[x]
if not is_integer(x):
x = names.index(x)
self._reader.set_noconvert(x)
if isinstance(self.parse_dates, list):
for val in self.parse_dates:
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif isinstance(self.parse_dates, dict):
for val in self.parse_dates.values():
if isinstance(val, list):
for k in val:
_set(k)
else:
_set(val)
elif self.parse_dates:
if isinstance(self.index_col, list):
for k in self.index_col:
_set(k)
elif self.index_col is not None:
_set(self.index_col)
|
[
"def",
"_set_noconvert_columns",
"(",
"self",
")",
":",
"names",
"=",
"self",
".",
"orig_names",
"if",
"self",
".",
"usecols_dtype",
"==",
"'integer'",
":",
"# A set of integers will be converted to a list in",
"# the correct order every single time.",
"usecols",
"=",
"list",
"(",
"self",
".",
"usecols",
")",
"usecols",
".",
"sort",
"(",
")",
"elif",
"(",
"callable",
"(",
"self",
".",
"usecols",
")",
"or",
"self",
".",
"usecols_dtype",
"not",
"in",
"(",
"'empty'",
",",
"None",
")",
")",
":",
"# The names attribute should have the correct columns",
"# in the proper order for indexing with parse_dates.",
"usecols",
"=",
"self",
".",
"names",
"[",
":",
"]",
"else",
":",
"# Usecols is empty.",
"usecols",
"=",
"None",
"def",
"_set",
"(",
"x",
")",
":",
"if",
"usecols",
"is",
"not",
"None",
"and",
"is_integer",
"(",
"x",
")",
":",
"x",
"=",
"usecols",
"[",
"x",
"]",
"if",
"not",
"is_integer",
"(",
"x",
")",
":",
"x",
"=",
"names",
".",
"index",
"(",
"x",
")",
"self",
".",
"_reader",
".",
"set_noconvert",
"(",
"x",
")",
"if",
"isinstance",
"(",
"self",
".",
"parse_dates",
",",
"list",
")",
":",
"for",
"val",
"in",
"self",
".",
"parse_dates",
":",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"for",
"k",
"in",
"val",
":",
"_set",
"(",
"k",
")",
"else",
":",
"_set",
"(",
"val",
")",
"elif",
"isinstance",
"(",
"self",
".",
"parse_dates",
",",
"dict",
")",
":",
"for",
"val",
"in",
"self",
".",
"parse_dates",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"for",
"k",
"in",
"val",
":",
"_set",
"(",
"k",
")",
"else",
":",
"_set",
"(",
"val",
")",
"elif",
"self",
".",
"parse_dates",
":",
"if",
"isinstance",
"(",
"self",
".",
"index_col",
",",
"list",
")",
":",
"for",
"k",
"in",
"self",
".",
"index_col",
":",
"_set",
"(",
"k",
")",
"elif",
"self",
".",
"index_col",
"is",
"not",
"None",
":",
"_set",
"(",
"self",
".",
"index_col",
")"
] |
Set the columns that should not undergo dtype conversions.
Currently, any column that is involved with date parsing will not
undergo such conversions.
|
[
"Set",
"the",
"columns",
"that",
"should",
"not",
"undergo",
"dtype",
"conversions",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1951-L2003
|
19,914
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
PythonParser._handle_usecols
|
def _handle_usecols(self, columns, usecols_key):
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
if callable(self.usecols):
col_indices = _evaluate_usecols(self.usecols, usecols_key)
elif any(isinstance(u, str) for u in self.usecols):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
col_indices = []
for col in self.usecols:
if isinstance(col, str):
try:
col_indices.append(usecols_key.index(col))
except ValueError:
_validate_usecols_names(self.usecols, usecols_key)
else:
col_indices.append(col)
else:
col_indices = self.usecols
columns = [[n for i, n in enumerate(column) if i in col_indices]
for column in columns]
self._col_indices = col_indices
return columns
|
python
|
def _handle_usecols(self, columns, usecols_key):
"""
Sets self._col_indices
usecols_key is used if there are string usecols.
"""
if self.usecols is not None:
if callable(self.usecols):
col_indices = _evaluate_usecols(self.usecols, usecols_key)
elif any(isinstance(u, str) for u in self.usecols):
if len(columns) > 1:
raise ValueError("If using multiple headers, usecols must "
"be integers.")
col_indices = []
for col in self.usecols:
if isinstance(col, str):
try:
col_indices.append(usecols_key.index(col))
except ValueError:
_validate_usecols_names(self.usecols, usecols_key)
else:
col_indices.append(col)
else:
col_indices = self.usecols
columns = [[n for i, n in enumerate(column) if i in col_indices]
for column in columns]
self._col_indices = col_indices
return columns
|
[
"def",
"_handle_usecols",
"(",
"self",
",",
"columns",
",",
"usecols_key",
")",
":",
"if",
"self",
".",
"usecols",
"is",
"not",
"None",
":",
"if",
"callable",
"(",
"self",
".",
"usecols",
")",
":",
"col_indices",
"=",
"_evaluate_usecols",
"(",
"self",
".",
"usecols",
",",
"usecols_key",
")",
"elif",
"any",
"(",
"isinstance",
"(",
"u",
",",
"str",
")",
"for",
"u",
"in",
"self",
".",
"usecols",
")",
":",
"if",
"len",
"(",
"columns",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"If using multiple headers, usecols must \"",
"\"be integers.\"",
")",
"col_indices",
"=",
"[",
"]",
"for",
"col",
"in",
"self",
".",
"usecols",
":",
"if",
"isinstance",
"(",
"col",
",",
"str",
")",
":",
"try",
":",
"col_indices",
".",
"append",
"(",
"usecols_key",
".",
"index",
"(",
"col",
")",
")",
"except",
"ValueError",
":",
"_validate_usecols_names",
"(",
"self",
".",
"usecols",
",",
"usecols_key",
")",
"else",
":",
"col_indices",
".",
"append",
"(",
"col",
")",
"else",
":",
"col_indices",
"=",
"self",
".",
"usecols",
"columns",
"=",
"[",
"[",
"n",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"column",
")",
"if",
"i",
"in",
"col_indices",
"]",
"for",
"column",
"in",
"columns",
"]",
"self",
".",
"_col_indices",
"=",
"col_indices",
"return",
"columns"
] |
Sets self._col_indices
usecols_key is used if there are string usecols.
|
[
"Sets",
"self",
".",
"_col_indices"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L2678-L2707
|
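A minimal sketch of how the usecols handling above is exercised through pandas.read_csv with the python engine; the sample header and data are assumptions made for illustration.

from io import StringIO

import pandas as pd

csv_data = "a,b,c\n1,2,3\n4,5,6\n"

# String usecols are matched against the header; the parser records the
# resolved column indices internally.
df = pd.read_csv(StringIO(csv_data), engine="python", usecols=["a", "c"])
print(df.columns.tolist())  # ['a', 'c']

# A callable is evaluated against each header name instead.
df2 = pd.read_csv(StringIO(csv_data), engine="python",
                  usecols=lambda name: name != "b")
print(df2.columns.tolist())  # ['a', 'c']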
19,915
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
PythonParser._check_for_bom
|
def _check_for_bom(self, first_row):
"""
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
"""
# first_row will be a list, so we need to check
# that that list is not empty before proceeding.
if not first_row:
return first_row
# The first element of this row is the one that could have the
# BOM that we want to remove. Check that the first element is a
# string before proceeding.
if not isinstance(first_row[0], str):
return first_row
# Check that the string is not empty, as that would
# obviously not have a BOM at the start of it.
if not first_row[0]:
return first_row
# Since the string is non-empty, check that it does
# in fact begin with a BOM.
first_elt = first_row[0][0]
if first_elt != _BOM:
return first_row
first_row = first_row[0]
if len(first_row) > 1 and first_row[1] == self.quotechar:
start = 2
quote = first_row[1]
end = first_row[2:].index(quote) + 2
# Extract the data between the quotation marks
new_row = first_row[start:end]
# Extract any remaining data after the second
# quotation mark.
if len(first_row) > end + 1:
new_row += first_row[end + 1:]
return [new_row]
elif len(first_row) > 1:
return [first_row[1:]]
else:
# First row is just the BOM, so we
# return an empty string.
return [""]
|
python
|
def _check_for_bom(self, first_row):
"""
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
"""
# first_row will be a list, so we need to check
# that that list is not empty before proceeding.
if not first_row:
return first_row
# The first element of this row is the one that could have the
# BOM that we want to remove. Check that the first element is a
# string before proceeding.
if not isinstance(first_row[0], str):
return first_row
# Check that the string is not empty, as that would
# obviously not have a BOM at the start of it.
if not first_row[0]:
return first_row
# Since the string is non-empty, check that it does
# in fact begin with a BOM.
first_elt = first_row[0][0]
if first_elt != _BOM:
return first_row
first_row = first_row[0]
if len(first_row) > 1 and first_row[1] == self.quotechar:
start = 2
quote = first_row[1]
end = first_row[2:].index(quote) + 2
# Extract the data between the quotation marks
new_row = first_row[start:end]
# Extract any remaining data after the second
# quotation mark.
if len(first_row) > end + 1:
new_row += first_row[end + 1:]
return [new_row]
elif len(first_row) > 1:
return [first_row[1:]]
else:
# First row is just the BOM, so we
# return an empty string.
return [""]
|
[
"def",
"_check_for_bom",
"(",
"self",
",",
"first_row",
")",
":",
"# first_row will be a list, so we need to check",
"# that that list is not empty before proceeding.",
"if",
"not",
"first_row",
":",
"return",
"first_row",
"# The first element of this row is the one that could have the",
"# BOM that we want to remove. Check that the first element is a",
"# string before proceeding.",
"if",
"not",
"isinstance",
"(",
"first_row",
"[",
"0",
"]",
",",
"str",
")",
":",
"return",
"first_row",
"# Check that the string is not empty, as that would",
"# obviously not have a BOM at the start of it.",
"if",
"not",
"first_row",
"[",
"0",
"]",
":",
"return",
"first_row",
"# Since the string is non-empty, check that it does",
"# in fact begin with a BOM.",
"first_elt",
"=",
"first_row",
"[",
"0",
"]",
"[",
"0",
"]",
"if",
"first_elt",
"!=",
"_BOM",
":",
"return",
"first_row",
"first_row",
"=",
"first_row",
"[",
"0",
"]",
"if",
"len",
"(",
"first_row",
")",
">",
"1",
"and",
"first_row",
"[",
"1",
"]",
"==",
"self",
".",
"quotechar",
":",
"start",
"=",
"2",
"quote",
"=",
"first_row",
"[",
"1",
"]",
"end",
"=",
"first_row",
"[",
"2",
":",
"]",
".",
"index",
"(",
"quote",
")",
"+",
"2",
"# Extract the data between the quotation marks",
"new_row",
"=",
"first_row",
"[",
"start",
":",
"end",
"]",
"# Extract any remaining data after the second",
"# quotation mark.",
"if",
"len",
"(",
"first_row",
")",
">",
"end",
"+",
"1",
":",
"new_row",
"+=",
"first_row",
"[",
"end",
"+",
"1",
":",
"]",
"return",
"[",
"new_row",
"]",
"elif",
"len",
"(",
"first_row",
")",
">",
"1",
":",
"return",
"[",
"first_row",
"[",
"1",
":",
"]",
"]",
"else",
":",
"# First row is just the BOM, so we",
"# return an empty string.",
"return",
"[",
"\"\"",
"]"
] |
Checks whether the file begins with the BOM character.
If it does, remove it. In addition, if there is quoting
in the field subsequent to the BOM, remove it as well
because it technically takes place at the beginning of
the name, not the middle of it.
|
[
"Checks",
"whether",
"the",
"file",
"begins",
"with",
"the",
"BOM",
"character",
".",
"If",
"it",
"does",
"remove",
"it",
".",
"In",
"addition",
"if",
"there",
"is",
"quoting",
"in",
"the",
"field",
"subsequent",
"to",
"the",
"BOM",
"remove",
"it",
"as",
"well",
"because",
"it",
"technically",
"takes",
"place",
"at",
"the",
"beginning",
"of",
"the",
"name",
"not",
"the",
"middle",
"of",
"it",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L2718-L2768
|
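A hedged sketch of the BOM handling above: a UTF-8 byte order mark at the start of the input is stripped from the first header name by the python engine. The input text is an assumption for illustration.

from io import StringIO

import pandas as pd

data = "\ufeffa,b\n1,2\n"
df = pd.read_csv(StringIO(data), engine="python")
# Without the BOM check the first column would be named '\ufeffa'.
print(df.columns.tolist())  # ['a', 'b']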
19,916
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
PythonParser._alert_malformed
|
def _alert_malformed(self, msg, row_num):
"""
Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.error_bad_lines:
raise ParserError(msg)
elif self.warn_bad_lines:
base = 'Skipping line {row_num}: '.format(row_num=row_num)
sys.stderr.write(base + msg + '\n')
|
python
|
def _alert_malformed(self, msg, row_num):
"""
Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
"""
if self.error_bad_lines:
raise ParserError(msg)
elif self.warn_bad_lines:
base = 'Skipping line {row_num}: '.format(row_num=row_num)
sys.stderr.write(base + msg + '\n')
|
[
"def",
"_alert_malformed",
"(",
"self",
",",
"msg",
",",
"row_num",
")",
":",
"if",
"self",
".",
"error_bad_lines",
":",
"raise",
"ParserError",
"(",
"msg",
")",
"elif",
"self",
".",
"warn_bad_lines",
":",
"base",
"=",
"'Skipping line {row_num}: '",
".",
"format",
"(",
"row_num",
"=",
"row_num",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"base",
"+",
"msg",
"+",
"'\\n'",
")"
] |
Alert a user about a malformed row.
If `self.error_bad_lines` is True, the alert will be `ParserError`.
If `self.warn_bad_lines` is True, the alert will be printed out.
Parameters
----------
msg : The error message to display.
row_num : The row number where the parsing error occurred.
Because this row number is displayed, we 1-index,
even though we 0-index internally.
|
[
"Alert",
"a",
"user",
"about",
"a",
"malformed",
"row",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L2837-L2856
|
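A short, hedged sketch of the two alert modes used by _alert_malformed; the malformed CSV below (an extra field on the last row) is an illustrative assumption, and error_bad_lines/warn_bad_lines are the option names at this pandas revision.

from io import StringIO

import pandas as pd

bad = "a,b\n1,2\n3,4,5\n"

# error_bad_lines=True raises ParserError for the offending row.
try:
    pd.read_csv(StringIO(bad), engine="python", error_bad_lines=True)
except pd.errors.ParserError as err:
    print("raised:", err)

# warn_bad_lines=True instead writes "Skipping line 3: ..." to stderr
# and drops the row.
df = pd.read_csv(StringIO(bad), engine="python",
                 error_bad_lines=False, warn_bad_lines=True)
print(len(df))  # 1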
19,917
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
PythonParser._remove_empty_lines
|
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], str) or l[0].strip())):
ret.append(l)
return ret
|
python
|
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], str) or l[0].strip())):
ret.append(l)
return ret
|
[
"def",
"_remove_empty_lines",
"(",
"self",
",",
"lines",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"l",
"in",
"lines",
":",
"# Remove empty lines and lines with only one whitespace value",
"if",
"(",
"len",
"(",
"l",
")",
">",
"1",
"or",
"len",
"(",
"l",
")",
"==",
"1",
"and",
"(",
"not",
"isinstance",
"(",
"l",
"[",
"0",
"]",
",",
"str",
")",
"or",
"l",
"[",
"0",
"]",
".",
"strip",
"(",
")",
")",
")",
":",
"ret",
".",
"append",
"(",
"l",
")",
"return",
"ret"
] |
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
|
[
"Iterate",
"through",
"the",
"lines",
"and",
"remove",
"any",
"that",
"are",
"either",
"empty",
"or",
"contain",
"only",
"one",
"whitespace",
"value"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L2912-L2934
|
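A minimal sketch of the empty-line filtering above: blank lines and lines holding a single whitespace-only value are dropped by the python engine. The sample input is an assumption.

from io import StringIO

import pandas as pd

data = "a,b\n1,2\n\n   \n3,4\n"
df = pd.read_csv(StringIO(data), engine="python")
print(len(df))  # 2 -- the empty and whitespace-only lines are removed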
19,918
|
pandas-dev/pandas
|
pandas/io/parsers.py
|
FixedWidthReader.get_rows
|
def get_rows(self, infer_nrows, skiprows=None):
"""
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= infer_nrows:
break
self.buffer = iter(buffer_rows)
return detect_rows
|
python
|
def get_rows(self, infer_nrows, skiprows=None):
"""
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
"""
if skiprows is None:
skiprows = set()
buffer_rows = []
detect_rows = []
for i, row in enumerate(self.f):
if i not in skiprows:
detect_rows.append(row)
buffer_rows.append(row)
if len(detect_rows) >= infer_nrows:
break
self.buffer = iter(buffer_rows)
return detect_rows
|
[
"def",
"get_rows",
"(",
"self",
",",
"infer_nrows",
",",
"skiprows",
"=",
"None",
")",
":",
"if",
"skiprows",
"is",
"None",
":",
"skiprows",
"=",
"set",
"(",
")",
"buffer_rows",
"=",
"[",
"]",
"detect_rows",
"=",
"[",
"]",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"f",
")",
":",
"if",
"i",
"not",
"in",
"skiprows",
":",
"detect_rows",
".",
"append",
"(",
"row",
")",
"buffer_rows",
".",
"append",
"(",
"row",
")",
"if",
"len",
"(",
"detect_rows",
")",
">=",
"infer_nrows",
":",
"break",
"self",
".",
"buffer",
"=",
"iter",
"(",
"buffer_rows",
")",
"return",
"detect_rows"
] |
Read rows from self.f, skipping as specified.
We distinguish buffer_rows (the first <= infer_nrows
lines) from the rows returned to detect_colspecs
because it's simpler to leave the other locations
with skiprows logic alone than to modify them to
deal with the fact we skipped some rows here as
well.
Parameters
----------
infer_nrows : int
Number of rows to read from self.f, not counting
rows that are skipped.
skiprows: set, optional
Indices of rows to skip.
Returns
-------
detect_rows : list of str
A list containing the rows to read.
|
[
"Read",
"rows",
"from",
"self",
".",
"f",
"skipping",
"as",
"specified",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L3535-L3571
|
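A hedged usage sketch of the buffering described in get_rows, seen from the public read_fwf entry point: the first infer_nrows non-skipped rows are used to detect column widths and are then re-read from the buffer. The fixed-width sample is an assumption.

from io import StringIO

import pandas as pd

fixed = ("id  name \n"
         "1   ann  \n"
         "2   bob  \n")
df = pd.read_fwf(StringIO(fixed), infer_nrows=100)
print(df)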
19,919
|
pandas-dev/pandas
|
pandas/io/msgpack/__init__.py
|
pack
|
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
|
python
|
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
|
[
"def",
"pack",
"(",
"o",
",",
"stream",
",",
"*",
"*",
"kwargs",
")",
":",
"packer",
"=",
"Packer",
"(",
"*",
"*",
"kwargs",
")",
"stream",
".",
"write",
"(",
"packer",
".",
"pack",
"(",
"o",
")",
")"
] |
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
|
[
"Pack",
"object",
"o",
"and",
"write",
"it",
"to",
"stream"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/msgpack/__init__.py#L26-L33
|
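A hedged sketch of the Packer pattern used by pack() above. pandas.io.msgpack is an internal vendored copy, so the standalone msgpack-python package is used here purely for illustration (an assumption, not the pandas-internal module).

import io

import msgpack  # assumption: standalone msgpack-python package

buf = io.BytesIO()
packer = msgpack.Packer()
buf.write(packer.pack({"a": 1, "b": [1, 2, 3]}))

print(msgpack.unpackb(buf.getvalue(), raw=False))  # {'a': 1, 'b': [1, 2, 3]}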
19,920
|
pandas-dev/pandas
|
pandas/core/internals/concat.py
|
get_mgr_concatenation_plan
|
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape , save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos,
mgr.nblocks,
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
((ax0_indexer is None and
blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
|
python
|
def get_mgr_concatenation_plan(mgr, indexers):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape , save for item axis which will be separate
# for each block anyway.
mgr_shape = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape[ax] = len(indexer)
mgr_shape = tuple(mgr_shape)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
else:
if mgr._is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
ax0_indexer = None
blknos = mgr._blknos
blklocs = mgr._blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos,
mgr.nblocks,
group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape = list(mgr_shape)
shape[0] = len(placements)
shape = tuple(shape)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
((ax0_indexer is None and
blk.mgr_locs.is_slice_like and
blk.mgr_locs.as_slice.step == 1) or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()))
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
|
[
"def",
"get_mgr_concatenation_plan",
"(",
"mgr",
",",
"indexers",
")",
":",
"# Calculate post-reindex shape , save for item axis which will be separate",
"# for each block anyway.",
"mgr_shape",
"=",
"list",
"(",
"mgr",
".",
"shape",
")",
"for",
"ax",
",",
"indexer",
"in",
"indexers",
".",
"items",
"(",
")",
":",
"mgr_shape",
"[",
"ax",
"]",
"=",
"len",
"(",
"indexer",
")",
"mgr_shape",
"=",
"tuple",
"(",
"mgr_shape",
")",
"if",
"0",
"in",
"indexers",
":",
"ax0_indexer",
"=",
"indexers",
".",
"pop",
"(",
"0",
")",
"blknos",
"=",
"algos",
".",
"take_1d",
"(",
"mgr",
".",
"_blknos",
",",
"ax0_indexer",
",",
"fill_value",
"=",
"-",
"1",
")",
"blklocs",
"=",
"algos",
".",
"take_1d",
"(",
"mgr",
".",
"_blklocs",
",",
"ax0_indexer",
",",
"fill_value",
"=",
"-",
"1",
")",
"else",
":",
"if",
"mgr",
".",
"_is_single_block",
":",
"blk",
"=",
"mgr",
".",
"blocks",
"[",
"0",
"]",
"return",
"[",
"(",
"blk",
".",
"mgr_locs",
",",
"JoinUnit",
"(",
"blk",
",",
"mgr_shape",
",",
"indexers",
")",
")",
"]",
"ax0_indexer",
"=",
"None",
"blknos",
"=",
"mgr",
".",
"_blknos",
"blklocs",
"=",
"mgr",
".",
"_blklocs",
"plan",
"=",
"[",
"]",
"for",
"blkno",
",",
"placements",
"in",
"libinternals",
".",
"get_blkno_placements",
"(",
"blknos",
",",
"mgr",
".",
"nblocks",
",",
"group",
"=",
"False",
")",
":",
"assert",
"placements",
".",
"is_slice_like",
"join_unit_indexers",
"=",
"indexers",
".",
"copy",
"(",
")",
"shape",
"=",
"list",
"(",
"mgr_shape",
")",
"shape",
"[",
"0",
"]",
"=",
"len",
"(",
"placements",
")",
"shape",
"=",
"tuple",
"(",
"shape",
")",
"if",
"blkno",
"==",
"-",
"1",
":",
"unit",
"=",
"JoinUnit",
"(",
"None",
",",
"shape",
")",
"else",
":",
"blk",
"=",
"mgr",
".",
"blocks",
"[",
"blkno",
"]",
"ax0_blk_indexer",
"=",
"blklocs",
"[",
"placements",
".",
"indexer",
"]",
"unit_no_ax0_reindexing",
"=",
"(",
"len",
"(",
"placements",
")",
"==",
"len",
"(",
"blk",
".",
"mgr_locs",
")",
"and",
"# Fastpath detection of join unit not",
"# needing to reindex its block: no ax0",
"# reindexing took place and block",
"# placement was sequential before.",
"(",
"(",
"ax0_indexer",
"is",
"None",
"and",
"blk",
".",
"mgr_locs",
".",
"is_slice_like",
"and",
"blk",
".",
"mgr_locs",
".",
"as_slice",
".",
"step",
"==",
"1",
")",
"or",
"# Slow-ish detection: all indexer locs",
"# are sequential (and length match is",
"# checked above).",
"(",
"np",
".",
"diff",
"(",
"ax0_blk_indexer",
")",
"==",
"1",
")",
".",
"all",
"(",
")",
")",
")",
"# Omit indexer if no item reindexing is required.",
"if",
"unit_no_ax0_reindexing",
":",
"join_unit_indexers",
".",
"pop",
"(",
"0",
",",
"None",
")",
"else",
":",
"join_unit_indexers",
"[",
"0",
"]",
"=",
"ax0_blk_indexer",
"unit",
"=",
"JoinUnit",
"(",
"blk",
",",
"shape",
",",
"join_unit_indexers",
")",
"plan",
".",
"append",
"(",
"(",
"placements",
",",
"unit",
")",
")",
"return",
"plan"
] |
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
|
[
"Construct",
"concatenation",
"plan",
"for",
"given",
"block",
"manager",
"and",
"indexers",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/concat.py#L21-L98
|
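The plan construction above is internal block-manager machinery behind pd.concat; below is a hedged, user-level sketch of the behaviour it ultimately serves (the frames are illustrative assumptions).

import pandas as pd

left = pd.DataFrame({"i": [1, 2], "f": [1.0, 2.0]})
right = pd.DataFrame({"i": [3, 4], "s": ["a", "b"]})

# Blocks of differing dtypes are reindexed and upcast where a frame
# lacks a column, which is what the concatenation plan describes.
out = pd.concat([left, right], axis=0, sort=False)
print(out.dtypes)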
19,921
|
pandas-dev/pandas
|
pandas/core/internals/concat.py
|
concatenate_join_units
|
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
|
python
|
def concatenate_join_units(join_units, concat_axis, copy):
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
upcasted_na=upcasted_na)
for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
else:
concat_values = _concat._concat_compat(to_concat, axis=concat_axis)
return concat_values
|
[
"def",
"concatenate_join_units",
"(",
"join_units",
",",
"concat_axis",
",",
"copy",
")",
":",
"if",
"concat_axis",
"==",
"0",
"and",
"len",
"(",
"join_units",
")",
">",
"1",
":",
"# Concatenating join units along ax0 is handled in _merge_blocks.",
"raise",
"AssertionError",
"(",
"\"Concatenating join units along axis0\"",
")",
"empty_dtype",
",",
"upcasted_na",
"=",
"get_empty_dtype_and_na",
"(",
"join_units",
")",
"to_concat",
"=",
"[",
"ju",
".",
"get_reindexed_values",
"(",
"empty_dtype",
"=",
"empty_dtype",
",",
"upcasted_na",
"=",
"upcasted_na",
")",
"for",
"ju",
"in",
"join_units",
"]",
"if",
"len",
"(",
"to_concat",
")",
"==",
"1",
":",
"# Only one block, nothing to concatenate.",
"concat_values",
"=",
"to_concat",
"[",
"0",
"]",
"if",
"copy",
":",
"if",
"isinstance",
"(",
"concat_values",
",",
"np",
".",
"ndarray",
")",
":",
"# non-reindexed (=not yet copied) arrays are made into a view",
"# in JoinUnit.get_reindexed_values",
"if",
"concat_values",
".",
"base",
"is",
"not",
"None",
":",
"concat_values",
"=",
"concat_values",
".",
"copy",
"(",
")",
"else",
":",
"concat_values",
"=",
"concat_values",
".",
"copy",
"(",
")",
"else",
":",
"concat_values",
"=",
"_concat",
".",
"_concat_compat",
"(",
"to_concat",
",",
"axis",
"=",
"concat_axis",
")",
"return",
"concat_values"
] |
Concatenate values from several join units along selected axis.
|
[
"Concatenate",
"values",
"from",
"several",
"join",
"units",
"along",
"selected",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/concat.py#L229-L257
|
19,922
|
pandas-dev/pandas
|
pandas/core/internals/concat.py
|
trim_join_unit
|
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
|
python
|
def trim_join_unit(join_unit, length):
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers,
shape=extra_shape)
|
[
"def",
"trim_join_unit",
"(",
"join_unit",
",",
"length",
")",
":",
"if",
"0",
"not",
"in",
"join_unit",
".",
"indexers",
":",
"extra_indexers",
"=",
"join_unit",
".",
"indexers",
"if",
"join_unit",
".",
"block",
"is",
"None",
":",
"extra_block",
"=",
"None",
"else",
":",
"extra_block",
"=",
"join_unit",
".",
"block",
".",
"getitem_block",
"(",
"slice",
"(",
"length",
",",
"None",
")",
")",
"join_unit",
".",
"block",
"=",
"join_unit",
".",
"block",
".",
"getitem_block",
"(",
"slice",
"(",
"length",
")",
")",
"else",
":",
"extra_block",
"=",
"join_unit",
".",
"block",
"extra_indexers",
"=",
"copy",
".",
"copy",
"(",
"join_unit",
".",
"indexers",
")",
"extra_indexers",
"[",
"0",
"]",
"=",
"extra_indexers",
"[",
"0",
"]",
"[",
"length",
":",
"]",
"join_unit",
".",
"indexers",
"[",
"0",
"]",
"=",
"join_unit",
".",
"indexers",
"[",
"0",
"]",
"[",
":",
"length",
"]",
"extra_shape",
"=",
"(",
"join_unit",
".",
"shape",
"[",
"0",
"]",
"-",
"length",
",",
")",
"+",
"join_unit",
".",
"shape",
"[",
"1",
":",
"]",
"join_unit",
".",
"shape",
"=",
"(",
"length",
",",
")",
"+",
"join_unit",
".",
"shape",
"[",
"1",
":",
"]",
"return",
"JoinUnit",
"(",
"block",
"=",
"extra_block",
",",
"indexers",
"=",
"extra_indexers",
",",
"shape",
"=",
"extra_shape",
")"
] |
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
|
[
"Reduce",
"join_unit",
"s",
"shape",
"along",
"item",
"axis",
"to",
"length",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/concat.py#L395-L421
|
19,923
|
pandas-dev/pandas
|
pandas/core/internals/concat.py
|
combine_concat_plans
|
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
|
python
|
def combine_concat_plans(plans, concat_axis):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:],
trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
|
[
"def",
"combine_concat_plans",
"(",
"plans",
",",
"concat_axis",
")",
":",
"if",
"len",
"(",
"plans",
")",
"==",
"1",
":",
"for",
"p",
"in",
"plans",
"[",
"0",
"]",
":",
"yield",
"p",
"[",
"0",
"]",
",",
"[",
"p",
"[",
"1",
"]",
"]",
"elif",
"concat_axis",
"==",
"0",
":",
"offset",
"=",
"0",
"for",
"plan",
"in",
"plans",
":",
"last_plc",
"=",
"None",
"for",
"plc",
",",
"unit",
"in",
"plan",
":",
"yield",
"plc",
".",
"add",
"(",
"offset",
")",
",",
"[",
"unit",
"]",
"last_plc",
"=",
"plc",
"if",
"last_plc",
"is",
"not",
"None",
":",
"offset",
"+=",
"last_plc",
".",
"as_slice",
".",
"stop",
"else",
":",
"num_ended",
"=",
"[",
"0",
"]",
"def",
"_next_or_none",
"(",
"seq",
")",
":",
"retval",
"=",
"next",
"(",
"seq",
",",
"None",
")",
"if",
"retval",
"is",
"None",
":",
"num_ended",
"[",
"0",
"]",
"+=",
"1",
"return",
"retval",
"plans",
"=",
"list",
"(",
"map",
"(",
"iter",
",",
"plans",
")",
")",
"next_items",
"=",
"list",
"(",
"map",
"(",
"_next_or_none",
",",
"plans",
")",
")",
"while",
"num_ended",
"[",
"0",
"]",
"!=",
"len",
"(",
"next_items",
")",
":",
"if",
"num_ended",
"[",
"0",
"]",
">",
"0",
":",
"raise",
"ValueError",
"(",
"\"Plan shapes are not aligned\"",
")",
"placements",
",",
"units",
"=",
"zip",
"(",
"*",
"next_items",
")",
"lengths",
"=",
"list",
"(",
"map",
"(",
"len",
",",
"placements",
")",
")",
"min_len",
",",
"max_len",
"=",
"min",
"(",
"lengths",
")",
",",
"max",
"(",
"lengths",
")",
"if",
"min_len",
"==",
"max_len",
":",
"yield",
"placements",
"[",
"0",
"]",
",",
"units",
"next_items",
"[",
":",
"]",
"=",
"map",
"(",
"_next_or_none",
",",
"plans",
")",
"else",
":",
"yielded_placement",
"=",
"None",
"yielded_units",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"next_items",
")",
"for",
"i",
",",
"(",
"plc",
",",
"unit",
")",
"in",
"enumerate",
"(",
"next_items",
")",
":",
"yielded_units",
"[",
"i",
"]",
"=",
"unit",
"if",
"len",
"(",
"plc",
")",
">",
"min_len",
":",
"# trim_join_unit updates unit in place, so only",
"# placement needs to be sliced to skip min_len.",
"next_items",
"[",
"i",
"]",
"=",
"(",
"plc",
"[",
"min_len",
":",
"]",
",",
"trim_join_unit",
"(",
"unit",
",",
"min_len",
")",
")",
"else",
":",
"yielded_placement",
"=",
"plc",
"next_items",
"[",
"i",
"]",
"=",
"_next_or_none",
"(",
"plans",
"[",
"i",
"]",
")",
"yield",
"yielded_placement",
",",
"yielded_units"
] |
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
|
[
"Combine",
"multiple",
"concatenation",
"plans",
"into",
"one",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/concat.py#L424-L484
|
19,924
|
pandas-dev/pandas
|
pandas/plotting/_style.py
|
_Options.use
|
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
|
python
|
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
|
[
"def",
"use",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"old_value",
"=",
"self",
"[",
"key",
"]",
"try",
":",
"self",
"[",
"key",
"]",
"=",
"value",
"yield",
"self",
"finally",
":",
"self",
"[",
"key",
"]",
"=",
"old_value"
] |
Temporarily set a parameter value using the with statement.
Aliasing allowed.
|
[
"Temporarily",
"set",
"a",
"parameter",
"value",
"using",
"the",
"with",
"statement",
".",
"Aliasing",
"allowed",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_style.py#L151-L161
|
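A hedged usage sketch of the context-manager behaviour of use(): a plotting parameter is overridden inside the with block and restored afterwards. The 'x_compat' key is an alias of 'xaxis.compat' in pandas.plotting.plot_params.

import pandas as pd

print(pd.plotting.plot_params["xaxis.compat"])        # False by default
with pd.plotting.plot_params.use("x_compat", True):
    print(pd.plotting.plot_params["xaxis.compat"])    # True inside the block
print(pd.plotting.plot_params["xaxis.compat"])        # restored to False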
19,925
|
pandas-dev/pandas
|
pandas/io/stata.py
|
_dtype_to_stata_type
|
def _dtype_to_stata_type(dtype, column):
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - for int8 byte
252 - for int16 int
253 - for int32 long
254 - for float32 float
255 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column.values))
return max(itemsize, 1)
elif dtype == np.float64:
return 255
elif dtype == np.float32:
return 254
elif dtype == np.int32:
return 253
elif dtype == np.int16:
return 252
elif dtype == np.int8:
return 251
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype))
|
python
|
def _dtype_to_stata_type(dtype, column):
"""
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - for int8 byte
252 - for int16 int
253 - for int32 long
254 - for float32 float
255 - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(ensure_object(column.values))
return max(itemsize, 1)
elif dtype == np.float64:
return 255
elif dtype == np.float32:
return 254
elif dtype == np.int32:
return 253
elif dtype == np.int16:
return 252
elif dtype == np.int8:
return 251
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype))
|
[
"def",
"_dtype_to_stata_type",
"(",
"dtype",
",",
"column",
")",
":",
"# TODO: expand to handle datetime to integer conversion",
"if",
"dtype",
".",
"type",
"==",
"np",
".",
"object_",
":",
"# try to coerce it to the biggest string",
"# not memory efficient, what else could we",
"# do?",
"itemsize",
"=",
"max_len_string_array",
"(",
"ensure_object",
"(",
"column",
".",
"values",
")",
")",
"return",
"max",
"(",
"itemsize",
",",
"1",
")",
"elif",
"dtype",
"==",
"np",
".",
"float64",
":",
"return",
"255",
"elif",
"dtype",
"==",
"np",
".",
"float32",
":",
"return",
"254",
"elif",
"dtype",
"==",
"np",
".",
"int32",
":",
"return",
"253",
"elif",
"dtype",
"==",
"np",
".",
"int16",
":",
"return",
"252",
"elif",
"dtype",
"==",
"np",
".",
"int8",
":",
"return",
"251",
"else",
":",
"# pragma : no cover",
"raise",
"NotImplementedError",
"(",
"\"Data type {dtype} not supported.\"",
".",
"format",
"(",
"dtype",
"=",
"dtype",
")",
")"
] |
Convert dtype types to stata types. Returns the byte of the given ordinal.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas               Stata
251 - for int8       byte
252 - for int16      int
253 - for int32      long
254 - for float32    float
255 - for double     double
If there are dates to convert, then dtype will already have the correct
type inserted.
|
[
"Convert",
"dtype",
"types",
"to",
"stata",
"types",
".",
"Returns",
"the",
"byte",
"of",
"the",
"given",
"ordinal",
".",
"See",
"TYPE_MAP",
"and",
"comments",
"for",
"an",
"explanation",
".",
"This",
"is",
"also",
"explained",
"in",
"the",
"dta",
"spec",
".",
"1",
"-",
"244",
"are",
"strings",
"of",
"this",
"length",
"Pandas",
"Stata",
"251",
"-",
"for",
"int8",
"byte",
"252",
"-",
"for",
"int16",
"int",
"253",
"-",
"for",
"int32",
"long",
"254",
"-",
"for",
"float32",
"float",
"255",
"-",
"for",
"double",
"double"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L1832-L1866
|
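A small, hedged check of the dtype-to-Stata-type mapping above. Importing the private helper directly is done only for illustration; the column contents are assumptions.

import numpy as np
import pandas as pd
from pandas.io.stata import _dtype_to_stata_type  # private helper shown above

df = pd.DataFrame({
    "b": np.array([1], dtype=np.int8),     # expect 251 (byte)
    "i": np.array([1], dtype=np.int16),    # expect 252 (int)
    "l": np.array([1], dtype=np.int32),    # expect 253 (long)
    "f": np.array([1], dtype=np.float32),  # expect 254 (float)
    "d": np.array([1], dtype=np.float64),  # expect 255 (double)
    "s": ["abc"],                          # expect 3, the longest string length
})

for col in df:
    print(col, _dtype_to_stata_type(df[col].dtype, df[col]))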
19,926
|
pandas-dev/pandas
|
pandas/io/stata.py
|
_dtype_to_default_stata_fmt
|
def _dtype_to_default_stata_fmt(dtype, column, dta_version=114,
force_strl=False):
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return '%9s'
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Column `{col}` cannot be exported.\n\nOnly '
'string-like object arrays containing all '
'strings or a mix of strings and None can be '
'exported. Object arrays containing only null '
'values are prohibited. Other object types '
'cannot be exported and must first be converted '
'to one of the supported '
'types.'.format(col=column.name))
itemsize = max_len_string_array(ensure_object(column.values))
if itemsize > max_str_len:
if dta_version >= 117:
return '%9s'
else:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype))
|
python
|
def _dtype_to_default_stata_fmt(dtype, column, dta_version=114,
force_strl=False):
"""
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dta_version < 117:
max_str_len = 244
else:
max_str_len = 2045
if force_strl:
return '%9s'
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column, skipna=True)
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Column `{col}` cannot be exported.\n\nOnly '
'string-like object arrays containing all '
'strings or a mix of strings and None can be '
'exported. Object arrays containing only null '
'values are prohibited. Other object types '
'cannot be exported and must first be converted '
'to one of the supported '
'types.'.format(col=column.name))
itemsize = max_len_string_array(ensure_object(column.values))
if itemsize > max_str_len:
if dta_version >= 117:
return '%9s'
else:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError(
"Data type {dtype} not supported.".format(dtype=dtype))
|
[
"def",
"_dtype_to_default_stata_fmt",
"(",
"dtype",
",",
"column",
",",
"dta_version",
"=",
"114",
",",
"force_strl",
"=",
"False",
")",
":",
"# TODO: Refactor to combine type with format",
"# TODO: expand this to handle a default datetime format?",
"if",
"dta_version",
"<",
"117",
":",
"max_str_len",
"=",
"244",
"else",
":",
"max_str_len",
"=",
"2045",
"if",
"force_strl",
":",
"return",
"'%9s'",
"if",
"dtype",
".",
"type",
"==",
"np",
".",
"object_",
":",
"inferred_dtype",
"=",
"infer_dtype",
"(",
"column",
",",
"skipna",
"=",
"True",
")",
"if",
"not",
"(",
"inferred_dtype",
"in",
"(",
"'string'",
",",
"'unicode'",
")",
"or",
"len",
"(",
"column",
")",
"==",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'Column `{col}` cannot be exported.\\n\\nOnly '",
"'string-like object arrays containing all '",
"'strings or a mix of strings and None can be '",
"'exported. Object arrays containing only null '",
"'values are prohibited. Other object types'",
"'cannot be exported and must first be converted '",
"'to one of the supported '",
"'types.'",
".",
"format",
"(",
"col",
"=",
"column",
".",
"name",
")",
")",
"itemsize",
"=",
"max_len_string_array",
"(",
"ensure_object",
"(",
"column",
".",
"values",
")",
")",
"if",
"itemsize",
">",
"max_str_len",
":",
"if",
"dta_version",
">=",
"117",
":",
"return",
"'%9s'",
"else",
":",
"raise",
"ValueError",
"(",
"excessive_string_length_error",
"%",
"column",
".",
"name",
")",
"return",
"\"%\"",
"+",
"str",
"(",
"max",
"(",
"itemsize",
",",
"1",
")",
")",
"+",
"\"s\"",
"elif",
"dtype",
"==",
"np",
".",
"float64",
":",
"return",
"\"%10.0g\"",
"elif",
"dtype",
"==",
"np",
".",
"float32",
":",
"return",
"\"%9.0g\"",
"elif",
"dtype",
"==",
"np",
".",
"int32",
":",
"return",
"\"%12.0g\"",
"elif",
"dtype",
"==",
"np",
".",
"int8",
"or",
"dtype",
"==",
"np",
".",
"int16",
":",
"return",
"\"%8.0g\"",
"else",
":",
"# pragma : no cover",
"raise",
"NotImplementedError",
"(",
"\"Data type {dtype} not supported.\"",
".",
"format",
"(",
"dtype",
"=",
"dtype",
")",
")"
] |
Map numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
strl -> "%9s"
|
[
"Map",
"numpy",
"dtype",
"to",
"stata",
"s",
"default",
"format",
"for",
"this",
"type",
".",
"Not",
"terribly",
"important",
"since",
"users",
"can",
"change",
"this",
"in",
"Stata",
".",
"Semantics",
"are"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L1869-L1922
|
19,927
|
pandas-dev/pandas
|
pandas/io/stata.py
|
_pad_bytes_new
|
def _pad_bytes_new(name, length):
"""
Takes a bytes instance and pads it with null bytes until it's length chars.
"""
if isinstance(name, str):
name = bytes(name, 'utf-8')
return name + b'\x00' * (length - len(name))
|
python
|
def _pad_bytes_new(name, length):
"""
Takes a bytes instance and pads it with null bytes until it's length chars.
"""
if isinstance(name, str):
name = bytes(name, 'utf-8')
return name + b'\x00' * (length - len(name))
|
[
"def",
"_pad_bytes_new",
"(",
"name",
",",
"length",
")",
":",
"if",
"isinstance",
"(",
"name",
",",
"str",
")",
":",
"name",
"=",
"bytes",
"(",
"name",
",",
"'utf-8'",
")",
"return",
"name",
"+",
"b'\\x00'",
"*",
"(",
"length",
"-",
"len",
"(",
"name",
")",
")"
] |
Takes a bytes instance and pads it with null bytes until it's length chars.
|
[
"Takes",
"a",
"bytes",
"instance",
"and",
"pads",
"it",
"with",
"null",
"bytes",
"until",
"it",
"s",
"length",
"chars",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2477-L2483
|
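A minimal sketch of the padding behaviour above: a name is encoded and padded with NUL bytes to a fixed-width slot. The values are illustrative.

name, length = "age", 9
padded = bytes(name, "utf-8") + b"\x00" * (length - len(name))
print(padded)       # b'age\x00\x00\x00\x00\x00\x00'
print(len(padded))  # 9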
19,928
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataReader._setup_dtype
|
def _setup_dtype(self):
    """Map between numpy and Stata dtypes"""
if self._dtype is not None:
return self._dtype
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
return self._dtype
|
python
|
def _setup_dtype(self):
    """Map between numpy and Stata dtypes"""
if self._dtype is not None:
return self._dtype
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
return self._dtype
|
[
"def",
"_setup_dtype",
"(",
"self",
")",
":",
"if",
"self",
".",
"_dtype",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_dtype",
"dtype",
"=",
"[",
"]",
"# Convert struct data types to numpy data type",
"for",
"i",
",",
"typ",
"in",
"enumerate",
"(",
"self",
".",
"typlist",
")",
":",
"if",
"typ",
"in",
"self",
".",
"NUMPY_TYPE_MAP",
":",
"dtype",
".",
"append",
"(",
"(",
"'s'",
"+",
"str",
"(",
"i",
")",
",",
"self",
".",
"byteorder",
"+",
"self",
".",
"NUMPY_TYPE_MAP",
"[",
"typ",
"]",
")",
")",
"else",
":",
"dtype",
".",
"append",
"(",
"(",
"'s'",
"+",
"str",
"(",
"i",
")",
",",
"'S'",
"+",
"str",
"(",
"typ",
")",
")",
")",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"self",
".",
"_dtype",
"=",
"dtype",
"return",
"self",
".",
"_dtype"
] |
Map between numpy and Stata dtypes
|
[
"Map",
"between",
"numpy",
"and",
"state",
"dtypes"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L1307-L1322
|
19,929
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataWriter._write
|
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
|
python
|
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
|
[
"def",
"_write",
"(",
"self",
",",
"to_write",
")",
":",
"self",
".",
"_file",
".",
"write",
"(",
"to_write",
".",
"encode",
"(",
"self",
".",
"_encoding",
"or",
"self",
".",
"_default_encoding",
")",
")"
] |
Helper to call encode before writing to file for Python 3 compat.
|
[
"Helper",
"to",
"call",
"encode",
"before",
"writing",
"to",
"file",
"for",
"Python",
"3",
"compat",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2018-L2023
|
19,930
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataWriter._prepare_categoricals
|
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_dict(OrderedDict(data_formatted))
|
python
|
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_dict(OrderedDict(data_formatted))
|
[
"def",
"_prepare_categoricals",
"(",
"self",
",",
"data",
")",
":",
"is_cat",
"=",
"[",
"is_categorical_dtype",
"(",
"data",
"[",
"col",
"]",
")",
"for",
"col",
"in",
"data",
"]",
"self",
".",
"_is_col_cat",
"=",
"is_cat",
"self",
".",
"_value_labels",
"=",
"[",
"]",
"if",
"not",
"any",
"(",
"is_cat",
")",
":",
"return",
"data",
"get_base_missing_value",
"=",
"StataMissingValue",
".",
"get_base_missing_value",
"data_formatted",
"=",
"[",
"]",
"for",
"col",
",",
"col_is_cat",
"in",
"zip",
"(",
"data",
",",
"is_cat",
")",
":",
"if",
"col_is_cat",
":",
"self",
".",
"_value_labels",
".",
"append",
"(",
"StataValueLabel",
"(",
"data",
"[",
"col",
"]",
")",
")",
"dtype",
"=",
"data",
"[",
"col",
"]",
".",
"cat",
".",
"codes",
".",
"dtype",
"if",
"dtype",
"==",
"np",
".",
"int64",
":",
"raise",
"ValueError",
"(",
"'It is not possible to export '",
"'int64-based categorical data to Stata.'",
")",
"values",
"=",
"data",
"[",
"col",
"]",
".",
"cat",
".",
"codes",
".",
"values",
".",
"copy",
"(",
")",
"# Upcast if needed so that correct missing values can be set",
"if",
"values",
".",
"max",
"(",
")",
">=",
"get_base_missing_value",
"(",
"dtype",
")",
":",
"if",
"dtype",
"==",
"np",
".",
"int8",
":",
"dtype",
"=",
"np",
".",
"int16",
"elif",
"dtype",
"==",
"np",
".",
"int16",
":",
"dtype",
"=",
"np",
".",
"int32",
"else",
":",
"dtype",
"=",
"np",
".",
"float64",
"values",
"=",
"np",
".",
"array",
"(",
"values",
",",
"dtype",
"=",
"dtype",
")",
"# Replace missing values with Stata missing value for type",
"values",
"[",
"values",
"==",
"-",
"1",
"]",
"=",
"get_base_missing_value",
"(",
"dtype",
")",
"data_formatted",
".",
"append",
"(",
"(",
"col",
",",
"values",
")",
")",
"else",
":",
"data_formatted",
".",
"append",
"(",
"(",
"col",
",",
"data",
"[",
"col",
"]",
")",
")",
"return",
"DataFrame",
".",
"from_dict",
"(",
"OrderedDict",
"(",
"data_formatted",
")",
")"
] |
Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int
|
[
"Check",
"for",
"categorical",
"columns",
"retain",
"categorical",
"information",
"for",
"Stata",
"file",
"and",
"convert",
"categorical",
"data",
"to",
"int"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2025-L2061
|
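A hedged, user-level sketch of the categorical handling above: codes are written as integers (with -1 replaced by a Stata missing value) and the categories become value labels, so the column round-trips through to_stata/read_stata. The frame is an illustrative assumption.

import io

import pandas as pd

df = pd.DataFrame({"grade": pd.Categorical(["a", "b", None, "a"])})

buf = io.BytesIO()
df.to_stata(buf, write_index=False)
buf.seek(0)

back = pd.read_stata(buf)
print(back["grade"].tolist())  # ['a', 'b', nan, 'a']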
19,931
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataWriter._close
|
def _close(self):
"""
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
then leave this file open for the caller to close. In either case,
attempt to flush the file contents to ensure they are written to disk
(if supported)
"""
# Some file-like objects might not support flush
try:
self._file.flush()
except AttributeError:
pass
if self._own_file:
self._file.close()
|
python
|
def _close(self):
"""
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
then leave this file open for the caller to close. In either case,
attempt to flush the file contents to ensure they are written to disk
(if supported)
"""
# Some file-like objects might not support flush
try:
self._file.flush()
except AttributeError:
pass
if self._own_file:
self._file.close()
|
[
"def",
"_close",
"(",
"self",
")",
":",
"# Some file-like objects might not support flush",
"try",
":",
"self",
".",
"_file",
".",
"flush",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"if",
"self",
".",
"_own_file",
":",
"self",
".",
"_file",
".",
"close",
"(",
")"
] |
Close the file if it was created by the writer.
If a buffer or file-like object was passed in, for example a GzipFile,
then leave this file open for the caller to close. In either case,
attempt to flush the file contents to ensure they are written to disk
(if supported)
|
[
"Close",
"the",
"file",
"if",
"it",
"was",
"created",
"by",
"the",
"writer",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2249-L2264
|
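A short, hedged sketch of the ownership rule in _close(): a caller-supplied buffer is flushed but left open, whereas a path opened by the writer itself would be closed. The frame and buffer are assumptions for illustration.

import io

import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0]})

buf = io.BytesIO()
df.to_stata(buf)                 # buffer supplied by the caller
print(buf.closed)                # False -- the caller closes it
print(len(buf.getvalue()) > 0)   # True -- contents were flushed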
19,932
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataStrLWriter.generate_table
|
def generate_table(self):
"""
Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : OrderedDict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
encoding depends on the dta version, and can be expressed as
enc = v + o * 2 ** (o_size * 8)
so that v is stored in the lower bits and o is in the upper
bits. o_size is
* 117: 4
* 118: 6
* 119: 5
"""
gso_table = self._gso_table
gso_df = self.df
columns = list(gso_df.columns)
selected = gso_df[self.columns]
col_index = [(col, columns.index(col)) for col in self.columns]
keys = np.empty(selected.shape, dtype=np.uint64)
for o, (idx, row) in enumerate(selected.iterrows()):
for j, (col, v) in enumerate(col_index):
val = row[col]
# Allow columns with mixed str and None (GH 23633)
val = '' if val is None else val
key = gso_table.get(val, None)
if key is None:
# Stata prefers human numbers
key = (v + 1, o + 1)
gso_table[val] = key
keys[o, j] = self._convert_key(key)
for i, col in enumerate(self.columns):
gso_df[col] = keys[:, i]
return gso_table, gso_df
|
python
|
def generate_table(self):
"""
Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : OrderedDict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
encoding depends on the dta version, and can be expressed as
enc = v + o * 2 ** (o_size * 8)
so that v is stored in the lower bits and o is in the upper
bits. o_size is
* 117: 4
* 118: 6
* 119: 5
"""
gso_table = self._gso_table
gso_df = self.df
columns = list(gso_df.columns)
selected = gso_df[self.columns]
col_index = [(col, columns.index(col)) for col in self.columns]
keys = np.empty(selected.shape, dtype=np.uint64)
for o, (idx, row) in enumerate(selected.iterrows()):
for j, (col, v) in enumerate(col_index):
val = row[col]
# Allow columns with mixed str and None (GH 23633)
val = '' if val is None else val
key = gso_table.get(val, None)
if key is None:
# Stata prefers human numbers
key = (v + 1, o + 1)
gso_table[val] = key
keys[o, j] = self._convert_key(key)
for i, col in enumerate(self.columns):
gso_df[col] = keys[:, i]
return gso_table, gso_df
|
[
"def",
"generate_table",
"(",
"self",
")",
":",
"gso_table",
"=",
"self",
".",
"_gso_table",
"gso_df",
"=",
"self",
".",
"df",
"columns",
"=",
"list",
"(",
"gso_df",
".",
"columns",
")",
"selected",
"=",
"gso_df",
"[",
"self",
".",
"columns",
"]",
"col_index",
"=",
"[",
"(",
"col",
",",
"columns",
".",
"index",
"(",
"col",
")",
")",
"for",
"col",
"in",
"self",
".",
"columns",
"]",
"keys",
"=",
"np",
".",
"empty",
"(",
"selected",
".",
"shape",
",",
"dtype",
"=",
"np",
".",
"uint64",
")",
"for",
"o",
",",
"(",
"idx",
",",
"row",
")",
"in",
"enumerate",
"(",
"selected",
".",
"iterrows",
"(",
")",
")",
":",
"for",
"j",
",",
"(",
"col",
",",
"v",
")",
"in",
"enumerate",
"(",
"col_index",
")",
":",
"val",
"=",
"row",
"[",
"col",
"]",
"# Allow columns with mixed str and None (GH 23633)",
"val",
"=",
"''",
"if",
"val",
"is",
"None",
"else",
"val",
"key",
"=",
"gso_table",
".",
"get",
"(",
"val",
",",
"None",
")",
"if",
"key",
"is",
"None",
":",
"# Stata prefers human numbers",
"key",
"=",
"(",
"v",
"+",
"1",
",",
"o",
"+",
"1",
")",
"gso_table",
"[",
"val",
"]",
"=",
"key",
"keys",
"[",
"o",
",",
"j",
"]",
"=",
"self",
".",
"_convert_key",
"(",
"key",
")",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"self",
".",
"columns",
")",
":",
"gso_df",
"[",
"col",
"]",
"=",
"keys",
"[",
":",
",",
"i",
"]",
"return",
"gso_table",
",",
"gso_df"
] |
Generates the GSO lookup table for the DataFrame
Returns
-------
gso_table : OrderedDict
Ordered dictionary using the string found as keys
and their lookup position (v,o) as values
gso_df : DataFrame
DataFrame where strl columns have been converted to
(v,o) values
Notes
-----
Modifies the DataFrame in-place.
The DataFrame returned encodes the (v,o) values as uint64s. The
encoding depends on the dta version, and can be expressed as
enc = v + o * 2 ** (o_size * 8)
so that v is stored in the lower bits and o is in the upper
bits. o_size is
* 117: 4
* 118: 6
* 119: 5
|
[
"Generates",
"the",
"GSO",
"lookup",
"table",
"for",
"the",
"DataFRame"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2546-L2596
|
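A quick arithmetic check of the (v, o) encoding given in the Notes above, assuming the formula applies as written for a format-117 file (o_size = 4, so o lands in the upper 32 bits):

v, o = 3, 12                      # 1-based column and observation numbers
o_size = 4                        # per the Notes: 4 for dta version 117
enc = v + o * 2 ** (o_size * 8)
print(hex(enc))                   # 0xc00000003: o in the upper bits, v in the lower bits
assert enc % 2 ** 32 == v and enc // 2 ** 32 == o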
19,933
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataStrLWriter.generate_blob
|
def generate_blob(self, gso_table):
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : OrderedDict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = bytes('GSO', 'ascii')
gso_type = struct.pack(self._byteorder + 'B', 130)
null = struct.pack(self._byteorder + 'B', 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + 'I'
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
utf8_string = bytes(strl, 'utf-8')
bio.write(struct.pack(len_type, len(utf8_string) + 1))
# xxx...xxx
bio.write(utf8_string)
bio.write(null)
bio.seek(0)
return bio.read()
|
python
|
def generate_blob(self, gso_table):
"""
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : OrderedDict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
"""
# Format information
# Length includes null term
# 117
# GSOvvvvooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u4 u1 u4 string + null term
#
# 118, 119
# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
# 3 u4 u8 u1 u4 string + null term
bio = BytesIO()
gso = bytes('GSO', 'ascii')
gso_type = struct.pack(self._byteorder + 'B', 130)
null = struct.pack(self._byteorder + 'B', 0)
v_type = self._byteorder + self._gso_v_type
o_type = self._byteorder + self._gso_o_type
len_type = self._byteorder + 'I'
for strl, vo in gso_table.items():
if vo == (0, 0):
continue
v, o = vo
# GSO
bio.write(gso)
# vvvv
bio.write(struct.pack(v_type, v))
# oooo / oooooooo
bio.write(struct.pack(o_type, o))
# t
bio.write(gso_type)
# llll
utf8_string = bytes(strl, 'utf-8')
bio.write(struct.pack(len_type, len(utf8_string) + 1))
# xxx...xxx
bio.write(utf8_string)
bio.write(null)
bio.seek(0)
return bio.read()
|
[
"def",
"generate_blob",
"(",
"self",
",",
"gso_table",
")",
":",
"# Format information",
"# Length includes null term",
"# 117",
"# GSOvvvvooootllllxxxxxxxxxxxxxxx...x",
"# 3 u4 u4 u1 u4 string + null term",
"#",
"# 118, 119",
"# GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x",
"# 3 u4 u8 u1 u4 string + null term",
"bio",
"=",
"BytesIO",
"(",
")",
"gso",
"=",
"bytes",
"(",
"'GSO'",
",",
"'ascii'",
")",
"gso_type",
"=",
"struct",
".",
"pack",
"(",
"self",
".",
"_byteorder",
"+",
"'B'",
",",
"130",
")",
"null",
"=",
"struct",
".",
"pack",
"(",
"self",
".",
"_byteorder",
"+",
"'B'",
",",
"0",
")",
"v_type",
"=",
"self",
".",
"_byteorder",
"+",
"self",
".",
"_gso_v_type",
"o_type",
"=",
"self",
".",
"_byteorder",
"+",
"self",
".",
"_gso_o_type",
"len_type",
"=",
"self",
".",
"_byteorder",
"+",
"'I'",
"for",
"strl",
",",
"vo",
"in",
"gso_table",
".",
"items",
"(",
")",
":",
"if",
"vo",
"==",
"(",
"0",
",",
"0",
")",
":",
"continue",
"v",
",",
"o",
"=",
"vo",
"# GSO",
"bio",
".",
"write",
"(",
"gso",
")",
"# vvvv",
"bio",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"v_type",
",",
"v",
")",
")",
"# oooo / oooooooo",
"bio",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"o_type",
",",
"o",
")",
")",
"# t",
"bio",
".",
"write",
"(",
"gso_type",
")",
"# llll",
"utf8_string",
"=",
"bytes",
"(",
"strl",
",",
"'utf-8'",
")",
"bio",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"len_type",
",",
"len",
"(",
"utf8_string",
")",
"+",
"1",
")",
")",
"# xxx...xxx",
"bio",
".",
"write",
"(",
"utf8_string",
")",
"bio",
".",
"write",
"(",
"null",
")",
"bio",
".",
"seek",
"(",
"0",
")",
"return",
"bio",
".",
"read",
"(",
")"
] |
Generates the binary blob of GSOs that is written to the dta file.
Parameters
----------
gso_table : OrderedDict
Ordered dictionary (str, vo)
Returns
-------
gso : bytes
Binary content of dta file to be placed between strl tags
Notes
-----
Output format depends on dta version. 117 uses two uint32s to
express v and o while 118+ uses a uint32 for v and a uint64 for o.
|
[
"Generates",
"the",
"binary",
"blob",
"of",
"GSOs",
"that",
"is",
"written",
"to",
"the",
"dta",
"file",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2604-L2666
|
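A hedged sketch of packing a single GSO record in the 117 layout described by the comments above ('GSO', u4 v, u4 o, u1 type 130, u4 length including the null terminator, payload, null); little-endian byte order is assumed:

import struct
from io import BytesIO

def pack_gso_117(v, o, text, byteorder="<"):
    payload = text.encode("utf-8")
    buf = BytesIO()
    buf.write(b"GSO")                                          # tag
    buf.write(struct.pack(byteorder + "I", v))                 # vvvv
    buf.write(struct.pack(byteorder + "I", o))                 # oooo
    buf.write(struct.pack(byteorder + "B", 130))               # t
    buf.write(struct.pack(byteorder + "I", len(payload) + 1))  # llll, includes null term
    buf.write(payload)
    buf.write(b"\x00")
    return buf.getvalue()

print(pack_gso_117(1, 1, "hello"))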
19,934
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataWriter117._write_header
|
def _write_header(self, data_label=None, time_stamp=None):
"""Write the file header"""
byteorder = self._byteorder
self._file.write(bytes('<stata_dta>', 'utf-8'))
bio = BytesIO()
# ds_format - 117
bio.write(self._tag(bytes('117', 'utf-8'), 'release'))
# byteorder
bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", 'byteorder'))
# number of vars, 2 bytes
assert self.nvar < 2 ** 16
bio.write(self._tag(struct.pack(byteorder + "H", self.nvar), 'K'))
# number of obs, 4 bytes
bio.write(self._tag(struct.pack(byteorder + "I", self.nobs), 'N'))
# data label 81 bytes, char, null terminated
label = data_label[:80] if data_label is not None else ''
label_len = struct.pack(byteorder + "B", len(label))
label = label_len + bytes(label, 'utf-8')
bio.write(self._tag(label, 'label'))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# Avoid locale-specific month conversion
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (time_stamp.strftime("%d ") +
month_lookup[time_stamp.month] +
time_stamp.strftime(" %Y %H:%M"))
# '\x11' added due to inspection of Stata file
ts = b'\x11' + bytes(ts, 'utf8')
bio.write(self._tag(ts, 'timestamp'))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'header'))
|
python
|
def _write_header(self, data_label=None, time_stamp=None):
"""Write the file header"""
byteorder = self._byteorder
self._file.write(bytes('<stata_dta>', 'utf-8'))
bio = BytesIO()
# ds_format - 117
bio.write(self._tag(bytes('117', 'utf-8'), 'release'))
# byteorder
bio.write(self._tag(byteorder == ">" and "MSF" or "LSF", 'byteorder'))
# number of vars, 2 bytes
assert self.nvar < 2 ** 16
bio.write(self._tag(struct.pack(byteorder + "H", self.nvar), 'K'))
# number of obs, 4 bytes
bio.write(self._tag(struct.pack(byteorder + "I", self.nobs), 'N'))
# data label 81 bytes, char, null terminated
label = data_label[:80] if data_label is not None else ''
label_len = struct.pack(byteorder + "B", len(label))
label = label_len + bytes(label, 'utf-8')
bio.write(self._tag(label, 'label'))
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
# Avoid locale-specific month conversion
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
month_lookup = {i + 1: month for i, month in enumerate(months)}
ts = (time_stamp.strftime("%d ") +
month_lookup[time_stamp.month] +
time_stamp.strftime(" %Y %H:%M"))
# '\x11' added due to inspection of Stata file
ts = b'\x11' + bytes(ts, 'utf8')
bio.write(self._tag(ts, 'timestamp'))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'header'))
|
[
"def",
"_write_header",
"(",
"self",
",",
"data_label",
"=",
"None",
",",
"time_stamp",
"=",
"None",
")",
":",
"byteorder",
"=",
"self",
".",
"_byteorder",
"self",
".",
"_file",
".",
"write",
"(",
"bytes",
"(",
"'<stata_dta>'",
",",
"'utf-8'",
")",
")",
"bio",
"=",
"BytesIO",
"(",
")",
"# ds_format - 117",
"bio",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"bytes",
"(",
"'117'",
",",
"'utf-8'",
")",
",",
"'release'",
")",
")",
"# byteorder",
"bio",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"byteorder",
"==",
"\">\"",
"and",
"\"MSF\"",
"or",
"\"LSF\"",
",",
"'byteorder'",
")",
")",
"# number of vars, 2 bytes",
"assert",
"self",
".",
"nvar",
"<",
"2",
"**",
"16",
"bio",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"struct",
".",
"pack",
"(",
"byteorder",
"+",
"\"H\"",
",",
"self",
".",
"nvar",
")",
",",
"'K'",
")",
")",
"# number of obs, 4 bytes",
"bio",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"struct",
".",
"pack",
"(",
"byteorder",
"+",
"\"I\"",
",",
"self",
".",
"nobs",
")",
",",
"'N'",
")",
")",
"# data label 81 bytes, char, null terminated",
"label",
"=",
"data_label",
"[",
":",
"80",
"]",
"if",
"data_label",
"is",
"not",
"None",
"else",
"''",
"label_len",
"=",
"struct",
".",
"pack",
"(",
"byteorder",
"+",
"\"B\"",
",",
"len",
"(",
"label",
")",
")",
"label",
"=",
"label_len",
"+",
"bytes",
"(",
"label",
",",
"'utf-8'",
")",
"bio",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"label",
",",
"'label'",
")",
")",
"# time stamp, 18 bytes, char, null terminated",
"# format dd Mon yyyy hh:mm",
"if",
"time_stamp",
"is",
"None",
":",
"time_stamp",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"time_stamp",
",",
"datetime",
".",
"datetime",
")",
":",
"raise",
"ValueError",
"(",
"\"time_stamp should be datetime type\"",
")",
"# Avoid locale-specific month conversion",
"months",
"=",
"[",
"'Jan'",
",",
"'Feb'",
",",
"'Mar'",
",",
"'Apr'",
",",
"'May'",
",",
"'Jun'",
",",
"'Jul'",
",",
"'Aug'",
",",
"'Sep'",
",",
"'Oct'",
",",
"'Nov'",
",",
"'Dec'",
"]",
"month_lookup",
"=",
"{",
"i",
"+",
"1",
":",
"month",
"for",
"i",
",",
"month",
"in",
"enumerate",
"(",
"months",
")",
"}",
"ts",
"=",
"(",
"time_stamp",
".",
"strftime",
"(",
"\"%d \"",
")",
"+",
"month_lookup",
"[",
"time_stamp",
".",
"month",
"]",
"+",
"time_stamp",
".",
"strftime",
"(",
"\" %Y %H:%M\"",
")",
")",
"# '\\x11' added due to inspection of Stata file",
"ts",
"=",
"b'\\x11'",
"+",
"bytes",
"(",
"ts",
",",
"'utf8'",
")",
"bio",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"ts",
",",
"'timestamp'",
")",
")",
"bio",
".",
"seek",
"(",
"0",
")",
"self",
".",
"_file",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"bio",
".",
"read",
"(",
")",
",",
"'header'",
")",
")"
] |
Write the file header
|
[
"Write",
"the",
"file",
"header"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2772-L2808
|
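The month lookup in the header writer above avoids locale-dependent %b formatting; the same 'dd Mon yyyy hh:mm' stamp can be reproduced standalone like this (sketch only):

import datetime

months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
ts = datetime.datetime(2019, 3, 7, 14, 5)
stamp = ts.strftime("%d ") + months[ts.month - 1] + ts.strftime(" %Y %H:%M")
print(stamp)   # '07 Mar 2019 14:05' regardless of the active locale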
19,935
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataWriter117._write_map
|
def _write_map(self):
"""Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written."""
if self._map is None:
self._map = OrderedDict((('stata_data', 0),
('map', self._file.tell()),
('variable_types', 0),
('varnames', 0),
('sortlist', 0),
('formats', 0),
('value_label_names', 0),
('variable_labels', 0),
('characteristics', 0),
('data', 0),
('strls', 0),
('value_labels', 0),
('stata_data_close', 0),
('end-of-file', 0)))
# Move to start of map
self._file.seek(self._map['map'])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + 'Q', val))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'map'))
|
python
|
def _write_map(self):
"""Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written."""
if self._map is None:
self._map = OrderedDict((('stata_data', 0),
('map', self._file.tell()),
('variable_types', 0),
('varnames', 0),
('sortlist', 0),
('formats', 0),
('value_label_names', 0),
('variable_labels', 0),
('characteristics', 0),
('data', 0),
('strls', 0),
('value_labels', 0),
('stata_data_close', 0),
('end-of-file', 0)))
# Move to start of map
self._file.seek(self._map['map'])
bio = BytesIO()
for val in self._map.values():
bio.write(struct.pack(self._byteorder + 'Q', val))
bio.seek(0)
self._file.write(self._tag(bio.read(), 'map'))
|
[
"def",
"_write_map",
"(",
"self",
")",
":",
"if",
"self",
".",
"_map",
"is",
"None",
":",
"self",
".",
"_map",
"=",
"OrderedDict",
"(",
"(",
"(",
"'stata_data'",
",",
"0",
")",
",",
"(",
"'map'",
",",
"self",
".",
"_file",
".",
"tell",
"(",
")",
")",
",",
"(",
"'variable_types'",
",",
"0",
")",
",",
"(",
"'varnames'",
",",
"0",
")",
",",
"(",
"'sortlist'",
",",
"0",
")",
",",
"(",
"'formats'",
",",
"0",
")",
",",
"(",
"'value_label_names'",
",",
"0",
")",
",",
"(",
"'variable_labels'",
",",
"0",
")",
",",
"(",
"'characteristics'",
",",
"0",
")",
",",
"(",
"'data'",
",",
"0",
")",
",",
"(",
"'strls'",
",",
"0",
")",
",",
"(",
"'value_labels'",
",",
"0",
")",
",",
"(",
"'stata_data_close'",
",",
"0",
")",
",",
"(",
"'end-of-file'",
",",
"0",
")",
")",
")",
"# Move to start of map",
"self",
".",
"_file",
".",
"seek",
"(",
"self",
".",
"_map",
"[",
"'map'",
"]",
")",
"bio",
"=",
"BytesIO",
"(",
")",
"for",
"val",
"in",
"self",
".",
"_map",
".",
"values",
"(",
")",
":",
"bio",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"self",
".",
"_byteorder",
"+",
"'Q'",
",",
"val",
")",
")",
"bio",
".",
"seek",
"(",
"0",
")",
"self",
".",
"_file",
".",
"write",
"(",
"self",
".",
"_tag",
"(",
"bio",
".",
"read",
"(",
")",
",",
"'map'",
")",
")"
] |
Called twice during file write. The first populates the values in
the map with 0s. The second call writes the final map locations when
all blocks have been written.
|
[
"Called",
"twice",
"during",
"file",
"write",
".",
"The",
"first",
"populates",
"the",
"values",
"in",
"the",
"map",
"with",
"0s",
".",
"The",
"second",
"call",
"writes",
"the",
"final",
"map",
"locations",
"when",
"all",
"blocks",
"have",
"been",
"written",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2810-L2835
|
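The two-pass map idea above (write zero placeholders, then seek back and overwrite with the real offsets once every block's position is known) in a minimal form, independent of the Stata tag framing:

import struct
from io import BytesIO

f = BytesIO()
map_pos = f.tell()
f.write(struct.pack("<2Q", 0, 0))                # pass 1: placeholder offsets
data_pos = f.tell()
f.write(b"section one")
end_pos = f.tell()
f.seek(map_pos)
f.write(struct.pack("<2Q", data_pos, end_pos))   # pass 2: real locations
f.seek(0)
print(struct.unpack("<2Q", f.read(16)))          # (16, 27)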
19,936
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataWriter117._update_strl_names
|
def _update_strl_names(self):
"""Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules"""
# Update convert_strl if names changed
for orig, new in self._converted_names.items():
if orig in self._convert_strl:
idx = self._convert_strl.index(orig)
self._convert_strl[idx] = new
|
python
|
def _update_strl_names(self):
"""Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules"""
# Update convert_strl if names changed
for orig, new in self._converted_names.items():
if orig in self._convert_strl:
idx = self._convert_strl.index(orig)
self._convert_strl[idx] = new
|
[
"def",
"_update_strl_names",
"(",
"self",
")",
":",
"# Update convert_strl if names changed",
"for",
"orig",
",",
"new",
"in",
"self",
".",
"_converted_names",
".",
"items",
"(",
")",
":",
"if",
"orig",
"in",
"self",
".",
"_convert_strl",
":",
"idx",
"=",
"self",
".",
"_convert_strl",
".",
"index",
"(",
"orig",
")",
"self",
".",
"_convert_strl",
"[",
"idx",
"]",
"=",
"new"
] |
Update column names for conversion to strl if they might have been
changed to comply with Stata naming rules
|
[
"Update",
"column",
"names",
"for",
"conversion",
"to",
"strl",
"if",
"they",
"might",
"have",
"been",
"changed",
"to",
"comply",
"with",
"Stata",
"naming",
"rules"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2948-L2955
|
19,937
|
pandas-dev/pandas
|
pandas/io/stata.py
|
StataWriter117._convert_strls
|
def _convert_strls(self, data):
"""Convert columns to StrLs if either very large or in the
convert_strl variable"""
convert_cols = [
col for i, col in enumerate(data)
if self.typlist[i] == 32768 or col in self._convert_strl]
if convert_cols:
ssw = StataStrLWriter(data, convert_cols)
tab, new_data = ssw.generate_table()
data = new_data
self._strl_blob = ssw.generate_blob(tab)
return data
|
python
|
def _convert_strls(self, data):
"""Convert columns to StrLs if either very large or in the
convert_strl variable"""
convert_cols = [
col for i, col in enumerate(data)
if self.typlist[i] == 32768 or col in self._convert_strl]
if convert_cols:
ssw = StataStrLWriter(data, convert_cols)
tab, new_data = ssw.generate_table()
data = new_data
self._strl_blob = ssw.generate_blob(tab)
return data
|
[
"def",
"_convert_strls",
"(",
"self",
",",
"data",
")",
":",
"convert_cols",
"=",
"[",
"col",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"data",
")",
"if",
"self",
".",
"typlist",
"[",
"i",
"]",
"==",
"32768",
"or",
"col",
"in",
"self",
".",
"_convert_strl",
"]",
"if",
"convert_cols",
":",
"ssw",
"=",
"StataStrLWriter",
"(",
"data",
",",
"convert_cols",
")",
"tab",
",",
"new_data",
"=",
"ssw",
".",
"generate_table",
"(",
")",
"data",
"=",
"new_data",
"self",
".",
"_strl_blob",
"=",
"ssw",
".",
"generate_blob",
"(",
"tab",
")",
"return",
"data"
] |
Convert columns to StrLs if either very large or in the
convert_strl variable
|
[
"Convert",
"columns",
"to",
"StrLs",
"if",
"either",
"very",
"large",
"or",
"in",
"the",
"convert_strl",
"variable"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/stata.py#L2957-L2969
|
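From the user's side, the strL conversion above is reached through DataFrame.to_stata with version=117; a brief usage sketch (file and column names are illustrative):

import pandas as pd

df = pd.DataFrame({"short": ["a", "b"],
                   "long_text": ["x" * 3000, "y" * 5000]})
# version=117 enables strL storage; convert_strl forces it for the named column
df.to_stata("example.dta", version=117, convert_strl=["long_text"], write_index=False)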
19,938
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
register
|
def register(explicit=True):
"""
Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converters
"""
# Renamed in pandas.plotting.__init__
global _WARN
if explicit:
_WARN = False
pairs = get_pairs()
for type_, cls in pairs:
converter = cls()
if type_ in units.registry:
previous = units.registry[type_]
_mpl_units[type_] = previous
units.registry[type_] = converter
|
python
|
def register(explicit=True):
"""
Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converters
"""
# Renamed in pandas.plotting.__init__
global _WARN
if explicit:
_WARN = False
pairs = get_pairs()
for type_, cls in pairs:
converter = cls()
if type_ in units.registry:
previous = units.registry[type_]
_mpl_units[type_] = previous
units.registry[type_] = converter
|
[
"def",
"register",
"(",
"explicit",
"=",
"True",
")",
":",
"# Renamed in pandas.plotting.__init__",
"global",
"_WARN",
"if",
"explicit",
":",
"_WARN",
"=",
"False",
"pairs",
"=",
"get_pairs",
"(",
")",
"for",
"type_",
",",
"cls",
"in",
"pairs",
":",
"converter",
"=",
"cls",
"(",
")",
"if",
"type_",
"in",
"units",
".",
"registry",
":",
"previous",
"=",
"units",
".",
"registry",
"[",
"type_",
"]",
"_mpl_units",
"[",
"type_",
"]",
"=",
"previous",
"units",
".",
"registry",
"[",
"type_",
"]",
"=",
"converter"
] |
Register Pandas Formatters and Converters with matplotlib
This function modifies the global ``matplotlib.units.registry``
dictionary. Pandas adds custom converters for
* pd.Timestamp
* pd.Period
* np.datetime64
* datetime.datetime
* datetime.date
* datetime.time
See Also
--------
deregister_matplotlib_converters
|
[
"Register",
"Pandas",
"Formatters",
"and",
"Converters",
"with",
"matplotlib"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L54-L84
|
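The register function above is exposed publicly as pandas.plotting.register_matplotlib_converters; a short usage sketch:

import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()    # teach matplotlib about Timestamp, Period, etc.

s = pd.Series(range(3), index=pd.date_range("2019-01-01", periods=3))
plt.plot(s.index, s.values)         # datetime x-axis now uses the pandas converters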
19,939
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
deregister
|
def deregister():
"""
Remove pandas' formatters and converters
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
deregister_matplotlib_converters
"""
# Renamed in pandas.plotting.__init__
for type_, cls in get_pairs():
# We use type to catch our classes directly, no inheritance
if type(units.registry.get(type_)) is cls:
units.registry.pop(type_)
# restore the old keys
for unit, formatter in _mpl_units.items():
if type(formatter) not in {DatetimeConverter, PeriodConverter,
TimeConverter}:
# make it idempotent by excluding ours.
units.registry[unit] = formatter
|
python
|
def deregister():
"""
Remove pandas' formatters and converters
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
deregister_matplotlib_converters
"""
# Renamed in pandas.plotting.__init__
for type_, cls in get_pairs():
# We use type to catch our classes directly, no inheritance
if type(units.registry.get(type_)) is cls:
units.registry.pop(type_)
# restore the old keys
for unit, formatter in _mpl_units.items():
if type(formatter) not in {DatetimeConverter, PeriodConverter,
TimeConverter}:
# make it idempotent by excluding ours.
units.registry[unit] = formatter
|
[
"def",
"deregister",
"(",
")",
":",
"# Renamed in pandas.plotting.__init__",
"for",
"type_",
",",
"cls",
"in",
"get_pairs",
"(",
")",
":",
"# We use type to catch our classes directly, no inheritance",
"if",
"type",
"(",
"units",
".",
"registry",
".",
"get",
"(",
"type_",
")",
")",
"is",
"cls",
":",
"units",
".",
"registry",
".",
"pop",
"(",
"type_",
")",
"# restore the old keys",
"for",
"unit",
",",
"formatter",
"in",
"_mpl_units",
".",
"items",
"(",
")",
":",
"if",
"type",
"(",
"formatter",
")",
"not",
"in",
"{",
"DatetimeConverter",
",",
"PeriodConverter",
",",
"TimeConverter",
"}",
":",
"# make it idempotent by excluding ours.",
"units",
".",
"registry",
"[",
"unit",
"]",
"=",
"formatter"
] |
Remove pandas' formatters and converters
Removes the custom converters added by :func:`register`. This
attempts to set the state of the registry back to the state before
pandas registered its own units. Converters for pandas' own types like
Timestamp and Period are removed completely. Converters for types
pandas overwrites, like ``datetime.datetime``, are restored to their
original value.
See Also
--------
deregister_matplotlib_converters
|
[
"Remove",
"pandas",
"formatters",
"and",
"converters"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L87-L113
|
19,940
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
_get_default_annual_spacing
|
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
|
python
|
def _get_default_annual_spacing(nyears):
"""
Returns a default spacing between consecutive ticks for annual data.
"""
if nyears < 11:
(min_spacing, maj_spacing) = (1, 1)
elif nyears < 20:
(min_spacing, maj_spacing) = (1, 2)
elif nyears < 50:
(min_spacing, maj_spacing) = (1, 5)
elif nyears < 100:
(min_spacing, maj_spacing) = (5, 10)
elif nyears < 200:
(min_spacing, maj_spacing) = (5, 25)
elif nyears < 600:
(min_spacing, maj_spacing) = (10, 50)
else:
factor = nyears // 1000 + 1
(min_spacing, maj_spacing) = (factor * 20, factor * 100)
return (min_spacing, maj_spacing)
|
[
"def",
"_get_default_annual_spacing",
"(",
"nyears",
")",
":",
"if",
"nyears",
"<",
"11",
":",
"(",
"min_spacing",
",",
"maj_spacing",
")",
"=",
"(",
"1",
",",
"1",
")",
"elif",
"nyears",
"<",
"20",
":",
"(",
"min_spacing",
",",
"maj_spacing",
")",
"=",
"(",
"1",
",",
"2",
")",
"elif",
"nyears",
"<",
"50",
":",
"(",
"min_spacing",
",",
"maj_spacing",
")",
"=",
"(",
"1",
",",
"5",
")",
"elif",
"nyears",
"<",
"100",
":",
"(",
"min_spacing",
",",
"maj_spacing",
")",
"=",
"(",
"5",
",",
"10",
")",
"elif",
"nyears",
"<",
"200",
":",
"(",
"min_spacing",
",",
"maj_spacing",
")",
"=",
"(",
"5",
",",
"25",
")",
"elif",
"nyears",
"<",
"600",
":",
"(",
"min_spacing",
",",
"maj_spacing",
")",
"=",
"(",
"10",
",",
"50",
")",
"else",
":",
"factor",
"=",
"nyears",
"//",
"1000",
"+",
"1",
"(",
"min_spacing",
",",
"maj_spacing",
")",
"=",
"(",
"factor",
"*",
"20",
",",
"factor",
"*",
"100",
")",
"return",
"(",
"min_spacing",
",",
"maj_spacing",
")"
] |
Returns a default spacing between consecutive ticks for annual data.
|
[
"Returns",
"a",
"default",
"spacing",
"between",
"consecutive",
"ticks",
"for",
"annual",
"data",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L537-L556
|
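A worked instance of the fallback branch above for very long spans:

nyears = 2500
factor = nyears // 1000 + 1              # 3
min_spacing, maj_spacing = factor * 20, factor * 100
print(min_spacing, maj_spacing)          # 60 300 -> a minor tick every 60 years, a major tick every 300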
19,941
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
period_break
|
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1 * dates.freq, period)
return np.nonzero(current - previous)[0]
|
python
|
def period_break(dates, period):
"""
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
"""
current = getattr(dates, period)
previous = getattr(dates - 1 * dates.freq, period)
return np.nonzero(current - previous)[0]
|
[
"def",
"period_break",
"(",
"dates",
",",
"period",
")",
":",
"current",
"=",
"getattr",
"(",
"dates",
",",
"period",
")",
"previous",
"=",
"getattr",
"(",
"dates",
"-",
"1",
"*",
"dates",
".",
"freq",
",",
"period",
")",
"return",
"np",
".",
"nonzero",
"(",
"current",
"-",
"previous",
")",
"[",
"0",
"]"
] |
Returns the indices where the given period changes.
Parameters
----------
dates : PeriodIndex
Array of intervals to monitor.
period : string
Name of the period to monitor.
|
[
"Returns",
"the",
"indices",
"where",
"the",
"given",
"period",
"changes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L559-L572
|
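A small worked example of the comparison period_break performs, re-derived by hand since the helper itself is private:

import numpy as np
import pandas as pd

dates = pd.period_range("2018-11", periods=5, freq="M")    # Nov 2018 .. Mar 2019
current = dates.year
previous = (dates - 1 * dates.freq).year
print(np.nonzero(current - previous)[0])                   # [2] -> the year changes at position 2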
19,942
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
has_level_label
|
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
if the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
|
python
|
def has_level_label(label_flags, vmin):
"""
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
if the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
"""
if label_flags.size == 0 or (label_flags.size == 1 and
label_flags[0] == 0 and
vmin % 1 > 0.0):
return False
else:
return True
|
[
"def",
"has_level_label",
"(",
"label_flags",
",",
"vmin",
")",
":",
"if",
"label_flags",
".",
"size",
"==",
"0",
"or",
"(",
"label_flags",
".",
"size",
"==",
"1",
"and",
"label_flags",
"[",
"0",
"]",
"==",
"0",
"and",
"vmin",
"%",
"1",
">",
"0.0",
")",
":",
"return",
"False",
"else",
":",
"return",
"True"
] |
Returns true if the ``label_flags`` indicate there is at least one label
for this level.
if the minimum view limit is not an exact integer, then the first tick
label won't be shown, so we must adjust for that.
|
[
"Returns",
"true",
"if",
"the",
"label_flags",
"indicate",
"there",
"is",
"at",
"least",
"one",
"label",
"for",
"this",
"level",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L575-L588
|
19,943
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
PandasAutoDateLocator.get_locator
|
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
_check_implicitly_registered()
delta = relativedelta(dmax, dmin)
num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
|
python
|
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
_check_implicitly_registered()
delta = relativedelta(dmax, dmin)
num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days
num_sec = (delta.hours * 60.0 + delta.minutes) * 60.0 + delta.seconds
tot_sec = num_days * 86400. + num_sec
if abs(tot_sec) < self.minticks:
self._freq = -1
locator = MilliSecondLocator(self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
return dates.AutoDateLocator.get_locator(self, dmin, dmax)
|
[
"def",
"get_locator",
"(",
"self",
",",
"dmin",
",",
"dmax",
")",
":",
"_check_implicitly_registered",
"(",
")",
"delta",
"=",
"relativedelta",
"(",
"dmax",
",",
"dmin",
")",
"num_days",
"=",
"(",
"delta",
".",
"years",
"*",
"12.0",
"+",
"delta",
".",
"months",
")",
"*",
"31.0",
"+",
"delta",
".",
"days",
"num_sec",
"=",
"(",
"delta",
".",
"hours",
"*",
"60.0",
"+",
"delta",
".",
"minutes",
")",
"*",
"60.0",
"+",
"delta",
".",
"seconds",
"tot_sec",
"=",
"num_days",
"*",
"86400.",
"+",
"num_sec",
"if",
"abs",
"(",
"tot_sec",
")",
"<",
"self",
".",
"minticks",
":",
"self",
".",
"_freq",
"=",
"-",
"1",
"locator",
"=",
"MilliSecondLocator",
"(",
"self",
".",
"tz",
")",
"locator",
".",
"set_axis",
"(",
"self",
".",
"axis",
")",
"locator",
".",
"set_view_interval",
"(",
"*",
"self",
".",
"axis",
".",
"get_view_interval",
"(",
")",
")",
"locator",
".",
"set_data_interval",
"(",
"*",
"self",
".",
"axis",
".",
"get_data_interval",
"(",
")",
")",
"return",
"locator",
"return",
"dates",
".",
"AutoDateLocator",
".",
"get_locator",
"(",
"self",
",",
"dmin",
",",
"dmax",
")"
] |
Pick the best locator based on a distance.
|
[
"Pick",
"the",
"best",
"locator",
"based",
"on",
"a",
"distance",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L369-L387
|
19,944
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
MilliSecondLocator.autoscale
|
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
|
python
|
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm): unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
dmin, dmax = self.datalim_to_dt()
vmin = dates.date2num(dmin)
vmax = dates.date2num(dmax)
return self.nonsingular(vmin, vmax)
|
[
"def",
"autoscale",
"(",
"self",
")",
":",
"dmin",
",",
"dmax",
"=",
"self",
".",
"datalim_to_dt",
"(",
")",
"if",
"dmin",
">",
"dmax",
":",
"dmax",
",",
"dmin",
"=",
"dmin",
",",
"dmax",
"# We need to cap at the endpoints of valid datetime",
"# TODO(wesm): unused?",
"# delta = relativedelta(dmax, dmin)",
"# try:",
"# start = dmin - delta",
"# except ValueError:",
"# start = _from_ordinal(1.0)",
"# try:",
"# stop = dmax + delta",
"# except ValueError:",
"# # The magic number!",
"# stop = _from_ordinal(3652059.9999999)",
"dmin",
",",
"dmax",
"=",
"self",
".",
"datalim_to_dt",
"(",
")",
"vmin",
"=",
"dates",
".",
"date2num",
"(",
"dmin",
")",
"vmax",
"=",
"dates",
".",
"date2num",
"(",
"dmax",
")",
"return",
"self",
".",
"nonsingular",
"(",
"vmin",
",",
"vmax",
")"
] |
Set the view limits to include the data range.
|
[
"Set",
"the",
"view",
"limits",
"to",
"include",
"the",
"data",
"range",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L478-L507
|
19,945
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
TimeSeries_DateLocator._get_default_locs
|
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
|
python
|
def _get_default_locs(self, vmin, vmax):
"Returns the default locations of ticks."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
locator = self.plot_obj.date_axis_info
if self.isminor:
return np.compress(locator['min'], locator['val'])
return np.compress(locator['maj'], locator['val'])
|
[
"def",
"_get_default_locs",
"(",
"self",
",",
"vmin",
",",
"vmax",
")",
":",
"if",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"is",
"None",
":",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"=",
"self",
".",
"finder",
"(",
"vmin",
",",
"vmax",
",",
"self",
".",
"freq",
")",
"locator",
"=",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"if",
"self",
".",
"isminor",
":",
"return",
"np",
".",
"compress",
"(",
"locator",
"[",
"'min'",
"]",
",",
"locator",
"[",
"'val'",
"]",
")",
"return",
"np",
".",
"compress",
"(",
"locator",
"[",
"'maj'",
"]",
",",
"locator",
"[",
"'val'",
"]",
")"
] |
Returns the default locations of ticks.
|
[
"Returns",
"the",
"default",
"locations",
"of",
"ticks",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L1002-L1012
|
19,946
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
TimeSeries_DateLocator.autoscale
|
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
|
python
|
def autoscale(self):
"""
Sets the view limits to the nearest multiples of base that contain the
data.
"""
# requires matplotlib >= 0.98.0
(vmin, vmax) = self.axis.get_data_interval()
locs = self._get_default_locs(vmin, vmax)
(vmin, vmax) = locs[[0, -1]]
if vmin == vmax:
vmin -= 1
vmax += 1
return nonsingular(vmin, vmax)
|
[
"def",
"autoscale",
"(",
"self",
")",
":",
"# requires matplotlib >= 0.98.0",
"(",
"vmin",
",",
"vmax",
")",
"=",
"self",
".",
"axis",
".",
"get_data_interval",
"(",
")",
"locs",
"=",
"self",
".",
"_get_default_locs",
"(",
"vmin",
",",
"vmax",
")",
"(",
"vmin",
",",
"vmax",
")",
"=",
"locs",
"[",
"[",
"0",
",",
"-",
"1",
"]",
"]",
"if",
"vmin",
"==",
"vmax",
":",
"vmin",
"-=",
"1",
"vmax",
"+=",
"1",
"return",
"nonsingular",
"(",
"vmin",
",",
"vmax",
")"
] |
Sets the view limits to the nearest multiples of base that contain the
data.
|
[
"Sets",
"the",
"view",
"limits",
"to",
"the",
"nearest",
"multiples",
"of",
"base",
"that",
"contain",
"the",
"data",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L1035-L1048
|
19,947
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
TimeSeries_DateFormatter._set_default_format
|
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = {x: f for (x, _, _, f) in format}
return self.formatdict
|
python
|
def _set_default_format(self, vmin, vmax):
"Returns the default ticks spacing."
if self.plot_obj.date_axis_info is None:
self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq)
info = self.plot_obj.date_axis_info
if self.isminor:
format = np.compress(info['min'] & np.logical_not(info['maj']),
info)
else:
format = np.compress(info['maj'], info)
self.formatdict = {x: f for (x, _, _, f) in format}
return self.formatdict
|
[
"def",
"_set_default_format",
"(",
"self",
",",
"vmin",
",",
"vmax",
")",
":",
"if",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"is",
"None",
":",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"=",
"self",
".",
"finder",
"(",
"vmin",
",",
"vmax",
",",
"self",
".",
"freq",
")",
"info",
"=",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"if",
"self",
".",
"isminor",
":",
"format",
"=",
"np",
".",
"compress",
"(",
"info",
"[",
"'min'",
"]",
"&",
"np",
".",
"logical_not",
"(",
"info",
"[",
"'maj'",
"]",
")",
",",
"info",
")",
"else",
":",
"format",
"=",
"np",
".",
"compress",
"(",
"info",
"[",
"'maj'",
"]",
",",
"info",
")",
"self",
".",
"formatdict",
"=",
"{",
"x",
":",
"f",
"for",
"(",
"x",
",",
"_",
",",
"_",
",",
"f",
")",
"in",
"format",
"}",
"return",
"self",
".",
"formatdict"
] |
Returns the default ticks spacing.
|
[
"Returns",
"the",
"default",
"ticks",
"spacing",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L1084-L1097
|
19,948
|
pandas-dev/pandas
|
pandas/plotting/_converter.py
|
TimeSeries_DateFormatter.set_locs
|
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
_check_implicitly_registered()
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
|
python
|
def set_locs(self, locs):
'Sets the locations of the ticks'
# don't actually use the locs. This is just needed to work with
# matplotlib. Force to use vmin, vmax
_check_implicitly_registered()
self.locs = locs
(vmin, vmax) = vi = tuple(self.axis.get_view_interval())
if vi != self.plot_obj.view_interval:
self.plot_obj.date_axis_info = None
self.plot_obj.view_interval = vi
if vmax < vmin:
(vmin, vmax) = (vmax, vmin)
self._set_default_format(vmin, vmax)
|
[
"def",
"set_locs",
"(",
"self",
",",
"locs",
")",
":",
"# don't actually use the locs. This is just needed to work with",
"# matplotlib. Force to use vmin, vmax",
"_check_implicitly_registered",
"(",
")",
"self",
".",
"locs",
"=",
"locs",
"(",
"vmin",
",",
"vmax",
")",
"=",
"vi",
"=",
"tuple",
"(",
"self",
".",
"axis",
".",
"get_view_interval",
"(",
")",
")",
"if",
"vi",
"!=",
"self",
".",
"plot_obj",
".",
"view_interval",
":",
"self",
".",
"plot_obj",
".",
"date_axis_info",
"=",
"None",
"self",
".",
"plot_obj",
".",
"view_interval",
"=",
"vi",
"if",
"vmax",
"<",
"vmin",
":",
"(",
"vmin",
",",
"vmax",
")",
"=",
"(",
"vmax",
",",
"vmin",
")",
"self",
".",
"_set_default_format",
"(",
"vmin",
",",
"vmax",
")"
] |
Sets the locations of the ticks
|
[
"Sets",
"the",
"locations",
"of",
"the",
"ticks"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_converter.py#L1099-L1113
|
19,949
|
pandas-dev/pandas
|
pandas/io/json/table_schema.py
|
build_table_schema
|
def build_table_schema(data, index=True, primary_key=None, version=True):
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
primary_key : bool or None, default None
column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Notes
-----
See `_as_json_table_type` for conversion types.
Timedeltas are converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
"""
if index is True:
data = set_default_names(data)
schema = {}
fields = []
if index:
if data.index.nlevels > 1:
for level in data.index.levels:
fields.append(convert_pandas_type_to_json_field(level))
else:
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
for column, s in data.iteritems():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
schema['fields'] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema['primaryKey'] = [data.index.name]
else:
schema['primaryKey'] = data.index.names
elif primary_key is not None:
schema['primaryKey'] = primary_key
if version:
schema['pandas_version'] = '0.20.0'
return schema
|
python
|
def build_table_schema(data, index=True, primary_key=None, version=True):
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
primary_key : bool or None, default None
column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Notes
-----
See `_as_json_table_type` for conversion types.
Timedeltas are converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
"""
if index is True:
data = set_default_names(data)
schema = {}
fields = []
if index:
if data.index.nlevels > 1:
for level in data.index.levels:
fields.append(convert_pandas_type_to_json_field(level))
else:
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
for column, s in data.iteritems():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
schema['fields'] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema['primaryKey'] = [data.index.name]
else:
schema['primaryKey'] = data.index.names
elif primary_key is not None:
schema['primaryKey'] = primary_key
if version:
schema['pandas_version'] = '0.20.0'
return schema
|
[
"def",
"build_table_schema",
"(",
"data",
",",
"index",
"=",
"True",
",",
"primary_key",
"=",
"None",
",",
"version",
"=",
"True",
")",
":",
"if",
"index",
"is",
"True",
":",
"data",
"=",
"set_default_names",
"(",
"data",
")",
"schema",
"=",
"{",
"}",
"fields",
"=",
"[",
"]",
"if",
"index",
":",
"if",
"data",
".",
"index",
".",
"nlevels",
">",
"1",
":",
"for",
"level",
"in",
"data",
".",
"index",
".",
"levels",
":",
"fields",
".",
"append",
"(",
"convert_pandas_type_to_json_field",
"(",
"level",
")",
")",
"else",
":",
"fields",
".",
"append",
"(",
"convert_pandas_type_to_json_field",
"(",
"data",
".",
"index",
")",
")",
"if",
"data",
".",
"ndim",
">",
"1",
":",
"for",
"column",
",",
"s",
"in",
"data",
".",
"iteritems",
"(",
")",
":",
"fields",
".",
"append",
"(",
"convert_pandas_type_to_json_field",
"(",
"s",
")",
")",
"else",
":",
"fields",
".",
"append",
"(",
"convert_pandas_type_to_json_field",
"(",
"data",
")",
")",
"schema",
"[",
"'fields'",
"]",
"=",
"fields",
"if",
"index",
"and",
"data",
".",
"index",
".",
"is_unique",
"and",
"primary_key",
"is",
"None",
":",
"if",
"data",
".",
"index",
".",
"nlevels",
"==",
"1",
":",
"schema",
"[",
"'primaryKey'",
"]",
"=",
"[",
"data",
".",
"index",
".",
"name",
"]",
"else",
":",
"schema",
"[",
"'primaryKey'",
"]",
"=",
"data",
".",
"index",
".",
"names",
"elif",
"primary_key",
"is",
"not",
"None",
":",
"schema",
"[",
"'primaryKey'",
"]",
"=",
"primary_key",
"if",
"version",
":",
"schema",
"[",
"'pandas_version'",
"]",
"=",
"'0.20.0'",
"return",
"schema"
] |
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
primary_key : bool or None, default None
column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Notes
-----
See `_as_json_table_type` for conversion types.
Timedeltas are converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
|
[
"Create",
"a",
"Table",
"schema",
"from",
"data",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/table_schema.py#L183-L259
|
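The schema produced above is what DataFrame.to_json(orient='table') embeds next to the data; a brief usage sketch:

import pandas as pd
from pandas.io.json import build_table_schema

df = pd.DataFrame({"A": [1, 2]}, index=pd.Index([10, 20], name="idx"))
print(build_table_schema(df))
print(df.to_json(orient="table"))   # same schema, serialized alongside the records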
19,950
|
pandas-dev/pandas
|
pandas/io/json/table_schema.py
|
parse_table_schema
|
def parse_table_schema(json, precise_float):
"""
Builds a DataFrame from a given schema
Parameters
----------
json :
A JSON table schema
precise_float : boolean
Flag controlling precision when decoding string to double values, as
dictated by ``read_json``
Returns
-------
df : DataFrame
Raises
------
NotImplementedError
If the JSON table schema contains either timezone or timedelta data
Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.
See Also
--------
build_table_schema : Inverse function.
pandas.read_json
"""
table = loads(json, precise_float=precise_float)
col_order = [field['name'] for field in table['schema']['fields']]
df = DataFrame(table['data'], columns=col_order)[col_order]
dtypes = {field['name']: convert_json_field_to_pandas_type(field)
for field in table['schema']['fields']}
# Cannot directly use as_type with timezone data on object; raise for now
if any(str(x).startswith('datetime64[ns, ') for x in dtypes.values()):
raise NotImplementedError('table="orient" can not yet read timezone '
'data')
# No ISO constructor for Timedelta as of yet, so need to raise
if 'timedelta64' in dtypes.values():
raise NotImplementedError('table="orient" can not yet read '
'ISO-formatted Timedelta data')
df = df.astype(dtypes)
if 'primaryKey' in table['schema']:
df = df.set_index(table['schema']['primaryKey'])
if len(df.index.names) == 1:
if df.index.name == 'index':
df.index.name = None
else:
df.index.names = [None if x.startswith('level_') else x for x in
df.index.names]
return df
|
python
|
def parse_table_schema(json, precise_float):
"""
Builds a DataFrame from a given schema
Parameters
----------
json :
A JSON table schema
precise_float : boolean
Flag controlling precision when decoding string to double values, as
dictated by ``read_json``
Returns
-------
df : DataFrame
Raises
------
NotImplementedError
If the JSON table schema contains either timezone or timedelta data
Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.
See Also
--------
build_table_schema : Inverse function.
pandas.read_json
"""
table = loads(json, precise_float=precise_float)
col_order = [field['name'] for field in table['schema']['fields']]
df = DataFrame(table['data'], columns=col_order)[col_order]
dtypes = {field['name']: convert_json_field_to_pandas_type(field)
for field in table['schema']['fields']}
# Cannot directly use as_type with timezone data on object; raise for now
if any(str(x).startswith('datetime64[ns, ') for x in dtypes.values()):
raise NotImplementedError('table="orient" can not yet read timezone '
'data')
# No ISO constructor for Timedelta as of yet, so need to raise
if 'timedelta64' in dtypes.values():
raise NotImplementedError('table="orient" can not yet read '
'ISO-formatted Timedelta data')
df = df.astype(dtypes)
if 'primaryKey' in table['schema']:
df = df.set_index(table['schema']['primaryKey'])
if len(df.index.names) == 1:
if df.index.name == 'index':
df.index.name = None
else:
df.index.names = [None if x.startswith('level_') else x for x in
df.index.names]
return df
|
[
"def",
"parse_table_schema",
"(",
"json",
",",
"precise_float",
")",
":",
"table",
"=",
"loads",
"(",
"json",
",",
"precise_float",
"=",
"precise_float",
")",
"col_order",
"=",
"[",
"field",
"[",
"'name'",
"]",
"for",
"field",
"in",
"table",
"[",
"'schema'",
"]",
"[",
"'fields'",
"]",
"]",
"df",
"=",
"DataFrame",
"(",
"table",
"[",
"'data'",
"]",
",",
"columns",
"=",
"col_order",
")",
"[",
"col_order",
"]",
"dtypes",
"=",
"{",
"field",
"[",
"'name'",
"]",
":",
"convert_json_field_to_pandas_type",
"(",
"field",
")",
"for",
"field",
"in",
"table",
"[",
"'schema'",
"]",
"[",
"'fields'",
"]",
"}",
"# Cannot directly use as_type with timezone data on object; raise for now",
"if",
"any",
"(",
"str",
"(",
"x",
")",
".",
"startswith",
"(",
"'datetime64[ns, '",
")",
"for",
"x",
"in",
"dtypes",
".",
"values",
"(",
")",
")",
":",
"raise",
"NotImplementedError",
"(",
"'table=\"orient\" can not yet read timezone '",
"'data'",
")",
"# No ISO constructor for Timedelta as of yet, so need to raise",
"if",
"'timedelta64'",
"in",
"dtypes",
".",
"values",
"(",
")",
":",
"raise",
"NotImplementedError",
"(",
"'table=\"orient\" can not yet read '",
"'ISO-formatted Timedelta data'",
")",
"df",
"=",
"df",
".",
"astype",
"(",
"dtypes",
")",
"if",
"'primaryKey'",
"in",
"table",
"[",
"'schema'",
"]",
":",
"df",
"=",
"df",
".",
"set_index",
"(",
"table",
"[",
"'schema'",
"]",
"[",
"'primaryKey'",
"]",
")",
"if",
"len",
"(",
"df",
".",
"index",
".",
"names",
")",
"==",
"1",
":",
"if",
"df",
".",
"index",
".",
"name",
"==",
"'index'",
":",
"df",
".",
"index",
".",
"name",
"=",
"None",
"else",
":",
"df",
".",
"index",
".",
"names",
"=",
"[",
"None",
"if",
"x",
".",
"startswith",
"(",
"'level_'",
")",
"else",
"x",
"for",
"x",
"in",
"df",
".",
"index",
".",
"names",
"]",
"return",
"df"
] |
Builds a DataFrame from a given schema
Parameters
----------
json :
A JSON table schema
precise_float : boolean
Flag controlling precision when decoding string to double values, as
dictated by ``read_json``
Returns
-------
df : DataFrame
Raises
------
NotImplementedError
If the JSON table schema contains either timezone or timedelta data
Notes
-----
Because :func:`DataFrame.to_json` uses the string 'index' to denote a
name-less :class:`Index`, this function sets the name of the returned
:class:`DataFrame` to ``None`` when said string is encountered with a
normal :class:`Index`. For a :class:`MultiIndex`, the same limitation
applies to any strings beginning with 'level_'. Therefore, an
:class:`Index` name of 'index' and :class:`MultiIndex` names starting
with 'level_' are not supported.
See Also
--------
build_table_schema : Inverse function.
pandas.read_json
|
[
"Builds",
"a",
"DataFrame",
"from",
"a",
"given",
"schema"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/table_schema.py#L262-L326
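A minimal round-trip sketch of the behaviour documented above, using the public entry point (pd.read_json with orient='table' calls parse_table_schema internally); illustrative only, assumes pandas is installed:
from io import StringIO

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]},
                  index=pd.Index([10, 20], name="key"))
payload = df.to_json(orient="table")               # data plus a JSON Table Schema
roundtrip = pd.read_json(StringIO(payload), orient="table")
print(roundtrip.index.name)                        # the primaryKey 'key' is restored via set_index
print(list(roundtrip.dtypes))                      # int64 for 'a', object for 'b'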
|
19,951
|
pandas-dev/pandas
|
pandas/core/ops.py
|
get_op_result_name
|
def get_op_result_name(left, right):
"""
Find the appropriate name to pin to an operation result. This result
should always be either an Index or a Series.
Parameters
----------
left : {Series, Index}
right : object
Returns
-------
name : object
Usually a string
"""
# `left` is always a pd.Series when called from within ops
if isinstance(right, (ABCSeries, pd.Index)):
name = _maybe_match_name(left, right)
else:
name = left.name
return name
|
python
|
def get_op_result_name(left, right):
"""
Find the appropriate name to pin to an operation result. This result
should always be either an Index or a Series.
Parameters
----------
left : {Series, Index}
right : object
Returns
-------
name : object
Usually a string
"""
# `left` is always a pd.Series when called from within ops
if isinstance(right, (ABCSeries, pd.Index)):
name = _maybe_match_name(left, right)
else:
name = left.name
return name
|
[
"def",
"get_op_result_name",
"(",
"left",
",",
"right",
")",
":",
"# `left` is always a pd.Series when called from within ops",
"if",
"isinstance",
"(",
"right",
",",
"(",
"ABCSeries",
",",
"pd",
".",
"Index",
")",
")",
":",
"name",
"=",
"_maybe_match_name",
"(",
"left",
",",
"right",
")",
"else",
":",
"name",
"=",
"left",
".",
"name",
"return",
"name"
] |
Find the appropriate name to pin to an operation result. This result
should always be either an Index or a Series.
Parameters
----------
left : {Series, Index}
right : object
Returns
-------
name : object
Usually a string
|
[
"Find",
"the",
"appropriate",
"name",
"to",
"pin",
"to",
"an",
"operation",
"result",
".",
"This",
"result",
"should",
"always",
"be",
"either",
"an",
"Index",
"or",
"a",
"Series",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L38-L58
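An illustrative sketch of the naming rule described above, shown through public Series arithmetic (the helper itself is private):
import pandas as pd

a = pd.Series([1, 2], name="x")
b = pd.Series([10, 20], name="x")
c = pd.Series([10, 20], name="y")

print((a + b).name)   # 'x'   -> matching names are kept
print((a + c).name)   # None  -> conflicting names yield no name
print((a + 1).name)   # 'x'   -> a non-pandas right operand keeps the left name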
|
19,952
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_maybe_match_name
|
def _maybe_match_name(a, b):
"""
Try to find a name to attach to the result of an operation between
a and b. If only one of these has a `name` attribute, return that
name. Otherwise return a consensus name if they match or None if
they have different names.
Parameters
----------
a : object
b : object
Returns
-------
name : str or None
See Also
--------
pandas.core.common.consensus_name_attr
"""
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
# TODO: what if they both have np.nan for their names?
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
|
python
|
def _maybe_match_name(a, b):
"""
Try to find a name to attach to the result of an operation between
a and b. If only one of these has a `name` attribute, return that
name. Otherwise return a consensus name if they match or None if
they have different names.
Parameters
----------
a : object
b : object
Returns
-------
name : str or None
See Also
--------
pandas.core.common.consensus_name_attr
"""
a_has = hasattr(a, 'name')
b_has = hasattr(b, 'name')
if a_has and b_has:
if a.name == b.name:
return a.name
else:
# TODO: what if they both have np.nan for their names?
return None
elif a_has:
return a.name
elif b_has:
return b.name
return None
|
[
"def",
"_maybe_match_name",
"(",
"a",
",",
"b",
")",
":",
"a_has",
"=",
"hasattr",
"(",
"a",
",",
"'name'",
")",
"b_has",
"=",
"hasattr",
"(",
"b",
",",
"'name'",
")",
"if",
"a_has",
"and",
"b_has",
":",
"if",
"a",
".",
"name",
"==",
"b",
".",
"name",
":",
"return",
"a",
".",
"name",
"else",
":",
"# TODO: what if they both have np.nan for their names?",
"return",
"None",
"elif",
"a_has",
":",
"return",
"a",
".",
"name",
"elif",
"b_has",
":",
"return",
"b",
".",
"name",
"return",
"None"
] |
Try to find a name to attach to the result of an operation between
a and b. If only one of these has a `name` attribute, return that
name. Otherwise return a consensus name if they match or None if
they have different names.
Parameters
----------
a : object
b : object
Returns
-------
name : str or None
See Also
--------
pandas.core.common.consensus_name_attr
|
[
"Try",
"to",
"find",
"a",
"name",
"to",
"attach",
"to",
"the",
"result",
"of",
"an",
"operation",
"between",
"a",
"and",
"b",
".",
"If",
"only",
"one",
"of",
"these",
"has",
"a",
"name",
"attribute",
"return",
"that",
"name",
".",
"Otherwise",
"return",
"a",
"consensus",
"name",
"if",
"they",
"match",
"of",
"None",
"if",
"they",
"have",
"different",
"names",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L61-L93
|
19,953
|
pandas-dev/pandas
|
pandas/core/ops.py
|
maybe_upcast_for_op
|
def maybe_upcast_for_op(obj):
"""
Cast non-pandas objects to pandas types to unify behavior of arithmetic
and comparison operations.
Parameters
----------
obj: object
Returns
-------
out : object
Notes
-----
Be careful to call this *after* determining the `name` attribute to be
attached to the result of the arithmetic operation.
"""
if type(obj) is datetime.timedelta:
# GH#22390 cast up to Timedelta to rely on Timedelta
# implementation; otherwise operation against numeric-dtype
# raises TypeError
return pd.Timedelta(obj)
elif isinstance(obj, np.timedelta64) and not isna(obj):
# In particular non-nanosecond timedelta64 needs to be cast to
# nanoseconds, or else we get undesired behavior like
# np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
# The isna check is to avoid casting timedelta64("NaT"), which would
# return NaT and incorrectly be treated as a datetime-NaT.
return pd.Timedelta(obj)
elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj):
# GH#22390 Unfortunately we need to special-case right-hand
# timedelta64 dtypes because numpy casts integer dtypes to
# timedelta64 when operating with timedelta64
return pd.TimedeltaIndex(obj)
return obj
|
python
|
def maybe_upcast_for_op(obj):
"""
Cast non-pandas objects to pandas types to unify behavior of arithmetic
and comparison operations.
Parameters
----------
obj: object
Returns
-------
out : object
Notes
-----
Be careful to call this *after* determining the `name` attribute to be
attached to the result of the arithmetic operation.
"""
if type(obj) is datetime.timedelta:
# GH#22390 cast up to Timedelta to rely on Timedelta
# implementation; otherwise operation against numeric-dtype
# raises TypeError
return pd.Timedelta(obj)
elif isinstance(obj, np.timedelta64) and not isna(obj):
# In particular non-nanosecond timedelta64 needs to be cast to
# nanoseconds, or else we get undesired behavior like
# np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
# The isna check is to avoid casting timedelta64("NaT"), which would
# return NaT and incorrectly be treated as a datetime-NaT.
return pd.Timedelta(obj)
elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj):
# GH#22390 Unfortunately we need to special-case right-hand
# timedelta64 dtypes because numpy casts integer dtypes to
# timedelta64 when operating with timedelta64
return pd.TimedeltaIndex(obj)
return obj
|
[
"def",
"maybe_upcast_for_op",
"(",
"obj",
")",
":",
"if",
"type",
"(",
"obj",
")",
"is",
"datetime",
".",
"timedelta",
":",
"# GH#22390 cast up to Timedelta to rely on Timedelta",
"# implementation; otherwise operation against numeric-dtype",
"# raises TypeError",
"return",
"pd",
".",
"Timedelta",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"np",
".",
"timedelta64",
")",
"and",
"not",
"isna",
"(",
"obj",
")",
":",
"# In particular non-nanosecond timedelta64 needs to be cast to",
"# nanoseconds, or else we get undesired behavior like",
"# np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')",
"# The isna check is to avoid casting timedelta64(\"NaT\"), which would",
"# return NaT and incorrectly be treated as a datetime-NaT.",
"return",
"pd",
".",
"Timedelta",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"np",
".",
"ndarray",
")",
"and",
"is_timedelta64_dtype",
"(",
"obj",
")",
":",
"# GH#22390 Unfortunately we need to special-case right-hand",
"# timedelta64 dtypes because numpy casts integer dtypes to",
"# timedelta64 when operating with timedelta64",
"return",
"pd",
".",
"TimedeltaIndex",
"(",
"obj",
")",
"return",
"obj"
] |
Cast non-pandas objects to pandas types to unify behavior of arithmetic
and comparison operations.
Parameters
----------
obj: object
Returns
-------
out : object
Notes
-----
Be careful to call this *after* determining the `name` attribute to be
attached to the result of the arithmetic operation.
|
[
"Cast",
"non",
"-",
"pandas",
"objects",
"to",
"pandas",
"types",
"to",
"unify",
"behavior",
"of",
"arithmetic",
"and",
"comparison",
"operations",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L96-L131
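A small sketch of the upcasts this helper relies on, shown directly on the scalar and array types involved (illustrative only, assumes numpy and pandas are installed):
import datetime

import numpy as np
import pandas as pd

print(pd.Timedelta(datetime.timedelta(days=3)))     # stdlib timedelta promoted to Timedelta
print(pd.Timedelta(np.timedelta64(3, "D")) / 2)     # 1 days 12:00:00 -> nanosecond resolution
                                                    # avoids the truncation noted in the comment
print(pd.TimedeltaIndex(np.array([2, 4], dtype="timedelta64[D]")))   # ndarray wrapped as TimedeltaIndex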
|
19,954
|
pandas-dev/pandas
|
pandas/core/ops.py
|
make_invalid_op
|
def make_invalid_op(name):
"""
Return a binary method that always raises a TypeError.
Parameters
----------
name : str
Returns
-------
invalid_op : function
"""
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self).__name__))
invalid_op.__name__ = name
return invalid_op
|
python
|
def make_invalid_op(name):
"""
Return a binary method that always raises a TypeError.
Parameters
----------
name : str
Returns
-------
invalid_op : function
"""
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self).__name__))
invalid_op.__name__ = name
return invalid_op
|
[
"def",
"make_invalid_op",
"(",
"name",
")",
":",
"def",
"invalid_op",
"(",
"self",
",",
"other",
"=",
"None",
")",
":",
"raise",
"TypeError",
"(",
"\"cannot perform {name} with this index type: \"",
"\"{typ}\"",
".",
"format",
"(",
"name",
"=",
"name",
",",
"typ",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
")",
")",
"invalid_op",
".",
"__name__",
"=",
"name",
"return",
"invalid_op"
] |
Return a binary method that always raises a TypeError.
Parameters
----------
name : str
Returns
-------
invalid_op : function
|
[
"Return",
"a",
"binary",
"method",
"that",
"always",
"raises",
"a",
"TypeError",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L195-L212
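A hedged usage sketch; it assumes a checkout at the referenced SHA, where this private helper can be imported from pandas.core.ops (the path may differ in later releases):
from pandas.core.ops import make_invalid_op

class Dummy:
    # attach a stub operator that always refuses the operation
    __add__ = make_invalid_op("__add__")

try:
    Dummy() + 1
except TypeError as err:
    print(err)   # cannot perform __add__ with this index type: Dummy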
|
19,955
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_gen_eval_kwargs
|
def _gen_eval_kwargs(name):
"""
Find the keyword arguments to pass to numexpr for the given operation.
Parameters
----------
name : str
Returns
-------
eval_kwargs : dict
Examples
--------
>>> _gen_eval_kwargs("__add__")
{}
>>> _gen_eval_kwargs("rtruediv")
{'reversed': True, 'truediv': True}
"""
kwargs = {}
# Series and Panel appear to only pass __add__, __radd__, ...
# but DataFrame gets both these dunder names _and_ non-dunder names
# add, radd, ...
name = name.replace('__', '')
if name.startswith('r'):
if name not in ['radd', 'rand', 'ror', 'rxor']:
# Exclude commutative operations
kwargs['reversed'] = True
if name in ['truediv', 'rtruediv']:
kwargs['truediv'] = True
if name in ['ne']:
kwargs['masker'] = True
return kwargs
|
python
|
def _gen_eval_kwargs(name):
"""
Find the keyword arguments to pass to numexpr for the given operation.
Parameters
----------
name : str
Returns
-------
eval_kwargs : dict
Examples
--------
>>> _gen_eval_kwargs("__add__")
{}
>>> _gen_eval_kwargs("rtruediv")
{'reversed': True, 'truediv': True}
"""
kwargs = {}
# Series and Panel appear to only pass __add__, __radd__, ...
# but DataFrame gets both these dunder names _and_ non-dunder names
# add, radd, ...
name = name.replace('__', '')
if name.startswith('r'):
if name not in ['radd', 'rand', 'ror', 'rxor']:
# Exclude commutative operations
kwargs['reversed'] = True
if name in ['truediv', 'rtruediv']:
kwargs['truediv'] = True
if name in ['ne']:
kwargs['masker'] = True
return kwargs
|
[
"def",
"_gen_eval_kwargs",
"(",
"name",
")",
":",
"kwargs",
"=",
"{",
"}",
"# Series and Panel appear to only pass __add__, __radd__, ...",
"# but DataFrame gets both these dunder names _and_ non-dunder names",
"# add, radd, ...",
"name",
"=",
"name",
".",
"replace",
"(",
"'__'",
",",
"''",
")",
"if",
"name",
".",
"startswith",
"(",
"'r'",
")",
":",
"if",
"name",
"not",
"in",
"[",
"'radd'",
",",
"'rand'",
",",
"'ror'",
",",
"'rxor'",
"]",
":",
"# Exclude commutative operations",
"kwargs",
"[",
"'reversed'",
"]",
"=",
"True",
"if",
"name",
"in",
"[",
"'truediv'",
",",
"'rtruediv'",
"]",
":",
"kwargs",
"[",
"'truediv'",
"]",
"=",
"True",
"if",
"name",
"in",
"[",
"'ne'",
"]",
":",
"kwargs",
"[",
"'masker'",
"]",
"=",
"True",
"return",
"kwargs"
] |
Find the keyword arguments to pass to numexpr for the given operation.
Parameters
----------
name : str
Returns
-------
eval_kwargs : dict
Examples
--------
>>> _gen_eval_kwargs("__add__")
{}
>>> _gen_eval_kwargs("rtruediv")
{'reversed': True, 'truediv': True}
|
[
"Find",
"the",
"keyword",
"arguments",
"to",
"pass",
"to",
"numexpr",
"for",
"the",
"given",
"operation",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L215-L253
|
19,956
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_get_opstr
|
def _get_opstr(op, cls):
"""
Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None
"""
# numexpr is available for non-sparse classes
subtyp = getattr(cls, '_subtyp', '')
use_numexpr = 'sparse' not in subtyp
if not use_numexpr:
# if we're not using numexpr, then don't pass a str_rep
return None
return {operator.add: '+',
radd: '+',
operator.mul: '*',
rmul: '*',
operator.sub: '-',
rsub: '-',
operator.truediv: '/',
rtruediv: '/',
operator.floordiv: '//',
rfloordiv: '//',
operator.mod: None, # TODO: Why None for mod but '%' for rmod?
rmod: '%',
operator.pow: '**',
rpow: '**',
operator.eq: '==',
operator.ne: '!=',
operator.le: '<=',
operator.lt: '<',
operator.ge: '>=',
operator.gt: '>',
operator.and_: '&',
rand_: '&',
operator.or_: '|',
ror_: '|',
operator.xor: '^',
rxor: '^',
divmod: None,
rdivmod: None}[op]
|
python
|
def _get_opstr(op, cls):
"""
Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None
"""
# numexpr is available for non-sparse classes
subtyp = getattr(cls, '_subtyp', '')
use_numexpr = 'sparse' not in subtyp
if not use_numexpr:
# if we're not using numexpr, then don't pass a str_rep
return None
return {operator.add: '+',
radd: '+',
operator.mul: '*',
rmul: '*',
operator.sub: '-',
rsub: '-',
operator.truediv: '/',
rtruediv: '/',
operator.floordiv: '//',
rfloordiv: '//',
operator.mod: None, # TODO: Why None for mod but '%' for rmod?
rmod: '%',
operator.pow: '**',
rpow: '**',
operator.eq: '==',
operator.ne: '!=',
operator.le: '<=',
operator.lt: '<',
operator.ge: '>=',
operator.gt: '>',
operator.and_: '&',
rand_: '&',
operator.or_: '|',
ror_: '|',
operator.xor: '^',
rxor: '^',
divmod: None,
rdivmod: None}[op]
|
[
"def",
"_get_opstr",
"(",
"op",
",",
"cls",
")",
":",
"# numexpr is available for non-sparse classes",
"subtyp",
"=",
"getattr",
"(",
"cls",
",",
"'_subtyp'",
",",
"''",
")",
"use_numexpr",
"=",
"'sparse'",
"not",
"in",
"subtyp",
"if",
"not",
"use_numexpr",
":",
"# if we're not using numexpr, then don't pass a str_rep",
"return",
"None",
"return",
"{",
"operator",
".",
"add",
":",
"'+'",
",",
"radd",
":",
"'+'",
",",
"operator",
".",
"mul",
":",
"'*'",
",",
"rmul",
":",
"'*'",
",",
"operator",
".",
"sub",
":",
"'-'",
",",
"rsub",
":",
"'-'",
",",
"operator",
".",
"truediv",
":",
"'/'",
",",
"rtruediv",
":",
"'/'",
",",
"operator",
".",
"floordiv",
":",
"'//'",
",",
"rfloordiv",
":",
"'//'",
",",
"operator",
".",
"mod",
":",
"None",
",",
"# TODO: Why None for mod but '%' for rmod?",
"rmod",
":",
"'%'",
",",
"operator",
".",
"pow",
":",
"'**'",
",",
"rpow",
":",
"'**'",
",",
"operator",
".",
"eq",
":",
"'=='",
",",
"operator",
".",
"ne",
":",
"'!='",
",",
"operator",
".",
"le",
":",
"'<='",
",",
"operator",
".",
"lt",
":",
"'<'",
",",
"operator",
".",
"ge",
":",
"'>='",
",",
"operator",
".",
"gt",
":",
"'>'",
",",
"operator",
".",
"and_",
":",
"'&'",
",",
"rand_",
":",
"'&'",
",",
"operator",
".",
"or_",
":",
"'|'",
",",
"ror_",
":",
"'|'",
",",
"operator",
".",
"xor",
":",
"'^'",
",",
"rxor",
":",
"'^'",
",",
"divmod",
":",
"None",
",",
"rdivmod",
":",
"None",
"}",
"[",
"op",
"]"
] |
Find the operation string, if any, to pass to numexpr for this
operation.
Parameters
----------
op : binary operator
cls : class
Returns
-------
op_str : string or None
|
[
"Find",
"the",
"operation",
"string",
"if",
"any",
"to",
"pass",
"to",
"numexpr",
"for",
"this",
"operation",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L307-L356
|
19,957
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_get_op_name
|
def _get_op_name(op, special):
"""
Find the name to attach to this method according to conventions
for special and non-special methods.
Parameters
----------
op : binary operator
special : bool
Returns
-------
op_name : str
"""
opname = op.__name__.strip('_')
if special:
opname = '__{opname}__'.format(opname=opname)
return opname
|
python
|
def _get_op_name(op, special):
"""
Find the name to attach to this method according to conventions
for special and non-special methods.
Parameters
----------
op : binary operator
special : bool
Returns
-------
op_name : str
"""
opname = op.__name__.strip('_')
if special:
opname = '__{opname}__'.format(opname=opname)
return opname
|
[
"def",
"_get_op_name",
"(",
"op",
",",
"special",
")",
":",
"opname",
"=",
"op",
".",
"__name__",
".",
"strip",
"(",
"'_'",
")",
"if",
"special",
":",
"opname",
"=",
"'__{opname}__'",
".",
"format",
"(",
"opname",
"=",
"opname",
")",
"return",
"opname"
] |
Find the name to attach to this method according to conventions
for special and non-special methods.
Parameters
----------
op : binary operator
special : bool
Returns
-------
op_name : str
|
[
"Find",
"the",
"name",
"to",
"attach",
"to",
"this",
"method",
"according",
"to",
"conventions",
"for",
"special",
"and",
"non",
"-",
"special",
"methods",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L359-L376
|
19,958
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_make_flex_doc
|
def _make_flex_doc(op_name, typ):
"""
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {'series', 'dataframe'}
Returns
-------
doc : str
"""
op_name = op_name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' ' + typ
else:
equiv = typ + ' ' + op_desc['op'] + ' other'
if typ == 'series':
base_doc = _flex_doc_SERIES
doc_no_examples = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
if op_desc['series_examples']:
doc = doc_no_examples + op_desc['series_examples']
else:
doc = doc_no_examples
elif typ == 'dataframe':
base_doc = _flex_doc_FRAME
doc = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
elif typ == 'panel':
base_doc = _flex_doc_PANEL
doc = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
else:
raise AssertionError('Invalid typ argument.')
return doc
|
python
|
def _make_flex_doc(op_name, typ):
"""
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {'series', 'dataframe'}
Returns
-------
doc : str
"""
op_name = op_name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' ' + typ
else:
equiv = typ + ' ' + op_desc['op'] + ' other'
if typ == 'series':
base_doc = _flex_doc_SERIES
doc_no_examples = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
if op_desc['series_examples']:
doc = doc_no_examples + op_desc['series_examples']
else:
doc = doc_no_examples
elif typ == 'dataframe':
base_doc = _flex_doc_FRAME
doc = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
elif typ == 'panel':
base_doc = _flex_doc_PANEL
doc = base_doc.format(
desc=op_desc['desc'],
op_name=op_name,
equiv=equiv,
reverse=op_desc['reverse']
)
else:
raise AssertionError('Invalid typ argument.')
return doc
|
[
"def",
"_make_flex_doc",
"(",
"op_name",
",",
"typ",
")",
":",
"op_name",
"=",
"op_name",
".",
"replace",
"(",
"'__'",
",",
"''",
")",
"op_desc",
"=",
"_op_descriptions",
"[",
"op_name",
"]",
"if",
"op_desc",
"[",
"'reversed'",
"]",
":",
"equiv",
"=",
"'other '",
"+",
"op_desc",
"[",
"'op'",
"]",
"+",
"' '",
"+",
"typ",
"else",
":",
"equiv",
"=",
"typ",
"+",
"' '",
"+",
"op_desc",
"[",
"'op'",
"]",
"+",
"' other'",
"if",
"typ",
"==",
"'series'",
":",
"base_doc",
"=",
"_flex_doc_SERIES",
"doc_no_examples",
"=",
"base_doc",
".",
"format",
"(",
"desc",
"=",
"op_desc",
"[",
"'desc'",
"]",
",",
"op_name",
"=",
"op_name",
",",
"equiv",
"=",
"equiv",
",",
"reverse",
"=",
"op_desc",
"[",
"'reverse'",
"]",
")",
"if",
"op_desc",
"[",
"'series_examples'",
"]",
":",
"doc",
"=",
"doc_no_examples",
"+",
"op_desc",
"[",
"'series_examples'",
"]",
"else",
":",
"doc",
"=",
"doc_no_examples",
"elif",
"typ",
"==",
"'dataframe'",
":",
"base_doc",
"=",
"_flex_doc_FRAME",
"doc",
"=",
"base_doc",
".",
"format",
"(",
"desc",
"=",
"op_desc",
"[",
"'desc'",
"]",
",",
"op_name",
"=",
"op_name",
",",
"equiv",
"=",
"equiv",
",",
"reverse",
"=",
"op_desc",
"[",
"'reverse'",
"]",
")",
"elif",
"typ",
"==",
"'panel'",
":",
"base_doc",
"=",
"_flex_doc_PANEL",
"doc",
"=",
"base_doc",
".",
"format",
"(",
"desc",
"=",
"op_desc",
"[",
"'desc'",
"]",
",",
"op_name",
"=",
"op_name",
",",
"equiv",
"=",
"equiv",
",",
"reverse",
"=",
"op_desc",
"[",
"'reverse'",
"]",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"'Invalid typ argument.'",
")",
"return",
"doc"
] |
Make the appropriate substitutions for the given operation and class-typ
into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring
to attach to a generated method.
Parameters
----------
op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...}
typ : str {'series', 'dataframe'}
Returns
-------
doc : str
|
[
"Make",
"the",
"appropriate",
"substitutions",
"for",
"the",
"given",
"operation",
"and",
"class",
"-",
"typ",
"into",
"either",
"_flex_doc_SERIES",
"or",
"_flex_doc_FRAME",
"to",
"return",
"the",
"docstring",
"to",
"attach",
"to",
"a",
"generated",
"method",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1029-L1082
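The substituted docstrings produced here end up on the public flex methods; a quick check (the exact wording assumes the referenced revision):
import pandas as pd

# For a reversed operator the substituted equivalence reads 'other <op> series'.
print("other - series" in pd.Series.rsub.__doc__)   # True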
|
19,959
|
pandas-dev/pandas
|
pandas/core/ops.py
|
mask_cmp_op
|
def mask_cmp_op(x, y, op, allowed_types):
"""
Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
allowed_types : class or tuple of classes
Returns
-------
result : ndarray[bool]
"""
# TODO: Can we make the allowed_types arg unnecessary?
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, allowed_types):
yrav = y.ravel()
mask = notna(xrav) & notna(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notna(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
|
python
|
def mask_cmp_op(x, y, op, allowed_types):
"""
Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
allowed_types : class or tuple of classes
Returns
-------
result : ndarray[bool]
"""
# TODO: Can we make the allowed_types arg unnecessary?
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, allowed_types):
yrav = y.ravel()
mask = notna(xrav) & notna(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notna(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
|
[
"def",
"mask_cmp_op",
"(",
"x",
",",
"y",
",",
"op",
",",
"allowed_types",
")",
":",
"# TODO: Can we make the allowed_types arg unnecessary?",
"xrav",
"=",
"x",
".",
"ravel",
"(",
")",
"result",
"=",
"np",
".",
"empty",
"(",
"x",
".",
"size",
",",
"dtype",
"=",
"bool",
")",
"if",
"isinstance",
"(",
"y",
",",
"allowed_types",
")",
":",
"yrav",
"=",
"y",
".",
"ravel",
"(",
")",
"mask",
"=",
"notna",
"(",
"xrav",
")",
"&",
"notna",
"(",
"yrav",
")",
"result",
"[",
"mask",
"]",
"=",
"op",
"(",
"np",
".",
"array",
"(",
"list",
"(",
"xrav",
"[",
"mask",
"]",
")",
")",
",",
"np",
".",
"array",
"(",
"list",
"(",
"yrav",
"[",
"mask",
"]",
")",
")",
")",
"else",
":",
"mask",
"=",
"notna",
"(",
"xrav",
")",
"result",
"[",
"mask",
"]",
"=",
"op",
"(",
"np",
".",
"array",
"(",
"list",
"(",
"xrav",
"[",
"mask",
"]",
")",
")",
",",
"y",
")",
"if",
"op",
"==",
"operator",
".",
"ne",
":",
"# pragma: no cover",
"np",
".",
"putmask",
"(",
"result",
",",
"~",
"mask",
",",
"True",
")",
"else",
":",
"np",
".",
"putmask",
"(",
"result",
",",
"~",
"mask",
",",
"False",
")",
"result",
"=",
"result",
".",
"reshape",
"(",
"x",
".",
"shape",
")",
"return",
"result"
] |
Apply the function `op` to only non-null points in x and y.
Parameters
----------
x : array-like
y : array-like
op : binary operation
allowed_types : class or tuple of classes
Returns
-------
result : ndarray[bool]
|
[
"Apply",
"the",
"function",
"op",
"to",
"only",
"non",
"-",
"null",
"points",
"in",
"x",
"and",
"y",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1123-L1155
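The masking rule above mirrors what users see for comparisons involving missing values; a public-API illustration (assumes numpy and pandas are installed):
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, 3.0]})
print((df == 1.0)["a"].tolist())   # [True, False, False] -> masked slots compare False
print((df != 1.0)["a"].tolist())   # [False, True, True]  -> except under `ne`, where they are True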
|
19,960
|
pandas-dev/pandas
|
pandas/core/ops.py
|
should_series_dispatch
|
def should_series_dispatch(left, right, op):
"""
Identify cases where a DataFrame operation should dispatch to its
Series counterpart.
Parameters
----------
left : DataFrame
right : DataFrame
op : binary operator
Returns
-------
override : bool
"""
if left._is_mixed_type or right._is_mixed_type:
return True
if not len(left.columns) or not len(right.columns):
# ensure obj.dtypes[0] exists for each obj
return False
ldtype = left.dtypes.iloc[0]
rdtype = right.dtypes.iloc[0]
if ((is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or
(is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype))):
# numpy casts integer dtypes to timedelta64 dtypes in this scenario
return True
if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
# in particular case where right is an array of DateOffsets
return True
return False
|
python
|
def should_series_dispatch(left, right, op):
"""
Identify cases where a DataFrame operation should dispatch to its
Series counterpart.
Parameters
----------
left : DataFrame
right : DataFrame
op : binary operator
Returns
-------
override : bool
"""
if left._is_mixed_type or right._is_mixed_type:
return True
if not len(left.columns) or not len(right.columns):
# ensure obj.dtypes[0] exists for each obj
return False
ldtype = left.dtypes.iloc[0]
rdtype = right.dtypes.iloc[0]
if ((is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or
(is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype))):
# numpy casts integer dtypes to timedelta64 dtypes in this scenario
return True
if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype):
# in particular case where right is an array of DateOffsets
return True
return False
|
[
"def",
"should_series_dispatch",
"(",
"left",
",",
"right",
",",
"op",
")",
":",
"if",
"left",
".",
"_is_mixed_type",
"or",
"right",
".",
"_is_mixed_type",
":",
"return",
"True",
"if",
"not",
"len",
"(",
"left",
".",
"columns",
")",
"or",
"not",
"len",
"(",
"right",
".",
"columns",
")",
":",
"# ensure obj.dtypes[0] exists for each obj",
"return",
"False",
"ldtype",
"=",
"left",
".",
"dtypes",
".",
"iloc",
"[",
"0",
"]",
"rdtype",
"=",
"right",
".",
"dtypes",
".",
"iloc",
"[",
"0",
"]",
"if",
"(",
"(",
"is_timedelta64_dtype",
"(",
"ldtype",
")",
"and",
"is_integer_dtype",
"(",
"rdtype",
")",
")",
"or",
"(",
"is_timedelta64_dtype",
"(",
"rdtype",
")",
"and",
"is_integer_dtype",
"(",
"ldtype",
")",
")",
")",
":",
"# numpy integer dtypes as timedelta64 dtypes in this scenario",
"return",
"True",
"if",
"is_datetime64_dtype",
"(",
"ldtype",
")",
"and",
"is_object_dtype",
"(",
"rdtype",
")",
":",
"# in particular case where right is an array of DateOffsets",
"return",
"True",
"return",
"False"
] |
Identify cases where a DataFrame operation should dispatch to its
Series counterpart.
Parameters
----------
left : DataFrame
right : DataFrame
op : binary operator
Returns
-------
override : bool
|
[
"Identify",
"cases",
"where",
"a",
"DataFrame",
"operation",
"should",
"dispatch",
"to",
"its",
"Series",
"counterpart",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1248-L1282
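A hedged direct call, assuming the private helper is importable from pandas.core.ops at the referenced SHA:
import operator

import pandas as pd
from pandas.core.ops import should_series_dispatch

left = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})    # mixed dtypes
right = pd.DataFrame({"a": [3, 4], "b": ["z", "w"]})
print(should_series_dispatch(left, right, operator.add))   # True: mixed-type frames dispatch by column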
|
19,961
|
pandas-dev/pandas
|
pandas/core/ops.py
|
dispatch_to_index_op
|
def dispatch_to_index_op(op, left, right, index_class):
"""
Wrap Series left in the given index_class to delegate the operation op
to the index implementation. DatetimeIndex and TimedeltaIndex perform
type checking, timezone handling, overflow checks, etc.
Parameters
----------
op : binary operator (operator.add, operator.sub, ...)
left : Series
right : object
index_class : DatetimeIndex or TimedeltaIndex
Returns
-------
result : object, usually DatetimeIndex, TimedeltaIndex, or Series
"""
left_idx = index_class(left)
# avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,
# left_idx may inherit a freq from a cached DatetimeIndex.
# See discussion in GH#19147.
if getattr(left_idx, 'freq', None) is not None:
left_idx = left_idx._shallow_copy(freq=None)
try:
result = op(left_idx, right)
except NullFrequencyError:
# DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
# on add/sub of integers (or int-like). We re-raise as a TypeError.
raise TypeError('incompatible type for a datetime/timedelta '
'operation [{name}]'.format(name=op.__name__))
return result
|
python
|
def dispatch_to_index_op(op, left, right, index_class):
"""
Wrap Series left in the given index_class to delegate the operation op
to the index implementation. DatetimeIndex and TimedeltaIndex perform
type checking, timezone handling, overflow checks, etc.
Parameters
----------
op : binary operator (operator.add, operator.sub, ...)
left : Series
right : object
index_class : DatetimeIndex or TimedeltaIndex
Returns
-------
result : object, usually DatetimeIndex, TimedeltaIndex, or Series
"""
left_idx = index_class(left)
# avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,
# left_idx may inherit a freq from a cached DatetimeIndex.
# See discussion in GH#19147.
if getattr(left_idx, 'freq', None) is not None:
left_idx = left_idx._shallow_copy(freq=None)
try:
result = op(left_idx, right)
except NullFrequencyError:
# DatetimeIndex and TimedeltaIndex with freq == None raise ValueError
# on add/sub of integers (or int-like). We re-raise as a TypeError.
raise TypeError('incompatible type for a datetime/timedelta '
'operation [{name}]'.format(name=op.__name__))
return result
|
[
"def",
"dispatch_to_index_op",
"(",
"op",
",",
"left",
",",
"right",
",",
"index_class",
")",
":",
"left_idx",
"=",
"index_class",
"(",
"left",
")",
"# avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes,",
"# left_idx may inherit a freq from a cached DatetimeIndex.",
"# See discussion in GH#19147.",
"if",
"getattr",
"(",
"left_idx",
",",
"'freq'",
",",
"None",
")",
"is",
"not",
"None",
":",
"left_idx",
"=",
"left_idx",
".",
"_shallow_copy",
"(",
"freq",
"=",
"None",
")",
"try",
":",
"result",
"=",
"op",
"(",
"left_idx",
",",
"right",
")",
"except",
"NullFrequencyError",
":",
"# DatetimeIndex and TimedeltaIndex with freq == None raise ValueError",
"# on add/sub of integers (or int-like). We re-raise as a TypeError.",
"raise",
"TypeError",
"(",
"'incompatible type for a datetime/timedelta '",
"'operation [{name}]'",
".",
"format",
"(",
"name",
"=",
"op",
".",
"__name__",
")",
")",
"return",
"result"
] |
Wrap Series left in the given index_class to delegate the operation op
to the index implementation. DatetimeIndex and TimedeltaIndex perform
type checking, timezone handling, overflow checks, etc.
Parameters
----------
op : binary operator (operator.add, operator.sub, ...)
left : Series
right : object
index_class : DatetimeIndex or TimedeltaIndex
Returns
-------
result : object, usually DatetimeIndex, TimedeltaIndex, or Series
|
[
"Wrap",
"Series",
"left",
"in",
"the",
"given",
"index_class",
"to",
"delegate",
"the",
"operation",
"op",
"to",
"the",
"index",
"implementation",
".",
"DatetimeIndex",
"and",
"TimedeltaIndex",
"perform",
"type",
"checking",
"timezone",
"handling",
"overflow",
"checks",
"etc",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1349-L1380
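The re-raised TypeError is observable from public Series arithmetic; a short sketch:
import pandas as pd

ser = pd.Series(pd.to_datetime(["2019-01-01", "2019-01-02"]))
try:
    ser + 1          # integer add to datetime64 data without a freq is rejected
except TypeError as err:
    print(type(err).__name__)   # TypeError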
|
19,962
|
pandas-dev/pandas
|
pandas/core/ops.py
|
dispatch_to_extension_op
|
def dispatch_to_extension_op(op, left, right):
"""
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
"""
# The op calls will raise TypeError if the op is not defined
# on the ExtensionArray
# unbox Series and Index to arrays
if isinstance(left, (ABCSeries, ABCIndexClass)):
new_left = left._values
else:
new_left = left
if isinstance(right, (ABCSeries, ABCIndexClass)):
new_right = right._values
else:
new_right = right
res_values = op(new_left, new_right)
res_name = get_op_result_name(left, right)
if op.__name__ in ['divmod', 'rdivmod']:
return _construct_divmod_result(
left, res_values, left.index, res_name)
return _construct_result(left, res_values, left.index, res_name)
|
python
|
def dispatch_to_extension_op(op, left, right):
"""
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
"""
# The op calls will raise TypeError if the op is not defined
# on the ExtensionArray
# unbox Series and Index to arrays
if isinstance(left, (ABCSeries, ABCIndexClass)):
new_left = left._values
else:
new_left = left
if isinstance(right, (ABCSeries, ABCIndexClass)):
new_right = right._values
else:
new_right = right
res_values = op(new_left, new_right)
res_name = get_op_result_name(left, right)
if op.__name__ in ['divmod', 'rdivmod']:
return _construct_divmod_result(
left, res_values, left.index, res_name)
return _construct_result(left, res_values, left.index, res_name)
|
[
"def",
"dispatch_to_extension_op",
"(",
"op",
",",
"left",
",",
"right",
")",
":",
"# The op calls will raise TypeError if the op is not defined",
"# on the ExtensionArray",
"# unbox Series and Index to arrays",
"if",
"isinstance",
"(",
"left",
",",
"(",
"ABCSeries",
",",
"ABCIndexClass",
")",
")",
":",
"new_left",
"=",
"left",
".",
"_values",
"else",
":",
"new_left",
"=",
"left",
"if",
"isinstance",
"(",
"right",
",",
"(",
"ABCSeries",
",",
"ABCIndexClass",
")",
")",
":",
"new_right",
"=",
"right",
".",
"_values",
"else",
":",
"new_right",
"=",
"right",
"res_values",
"=",
"op",
"(",
"new_left",
",",
"new_right",
")",
"res_name",
"=",
"get_op_result_name",
"(",
"left",
",",
"right",
")",
"if",
"op",
".",
"__name__",
"in",
"[",
"'divmod'",
",",
"'rdivmod'",
"]",
":",
"return",
"_construct_divmod_result",
"(",
"left",
",",
"res_values",
",",
"left",
".",
"index",
",",
"res_name",
")",
"return",
"_construct_result",
"(",
"left",
",",
"res_values",
",",
"left",
".",
"index",
",",
"res_name",
")"
] |
Assume that left or right is a Series backed by an ExtensionArray,
apply the operator defined by op.
|
[
"Assume",
"that",
"left",
"or",
"right",
"is",
"a",
"Series",
"backed",
"by",
"an",
"ExtensionArray",
"apply",
"the",
"operator",
"defined",
"by",
"op",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1383-L1410
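An illustration through a public extension dtype: the arithmetic is delegated to the ExtensionArray and the result keeps its dtype.
import pandas as pd

ser = pd.Series([1, 2, None], dtype="Int64")   # nullable integer ExtensionArray
print((ser + 1).dtype)                         # Int64: the op ran on the extension array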
|
19,963
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_align_method_SERIES
|
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
# ToDo: Different from _align_method_FRAME, list, tuple and ndarray
# are not coerced here
# because Series has inconsistencies described in #13637
if isinstance(right, ABCSeries):
# avoid repeated alignment
if not left.index.equals(right.index):
if align_asobject:
# to keep original value's dtype for bool ops
left = left.astype(object)
right = right.astype(object)
left, right = left.align(right, copy=False)
return left, right
|
python
|
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
# ToDo: Different from _align_method_FRAME, list, tuple and ndarray
# are not coerced here
# because Series has inconsistencies described in #13637
if isinstance(right, ABCSeries):
# avoid repeated alignment
if not left.index.equals(right.index):
if align_asobject:
# to keep original value's dtype for bool ops
left = left.astype(object)
right = right.astype(object)
left, right = left.align(right, copy=False)
return left, right
|
[
"def",
"_align_method_SERIES",
"(",
"left",
",",
"right",
",",
"align_asobject",
"=",
"False",
")",
":",
"# ToDo: Different from _align_method_FRAME, list, tuple and ndarray",
"# are not coerced here",
"# because Series has inconsistencies described in #13637",
"if",
"isinstance",
"(",
"right",
",",
"ABCSeries",
")",
":",
"# avoid repeated alignment",
"if",
"not",
"left",
".",
"index",
".",
"equals",
"(",
"right",
".",
"index",
")",
":",
"if",
"align_asobject",
":",
"# to keep original value's dtype for bool ops",
"left",
"=",
"left",
".",
"astype",
"(",
"object",
")",
"right",
"=",
"right",
".",
"astype",
"(",
"object",
")",
"left",
",",
"right",
"=",
"left",
".",
"align",
"(",
"right",
",",
"copy",
"=",
"False",
")",
"return",
"left",
",",
"right"
] |
align lhs and rhs Series
|
[
"align",
"lhs",
"and",
"rhs",
"Series"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1628-L1646
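The alignment step is visible in ordinary Series arithmetic; a minimal sketch:
import pandas as pd

a = pd.Series([1, 2, 3], index=["x", "y", "z"])
b = pd.Series([10, 20], index=["y", "z"])
print((a + b).to_dict())   # {'x': nan, 'y': 12.0, 'z': 23.0} -> indexes are aligned before the op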
|
19,964
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_construct_divmod_result
|
def _construct_divmod_result(left, result, index, name, dtype=None):
"""divmod returns a tuple of like indexed series instead of a single series.
"""
return (
_construct_result(left, result[0], index=index, name=name,
dtype=dtype),
_construct_result(left, result[1], index=index, name=name,
dtype=dtype),
)
|
python
|
def _construct_divmod_result(left, result, index, name, dtype=None):
"""divmod returns a tuple of like indexed series instead of a single series.
"""
return (
_construct_result(left, result[0], index=index, name=name,
dtype=dtype),
_construct_result(left, result[1], index=index, name=name,
dtype=dtype),
)
|
[
"def",
"_construct_divmod_result",
"(",
"left",
",",
"result",
",",
"index",
",",
"name",
",",
"dtype",
"=",
"None",
")",
":",
"return",
"(",
"_construct_result",
"(",
"left",
",",
"result",
"[",
"0",
"]",
",",
"index",
"=",
"index",
",",
"name",
"=",
"name",
",",
"dtype",
"=",
"dtype",
")",
",",
"_construct_result",
"(",
"left",
",",
"result",
"[",
"1",
"]",
",",
"index",
"=",
"index",
",",
"name",
"=",
"name",
",",
"dtype",
"=",
"dtype",
")",
",",
")"
] |
divmod returns a tuple of like indexed series instead of a single series.
|
[
"divmod",
"returns",
"a",
"tuple",
"of",
"like",
"indexed",
"series",
"instead",
"of",
"a",
"single",
"series",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L1661-L1669
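The tuple-of-Series result is what public divmod returns; for example:
import pandas as pd

quotient, remainder = divmod(pd.Series([7, 8, 9]), 4)
print(quotient.tolist())    # [1, 2, 2]
print(remainder.tolist())   # [3, 0, 1]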
|
19,965
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_combine_series_frame
|
def _combine_series_frame(self, other, func, fill_value=None, axis=None,
level=None):
"""
Apply binary operator `func` to self, other using alignment and fill
conventions determined by the fill_value, axis, and level kwargs.
Parameters
----------
self : DataFrame
other : Series
func : binary operator
fill_value : object, default None
axis : {0, 1, 'columns', 'index', None}, default None
level : int or None, default None
Returns
-------
result : DataFrame
"""
if fill_value is not None:
raise NotImplementedError("fill_value {fill} not supported."
.format(fill=fill_value))
if axis is not None:
axis = self._get_axis_number(axis)
if axis == 0:
return self._combine_match_index(other, func, level=level)
else:
return self._combine_match_columns(other, func, level=level)
else:
if not len(other):
return self * np.nan
if not len(self):
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
# default axis is columns
return self._combine_match_columns(other, func, level=level)
|
python
|
def _combine_series_frame(self, other, func, fill_value=None, axis=None,
level=None):
"""
Apply binary operator `func` to self, other using alignment and fill
conventions determined by the fill_value, axis, and level kwargs.
Parameters
----------
self : DataFrame
other : Series
func : binary operator
fill_value : object, default None
axis : {0, 1, 'columns', 'index', None}, default None
level : int or None, default None
Returns
-------
result : DataFrame
"""
if fill_value is not None:
raise NotImplementedError("fill_value {fill} not supported."
.format(fill=fill_value))
if axis is not None:
axis = self._get_axis_number(axis)
if axis == 0:
return self._combine_match_index(other, func, level=level)
else:
return self._combine_match_columns(other, func, level=level)
else:
if not len(other):
return self * np.nan
if not len(self):
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
# default axis is columns
return self._combine_match_columns(other, func, level=level)
|
[
"def",
"_combine_series_frame",
"(",
"self",
",",
"other",
",",
"func",
",",
"fill_value",
"=",
"None",
",",
"axis",
"=",
"None",
",",
"level",
"=",
"None",
")",
":",
"if",
"fill_value",
"is",
"not",
"None",
":",
"raise",
"NotImplementedError",
"(",
"\"fill_value {fill} not supported.\"",
".",
"format",
"(",
"fill",
"=",
"fill_value",
")",
")",
"if",
"axis",
"is",
"not",
"None",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"return",
"self",
".",
"_combine_match_index",
"(",
"other",
",",
"func",
",",
"level",
"=",
"level",
")",
"else",
":",
"return",
"self",
".",
"_combine_match_columns",
"(",
"other",
",",
"func",
",",
"level",
"=",
"level",
")",
"else",
":",
"if",
"not",
"len",
"(",
"other",
")",
":",
"return",
"self",
"*",
"np",
".",
"nan",
"if",
"not",
"len",
"(",
"self",
")",
":",
"# Ambiguous case, use _series so works with DataFrame",
"return",
"self",
".",
"_constructor",
"(",
"data",
"=",
"self",
".",
"_series",
",",
"index",
"=",
"self",
".",
"index",
",",
"columns",
"=",
"self",
".",
"columns",
")",
"# default axis is columns",
"return",
"self",
".",
"_combine_match_columns",
"(",
"other",
",",
"func",
",",
"level",
"=",
"level",
")"
] |
Apply binary operator `func` to self, other using alignment and fill
conventions determined by the fill_value, axis, and level kwargs.
Parameters
----------
self : DataFrame
other : Series
func : binary operator
fill_value : object, default None
axis : {0, 1, 'columns', 'index', None}, default None
level : int or None, default None
Returns
-------
result : DataFrame
|
[
"Apply",
"binary",
"operator",
"func",
"to",
"self",
"other",
"using",
"alignment",
"and",
"fill",
"conventions",
"determined",
"by",
"the",
"fill_value",
"axis",
"and",
"level",
"kwargs",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L2073-L2112
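The axis handling above corresponds to the public flex methods; a short sketch:
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
row = pd.Series({"a": 10, "b": 100})
col = pd.Series([10, 100])

print(df.add(row, axis="columns"))   # match on column labels (the default)
print(df.add(col, axis="index"))     # match on the row index instead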
|
19,966
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_align_method_FRAME
|
def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right):
msg = ('Unable to coerce to Series, length must be {req_len}: '
'given {given_len}')
if axis is not None and left._get_axis_name(axis) == 'index':
if len(left.index) != len(right):
raise ValueError(msg.format(req_len=len(left.index),
given_len=len(right)))
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(msg.format(req_len=len(left.columns),
given_len=len(right)))
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, np.ndarray):
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if right.shape == left.shape:
right = left._constructor(right, index=left.index,
columns=left.columns)
elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
# Broadcast across columns
right = np.broadcast_to(right, left.shape)
right = left._constructor(right,
index=left.index,
columns=left.columns)
elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
# Broadcast along rows
right = to_series(right[0, :])
else:
raise ValueError("Unable to coerce to DataFrame, shape "
"must be {req_shape}: given {given_shape}"
.format(req_shape=left.shape,
given_shape=right.shape))
elif right.ndim > 2:
raise ValueError('Unable to coerce to Series/DataFrame, dim '
'must be <= 2: {dim}'.format(dim=right.shape))
elif (is_list_like(right) and
not isinstance(right, (ABCSeries, ABCDataFrame))):
# GH17901
right = to_series(right)
return right
|
python
|
def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right):
msg = ('Unable to coerce to Series, length must be {req_len}: '
'given {given_len}')
if axis is not None and left._get_axis_name(axis) == 'index':
if len(left.index) != len(right):
raise ValueError(msg.format(req_len=len(left.index),
given_len=len(right)))
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(msg.format(req_len=len(left.columns),
given_len=len(right)))
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, np.ndarray):
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if right.shape == left.shape:
right = left._constructor(right, index=left.index,
columns=left.columns)
elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
# Broadcast across columns
right = np.broadcast_to(right, left.shape)
right = left._constructor(right,
index=left.index,
columns=left.columns)
elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
# Broadcast along rows
right = to_series(right[0, :])
else:
raise ValueError("Unable to coerce to DataFrame, shape "
"must be {req_shape}: given {given_shape}"
.format(req_shape=left.shape,
given_shape=right.shape))
elif right.ndim > 2:
raise ValueError('Unable to coerce to Series/DataFrame, dim '
'must be <= 2: {dim}'.format(dim=right.shape))
elif (is_list_like(right) and
not isinstance(right, (ABCSeries, ABCDataFrame))):
# GH17901
right = to_series(right)
return right
|
[
"def",
"_align_method_FRAME",
"(",
"left",
",",
"right",
",",
"axis",
")",
":",
"def",
"to_series",
"(",
"right",
")",
":",
"msg",
"=",
"(",
"'Unable to coerce to Series, length must be {req_len}: '",
"'given {given_len}'",
")",
"if",
"axis",
"is",
"not",
"None",
"and",
"left",
".",
"_get_axis_name",
"(",
"axis",
")",
"==",
"'index'",
":",
"if",
"len",
"(",
"left",
".",
"index",
")",
"!=",
"len",
"(",
"right",
")",
":",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"req_len",
"=",
"len",
"(",
"left",
".",
"index",
")",
",",
"given_len",
"=",
"len",
"(",
"right",
")",
")",
")",
"right",
"=",
"left",
".",
"_constructor_sliced",
"(",
"right",
",",
"index",
"=",
"left",
".",
"index",
")",
"else",
":",
"if",
"len",
"(",
"left",
".",
"columns",
")",
"!=",
"len",
"(",
"right",
")",
":",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"req_len",
"=",
"len",
"(",
"left",
".",
"columns",
")",
",",
"given_len",
"=",
"len",
"(",
"right",
")",
")",
")",
"right",
"=",
"left",
".",
"_constructor_sliced",
"(",
"right",
",",
"index",
"=",
"left",
".",
"columns",
")",
"return",
"right",
"if",
"isinstance",
"(",
"right",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"right",
".",
"ndim",
"==",
"1",
":",
"right",
"=",
"to_series",
"(",
"right",
")",
"elif",
"right",
".",
"ndim",
"==",
"2",
":",
"if",
"right",
".",
"shape",
"==",
"left",
".",
"shape",
":",
"right",
"=",
"left",
".",
"_constructor",
"(",
"right",
",",
"index",
"=",
"left",
".",
"index",
",",
"columns",
"=",
"left",
".",
"columns",
")",
"elif",
"right",
".",
"shape",
"[",
"0",
"]",
"==",
"left",
".",
"shape",
"[",
"0",
"]",
"and",
"right",
".",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"# Broadcast across columns",
"right",
"=",
"np",
".",
"broadcast_to",
"(",
"right",
",",
"left",
".",
"shape",
")",
"right",
"=",
"left",
".",
"_constructor",
"(",
"right",
",",
"index",
"=",
"left",
".",
"index",
",",
"columns",
"=",
"left",
".",
"columns",
")",
"elif",
"right",
".",
"shape",
"[",
"1",
"]",
"==",
"left",
".",
"shape",
"[",
"1",
"]",
"and",
"right",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"# Broadcast along rows",
"right",
"=",
"to_series",
"(",
"right",
"[",
"0",
",",
":",
"]",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unable to coerce to DataFrame, shape \"",
"\"must be {req_shape}: given {given_shape}\"",
".",
"format",
"(",
"req_shape",
"=",
"left",
".",
"shape",
",",
"given_shape",
"=",
"right",
".",
"shape",
")",
")",
"elif",
"right",
".",
"ndim",
">",
"2",
":",
"raise",
"ValueError",
"(",
"'Unable to coerce to Series/DataFrame, dim '",
"'must be <= 2: {dim}'",
".",
"format",
"(",
"dim",
"=",
"right",
".",
"shape",
")",
")",
"elif",
"(",
"is_list_like",
"(",
"right",
")",
"and",
"not",
"isinstance",
"(",
"right",
",",
"(",
"ABCSeries",
",",
"ABCDataFrame",
")",
")",
")",
":",
"# GH17901",
"right",
"=",
"to_series",
"(",
"right",
")",
"return",
"right"
] |
convert rhs to meet lhs dims if input is list, tuple or np.ndarray
|
[
"convert",
"rhs",
"to",
"meet",
"lhs",
"dims",
"if",
"input",
"is",
"list",
"tuple",
"or",
"np",
".",
"ndarray"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L2115-L2169
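The list/ndarray coercion is observable with plain DataFrame arithmetic; an illustrative sketch:
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(df + [10, 100])                 # length-2 list coerced along the columns
print(df + np.array([[10], [100]]))   # (2, 1) array broadcast across the columns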
|
19,967
|
pandas-dev/pandas
|
pandas/core/ops.py
|
_cast_sparse_series_op
|
def _cast_sparse_series_op(left, right, opname):
"""
For SparseSeries operation, coerce to float64 if the result is expected
to have NaN or inf values
Parameters
----------
left : SparseArray
right : SparseArray
opname : str
Returns
-------
left : SparseArray
right : SparseArray
"""
from pandas.core.sparse.api import SparseDtype
opname = opname.strip('_')
# TODO: This should be moved to the array?
if is_integer_dtype(left) and is_integer_dtype(right):
# series coerces to float64 if result should have NaN/inf
if opname in ('floordiv', 'mod') and (right.values == 0).any():
left = left.astype(SparseDtype(np.float64, left.fill_value))
right = right.astype(SparseDtype(np.float64, right.fill_value))
elif opname in ('rfloordiv', 'rmod') and (left.values == 0).any():
left = left.astype(SparseDtype(np.float64, left.fill_value))
right = right.astype(SparseDtype(np.float64, right.fill_value))
return left, right
|
python
|
def _cast_sparse_series_op(left, right, opname):
"""
For SparseSeries operation, coerce to float64 if the result is expected
to have NaN or inf values
Parameters
----------
left : SparseArray
right : SparseArray
opname : str
Returns
-------
left : SparseArray
right : SparseArray
"""
from pandas.core.sparse.api import SparseDtype
opname = opname.strip('_')
# TODO: This should be moved to the array?
if is_integer_dtype(left) and is_integer_dtype(right):
# series coerces to float64 if result should have NaN/inf
if opname in ('floordiv', 'mod') and (right.values == 0).any():
left = left.astype(SparseDtype(np.float64, left.fill_value))
right = right.astype(SparseDtype(np.float64, right.fill_value))
elif opname in ('rfloordiv', 'rmod') and (left.values == 0).any():
left = left.astype(SparseDtype(np.float64, left.fill_value))
right = right.astype(SparseDtype(np.float64, right.fill_value))
return left, right
|
[
"def",
"_cast_sparse_series_op",
"(",
"left",
",",
"right",
",",
"opname",
")",
":",
"from",
"pandas",
".",
"core",
".",
"sparse",
".",
"api",
"import",
"SparseDtype",
"opname",
"=",
"opname",
".",
"strip",
"(",
"'_'",
")",
"# TODO: This should be moved to the array?",
"if",
"is_integer_dtype",
"(",
"left",
")",
"and",
"is_integer_dtype",
"(",
"right",
")",
":",
"# series coerces to float64 if result should have NaN/inf",
"if",
"opname",
"in",
"(",
"'floordiv'",
",",
"'mod'",
")",
"and",
"(",
"right",
".",
"values",
"==",
"0",
")",
".",
"any",
"(",
")",
":",
"left",
"=",
"left",
".",
"astype",
"(",
"SparseDtype",
"(",
"np",
".",
"float64",
",",
"left",
".",
"fill_value",
")",
")",
"right",
"=",
"right",
".",
"astype",
"(",
"SparseDtype",
"(",
"np",
".",
"float64",
",",
"right",
".",
"fill_value",
")",
")",
"elif",
"opname",
"in",
"(",
"'rfloordiv'",
",",
"'rmod'",
")",
"and",
"(",
"left",
".",
"values",
"==",
"0",
")",
".",
"any",
"(",
")",
":",
"left",
"=",
"left",
".",
"astype",
"(",
"SparseDtype",
"(",
"np",
".",
"float64",
",",
"left",
".",
"fill_value",
")",
")",
"right",
"=",
"right",
".",
"astype",
"(",
"SparseDtype",
"(",
"np",
".",
"float64",
",",
"right",
".",
"fill_value",
")",
")",
"return",
"left",
",",
"right"
] |
For SparseSeries operation, coerce to float64 if the result is expected
to have NaN or inf values
Parameters
----------
left : SparseArray
right : SparseArray
opname : str
Returns
-------
left : SparseArray
right : SparseArray
|
[
"For",
"SparseSeries",
"operation",
"coerce",
"to",
"float64",
"if",
"the",
"result",
"is",
"expected",
"to",
"have",
"NaN",
"or",
"inf",
"values"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/ops.py#L2389-L2419
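A minimal sketch of the coercion step in isolation, using the public SparseArray/SparseDtype API rather than the internal SparseSeries call site (the arrays and the zero check are assumptions for illustration): integer sparse data cannot hold the NaN/inf that floordiv- or mod-by-zero produces, so both operands are upcast to float64 while keeping their fill values.

import numpy as np
import pandas as pd

left = pd.arrays.SparseArray([1, 2, 0])    # Sparse[int64, 0]
right = pd.arrays.SparseArray([0, 1, 2])   # Sparse[int64, 0]
if (np.asarray(right) == 0).any():
    # mirror of the upcast performed before the floordiv/mod op
    left = left.astype(pd.SparseDtype(np.float64, left.fill_value))
    right = right.astype(pd.SparseDtype(np.float64, right.fill_value))
print(left.dtype, right.dtype)   # both Sparse[float64, ...]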
|
19,968
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
validate_inferred_freq
|
def validate_inferred_freq(freq, inferred_freq, freq_infer):
"""
If the user passes a freq and another freq is inferred from passed data,
require that they match.
Parameters
----------
freq : DateOffset or None
inferred_freq : DateOffset or None
freq_infer : bool
Returns
-------
freq : DateOffset or None
freq_infer : bool
Notes
-----
We assume at this point that `maybe_infer_freq` has been called, so
`freq` is either a DateOffset object or None.
"""
if inferred_freq is not None:
if freq is not None and freq != inferred_freq:
raise ValueError('Inferred frequency {inferred} from passed '
'values does not conform to passed frequency '
'{passed}'
.format(inferred=inferred_freq,
passed=freq.freqstr))
elif freq is None:
freq = inferred_freq
freq_infer = False
return freq, freq_infer
|
python
|
def validate_inferred_freq(freq, inferred_freq, freq_infer):
"""
If the user passes a freq and another freq is inferred from passed data,
require that they match.
Parameters
----------
freq : DateOffset or None
inferred_freq : DateOffset or None
freq_infer : bool
Returns
-------
freq : DateOffset or None
freq_infer : bool
Notes
-----
We assume at this point that `maybe_infer_freq` has been called, so
`freq` is either a DateOffset object or None.
"""
if inferred_freq is not None:
if freq is not None and freq != inferred_freq:
raise ValueError('Inferred frequency {inferred} from passed '
'values does not conform to passed frequency '
'{passed}'
.format(inferred=inferred_freq,
passed=freq.freqstr))
elif freq is None:
freq = inferred_freq
freq_infer = False
return freq, freq_infer
|
[
"def",
"validate_inferred_freq",
"(",
"freq",
",",
"inferred_freq",
",",
"freq_infer",
")",
":",
"if",
"inferred_freq",
"is",
"not",
"None",
":",
"if",
"freq",
"is",
"not",
"None",
"and",
"freq",
"!=",
"inferred_freq",
":",
"raise",
"ValueError",
"(",
"'Inferred frequency {inferred} from passed '",
"'values does not conform to passed frequency '",
"'{passed}'",
".",
"format",
"(",
"inferred",
"=",
"inferred_freq",
",",
"passed",
"=",
"freq",
".",
"freqstr",
")",
")",
"elif",
"freq",
"is",
"None",
":",
"freq",
"=",
"inferred_freq",
"freq_infer",
"=",
"False",
"return",
"freq",
",",
"freq_infer"
] |
If the user passes a freq and another freq is inferred from passed data,
require that they match.
Parameters
----------
freq : DateOffset or None
inferred_freq : DateOffset or None
freq_infer : bool
Returns
-------
freq : DateOffset or None
freq_infer : bool
Notes
-----
We assume at this point that `maybe_infer_freq` has been called, so
`freq` is either a DateOffset object or None.
|
[
"If",
"the",
"user",
"passes",
"a",
"freq",
"and",
"another",
"freq",
"is",
"inferred",
"from",
"passed",
"data",
"require",
"that",
"they",
"match",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1501-L1533
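The check surfaces through the public constructors; a short example of the mismatch it rejects, assuming daily-spaced input values:

import pandas as pd

values = ["2020-01-01", "2020-01-02", "2020-01-03"]
pd.DatetimeIndex(values, freq="D")        # fine: passed freq matches the data
try:
    pd.DatetimeIndex(values, freq="2D")   # conflicts with the inferred 'D'
except ValueError as err:
    print(err)   # Inferred frequency D from passed values does not conform ...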
|
19,969
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
maybe_infer_freq
|
def maybe_infer_freq(freq):
"""
Comparing a DateOffset to the string "infer" raises, so we need to
be careful about comparisons. Make a dummy variable `freq_infer` to
signify the case where the given freq is "infer" and set freq to None
to avoid comparison trouble later on.
Parameters
----------
freq : {DateOffset, None, str}
Returns
-------
freq : {DateOffset, None}
freq_infer : bool
"""
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = frequencies.to_offset(freq)
else:
freq_infer = True
freq = None
return freq, freq_infer
|
python
|
def maybe_infer_freq(freq):
"""
Comparing a DateOffset to the string "infer" raises, so we need to
be careful about comparisons. Make a dummy variable `freq_infer` to
signify the case where the given freq is "infer" and set freq to None
to avoid comparison trouble later on.
Parameters
----------
freq : {DateOffset, None, str}
Returns
-------
freq : {DateOffset, None}
freq_infer : bool
"""
freq_infer = False
if not isinstance(freq, DateOffset):
# if a passed freq is None, don't infer automatically
if freq != 'infer':
freq = frequencies.to_offset(freq)
else:
freq_infer = True
freq = None
return freq, freq_infer
|
[
"def",
"maybe_infer_freq",
"(",
"freq",
")",
":",
"freq_infer",
"=",
"False",
"if",
"not",
"isinstance",
"(",
"freq",
",",
"DateOffset",
")",
":",
"# if a passed freq is None, don't infer automatically",
"if",
"freq",
"!=",
"'infer'",
":",
"freq",
"=",
"frequencies",
".",
"to_offset",
"(",
"freq",
")",
"else",
":",
"freq_infer",
"=",
"True",
"freq",
"=",
"None",
"return",
"freq",
",",
"freq_infer"
] |
Comparing a DateOffset to the string "infer" raises, so we need to
be careful about comparisons. Make a dummy variable `freq_infer` to
signify the case where the given freq is "infer" and set freq to None
to avoid comparison trouble later on.
Parameters
----------
freq : {DateOffset, None, str}
Returns
-------
freq : {DateOffset, None}
freq_infer : bool
|
[
"Comparing",
"a",
"DateOffset",
"to",
"the",
"string",
"infer",
"raises",
"so",
"we",
"need",
"to",
"be",
"careful",
"about",
"comparisons",
".",
"Make",
"a",
"dummy",
"variable",
"freq_infer",
"to",
"signify",
"the",
"case",
"where",
"the",
"given",
"freq",
"is",
"infer",
"and",
"set",
"freq",
"to",
"None",
"to",
"avoid",
"comparison",
"trouble",
"later",
"on",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1536-L1560
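The 'infer' sentinel in practice, again through the public constructor: instead of validating against a fixed offset, pandas deduces the frequency from the data (the freq_infer=True branch above).

import pandas as pd

idx = pd.DatetimeIndex(["2020-01-01", "2020-01-02", "2020-01-03"], freq="infer")
print(idx.freq)   # <Day>, inferred from the evenly spaced values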
|
19,970
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
_ensure_datetimelike_to_i8
|
def _ensure_datetimelike_to_i8(other, to_utc=False):
"""
Helper for coercing an input scalar or array to i8.
Parameters
----------
other : 1d array
to_utc : bool, default False
If True, convert the values to UTC before extracting the i8 values
If False, extract the i8 values directly.
Returns
-------
i8 1d array
"""
from pandas import Index
from pandas.core.arrays import PeriodArray
if lib.is_scalar(other) and isna(other):
return iNaT
elif isinstance(other, (PeriodArray, ABCIndexClass,
DatetimeLikeArrayMixin)):
# convert tz if needed
if getattr(other, 'tz', None) is not None:
if to_utc:
other = other.tz_convert('UTC')
else:
other = other.tz_localize(None)
else:
try:
return np.array(other, copy=False).view('i8')
except TypeError:
# period array cannot be coerced to int
other = Index(other)
return other.asi8
|
python
|
def _ensure_datetimelike_to_i8(other, to_utc=False):
"""
Helper for coercing an input scalar or array to i8.
Parameters
----------
other : 1d array
to_utc : bool, default False
If True, convert the values to UTC before extracting the i8 values
If False, extract the i8 values directly.
Returns
-------
i8 1d array
"""
from pandas import Index
from pandas.core.arrays import PeriodArray
if lib.is_scalar(other) and isna(other):
return iNaT
elif isinstance(other, (PeriodArray, ABCIndexClass,
DatetimeLikeArrayMixin)):
# convert tz if needed
if getattr(other, 'tz', None) is not None:
if to_utc:
other = other.tz_convert('UTC')
else:
other = other.tz_localize(None)
else:
try:
return np.array(other, copy=False).view('i8')
except TypeError:
# period array cannot be coerced to int
other = Index(other)
return other.asi8
|
[
"def",
"_ensure_datetimelike_to_i8",
"(",
"other",
",",
"to_utc",
"=",
"False",
")",
":",
"from",
"pandas",
"import",
"Index",
"from",
"pandas",
".",
"core",
".",
"arrays",
"import",
"PeriodArray",
"if",
"lib",
".",
"is_scalar",
"(",
"other",
")",
"and",
"isna",
"(",
"other",
")",
":",
"return",
"iNaT",
"elif",
"isinstance",
"(",
"other",
",",
"(",
"PeriodArray",
",",
"ABCIndexClass",
",",
"DatetimeLikeArrayMixin",
")",
")",
":",
"# convert tz if needed",
"if",
"getattr",
"(",
"other",
",",
"'tz'",
",",
"None",
")",
"is",
"not",
"None",
":",
"if",
"to_utc",
":",
"other",
"=",
"other",
".",
"tz_convert",
"(",
"'UTC'",
")",
"else",
":",
"other",
"=",
"other",
".",
"tz_localize",
"(",
"None",
")",
"else",
":",
"try",
":",
"return",
"np",
".",
"array",
"(",
"other",
",",
"copy",
"=",
"False",
")",
".",
"view",
"(",
"'i8'",
")",
"except",
"TypeError",
":",
"# period array cannot be coerced to int",
"other",
"=",
"Index",
"(",
"other",
")",
"return",
"other",
".",
"asi8"
] |
Helper for coercing an input scalar or array to i8.
Parameters
----------
other : 1d array
to_utc : bool, default False
If True, convert the values to UTC before extracting the i8 values
If False, extract the i8 values directly.
Returns
-------
i8 1d array
|
[
"Helper",
"for",
"coercing",
"an",
"input",
"scalar",
"or",
"array",
"to",
"i8",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1563-L1597
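What the i8 view looks like from the public side, assuming a tz-aware index: the underlying int64 data is stored in UTC, and dropping the tz first gives wall-clock integers instead.

import pandas as pd

idx = pd.date_range("2020-01-01", periods=2, freq="D", tz="US/Eastern")
print(idx.asi8[:1])                     # int64 nanoseconds since epoch, in UTC
print(idx.tz_localize(None).asi8[:1])   # wall-clock nanoseconds (tz dropped)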
|
19,971
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
AttributesMixin._scalar_from_string
|
def _scalar_from_string(
self,
value: str,
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
|
python
|
def _scalar_from_string(
self,
value: str,
) -> Union[Period, Timestamp, Timedelta, NaTType]:
"""
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
"""
raise AbstractMethodError(self)
|
[
"def",
"_scalar_from_string",
"(",
"self",
",",
"value",
":",
"str",
",",
")",
"->",
"Union",
"[",
"Period",
",",
"Timestamp",
",",
"Timedelta",
",",
"NaTType",
"]",
":",
"raise",
"AbstractMethodError",
"(",
"self",
")"
] |
Construct a scalar type from a string.
Parameters
----------
value : str
Returns
-------
Period, Timestamp, or Timedelta, or NaT
Whatever the type of ``self._scalar_type`` is.
Notes
-----
This should call ``self._check_compatible_with`` before
unboxing the result.
|
[
"Construct",
"a",
"scalar",
"type",
"from",
"a",
"string",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L68-L89
|
19,972
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
AttributesMixin._unbox_scalar
|
def _unbox_scalar(
self,
value: Union[Period, Timestamp, Timedelta, NaTType],
) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
|
python
|
def _unbox_scalar(
self,
value: Union[Period, Timestamp, Timedelta, NaTType],
) -> int:
"""
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
"""
raise AbstractMethodError(self)
|
[
"def",
"_unbox_scalar",
"(",
"self",
",",
"value",
":",
"Union",
"[",
"Period",
",",
"Timestamp",
",",
"Timedelta",
",",
"NaTType",
"]",
",",
")",
"->",
"int",
":",
"raise",
"AbstractMethodError",
"(",
"self",
")"
] |
Unbox the integer value of a scalar `value`.
Parameters
----------
value : Union[Period, Timestamp, Timedelta]
Returns
-------
int
Examples
--------
>>> self._unbox_scalar(Timedelta('10s')) # DOCTEST: +SKIP
10000000000
|
[
"Unbox",
"the",
"integer",
"value",
"of",
"a",
"scalar",
"value",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L91-L111
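The method itself is abstract here; "unboxing" is easiest to see through the public scalar attributes: nanoseconds for Timestamp/Timedelta, the ordinal for Period.

import pandas as pd

print(pd.Timedelta("10s").value)             # 10000000000 nanoseconds
print(pd.Timestamp("1970-01-01").value)      # 0
print(pd.Period("2020", freq="A").ordinal)   # 50, i.e. years since 1970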
|
19,973
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
AttributesMixin._check_compatible_with
|
def _check_compatible_with(
self,
other: Union[Period, Timestamp, Timedelta, NaTType],
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
Raises
------
Exception
"""
raise AbstractMethodError(self)
|
python
|
def _check_compatible_with(
self,
other: Union[Period, Timestamp, Timedelta, NaTType],
) -> None:
"""
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
Raises
------
Exception
"""
raise AbstractMethodError(self)
|
[
"def",
"_check_compatible_with",
"(",
"self",
",",
"other",
":",
"Union",
"[",
"Period",
",",
"Timestamp",
",",
"Timedelta",
",",
"NaTType",
"]",
",",
")",
"->",
"None",
":",
"raise",
"AbstractMethodError",
"(",
"self",
")"
] |
Verify that `self` and `other` are compatible.
* DatetimeArray verifies that the timezones (if any) match
* PeriodArray verifies that the freq matches
* Timedelta has no verification
In each case, NaT is considered compatible.
Parameters
----------
other
Raises
------
Exception
|
[
"Verify",
"that",
"self",
"and",
"other",
"are",
"compatible",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L113-L134
|
19,974
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatelikeOps.strftime
|
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
Index
Index of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
from pandas import Index
return Index(self._format_native_types(date_format=date_format))
|
python
|
def strftime(self, date_format):
"""
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
Index
Index of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
"""
from pandas import Index
return Index(self._format_native_types(date_format=date_format))
|
[
"def",
"strftime",
"(",
"self",
",",
"date_format",
")",
":",
"from",
"pandas",
"import",
"Index",
"return",
"Index",
"(",
"self",
".",
"_format_native_types",
"(",
"date_format",
"=",
"date_format",
")",
")"
] |
Convert to Index using specified date_format.
Return an Index of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format
doc <%(URL)s>`__.
Parameters
----------
date_format : str
Date format string (e.g. "%%Y-%%m-%%d").
Returns
-------
Index
Index of formatted strings.
See Also
--------
to_datetime : Convert the given argument to datetime.
DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.
Examples
--------
>>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
... periods=3, freq='s')
>>> rng.strftime('%%B %%d, %%Y, %%r')
Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
'March 10, 2018, 09:00:02 AM'],
dtype='object')
|
[
"Convert",
"to",
"Index",
"using",
"specified",
"date_format",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L144-L180
|
19,975
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin.repeat
|
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view('i8'), dtype=self.dtype)
|
python
|
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
values = self._data.repeat(repeats)
return type(self)(values.view('i8'), dtype=self.dtype)
|
[
"def",
"repeat",
"(",
"self",
",",
"repeats",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_repeat",
"(",
"args",
",",
"kwargs",
")",
"values",
"=",
"self",
".",
"_data",
".",
"repeat",
"(",
"repeats",
")",
"return",
"type",
"(",
"self",
")",
"(",
"values",
".",
"view",
"(",
"'i8'",
")",
",",
"dtype",
"=",
"self",
".",
"dtype",
")"
] |
Repeat elements of an array.
See Also
--------
numpy.ndarray.repeat
|
[
"Repeat",
"elements",
"of",
"an",
"array",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L668-L678
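Observed behaviour through a DatetimeIndex; note that any freq is dropped in the result because the repeated values are no longer evenly spaced.

import pandas as pd

idx = pd.date_range("2020-01-01", periods=2, freq="D")
print(idx.repeat(2))
# DatetimeIndex(['2020-01-01', '2020-01-01', '2020-01-02', '2020-01-02'],
#               dtype='datetime64[ns]', freq=None)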
|
19,976
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._add_delta
|
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
delta : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
|
python
|
def _add_delta(self, other):
"""
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
delta : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
"""
if isinstance(other, (Tick, timedelta, np.timedelta64)):
new_values = self._add_timedeltalike_scalar(other)
elif is_timedelta64_dtype(other):
# ndarray[timedelta64] or TimedeltaArray/index
new_values = self._add_delta_tdi(other)
return new_values
|
[
"def",
"_add_delta",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"(",
"Tick",
",",
"timedelta",
",",
"np",
".",
"timedelta64",
")",
")",
":",
"new_values",
"=",
"self",
".",
"_add_timedeltalike_scalar",
"(",
"other",
")",
"elif",
"is_timedelta64_dtype",
"(",
"other",
")",
":",
"# ndarray[timedelta64] or TimedeltaArray/index",
"new_values",
"=",
"self",
".",
"_add_delta_tdi",
"(",
"other",
")",
"return",
"new_values"
] |
Add a timedelta-like, Tick or TimedeltaIndex-like object
to self, yielding an int64 numpy array
Parameters
----------
delta : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : ndarray[int64]
Notes
-----
The result's name is set outside of _add_delta by the calling
method (__add__ or __sub__), if necessary (i.e. for Indexes).
|
[
"Add",
"a",
"timedelta",
"-",
"like",
"Tick",
"or",
"TimedeltaIndex",
"-",
"like",
"object",
"to",
"self",
"yielding",
"an",
"int64",
"numpy",
"array"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L942-L967
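Both branches seen from a DatetimeIndex: a single timedelta-like (the scalar branch) shifts every element, while a timedelta64 array of matching length is added elementwise.

import pandas as pd

idx = pd.date_range("2020-01-01", periods=3, freq="D")
print(idx + pd.Timedelta("1h"))                    # timedeltalike scalar branch
print(idx + pd.to_timedelta(["1h", "2h", "3h"]))   # array branch; freq is lost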
|
19,977
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._add_timedeltalike_scalar
|
def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(len(self), dtype='i8')
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc,
arr_mask=self._isnan).view('i8')
new_values = self._maybe_mask_results(new_values)
return new_values.view('i8')
|
python
|
def _add_timedeltalike_scalar(self, other):
"""
Add a delta of a timedeltalike
return the i8 result view
"""
if isna(other):
# i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
new_values = np.empty(len(self), dtype='i8')
new_values[:] = iNaT
return new_values
inc = delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc,
arr_mask=self._isnan).view('i8')
new_values = self._maybe_mask_results(new_values)
return new_values.view('i8')
|
[
"def",
"_add_timedeltalike_scalar",
"(",
"self",
",",
"other",
")",
":",
"if",
"isna",
"(",
"other",
")",
":",
"# i.e np.timedelta64(\"NaT\"), not recognized by delta_to_nanoseconds",
"new_values",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"self",
")",
",",
"dtype",
"=",
"'i8'",
")",
"new_values",
"[",
":",
"]",
"=",
"iNaT",
"return",
"new_values",
"inc",
"=",
"delta_to_nanoseconds",
"(",
"other",
")",
"new_values",
"=",
"checked_add_with_arr",
"(",
"self",
".",
"asi8",
",",
"inc",
",",
"arr_mask",
"=",
"self",
".",
"_isnan",
")",
".",
"view",
"(",
"'i8'",
")",
"new_values",
"=",
"self",
".",
"_maybe_mask_results",
"(",
"new_values",
")",
"return",
"new_values",
".",
"view",
"(",
"'i8'",
")"
] |
Add a delta of a timedeltalike
return the i8 result view
|
[
"Add",
"a",
"delta",
"of",
"a",
"timedeltalike",
"return",
"the",
"i8",
"result",
"view"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L969-L984
|
19,978
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._add_delta_tdi
|
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas import TimedeltaIndex
other = TimedeltaIndex(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
arr_mask=self._isnan,
b_mask=other._isnan)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('i8')
|
python
|
def _add_delta_tdi(self, other):
"""
Add a delta of a TimedeltaIndex
return the i8 result view
"""
if len(self) != len(other):
raise ValueError("cannot add indices of unequal length")
if isinstance(other, np.ndarray):
# ndarray[timedelta64]; wrap in TimedeltaIndex for op
from pandas import TimedeltaIndex
other = TimedeltaIndex(other)
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
arr_mask=self._isnan,
b_mask=other._isnan)
if self._hasnans or other._hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view('i8')
|
[
"def",
"_add_delta_tdi",
"(",
"self",
",",
"other",
")",
":",
"if",
"len",
"(",
"self",
")",
"!=",
"len",
"(",
"other",
")",
":",
"raise",
"ValueError",
"(",
"\"cannot add indices of unequal length\"",
")",
"if",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
":",
"# ndarray[timedelta64]; wrap in TimedeltaIndex for op",
"from",
"pandas",
"import",
"TimedeltaIndex",
"other",
"=",
"TimedeltaIndex",
"(",
"other",
")",
"self_i8",
"=",
"self",
".",
"asi8",
"other_i8",
"=",
"other",
".",
"asi8",
"new_values",
"=",
"checked_add_with_arr",
"(",
"self_i8",
",",
"other_i8",
",",
"arr_mask",
"=",
"self",
".",
"_isnan",
",",
"b_mask",
"=",
"other",
".",
"_isnan",
")",
"if",
"self",
".",
"_hasnans",
"or",
"other",
".",
"_hasnans",
":",
"mask",
"=",
"(",
"self",
".",
"_isnan",
")",
"|",
"(",
"other",
".",
"_isnan",
")",
"new_values",
"[",
"mask",
"]",
"=",
"iNaT",
"return",
"new_values",
".",
"view",
"(",
"'i8'",
")"
] |
Add a delta of a TimedeltaIndex
return the i8 result view
|
[
"Add",
"a",
"delta",
"of",
"a",
"TimedeltaIndex",
"return",
"the",
"i8",
"result",
"view"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L986-L1007
|
19,979
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._add_nat
|
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
raise TypeError('Cannot add {cls} and {typ}'
.format(cls=type(self).__name__,
typ=type(NaT).__name__))
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
|
python
|
def _add_nat(self):
"""
Add pd.NaT to self
"""
if is_period_dtype(self):
raise TypeError('Cannot add {cls} and {typ}'
.format(cls=type(self).__name__,
typ=type(NaT).__name__))
# GH#19124 pd.NaT is treated like a timedelta for both timedelta
# and datetime dtypes
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return type(self)(result, dtype=self.dtype, freq=None)
|
[
"def",
"_add_nat",
"(",
"self",
")",
":",
"if",
"is_period_dtype",
"(",
"self",
")",
":",
"raise",
"TypeError",
"(",
"'Cannot add {cls} and {typ}'",
".",
"format",
"(",
"cls",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
",",
"typ",
"=",
"type",
"(",
"NaT",
")",
".",
"__name__",
")",
")",
"# GH#19124 pd.NaT is treated like a timedelta for both timedelta",
"# and datetime dtypes",
"result",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"self",
")",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"result",
".",
"fill",
"(",
"iNaT",
")",
"return",
"type",
"(",
"self",
")",
"(",
"result",
",",
"dtype",
"=",
"self",
".",
"dtype",
",",
"freq",
"=",
"None",
")"
] |
Add pd.NaT to self
|
[
"Add",
"pd",
".",
"NaT",
"to",
"self"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1009-L1022
|
19,980
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._sub_nat
|
def _sub_nat(self):
"""
Subtract pd.NaT from self
"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return result.view('timedelta64[ns]')
|
python
|
def _sub_nat(self):
"""
Subtract pd.NaT from self
"""
# GH#19124 Timedelta - datetime is not in general well-defined.
# We make an exception for pd.NaT, which in this case quacks
# like a timedelta.
# For datetime64 dtypes by convention we treat NaT as a datetime, so
# this subtraction returns a timedelta64 dtype.
# For period dtype, timedelta64 is a close-enough return dtype.
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return result.view('timedelta64[ns]')
|
[
"def",
"_sub_nat",
"(",
"self",
")",
":",
"# GH#19124 Timedelta - datetime is not in general well-defined.",
"# We make an exception for pd.NaT, which in this case quacks",
"# like a timedelta.",
"# For datetime64 dtypes by convention we treat NaT as a datetime, so",
"# this subtraction returns a timedelta64 dtype.",
"# For period dtype, timedelta64 is a close-enough return dtype.",
"result",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"self",
")",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"result",
".",
"fill",
"(",
"iNaT",
")",
"return",
"result",
".",
"view",
"(",
"'timedelta64[ns]'",
")"
] |
Subtract pd.NaT from self
|
[
"Subtract",
"pd",
".",
"NaT",
"from",
"self"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1024-L1036
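The two NaT helpers above, seen through public arithmetic: addition keeps the datetime dtype, subtraction returns timedelta64, and every element becomes NaT.

import pandas as pd

idx = pd.date_range("2020-01-01", periods=2, freq="D")
print(idx + pd.NaT)   # DatetimeIndex(['NaT', 'NaT'], dtype='datetime64[ns]', freq=None)
print(idx - pd.NaT)   # TimedeltaIndex(['NaT', 'NaT'], dtype='timedelta64[ns]', freq=None)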
|
19,981
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._addsub_int_array
|
def _addsub_int_array(self, other, op):
"""
Add or subtract array-like of integers equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : Index, ExtensionArray, np.ndarray
integer-dtype
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
# _addsub_int_array is overriden by PeriodArray
assert not is_period_dtype(self)
assert op in [operator.add, operator.sub]
if self.freq is None:
# GH#19123
raise NullFrequencyError("Cannot shift with no freq")
elif isinstance(self.freq, Tick):
# easy case where we can convert to timedelta64 operation
td = Timedelta(self.freq)
return op(self, td * other)
# We should only get here with DatetimeIndex; dispatch
# to _addsub_offset_array
assert not is_timedelta64_dtype(self)
return op(self, np.array(other) * self.freq)
|
python
|
def _addsub_int_array(self, other, op):
"""
Add or subtract array-like of integers equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : Index, ExtensionArray, np.ndarray
integer-dtype
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
# _addsub_int_array is overriden by PeriodArray
assert not is_period_dtype(self)
assert op in [operator.add, operator.sub]
if self.freq is None:
# GH#19123
raise NullFrequencyError("Cannot shift with no freq")
elif isinstance(self.freq, Tick):
# easy case where we can convert to timedelta64 operation
td = Timedelta(self.freq)
return op(self, td * other)
# We should only get here with DatetimeIndex; dispatch
# to _addsub_offset_array
assert not is_timedelta64_dtype(self)
return op(self, np.array(other) * self.freq)
|
[
"def",
"_addsub_int_array",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"# _addsub_int_array is overriden by PeriodArray",
"assert",
"not",
"is_period_dtype",
"(",
"self",
")",
"assert",
"op",
"in",
"[",
"operator",
".",
"add",
",",
"operator",
".",
"sub",
"]",
"if",
"self",
".",
"freq",
"is",
"None",
":",
"# GH#19123",
"raise",
"NullFrequencyError",
"(",
"\"Cannot shift with no freq\"",
")",
"elif",
"isinstance",
"(",
"self",
".",
"freq",
",",
"Tick",
")",
":",
"# easy case where we can convert to timedelta64 operation",
"td",
"=",
"Timedelta",
"(",
"self",
".",
"freq",
")",
"return",
"op",
"(",
"self",
",",
"td",
"*",
"other",
")",
"# We should only get here with DatetimeIndex; dispatch",
"# to _addsub_offset_array",
"assert",
"not",
"is_timedelta64_dtype",
"(",
"self",
")",
"return",
"op",
"(",
"self",
",",
"np",
".",
"array",
"(",
"other",
")",
"*",
"self",
".",
"freq",
")"
] |
Add or subtract array-like of integers equivalent to applying
`_time_shift` pointwise.
Parameters
----------
other : Index, ExtensionArray, np.ndarray
integer-dtype
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
|
[
"Add",
"or",
"subtract",
"array",
"-",
"like",
"of",
"integers",
"equivalent",
"to",
"applying",
"_time_shift",
"pointwise",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1077-L1108
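The pointwise shift is easiest to see on a PeriodIndex (whose PeriodArray overrides this method, as the comment notes): each element moves by its own integer multiple of the freq.

import numpy as np
import pandas as pd

pi = pd.period_range("2020-01", periods=3, freq="M")
print(pi + np.array([1, 2, 3]))
# PeriodIndex(['2020-02', '2020-04', '2020-06'], dtype='period[M]', freq='M')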
|
19,982
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._addsub_offset_array
|
def _addsub_offset_array(self, other, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : Index, np.ndarray
object-dtype containing pd.DateOffset objects
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn("Adding/subtracting array of DateOffsets to "
"{cls} not vectorized"
.format(cls=type(self).__name__), PerformanceWarning)
# For EA self.astype('O') returns a numpy array, not an Index
left = lib.values_from_object(self.astype('O'))
res_values = op(left, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs['freq'] = 'infer'
return self._from_sequence(res_values, **kwargs)
|
python
|
def _addsub_offset_array(self, other, op):
"""
Add or subtract array-like of DateOffset objects
Parameters
----------
other : Index, np.ndarray
object-dtype containing pd.DateOffset objects
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
"""
assert op in [operator.add, operator.sub]
if len(other) == 1:
return op(self, other[0])
warnings.warn("Adding/subtracting array of DateOffsets to "
"{cls} not vectorized"
.format(cls=type(self).__name__), PerformanceWarning)
# For EA self.astype('O') returns a numpy array, not an Index
left = lib.values_from_object(self.astype('O'))
res_values = op(left, np.array(other))
kwargs = {}
if not is_period_dtype(self):
kwargs['freq'] = 'infer'
return self._from_sequence(res_values, **kwargs)
|
[
"def",
"_addsub_offset_array",
"(",
"self",
",",
"other",
",",
"op",
")",
":",
"assert",
"op",
"in",
"[",
"operator",
".",
"add",
",",
"operator",
".",
"sub",
"]",
"if",
"len",
"(",
"other",
")",
"==",
"1",
":",
"return",
"op",
"(",
"self",
",",
"other",
"[",
"0",
"]",
")",
"warnings",
".",
"warn",
"(",
"\"Adding/subtracting array of DateOffsets to \"",
"\"{cls} not vectorized\"",
".",
"format",
"(",
"cls",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
")",
",",
"PerformanceWarning",
")",
"# For EA self.astype('O') returns a numpy array, not an Index",
"left",
"=",
"lib",
".",
"values_from_object",
"(",
"self",
".",
"astype",
"(",
"'O'",
")",
")",
"res_values",
"=",
"op",
"(",
"left",
",",
"np",
".",
"array",
"(",
"other",
")",
")",
"kwargs",
"=",
"{",
"}",
"if",
"not",
"is_period_dtype",
"(",
"self",
")",
":",
"kwargs",
"[",
"'freq'",
"]",
"=",
"'infer'",
"return",
"self",
".",
"_from_sequence",
"(",
"res_values",
",",
"*",
"*",
"kwargs",
")"
] |
Add or subtract array-like of DateOffset objects
Parameters
----------
other : Index, np.ndarray
object-dtype containing pd.DateOffset objects
op : {operator.add, operator.sub}
Returns
-------
result : same class as self
|
[
"Add",
"or",
"subtract",
"array",
"-",
"like",
"of",
"DateOffset",
"objects"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1110-L1139
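An object array of offsets applied elementwise; with more than one offset pandas emits the PerformanceWarning mentioned above, since the loop is not vectorized (dates and offsets here are illustrative).

import pandas as pd
from pandas.tseries.offsets import MonthEnd, YearBegin

idx = pd.DatetimeIndex(["2020-01-15", "2020-06-15"])
print(idx + pd.Index([MonthEnd(), YearBegin()]))
# DatetimeIndex(['2020-01-31', '2021-01-01'], dtype='datetime64[ns]', freq=None)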
|
19,983
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin._ensure_localized
|
def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise',
from_utc=False):
"""
Ensure that we are re-localized.
This is for compat as we can then call this on all datetimelike
arrays generally (ignored for Period/Timedelta)
Parameters
----------
arg : Union[DatetimeLikeArray, DatetimeIndexOpsMixin, ndarray]
ambiguous : str, bool, or bool-ndarray, default 'raise'
nonexistent : str, default 'raise'
from_utc : bool, default False
If True, localize the i8 ndarray to UTC first before converting to
the appropriate tz. If False, localize directly to the tz.
Returns
-------
localized array
"""
# reconvert to local tz
tz = getattr(self, 'tz', None)
if tz is not None:
if not isinstance(arg, type(self)):
arg = self._simple_new(arg)
if from_utc:
arg = arg.tz_localize('UTC').tz_convert(self.tz)
else:
arg = arg.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return arg
|
python
|
def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise',
from_utc=False):
"""
Ensure that we are re-localized.
This is for compat as we can then call this on all datetimelike
arrays generally (ignored for Period/Timedelta)
Parameters
----------
arg : Union[DatetimeLikeArray, DatetimeIndexOpsMixin, ndarray]
ambiguous : str, bool, or bool-ndarray, default 'raise'
nonexistent : str, default 'raise'
from_utc : bool, default False
If True, localize the i8 ndarray to UTC first before converting to
the appropriate tz. If False, localize directly to the tz.
Returns
-------
localized array
"""
# reconvert to local tz
tz = getattr(self, 'tz', None)
if tz is not None:
if not isinstance(arg, type(self)):
arg = self._simple_new(arg)
if from_utc:
arg = arg.tz_localize('UTC').tz_convert(self.tz)
else:
arg = arg.tz_localize(
self.tz, ambiguous=ambiguous, nonexistent=nonexistent
)
return arg
|
[
"def",
"_ensure_localized",
"(",
"self",
",",
"arg",
",",
"ambiguous",
"=",
"'raise'",
",",
"nonexistent",
"=",
"'raise'",
",",
"from_utc",
"=",
"False",
")",
":",
"# reconvert to local tz",
"tz",
"=",
"getattr",
"(",
"self",
",",
"'tz'",
",",
"None",
")",
"if",
"tz",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"arg",
",",
"type",
"(",
"self",
")",
")",
":",
"arg",
"=",
"self",
".",
"_simple_new",
"(",
"arg",
")",
"if",
"from_utc",
":",
"arg",
"=",
"arg",
".",
"tz_localize",
"(",
"'UTC'",
")",
".",
"tz_convert",
"(",
"self",
".",
"tz",
")",
"else",
":",
"arg",
"=",
"arg",
".",
"tz_localize",
"(",
"self",
".",
"tz",
",",
"ambiguous",
"=",
"ambiguous",
",",
"nonexistent",
"=",
"nonexistent",
")",
"return",
"arg"
] |
Ensure that we are re-localized.
This is for compat as we can then call this on all datetimelike
arrays generally (ignored for Period/Timedelta)
Parameters
----------
arg : Union[DatetimeLikeArray, DatetimeIndexOpsMixin, ndarray]
ambiguous : str, bool, or bool-ndarray, default 'raise'
nonexistent : str, default 'raise'
from_utc : bool, default False
If True, localize the i8 ndarray to UTC first before converting to
the appropriate tz. If False, localize directly to the tz.
Returns
-------
localized array
|
[
"Ensure",
"that",
"we",
"are",
"re",
"-",
"localized",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1340-L1373
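The from_utc branch corresponds to this public round-trip, assuming the array's own tz is US/Eastern: naive values known to be UTC are localized to UTC first, then converted.

import pandas as pd

utc_wall = pd.DatetimeIndex(["2020-01-01 12:00"])   # naive, but known to be UTC
print(utc_wall.tz_localize("UTC").tz_convert("US/Eastern"))
# DatetimeIndex(['2020-01-01 07:00:00-05:00'], dtype='datetime64[ns, US/Eastern]', freq=None)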
|
19,984
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin.min
|
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Array or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Index.min : Return the minimum value in an Index.
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())
if isna(result):
# Period._from_ordinal does not handle np.nan gracefully
return NaT
return self._box_func(result)
|
python
|
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Array or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Index.min : Return the minimum value in an Index.
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())
if isna(result):
# Period._from_ordinal does not handle np.nan gracefully
return NaT
return self._box_func(result)
|
[
"def",
"min",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_min",
"(",
"args",
",",
"kwargs",
")",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"result",
"=",
"nanops",
".",
"nanmin",
"(",
"self",
".",
"asi8",
",",
"skipna",
"=",
"skipna",
",",
"mask",
"=",
"self",
".",
"isna",
"(",
")",
")",
"if",
"isna",
"(",
"result",
")",
":",
"# Period._from_ordinal does not handle np.nan gracefully",
"return",
"NaT",
"return",
"self",
".",
"_box_func",
"(",
"result",
")"
] |
Return the minimum value of the Array or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Index.min : Return the minimum value in an Index.
Series.min : Return the minimum value in a Series.
|
[
"Return",
"the",
"minimum",
"value",
"of",
"the",
"Array",
"or",
"minimum",
"along",
"an",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1385-L1403
|
19,985
|
pandas-dev/pandas
|
pandas/core/arrays/datetimelike.py
|
DatetimeLikeArrayMixin.max
|
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Array or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
"""
# TODO: skipna is broken with max.
# See https://github.com/pandas-dev/pandas/issues/24265
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
mask = self.isna()
if skipna:
values = self[~mask].asi8
elif mask.any():
return NaT
else:
values = self.asi8
if not len(values):
# short-circut for empty max / min
return NaT
result = nanops.nanmax(values, skipna=skipna)
# Don't have to worry about NA `result`, since no NA went in.
return self._box_func(result)
|
python
|
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Array or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
"""
# TODO: skipna is broken with max.
# See https://github.com/pandas-dev/pandas/issues/24265
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
mask = self.isna()
if skipna:
values = self[~mask].asi8
elif mask.any():
return NaT
else:
values = self.asi8
if not len(values):
# short-circut for empty max / min
return NaT
result = nanops.nanmax(values, skipna=skipna)
# Don't have to worry about NA `result`, since no NA went in.
return self._box_func(result)
|
[
"def",
"max",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: skipna is broken with max.",
"# See https://github.com/pandas-dev/pandas/issues/24265",
"nv",
".",
"validate_max",
"(",
"args",
",",
"kwargs",
")",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"mask",
"=",
"self",
".",
"isna",
"(",
")",
"if",
"skipna",
":",
"values",
"=",
"self",
"[",
"~",
"mask",
"]",
".",
"asi8",
"elif",
"mask",
".",
"any",
"(",
")",
":",
"return",
"NaT",
"else",
":",
"values",
"=",
"self",
".",
"asi8",
"if",
"not",
"len",
"(",
"values",
")",
":",
"# short-circut for empty max / min",
"return",
"NaT",
"result",
"=",
"nanops",
".",
"nanmax",
"(",
"values",
",",
"skipna",
"=",
"skipna",
")",
"# Don't have to worry about NA `result`, since no NA went in.",
"return",
"self",
".",
"_box_func",
"(",
"result",
")"
] |
Return the maximum value of the Array or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Index.max : Return the maximum value in an Index.
Series.max : Return the maximum value in a Series.
|
[
"Return",
"the",
"maximum",
"value",
"of",
"the",
"Array",
"or",
"maximum",
"along",
"an",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimelike.py#L1405-L1435
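min/max from the two records above, with and without skipna, on an index containing NaT:

import pandas as pd

idx = pd.DatetimeIndex(["2020-01-02", pd.NaT, "2020-01-01"])
print(idx.min())               # Timestamp('2020-01-01 00:00:00'), NaT skipped
print(idx.max())               # Timestamp('2020-01-02 00:00:00')
print(idx.max(skipna=False))   # NaT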
|
19,986
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
_period_array_cmp
|
def _period_array_cmp(cls, op):
"""
Wrap comparison operations to convert Period-like to PeriodDtype
"""
opname = '__{name}__'.format(name=op.__name__)
nat_result = opname == '__ne__'
def wrapper(self, other):
op = getattr(self.asi8, opname)
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
return NotImplemented
if is_list_like(other) and len(other) != len(self):
raise ValueError("Lengths must match")
if isinstance(other, Period):
self._check_compatible_with(other)
result = op(other.ordinal)
elif isinstance(other, cls):
self._check_compatible_with(other)
result = op(other.asi8)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is NaT:
result = np.empty(len(self.asi8), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
python
|
def _period_array_cmp(cls, op):
"""
Wrap comparison operations to convert Period-like to PeriodDtype
"""
opname = '__{name}__'.format(name=op.__name__)
nat_result = opname == '__ne__'
def wrapper(self, other):
op = getattr(self.asi8, opname)
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
return NotImplemented
if is_list_like(other) and len(other) != len(self):
raise ValueError("Lengths must match")
if isinstance(other, Period):
self._check_compatible_with(other)
result = op(other.ordinal)
elif isinstance(other, cls):
self._check_compatible_with(other)
result = op(other.asi8)
mask = self._isnan | other._isnan
if mask.any():
result[mask] = nat_result
return result
elif other is NaT:
result = np.empty(len(self.asi8), dtype=bool)
result.fill(nat_result)
else:
other = Period(other, freq=self.freq)
result = op(other.ordinal)
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls)
|
[
"def",
"_period_array_cmp",
"(",
"cls",
",",
"op",
")",
":",
"opname",
"=",
"'__{name}__'",
".",
"format",
"(",
"name",
"=",
"op",
".",
"__name__",
")",
"nat_result",
"=",
"opname",
"==",
"'__ne__'",
"def",
"wrapper",
"(",
"self",
",",
"other",
")",
":",
"op",
"=",
"getattr",
"(",
"self",
".",
"asi8",
",",
"opname",
")",
"if",
"isinstance",
"(",
"other",
",",
"(",
"ABCDataFrame",
",",
"ABCSeries",
",",
"ABCIndexClass",
")",
")",
":",
"return",
"NotImplemented",
"if",
"is_list_like",
"(",
"other",
")",
"and",
"len",
"(",
"other",
")",
"!=",
"len",
"(",
"self",
")",
":",
"raise",
"ValueError",
"(",
"\"Lengths must match\"",
")",
"if",
"isinstance",
"(",
"other",
",",
"Period",
")",
":",
"self",
".",
"_check_compatible_with",
"(",
"other",
")",
"result",
"=",
"op",
"(",
"other",
".",
"ordinal",
")",
"elif",
"isinstance",
"(",
"other",
",",
"cls",
")",
":",
"self",
".",
"_check_compatible_with",
"(",
"other",
")",
"result",
"=",
"op",
"(",
"other",
".",
"asi8",
")",
"mask",
"=",
"self",
".",
"_isnan",
"|",
"other",
".",
"_isnan",
"if",
"mask",
".",
"any",
"(",
")",
":",
"result",
"[",
"mask",
"]",
"=",
"nat_result",
"return",
"result",
"elif",
"other",
"is",
"NaT",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"self",
".",
"asi8",
")",
",",
"dtype",
"=",
"bool",
")",
"result",
".",
"fill",
"(",
"nat_result",
")",
"else",
":",
"other",
"=",
"Period",
"(",
"other",
",",
"freq",
"=",
"self",
".",
"freq",
")",
"result",
"=",
"op",
"(",
"other",
".",
"ordinal",
")",
"if",
"self",
".",
"_hasnans",
":",
"result",
"[",
"self",
".",
"_isnan",
"]",
"=",
"nat_result",
"return",
"result",
"return",
"compat",
".",
"set_function_name",
"(",
"wrapper",
",",
"opname",
",",
"cls",
")"
] |
Wrap comparison operations to convert Period-like to PeriodDtype
|
[
"Wrap",
"comparison",
"operations",
"to",
"convert",
"Period",
"-",
"like",
"to",
"PeriodDtype"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L44-L86
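Comparison semantics in practice: strings are coerced to a Period of the array's freq, and NaT slots receive nat_result (False for every comparison except !=).

import pandas as pd

pi = pd.PeriodIndex(["2020-01", "2020-02", pd.NaT], freq="M")
print(pi == "2020-01")   # [ True False False]
print(pi != "2020-01")   # [False  True  True]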
|
19,987
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
_raise_on_incompatible
|
def _raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : DateOffset, Period, ndarray, or timedelta-like
Raises
------
IncompatibleFrequency
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, np.ndarray):
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
other_freq = right.freqstr
else:
other_freq = _delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(cls=type(left).__name__,
own_freq=left.freqstr,
other_freq=other_freq)
raise IncompatibleFrequency(msg)
|
python
|
def _raise_on_incompatible(left, right):
"""
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : DateOffset, Period, ndarray, or timedelta-like
Raises
------
IncompatibleFrequency
"""
# GH#24283 error message format depends on whether right is scalar
if isinstance(right, np.ndarray):
other_freq = None
elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
other_freq = right.freqstr
else:
other_freq = _delta_to_tick(Timedelta(right)).freqstr
msg = DIFFERENT_FREQ.format(cls=type(left).__name__,
own_freq=left.freqstr,
other_freq=other_freq)
raise IncompatibleFrequency(msg)
|
[
"def",
"_raise_on_incompatible",
"(",
"left",
",",
"right",
")",
":",
"# GH#24283 error message format depends on whether right is scalar",
"if",
"isinstance",
"(",
"right",
",",
"np",
".",
"ndarray",
")",
":",
"other_freq",
"=",
"None",
"elif",
"isinstance",
"(",
"right",
",",
"(",
"ABCPeriodIndex",
",",
"PeriodArray",
",",
"Period",
",",
"DateOffset",
")",
")",
":",
"other_freq",
"=",
"right",
".",
"freqstr",
"else",
":",
"other_freq",
"=",
"_delta_to_tick",
"(",
"Timedelta",
"(",
"right",
")",
")",
".",
"freqstr",
"msg",
"=",
"DIFFERENT_FREQ",
".",
"format",
"(",
"cls",
"=",
"type",
"(",
"left",
")",
".",
"__name__",
",",
"own_freq",
"=",
"left",
".",
"freqstr",
",",
"other_freq",
"=",
"other_freq",
")",
"raise",
"IncompatibleFrequency",
"(",
"msg",
")"
] |
Helper function to render a consistent error message when raising
IncompatibleFrequency.
Parameters
----------
left : PeriodArray
right : DateOffset, Period, ndarray, or timedelta-like
Raises
------
IncompatibleFrequency
|
[
"Helper",
"function",
"to",
"render",
"a",
"consistent",
"error",
"message",
"when",
"raising",
"IncompatibleFrequency",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L681-L706
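One way the error surfaces publicly: adding an offset that is not a multiple of the array's own freq. IncompatibleFrequency subclasses ValueError, so it is caught as such here.

import pandas as pd

pi = pd.period_range("2020-01", periods=2, freq="M")
try:
    pi + pd.offsets.Day(1)     # a Day offset does not fit a monthly freq
except ValueError as err:
    print(type(err).__name__, err)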
|
19,988
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
period_array
|
def period_array(
data: Sequence[Optional[Period]],
freq: Optional[Tick] = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
if is_datetime64_dtype(data):
return PeriodArray._from_datetime64(data, freq)
if isinstance(data, (ABCPeriodIndex, ABCSeries, PeriodArray)):
return PeriodArray(data, freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple)):
data = list(data)
data = np.asarray(data)
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(data) and len(data) > 0:
raise TypeError("PeriodIndex does not allow "
"floating point in construction")
data = ensure_object(data)
return PeriodArray._from_sequence(data, dtype=dtype)
|
python
|
def period_array(
data: Sequence[Optional[Period]],
freq: Optional[Tick] = None,
copy: bool = False,
) -> PeriodArray:
"""
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
"""
if is_datetime64_dtype(data):
return PeriodArray._from_datetime64(data, freq)
if isinstance(data, (ABCPeriodIndex, ABCSeries, PeriodArray)):
return PeriodArray(data, freq)
# other iterable of some kind
if not isinstance(data, (np.ndarray, list, tuple)):
data = list(data)
data = np.asarray(data)
if freq:
dtype = PeriodDtype(freq)
else:
dtype = None
if is_float_dtype(data) and len(data) > 0:
raise TypeError("PeriodIndex does not allow "
"floating point in construction")
data = ensure_object(data)
return PeriodArray._from_sequence(data, dtype=dtype)
|
[
"def",
"period_array",
"(",
"data",
":",
"Sequence",
"[",
"Optional",
"[",
"Period",
"]",
"]",
",",
"freq",
":",
"Optional",
"[",
"Tick",
"]",
"=",
"None",
",",
"copy",
":",
"bool",
"=",
"False",
",",
")",
"->",
"PeriodArray",
":",
"if",
"is_datetime64_dtype",
"(",
"data",
")",
":",
"return",
"PeriodArray",
".",
"_from_datetime64",
"(",
"data",
",",
"freq",
")",
"if",
"isinstance",
"(",
"data",
",",
"(",
"ABCPeriodIndex",
",",
"ABCSeries",
",",
"PeriodArray",
")",
")",
":",
"return",
"PeriodArray",
"(",
"data",
",",
"freq",
")",
"# other iterable of some kind",
"if",
"not",
"isinstance",
"(",
"data",
",",
"(",
"np",
".",
"ndarray",
",",
"list",
",",
"tuple",
")",
")",
":",
"data",
"=",
"list",
"(",
"data",
")",
"data",
"=",
"np",
".",
"asarray",
"(",
"data",
")",
"if",
"freq",
":",
"dtype",
"=",
"PeriodDtype",
"(",
"freq",
")",
"else",
":",
"dtype",
"=",
"None",
"if",
"is_float_dtype",
"(",
"data",
")",
"and",
"len",
"(",
"data",
")",
">",
"0",
":",
"raise",
"TypeError",
"(",
"\"PeriodIndex does not allow \"",
"\"floating point in construction\"",
")",
"data",
"=",
"ensure_object",
"(",
"data",
")",
"return",
"PeriodArray",
".",
"_from_sequence",
"(",
"data",
",",
"dtype",
"=",
"dtype",
")"
] |
Construct a new PeriodArray from a sequence of Period scalars.
Parameters
----------
data : Sequence of Period objects
A sequence of Period objects. These are required to all have
the same ``freq.`` Missing values can be indicated by ``None``
or ``pandas.NaT``.
freq : str, Tick, or Offset
The frequency of every element of the array. This can be specified
to avoid inferring the `freq` from `data`.
copy : bool, default False
Whether to ensure a copy of the data is made.
Returns
-------
PeriodArray
See Also
--------
PeriodArray
pandas.PeriodIndex
Examples
--------
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A')])
<PeriodArray>
['2017', '2018']
Length: 2, dtype: period[A-DEC]
>>> period_array([pd.Period('2017', freq='A'),
... pd.Period('2018', freq='A'),
... pd.NaT])
<PeriodArray>
['2017', '2018', 'NaT']
Length: 3, dtype: period[A-DEC]
Integers that look like years are handled
>>> period_array([2000, 2001, 2002], freq='D')
['2000-01-01', '2001-01-01', '2002-01-01']
Length: 3, dtype: period[D]
Datetime-like strings may also be passed
>>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q')
<PeriodArray>
['2000Q1', '2000Q2', '2000Q3', '2000Q4']
Length: 4, dtype: period[Q-DEC]
|
[
"Construct",
"a",
"new",
"PeriodArray",
"from",
"a",
"sequence",
"of",
"Period",
"scalars",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L712-L791
|
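A brief usage sketch for the period_array record above (not part of the dataset row). It assumes a pandas checkout consistent with the recorded path and SHA, where the function is importable from pandas.core.arrays.period.

import pandas as pd
from pandas.core.arrays.period import period_array

# Period scalars plus NaT, as in the docstring examples above.
arr = period_array([pd.Period('2017', freq='A'), pd.Period('2018', freq='A'), pd.NaT])
print(arr)  # PeriodArray of ['2017', '2018', 'NaT'] with dtype period[A-DEC]

# A datetime64[ns] ndarray takes the PeriodArray._from_datetime64 branch.
stamps = pd.date_range('2000-01-01', periods=3, freq='D').values
print(period_array(stamps, freq='D'))

# Floats are rejected, matching the explicit TypeError in the source.
try:
    period_array([1.5, 2.5], freq='D')
except TypeError as exc:
    print(exc)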
19,989
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
validate_dtype_freq
|
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = frequencies.to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency('specified freq and dtype '
'are different')
return freq
|
python
|
def validate_dtype_freq(dtype, freq):
"""
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
"""
if freq is not None:
freq = frequencies.to_offset(freq)
if dtype is not None:
dtype = pandas_dtype(dtype)
if not is_period_dtype(dtype):
raise ValueError('dtype must be PeriodDtype')
if freq is None:
freq = dtype.freq
elif freq != dtype.freq:
raise IncompatibleFrequency('specified freq and dtype '
'are different')
return freq
|
[
"def",
"validate_dtype_freq",
"(",
"dtype",
",",
"freq",
")",
":",
"if",
"freq",
"is",
"not",
"None",
":",
"freq",
"=",
"frequencies",
".",
"to_offset",
"(",
"freq",
")",
"if",
"dtype",
"is",
"not",
"None",
":",
"dtype",
"=",
"pandas_dtype",
"(",
"dtype",
")",
"if",
"not",
"is_period_dtype",
"(",
"dtype",
")",
":",
"raise",
"ValueError",
"(",
"'dtype must be PeriodDtype'",
")",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"dtype",
".",
"freq",
"elif",
"freq",
"!=",
"dtype",
".",
"freq",
":",
"raise",
"IncompatibleFrequency",
"(",
"'specified freq and dtype '",
"'are different'",
")",
"return",
"freq"
] |
If both a dtype and a freq are available, ensure they match. If only
dtype is available, extract the implied freq.
Parameters
----------
dtype : dtype
freq : DateOffset or None
Returns
-------
freq : DateOffset
Raises
------
ValueError : non-period dtype
IncompatibleFrequency : mismatch between dtype and freq
|
[
"If",
"both",
"a",
"dtype",
"and",
"a",
"freq",
"are",
"available",
"ensure",
"they",
"match",
".",
"If",
"only",
"dtype",
"is",
"available",
"extract",
"the",
"implied",
"freq",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L794-L825
|
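A hedged sketch of validate_dtype_freq for the record above (not part of the dataset row). The helper is internal, so the import paths below are assumptions based on this pandas checkout's module layout.

from pandas.core.arrays.period import validate_dtype_freq
from pandas._libs.tslibs.period import IncompatibleFrequency

# Only a freq: it is normalised to an offset and returned.
print(validate_dtype_freq(None, 'D'))           # <Day>

# Only a dtype: the implied freq is extracted from the PeriodDtype.
print(validate_dtype_freq('period[M]', None))   # <MonthEnd>

# Both given but mismatched: IncompatibleFrequency is raised.
try:
    validate_dtype_freq('period[M]', 'D')
except IncompatibleFrequency as exc:
    print(exc)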
19,990
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
dt64arr_to_periodarr
|
def dt64arr_to_periodarr(data, freq, tz=None):
"""
Convert an datetime-like array to values Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: {dtype}'.format(dtype=data.dtype))
if freq is None:
if isinstance(data, ABCIndexClass):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
freq = Period._maybe_convert_freq(freq)
if isinstance(data, (ABCIndexClass, ABCSeries)):
data = data._values
base, mult = libfrequencies.get_freq_code(freq)
return libperiod.dt64arr_to_periodarr(data.view('i8'), base, tz), freq
|
python
|
def dt64arr_to_periodarr(data, freq, tz=None):
"""
Convert an datetime-like array to values Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
"""
if data.dtype != np.dtype('M8[ns]'):
raise ValueError('Wrong dtype: {dtype}'.format(dtype=data.dtype))
if freq is None:
if isinstance(data, ABCIndexClass):
data, freq = data._values, data.freq
elif isinstance(data, ABCSeries):
data, freq = data._values, data.dt.freq
freq = Period._maybe_convert_freq(freq)
if isinstance(data, (ABCIndexClass, ABCSeries)):
data = data._values
base, mult = libfrequencies.get_freq_code(freq)
return libperiod.dt64arr_to_periodarr(data.view('i8'), base, tz), freq
|
[
"def",
"dt64arr_to_periodarr",
"(",
"data",
",",
"freq",
",",
"tz",
"=",
"None",
")",
":",
"if",
"data",
".",
"dtype",
"!=",
"np",
".",
"dtype",
"(",
"'M8[ns]'",
")",
":",
"raise",
"ValueError",
"(",
"'Wrong dtype: {dtype}'",
".",
"format",
"(",
"dtype",
"=",
"data",
".",
"dtype",
")",
")",
"if",
"freq",
"is",
"None",
":",
"if",
"isinstance",
"(",
"data",
",",
"ABCIndexClass",
")",
":",
"data",
",",
"freq",
"=",
"data",
".",
"_values",
",",
"data",
".",
"freq",
"elif",
"isinstance",
"(",
"data",
",",
"ABCSeries",
")",
":",
"data",
",",
"freq",
"=",
"data",
".",
"_values",
",",
"data",
".",
"dt",
".",
"freq",
"freq",
"=",
"Period",
".",
"_maybe_convert_freq",
"(",
"freq",
")",
"if",
"isinstance",
"(",
"data",
",",
"(",
"ABCIndexClass",
",",
"ABCSeries",
")",
")",
":",
"data",
"=",
"data",
".",
"_values",
"base",
",",
"mult",
"=",
"libfrequencies",
".",
"get_freq_code",
"(",
"freq",
")",
"return",
"libperiod",
".",
"dt64arr_to_periodarr",
"(",
"data",
".",
"view",
"(",
"'i8'",
")",
",",
"base",
",",
"tz",
")",
",",
"freq"
] |
Convert an datetime-like array to values Period ordinals.
Parameters
----------
data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
freq : Optional[Union[str, Tick]]
Must match the `freq` on the `data` if `data` is a DatetimeIndex
or Series.
tz : Optional[tzinfo]
Returns
-------
ordinals : ndarray[int]
freq : Tick
The frequency extracted from the Series or DatetimeIndex if that's
used.
|
[
"Convert",
"an",
"datetime",
"-",
"like",
"array",
"to",
"values",
"Period",
"ordinals",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L828-L863
|
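A short illustrative sketch for dt64arr_to_periodarr (again assuming this checkout's internal module layout; the helper is not public API).

import numpy as np
import pandas as pd
from pandas.core.arrays.period import dt64arr_to_periodarr

dti = pd.date_range('2000-01-31', periods=3, freq='M')

# With freq=None and a DatetimeIndex, the freq is taken from the index itself.
ordinals, freq = dt64arr_to_periodarr(dti, None)
print(ordinals, freq)          # int64 ordinals plus the extracted <MonthEnd>

# A bare datetime64[ns] ndarray needs an explicit freq.
ordinals, freq = dt64arr_to_periodarr(dti.values, 'M')

# Anything that is not datetime64[ns] is rejected up front.
try:
    dt64arr_to_periodarr(np.arange(3), 'D')
except ValueError as exc:
    print(exc)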
19,991
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
PeriodArray._from_datetime64
|
def _from_datetime64(cls, data, freq, tz=None):
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
|
python
|
def _from_datetime64(cls, data, freq, tz=None):
"""
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
"""
data, freq = dt64arr_to_periodarr(data, freq, tz)
return cls(data, freq=freq)
|
[
"def",
"_from_datetime64",
"(",
"cls",
",",
"data",
",",
"freq",
",",
"tz",
"=",
"None",
")",
":",
"data",
",",
"freq",
"=",
"dt64arr_to_periodarr",
"(",
"data",
",",
"freq",
",",
"tz",
")",
"return",
"cls",
"(",
"data",
",",
"freq",
"=",
"freq",
")"
] |
Construct a PeriodArray from a datetime64 array
Parameters
----------
data : ndarray[datetime64[ns], datetime64[ns, tz]]
freq : str or Tick
tz : tzinfo, optional
Returns
-------
PeriodArray[freq]
|
[
"Construct",
"a",
"PeriodArray",
"from",
"a",
"datetime64",
"array"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L211-L226
|
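A minimal sketch for PeriodArray._from_datetime64; it is a private classmethod, called directly here only to illustrate the conversion path documented above.

import pandas as pd
from pandas.core.arrays.period import PeriodArray

stamps = pd.date_range('2019-01-01', periods=4, freq='D').values  # datetime64[ns]
parr = PeriodArray._from_datetime64(stamps, freq='D')
print(parr.dtype)   # period[D]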
19,992
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
PeriodArray._format_native_types
|
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: '%s' % dt
if self._hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt
in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
|
python
|
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
"""
actually format my specific types
"""
values = self.astype(object)
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: '%s' % dt
if self._hasnans:
mask = self._isnan
values[mask] = na_rep
imask = ~mask
values[imask] = np.array([formatter(dt) for dt
in values[imask]])
else:
values = np.array([formatter(dt) for dt in values])
return values
|
[
"def",
"_format_native_types",
"(",
"self",
",",
"na_rep",
"=",
"'NaT'",
",",
"date_format",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"values",
"=",
"self",
".",
"astype",
"(",
"object",
")",
"if",
"date_format",
":",
"formatter",
"=",
"lambda",
"dt",
":",
"dt",
".",
"strftime",
"(",
"date_format",
")",
"else",
":",
"formatter",
"=",
"lambda",
"dt",
":",
"'%s'",
"%",
"dt",
"if",
"self",
".",
"_hasnans",
":",
"mask",
"=",
"self",
".",
"_isnan",
"values",
"[",
"mask",
"]",
"=",
"na_rep",
"imask",
"=",
"~",
"mask",
"values",
"[",
"imask",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"formatter",
"(",
"dt",
")",
"for",
"dt",
"in",
"values",
"[",
"imask",
"]",
"]",
")",
"else",
":",
"values",
"=",
"np",
".",
"array",
"(",
"[",
"formatter",
"(",
"dt",
")",
"for",
"dt",
"in",
"values",
"]",
")",
"return",
"values"
] |
actually format my specific types
|
[
"actually",
"format",
"my",
"specific",
"types"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L477-L496
|
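A small sketch for PeriodArray._format_native_types; it is the private formatting hook used by repr/CSV output, invoked directly here purely for illustration.

import pandas as pd
from pandas.core.arrays.period import period_array

parr = period_array([pd.Period('2019-01', freq='M'), pd.NaT])

# Default rendering: str(period) for valid entries, 'NaT' for missing ones.
print(parr._format_native_types())

# Both na_rep and an strftime-style date_format are honoured.
print(parr._format_native_types(na_rep='<missing>', date_format='%Y/%m'))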
19,993
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
PeriodArray._add_delta
|
def _add_delta(self, other):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new PeriodArray
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
_raise_on_incompatible(self, other)
new_ordinals = super()._add_delta(other)
return type(self)(new_ordinals, freq=self.freq)
|
python
|
def _add_delta(self, other):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new PeriodArray
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : PeriodArray
"""
if not isinstance(self.freq, Tick):
# We cannot add timedelta-like to non-tick PeriodArray
_raise_on_incompatible(self, other)
new_ordinals = super()._add_delta(other)
return type(self)(new_ordinals, freq=self.freq)
|
[
"def",
"_add_delta",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"freq",
",",
"Tick",
")",
":",
"# We cannot add timedelta-like to non-tick PeriodArray",
"_raise_on_incompatible",
"(",
"self",
",",
"other",
")",
"new_ordinals",
"=",
"super",
"(",
")",
".",
"_add_delta",
"(",
"other",
")",
"return",
"type",
"(",
"self",
")",
"(",
"new_ordinals",
",",
"freq",
"=",
"self",
".",
"freq",
")"
] |
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new PeriodArray
Parameters
----------
other : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : PeriodArray
|
[
"Add",
"a",
"timedelta",
"-",
"like",
"Tick",
"or",
"TimedeltaIndex",
"-",
"like",
"object",
"to",
"self",
"yielding",
"a",
"new",
"PeriodArray"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L604-L623
|
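A hedged sketch of the behaviour _add_delta implements; in practice the logic runs through the public + operator, which is what this example exercises.

import pandas as pd
from pandas.core.arrays.period import period_array

# Tick frequency (daily): a compatible timedelta shifts the periods.
daily = period_array(['2019-01-01', '2019-01-02'], freq='D')
print(daily + pd.Timedelta(days=2))

# Non-tick frequency (monthly): timedelta addition is incompatible and raises.
monthly = period_array(['2019-01', '2019-02'], freq='M')
try:
    monthly + pd.Timedelta(days=1)
except Exception as exc:            # IncompatibleFrequency in practice
    print(type(exc).__name__, exc)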
19,994
|
pandas-dev/pandas
|
pandas/core/arrays/period.py
|
PeriodArray._check_timedeltalike_freq_compat
|
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
own_offset = frequencies.to_offset(self.freq.rule_code)
base_nanos = delta_to_nanoseconds(own_offset)
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == 'm'
if other.dtype != _TD_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(_TD_DTYPE)
nanos = other.view('i8')
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
# by which will be added to self.
return delta
_raise_on_incompatible(self, other)
|
python
|
def _check_timedeltalike_freq_compat(self, other):
"""
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
"""
assert isinstance(self.freq, Tick) # checked by calling function
own_offset = frequencies.to_offset(self.freq.rule_code)
base_nanos = delta_to_nanoseconds(own_offset)
if isinstance(other, (timedelta, np.timedelta64, Tick)):
nanos = delta_to_nanoseconds(other)
elif isinstance(other, np.ndarray):
# numpy timedelta64 array; all entries must be compatible
assert other.dtype.kind == 'm'
if other.dtype != _TD_DTYPE:
# i.e. non-nano unit
# TODO: disallow unit-less timedelta64
other = other.astype(_TD_DTYPE)
nanos = other.view('i8')
else:
# TimedeltaArray/Index
nanos = other.asi8
if np.all(nanos % base_nanos == 0):
# nanos being added is an integer multiple of the
# base-frequency to self.freq
delta = nanos // base_nanos
# delta is the integer (or integer-array) number of periods
# by which will be added to self.
return delta
_raise_on_incompatible(self, other)
|
[
"def",
"_check_timedeltalike_freq_compat",
"(",
"self",
",",
"other",
")",
":",
"assert",
"isinstance",
"(",
"self",
".",
"freq",
",",
"Tick",
")",
"# checked by calling function",
"own_offset",
"=",
"frequencies",
".",
"to_offset",
"(",
"self",
".",
"freq",
".",
"rule_code",
")",
"base_nanos",
"=",
"delta_to_nanoseconds",
"(",
"own_offset",
")",
"if",
"isinstance",
"(",
"other",
",",
"(",
"timedelta",
",",
"np",
".",
"timedelta64",
",",
"Tick",
")",
")",
":",
"nanos",
"=",
"delta_to_nanoseconds",
"(",
"other",
")",
"elif",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
":",
"# numpy timedelta64 array; all entries must be compatible",
"assert",
"other",
".",
"dtype",
".",
"kind",
"==",
"'m'",
"if",
"other",
".",
"dtype",
"!=",
"_TD_DTYPE",
":",
"# i.e. non-nano unit",
"# TODO: disallow unit-less timedelta64",
"other",
"=",
"other",
".",
"astype",
"(",
"_TD_DTYPE",
")",
"nanos",
"=",
"other",
".",
"view",
"(",
"'i8'",
")",
"else",
":",
"# TimedeltaArray/Index",
"nanos",
"=",
"other",
".",
"asi8",
"if",
"np",
".",
"all",
"(",
"nanos",
"%",
"base_nanos",
"==",
"0",
")",
":",
"# nanos being added is an integer multiple of the",
"# base-frequency to self.freq",
"delta",
"=",
"nanos",
"//",
"base_nanos",
"# delta is the integer (or integer-array) number of periods",
"# by which will be added to self.",
"return",
"delta",
"_raise_on_incompatible",
"(",
"self",
",",
"other",
")"
] |
Arithmetic operations with timedelta-like scalars or array `other`
are only valid if `other` is an integer multiple of `self.freq`.
If the operation is valid, find that integer multiple. Otherwise,
raise because the operation is invalid.
Parameters
----------
other : timedelta, np.timedelta64, Tick,
ndarray[timedelta64], TimedeltaArray, TimedeltaIndex
Returns
-------
multiple : int or ndarray[int64]
Raises
------
IncompatibleFrequency
|
[
"Arithmetic",
"operations",
"with",
"timedelta",
"-",
"like",
"scalars",
"or",
"array",
"other",
"are",
"only",
"valid",
"if",
"other",
"is",
"an",
"integer",
"multiple",
"of",
"self",
".",
"freq",
".",
"If",
"the",
"operation",
"is",
"valid",
"find",
"that",
"integer",
"multiple",
".",
"Otherwise",
"raise",
"because",
"the",
"operation",
"is",
"invalid",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/period.py#L625-L672
|
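A brief sketch of _check_timedeltalike_freq_compat: for Tick-frequency arrays it answers "how many whole periods does this timedelta represent?". The direct calls to the private method below are for illustration only.

import numpy as np
import pandas as pd
from pandas.core.arrays.period import period_array

hourly = period_array(['2019-01-01 00:00', '2019-01-01 01:00'], freq='H')

# Three hours is an integer multiple of the hourly base, so the multiple is 3.
print(hourly._check_timedeltalike_freq_compat(pd.Timedelta(hours=3)))

# A timedelta64 array yields one multiple per element.
deltas = np.array([1, 2], dtype='timedelta64[h]')
print(hourly._check_timedeltalike_freq_compat(deltas))

# Ninety minutes is not a whole number of hours, so the call raises.
try:
    hourly._check_timedeltalike_freq_compat(pd.Timedelta(minutes=90))
except Exception as exc:            # IncompatibleFrequency in practice
    print(type(exc).__name__)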
19,995
|
pandas-dev/pandas
|
pandas/core/dtypes/missing.py
|
_isna_old
|
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, '__array__'):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
|
python
|
def _isna_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return libmissing.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isna is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isna_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isna(func=_isna_old))
elif isinstance(obj, list):
return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
elif hasattr(obj, '__array__'):
return _isna_ndarraylike_old(np.asarray(obj))
else:
return obj is None
|
[
"def",
"_isna_old",
"(",
"obj",
")",
":",
"if",
"is_scalar",
"(",
"obj",
")",
":",
"return",
"libmissing",
".",
"checknull_old",
"(",
"obj",
")",
"# hack (for now) because MI registers as ndarray",
"elif",
"isinstance",
"(",
"obj",
",",
"ABCMultiIndex",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"isna is not defined for MultiIndex\"",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"(",
"ABCSeries",
",",
"np",
".",
"ndarray",
",",
"ABCIndexClass",
")",
")",
":",
"return",
"_isna_ndarraylike_old",
"(",
"obj",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"ABCGeneric",
")",
":",
"return",
"obj",
".",
"_constructor",
"(",
"obj",
".",
"_data",
".",
"isna",
"(",
"func",
"=",
"_isna_old",
")",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"return",
"_isna_ndarraylike_old",
"(",
"np",
".",
"asarray",
"(",
"obj",
",",
"dtype",
"=",
"object",
")",
")",
"elif",
"hasattr",
"(",
"obj",
",",
"'__array__'",
")",
":",
"return",
"_isna_ndarraylike_old",
"(",
"np",
".",
"asarray",
"(",
"obj",
")",
")",
"else",
":",
"return",
"obj",
"is",
"None"
] |
Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
|
[
"Detect",
"missing",
"values",
".",
"Treat",
"None",
"NaN",
"INF",
"-",
"INF",
"as",
"null",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/missing.py#L126-L151
|
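A short sketch contrasting the default isna with the _isna_old path, which pandas uses when the mode.use_inf_as_na option is enabled and which also treats infinities as missing.

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, np.inf, -np.inf])

print(pd.isna(s).tolist())          # [False, True, False, False]

with pd.option_context('mode.use_inf_as_na', True):
    # Under this option isna is routed through _isna_old internally.
    print(pd.isna(s).tolist())      # [False, True, True, True]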
19,996
|
pandas-dev/pandas
|
pandas/core/dtypes/missing.py
|
_maybe_fill
|
def _maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if _isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr
|
python
|
def _maybe_fill(arr, fill_value=np.nan):
"""
if we have a compatible fill_value and arr dtype, then fill
"""
if _isna_compat(arr, fill_value):
arr.fill(fill_value)
return arr
|
[
"def",
"_maybe_fill",
"(",
"arr",
",",
"fill_value",
"=",
"np",
".",
"nan",
")",
":",
"if",
"_isna_compat",
"(",
"arr",
",",
"fill_value",
")",
":",
"arr",
".",
"fill",
"(",
"fill_value",
")",
"return",
"arr"
] |
if we have a compatible fill_value and arr dtype, then fill
|
[
"if",
"we",
"have",
"a",
"compatible",
"fill_value",
"and",
"arr",
"dtype",
"then",
"fill"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/missing.py#L470-L476
|
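A minimal sketch for _maybe_fill: the array is filled in place only when the fill value is compatible with its dtype; otherwise it is returned unchanged. The private import path simply mirrors the record.

import numpy as np
from pandas.core.dtypes.missing import _maybe_fill

floats = np.empty(3, dtype='float64')
print(_maybe_fill(floats, np.nan))   # [nan nan nan]; NaN fits a float array

ints = np.zeros(3, dtype='int64')
print(_maybe_fill(ints, np.nan))     # [0 0 0]; NaN is incompatible, left as-is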
19,997
|
pandas-dev/pandas
|
pandas/core/dtypes/missing.py
|
na_value_for_dtype
|
def na_value_for_dtype(dtype, compat=True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : boolean, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT
"""
dtype = pandas_dtype(dtype)
if is_extension_array_dtype(dtype):
return dtype.na_value
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype) or is_period_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
return False
return np.nan
|
python
|
def na_value_for_dtype(dtype, compat=True):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : boolean, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT
"""
dtype = pandas_dtype(dtype)
if is_extension_array_dtype(dtype):
return dtype.na_value
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype) or is_period_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
if compat:
return 0
return np.nan
elif is_bool_dtype(dtype):
return False
return np.nan
|
[
"def",
"na_value_for_dtype",
"(",
"dtype",
",",
"compat",
"=",
"True",
")",
":",
"dtype",
"=",
"pandas_dtype",
"(",
"dtype",
")",
"if",
"is_extension_array_dtype",
"(",
"dtype",
")",
":",
"return",
"dtype",
".",
"na_value",
"if",
"(",
"is_datetime64_dtype",
"(",
"dtype",
")",
"or",
"is_datetime64tz_dtype",
"(",
"dtype",
")",
"or",
"is_timedelta64_dtype",
"(",
"dtype",
")",
"or",
"is_period_dtype",
"(",
"dtype",
")",
")",
":",
"return",
"NaT",
"elif",
"is_float_dtype",
"(",
"dtype",
")",
":",
"return",
"np",
".",
"nan",
"elif",
"is_integer_dtype",
"(",
"dtype",
")",
":",
"if",
"compat",
":",
"return",
"0",
"return",
"np",
".",
"nan",
"elif",
"is_bool_dtype",
"(",
"dtype",
")",
":",
"return",
"False",
"return",
"np",
".",
"nan"
] |
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
compat : boolean, default True
Returns
-------
np.dtype or a pandas dtype
Examples
--------
>>> na_value_for_dtype(np.dtype('int64'))
0
>>> na_value_for_dtype(np.dtype('int64'), compat=False)
nan
>>> na_value_for_dtype(np.dtype('float64'))
nan
>>> na_value_for_dtype(np.dtype('bool'))
False
>>> na_value_for_dtype(np.dtype('datetime64[ns]'))
NaT
|
[
"Return",
"a",
"dtype",
"compat",
"na",
"value"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/missing.py#L479-L520
|
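A small sketch for na_value_for_dtype, mirroring the doctests above and adding an extension dtype, which short-circuits to dtype.na_value.

import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import na_value_for_dtype

print(na_value_for_dtype(np.dtype('float64')))              # nan
print(na_value_for_dtype(np.dtype('int64')))                # 0 (compat=True)
print(na_value_for_dtype(np.dtype('int64'), compat=False))  # nan
print(na_value_for_dtype('datetime64[ns]'))                 # NaT
print(na_value_for_dtype(pd.CategoricalDtype()))            # nan via dtype.na_value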
19,998
|
pandas-dev/pandas
|
pandas/plotting/_tools.py
|
table
|
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
ax : Matplotlib axes object
data : DataFrame or Series
data for table contents
kwargs : keywords, optional
keyword arguments which are passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
Returns
-------
matplotlib table object
"""
if isinstance(data, ABCSeries):
data = data.to_frame()
elif isinstance(data, ABCDataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, **kwargs)
return table
|
python
|
def table(ax, data, rowLabels=None, colLabels=None, **kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
ax : Matplotlib axes object
data : DataFrame or Series
data for table contents
kwargs : keywords, optional
keyword arguments which are passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
Returns
-------
matplotlib table object
"""
if isinstance(data, ABCSeries):
data = data.to_frame()
elif isinstance(data, ABCDataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, **kwargs)
return table
|
[
"def",
"table",
"(",
"ax",
",",
"data",
",",
"rowLabels",
"=",
"None",
",",
"colLabels",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"ABCSeries",
")",
":",
"data",
"=",
"data",
".",
"to_frame",
"(",
")",
"elif",
"isinstance",
"(",
"data",
",",
"ABCDataFrame",
")",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"'Input data must be DataFrame or Series'",
")",
"if",
"rowLabels",
"is",
"None",
":",
"rowLabels",
"=",
"data",
".",
"index",
"if",
"colLabels",
"is",
"None",
":",
"colLabels",
"=",
"data",
".",
"columns",
"cellText",
"=",
"data",
".",
"values",
"import",
"matplotlib",
".",
"table",
"table",
"=",
"matplotlib",
".",
"table",
".",
"table",
"(",
"ax",
",",
"cellText",
"=",
"cellText",
",",
"rowLabels",
"=",
"rowLabels",
",",
"colLabels",
"=",
"colLabels",
",",
"*",
"*",
"kwargs",
")",
"return",
"table"
] |
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
ax : Matplotlib axes object
data : DataFrame or Series
data for table contents
kwargs : keywords, optional
keyword arguments which are passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
Returns
-------
matplotlib table object
|
[
"Helper",
"function",
"to",
"convert",
"DataFrame",
"and",
"Series",
"to",
"matplotlib",
".",
"table"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_tools.py#L23-L60
|
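A brief usage sketch for the table helper; released pandas exposes the same function as pandas.plotting.table, which is what is assumed below.

import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import table

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['row1', 'row2'])

fig, ax = plt.subplots()
ax.axis('off')                      # hide the axes so only the table shows
tbl = table(ax, df, loc='center')   # extra kwargs pass through to matplotlib.table.table
fig.savefig('table.png')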
19,999
|
pandas-dev/pandas
|
pandas/plotting/_tools.py
|
_subplots
|
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box',
**fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
Number of required axes. Exceeded axes are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is "
"ignored", UserWarning)
if sharex or sharey:
warnings.warn("When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified "
"when creating axes", UserWarning,
stacklevel=4)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the "
"same as the output plot".format(naxes))
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing "
"the passed axes is being cleared", UserWarning,
stacklevel=4)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
|
python
|
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box',
**fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
Number of required axes. Exceeded axes are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is "
"ignored", UserWarning)
if sharex or sharey:
warnings.warn("When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified "
"when creating axes", UserWarning,
stacklevel=4)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the "
"same as the output plot".format(naxes))
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing "
"the passed axes is being cleared", UserWarning,
stacklevel=4)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
|
[
"def",
"_subplots",
"(",
"naxes",
"=",
"None",
",",
"sharex",
"=",
"False",
",",
"sharey",
"=",
"False",
",",
"squeeze",
"=",
"True",
",",
"subplot_kw",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"layout",
"=",
"None",
",",
"layout_type",
"=",
"'box'",
",",
"*",
"*",
"fig_kw",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"subplot_kw",
"is",
"None",
":",
"subplot_kw",
"=",
"{",
"}",
"if",
"ax",
"is",
"None",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
"*",
"*",
"fig_kw",
")",
"else",
":",
"if",
"is_list_like",
"(",
"ax",
")",
":",
"ax",
"=",
"_flatten",
"(",
"ax",
")",
"if",
"layout",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"When passing multiple axes, layout keyword is \"",
"\"ignored\"",
",",
"UserWarning",
")",
"if",
"sharex",
"or",
"sharey",
":",
"warnings",
".",
"warn",
"(",
"\"When passing multiple axes, sharex and sharey \"",
"\"are ignored. These settings must be specified \"",
"\"when creating axes\"",
",",
"UserWarning",
",",
"stacklevel",
"=",
"4",
")",
"if",
"len",
"(",
"ax",
")",
"==",
"naxes",
":",
"fig",
"=",
"ax",
"[",
"0",
"]",
".",
"get_figure",
"(",
")",
"return",
"fig",
",",
"ax",
"else",
":",
"raise",
"ValueError",
"(",
"\"The number of passed axes must be {0}, the \"",
"\"same as the output plot\"",
".",
"format",
"(",
"naxes",
")",
")",
"fig",
"=",
"ax",
".",
"get_figure",
"(",
")",
"# if ax is passed and a number of subplots is 1, return ax as it is",
"if",
"naxes",
"==",
"1",
":",
"if",
"squeeze",
":",
"return",
"fig",
",",
"ax",
"else",
":",
"return",
"fig",
",",
"_flatten",
"(",
"ax",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"To output multiple subplots, the figure containing \"",
"\"the passed axes is being cleared\"",
",",
"UserWarning",
",",
"stacklevel",
"=",
"4",
")",
"fig",
".",
"clear",
"(",
")",
"nrows",
",",
"ncols",
"=",
"_get_layout",
"(",
"naxes",
",",
"layout",
"=",
"layout",
",",
"layout_type",
"=",
"layout_type",
")",
"nplots",
"=",
"nrows",
"*",
"ncols",
"# Create empty object array to hold all axes. It's easiest to make it 1-d",
"# so we can just append subplots upon creation, and then",
"axarr",
"=",
"np",
".",
"empty",
"(",
"nplots",
",",
"dtype",
"=",
"object",
")",
"# Create first subplot separately, so we can share it if requested",
"ax0",
"=",
"fig",
".",
"add_subplot",
"(",
"nrows",
",",
"ncols",
",",
"1",
",",
"*",
"*",
"subplot_kw",
")",
"if",
"sharex",
":",
"subplot_kw",
"[",
"'sharex'",
"]",
"=",
"ax0",
"if",
"sharey",
":",
"subplot_kw",
"[",
"'sharey'",
"]",
"=",
"ax0",
"axarr",
"[",
"0",
"]",
"=",
"ax0",
"# Note off-by-one counting because add_subplot uses the MATLAB 1-based",
"# convention.",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"nplots",
")",
":",
"kwds",
"=",
"subplot_kw",
".",
"copy",
"(",
")",
"# Set sharex and sharey to None for blank/dummy axes, these can",
"# interfere with proper axis limits on the visible axes if",
"# they share axes e.g. issue #7528",
"if",
"i",
">=",
"naxes",
":",
"kwds",
"[",
"'sharex'",
"]",
"=",
"None",
"kwds",
"[",
"'sharey'",
"]",
"=",
"None",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"nrows",
",",
"ncols",
",",
"i",
"+",
"1",
",",
"*",
"*",
"kwds",
")",
"axarr",
"[",
"i",
"]",
"=",
"ax",
"if",
"naxes",
"!=",
"nplots",
":",
"for",
"ax",
"in",
"axarr",
"[",
"naxes",
":",
"]",
":",
"ax",
".",
"set_visible",
"(",
"False",
")",
"_handle_shared_axes",
"(",
"axarr",
",",
"nplots",
",",
"naxes",
",",
"nrows",
",",
"ncols",
",",
"sharex",
",",
"sharey",
")",
"if",
"squeeze",
":",
"# Reshape the array to have the final desired dimension (nrow,ncol),",
"# though discarding unneeded dimensions that equal 1. If we only have",
"# one subplot, just return it instead of a 1-element array.",
"if",
"nplots",
"==",
"1",
":",
"axes",
"=",
"axarr",
"[",
"0",
"]",
"else",
":",
"axes",
"=",
"axarr",
".",
"reshape",
"(",
"nrows",
",",
"ncols",
")",
".",
"squeeze",
"(",
")",
"else",
":",
"# returned axis array will be always 2-d, even if nrows=ncols=1",
"axes",
"=",
"axarr",
".",
"reshape",
"(",
"nrows",
",",
"ncols",
")",
"return",
"fig",
",",
"axes"
] |
Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
Number of required axes. Exceeded axes are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects are returned as numpy 1-d arrays.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
|
[
"Create",
"a",
"figure",
"with",
"a",
"set",
"of",
"subplots",
"already",
"made",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_tools.py#L110-L271
|
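A hedged sketch of the grid logic _subplots provides; rather than calling the private helper, the example goes through DataFrame.plot(subplots=True), which uses it internally for the naxes/layout bookkeeping described above.

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(100, 3), columns=list('abc'))

# Three series in a 2x2 'box' layout: the spare fourth axis is hidden,
# matching the naxes != nplots handling in the source.
axes = df.plot(subplots=True, layout=(2, 2), sharex=True, figsize=(8, 6))
print(axes.shape)                   # (2, 2)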