Dataset schema: id (int32, 0-252k) | repo (string, len 7-55) | path (string, len 4-127) | func_name (string, len 1-88) | original_string (string, len 75-19.8k) | language (1 class: python) | code (string, len 75-19.8k) | code_tokens (list) | docstring (string, len 3-17.3k) | docstring_tokens (list) | sha (string, len 40) | url (string, len 87-242)

| id | repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url |
|---|---|---|---|---|---|---|---|---|---|---|---|
20,400
|
pandas-dev/pandas
|
pandas/core/arrays/datetimes.py
|
DatetimeArray._add_delta
|
def _add_delta(self, delta):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new DatetimeArray.
Parameters
----------
delta : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : DatetimeArray
"""
new_values = super()._add_delta(delta)
return type(self)._from_sequence(new_values, tz=self.tz, freq='infer')
|
python
|
def _add_delta(self, delta):
"""
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new DatetimeArray.
Parameters
----------
delta : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : DatetimeArray
"""
new_values = super()._add_delta(delta)
return type(self)._from_sequence(new_values, tz=self.tz, freq='infer')
|
[
"def",
"_add_delta",
"(",
"self",
",",
"delta",
")",
":",
"new_values",
"=",
"super",
"(",
")",
".",
"_add_delta",
"(",
"delta",
")",
"return",
"type",
"(",
"self",
")",
".",
"_from_sequence",
"(",
"new_values",
",",
"tz",
"=",
"self",
".",
"tz",
",",
"freq",
"=",
"'infer'",
")"
] |
Add a timedelta-like, Tick, or TimedeltaIndex-like object
to self, yielding a new DatetimeArray.
Parameters
----------
delta : {timedelta, np.timedelta64, Tick,
TimedeltaIndex, ndarray[timedelta64]}
Returns
-------
result : DatetimeArray
|
[
"Add",
"a",
"timedelta",
"-",
"like",
"Tick",
"or",
"TimedeltaIndex",
"-",
"like",
"object",
"to",
"self",
"yielding",
"a",
"new",
"DatetimeArray"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L759-L774
|
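A minimal usage sketch of the behavior documented above, via the public tz-aware API (`_add_delta` itself is private; `freq='infer'` means the result's frequency is re-inferred rather than copied):

```python
import pandas as pd

idx = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Eastern')
shifted = idx + pd.Timedelta(hours=12)  # dispatches through DatetimeArray._add_delta
print(shifted.tz)    # US/Eastern -- timezone is preserved
print(shifted.freq)  # <Day> -- re-inferred from the evenly spaced result
```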
20,401
|
pandas-dev/pandas
|
pandas/core/arrays/datetimes.py
|
DatetimeArray.normalize
|
def normalize(self):
"""
Convert times to midnight.
The time component of the date-time is converted to midnight,
i.e. 00:00:00. This is useful when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
if self.tz is None or timezones.is_utc(self.tz):
not_null = ~self.isna()
DAY_NS = ccalendar.DAY_SECONDS * 1000000000
new_values = self.asi8.copy()
adjustment = (new_values[not_null] % DAY_NS)
new_values[not_null] = new_values[not_null] - adjustment
else:
new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)
return type(self)._from_sequence(new_values,
freq='infer').tz_localize(self.tz)
|
python
|
def normalize(self):
"""
Convert times to midnight.
The time component of the date-time is converted to midnight,
i.e. 00:00:00. This is useful when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
"""
if self.tz is None or timezones.is_utc(self.tz):
not_null = ~self.isna()
DAY_NS = ccalendar.DAY_SECONDS * 1000000000
new_values = self.asi8.copy()
adjustment = (new_values[not_null] % DAY_NS)
new_values[not_null] = new_values[not_null] - adjustment
else:
new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz)
return type(self)._from_sequence(new_values,
freq='infer').tz_localize(self.tz)
|
[
"def",
"normalize",
"(",
"self",
")",
":",
"if",
"self",
".",
"tz",
"is",
"None",
"or",
"timezones",
".",
"is_utc",
"(",
"self",
".",
"tz",
")",
":",
"not_null",
"=",
"~",
"self",
".",
"isna",
"(",
")",
"DAY_NS",
"=",
"ccalendar",
".",
"DAY_SECONDS",
"*",
"1000000000",
"new_values",
"=",
"self",
".",
"asi8",
".",
"copy",
"(",
")",
"adjustment",
"=",
"(",
"new_values",
"[",
"not_null",
"]",
"%",
"DAY_NS",
")",
"new_values",
"[",
"not_null",
"]",
"=",
"new_values",
"[",
"not_null",
"]",
"-",
"adjustment",
"else",
":",
"new_values",
"=",
"conversion",
".",
"normalize_i8_timestamps",
"(",
"self",
".",
"asi8",
",",
"self",
".",
"tz",
")",
"return",
"type",
"(",
"self",
")",
".",
"_from_sequence",
"(",
"new_values",
",",
"freq",
"=",
"'infer'",
")",
".",
"tz_localize",
"(",
"self",
".",
"tz",
")"
] |
Convert times to midnight.
The time component of the date-time is converted to midnight,
i.e. 00:00:00. This is useful when the time does not matter.
Length is unaltered. The timezones are unaffected.
This method is available on Series with datetime values under
the ``.dt`` accessor, and directly on Datetime Array/Index.
Returns
-------
DatetimeArray, DatetimeIndex or Series
The same type as the original data. Series will have the same
name and index. DatetimeIndex will have the same name.
See Also
--------
floor : Floor the datetimes to the specified freq.
ceil : Ceil the datetimes to the specified freq.
round : Round the datetimes to the specified freq.
Examples
--------
>>> idx = pd.date_range(start='2014-08-01 10:00', freq='H',
... periods=3, tz='Asia/Calcutta')
>>> idx
DatetimeIndex(['2014-08-01 10:00:00+05:30',
'2014-08-01 11:00:00+05:30',
'2014-08-01 12:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq='H')
>>> idx.normalize()
DatetimeIndex(['2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30',
'2014-08-01 00:00:00+05:30'],
dtype='datetime64[ns, Asia/Calcutta]', freq=None)
|
[
"Convert",
"times",
"to",
"midnight",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1063-L1110
|
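The UTC/naive branch above floors the underlying i8 nanosecond values with a modulo; a self-contained numpy sketch of that arithmetic:

```python
import numpy as np

DAY_NS = 24 * 60 * 60 * 1_000_000_000  # ccalendar.DAY_SECONDS * 10**9

# Two UTC timestamps as i8 nanoseconds: 1970-01-01 10:00 and 1970-01-02 01:30
i8 = np.array([10 * 3600, 25 * 3600 + 30 * 60], dtype='i8') * 1_000_000_000
floored = i8 - i8 % DAY_NS  # drop the intra-day remainder
print(floored.view('M8[ns]'))
# ['1970-01-01T00:00:00.000000000' '1970-01-02T00:00:00.000000000']
```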
20,402
|
pandas-dev/pandas
|
pandas/core/arrays/datetimes.py
|
DatetimeArray.to_perioddelta
|
def to_perioddelta(self, freq):
"""
Calculate the TimedeltaArray of differences between the index
values and the index converted to PeriodArray at the specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
# TODO: consider privatizing (discussion in GH#23113)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view('m8[ns]')
return TimedeltaArray(m8delta)
|
python
|
def to_perioddelta(self, freq):
"""
Calculate the TimedeltaArray of differences between the index
values and the index converted to PeriodArray at the specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
"""
# TODO: consider privatizing (discussion in GH#23113)
from pandas.core.arrays.timedeltas import TimedeltaArray
i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8
m8delta = i8delta.view('m8[ns]')
return TimedeltaArray(m8delta)
|
[
"def",
"to_perioddelta",
"(",
"self",
",",
"freq",
")",
":",
"# TODO: consider privatizing (discussion in GH#23113)",
"from",
"pandas",
".",
"core",
".",
"arrays",
".",
"timedeltas",
"import",
"TimedeltaArray",
"i8delta",
"=",
"self",
".",
"asi8",
"-",
"self",
".",
"to_period",
"(",
"freq",
")",
".",
"to_timestamp",
"(",
")",
".",
"asi8",
"m8delta",
"=",
"i8delta",
".",
"view",
"(",
"'m8[ns]'",
")",
"return",
"TimedeltaArray",
"(",
"m8delta",
")"
] |
Calculate the TimedeltaArray of differences between the index
values and the index converted to PeriodArray at the specified
freq. Used for vectorized offsets.
Parameters
----------
freq : Period frequency
Returns
-------
TimedeltaArray/Index
|
[
"Calculate",
"TimedeltaArray",
"of",
"difference",
"between",
"index",
"values",
"and",
"index",
"converted",
"to",
"PeriodArray",
"at",
"specified",
"freq",
".",
"Used",
"for",
"vectorized",
"offsets"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1173-L1191
|
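The same deltas can be reproduced with public operations; a sketch:

```python
import pandas as pd

idx = pd.date_range('2019-01-01 06:30', periods=2, freq='12H')
# Equivalent to idx.to_perioddelta('D'): each timestamp's offset from
# the start of its daily period.
delta = idx - idx.to_period('D').to_timestamp()
print(delta)  # TimedeltaIndex(['0 days 06:30:00', '0 days 18:30:00'], ...)
```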
20,403
|
pandas-dev/pandas
|
pandas/core/arrays/datetimes.py
|
DatetimeArray.month_name
|
def month_name(self, locale=None):
"""
Return the month names of the DatetimeIndex with the specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
if self.tz is not None and not timezones.is_utc(self.tz):
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'month_name',
locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
|
python
|
def month_name(self, locale=None):
"""
Return the month names of the DatetimeIndex with the specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
if self.tz is not None and not timezones.is_utc(self.tz):
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'month_name',
locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result
|
[
"def",
"month_name",
"(",
"self",
",",
"locale",
"=",
"None",
")",
":",
"if",
"self",
".",
"tz",
"is",
"not",
"None",
"and",
"not",
"timezones",
".",
"is_utc",
"(",
"self",
".",
"tz",
")",
":",
"values",
"=",
"self",
".",
"_local_timestamps",
"(",
")",
"else",
":",
"values",
"=",
"self",
".",
"asi8",
"result",
"=",
"fields",
".",
"get_date_name_field",
"(",
"values",
",",
"'month_name'",
",",
"locale",
"=",
"locale",
")",
"result",
"=",
"self",
".",
"_maybe_mask_results",
"(",
"result",
",",
"fill_value",
"=",
"None",
")",
"return",
"result"
] |
Return the month names of the DatetimeIndex with the specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
|
[
"Return",
"the",
"month",
"names",
"of",
"the",
"DateTimeIndex",
"with",
"specified",
"locale",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1196-L1230
|
20,404
|
pandas-dev/pandas
|
pandas/core/arrays/datetimes.py
|
DatetimeArray.time
|
def time(self):
"""
Returns a numpy array of datetime.time objects: the time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and not timezones.is_utc(self.tz):
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="time")
|
python
|
def time(self):
"""
Returns a numpy array of datetime.time objects: the time part of the Timestamps.
"""
# If the Timestamps have a timezone that is not UTC,
# convert them into their i8 representation while
# keeping their timezone and not using UTC
if self.tz is not None and not timezones.is_utc(self.tz):
timestamps = self._local_timestamps()
else:
timestamps = self.asi8
return tslib.ints_to_pydatetime(timestamps, box="time")
|
[
"def",
"time",
"(",
"self",
")",
":",
"# If the Timestamps have a timezone that is not UTC,",
"# convert them into their i8 representation while",
"# keeping their timezone and not using UTC",
"if",
"self",
".",
"tz",
"is",
"not",
"None",
"and",
"not",
"timezones",
".",
"is_utc",
"(",
"self",
".",
"tz",
")",
":",
"timestamps",
"=",
"self",
".",
"_local_timestamps",
"(",
")",
"else",
":",
"timestamps",
"=",
"self",
".",
"asi8",
"return",
"tslib",
".",
"ints_to_pydatetime",
"(",
"timestamps",
",",
"box",
"=",
"\"time\"",
")"
] |
Returns a numpy array of datetime.time objects: the time part of the Timestamps.
|
[
"Returns",
"numpy",
"array",
"of",
"datetime",
".",
"time",
".",
"The",
"time",
"part",
"of",
"the",
"Timestamps",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/datetimes.py#L1269-L1281
|
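A short usage sketch; for non-UTC zones the values reflect local wall-clock time (the `_local_timestamps()` branch above):

```python
import pandas as pd

idx = pd.date_range('2019-01-01 09:15', periods=2, freq='H', tz='Europe/Berlin')
print(idx.time)
# [datetime.time(9, 15) datetime.time(10, 15)] -- local times, not UTC
```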
20,405
|
pandas-dev/pandas
|
scripts/validate_docstrings.py
|
get_api_items
|
def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
Parse the api.rst file from the documentation and extract all the functions,
methods, classes, attributes... This should include the entire pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
The name of the object (e.g. 'pandas.Series.str.upper').
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
"""
current_module = 'pandas'
previous_line = current_section = current_subsection = ''
position = None
for line in api_doc_fd:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set('-'):
current_section = previous_line
continue
if set(line) == set('~'):
current_subsection = previous_line
continue
if line.startswith('.. currentmodule::'):
current_module = line.replace('.. currentmodule::', '').strip()
continue
if line == '.. autosummary::':
position = 'autosummary'
continue
if position == 'autosummary':
if line == '':
position = 'items'
continue
if position == 'items':
if line == '':
position = None
continue
item = line.strip()
func = importlib.import_module(current_module)
for part in item.split('.'):
func = getattr(func, part)
yield ('.'.join([current_module, item]), func,
current_section, current_subsection)
previous_line = line
|
python
|
def get_api_items(api_doc_fd):
"""
Yield information about all public API items.
Parse the api.rst file from the documentation and extract all the functions,
methods, classes, attributes... This should include the entire pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
The name of the object (e.g. 'pandas.Series.str.upper').
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
"""
current_module = 'pandas'
previous_line = current_section = current_subsection = ''
position = None
for line in api_doc_fd:
line = line.strip()
if len(line) == len(previous_line):
if set(line) == set('-'):
current_section = previous_line
continue
if set(line) == set('~'):
current_subsection = previous_line
continue
if line.startswith('.. currentmodule::'):
current_module = line.replace('.. currentmodule::', '').strip()
continue
if line == '.. autosummary::':
position = 'autosummary'
continue
if position == 'autosummary':
if line == '':
position = 'items'
continue
if position == 'items':
if line == '':
position = None
continue
item = line.strip()
func = importlib.import_module(current_module)
for part in item.split('.'):
func = getattr(func, part)
yield ('.'.join([current_module, item]), func,
current_section, current_subsection)
previous_line = line
|
[
"def",
"get_api_items",
"(",
"api_doc_fd",
")",
":",
"current_module",
"=",
"'pandas'",
"previous_line",
"=",
"current_section",
"=",
"current_subsection",
"=",
"''",
"position",
"=",
"None",
"for",
"line",
"in",
"api_doc_fd",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"line",
")",
"==",
"len",
"(",
"previous_line",
")",
":",
"if",
"set",
"(",
"line",
")",
"==",
"set",
"(",
"'-'",
")",
":",
"current_section",
"=",
"previous_line",
"continue",
"if",
"set",
"(",
"line",
")",
"==",
"set",
"(",
"'~'",
")",
":",
"current_subsection",
"=",
"previous_line",
"continue",
"if",
"line",
".",
"startswith",
"(",
"'.. currentmodule::'",
")",
":",
"current_module",
"=",
"line",
".",
"replace",
"(",
"'.. currentmodule::'",
",",
"''",
")",
".",
"strip",
"(",
")",
"continue",
"if",
"line",
"==",
"'.. autosummary::'",
":",
"position",
"=",
"'autosummary'",
"continue",
"if",
"position",
"==",
"'autosummary'",
":",
"if",
"line",
"==",
"''",
":",
"position",
"=",
"'items'",
"continue",
"if",
"position",
"==",
"'items'",
":",
"if",
"line",
"==",
"''",
":",
"position",
"=",
"None",
"continue",
"item",
"=",
"line",
".",
"strip",
"(",
")",
"func",
"=",
"importlib",
".",
"import_module",
"(",
"current_module",
")",
"for",
"part",
"in",
"item",
".",
"split",
"(",
"'.'",
")",
":",
"func",
"=",
"getattr",
"(",
"func",
",",
"part",
")",
"yield",
"(",
"'.'",
".",
"join",
"(",
"[",
"current_module",
",",
"item",
"]",
")",
",",
"func",
",",
"current_section",
",",
"current_subsection",
")",
"previous_line",
"=",
"line"
] |
Yield information about all public API items.
Parse the api.rst file from the documentation and extract all the functions,
methods, classes, attributes... This should include the entire pandas public API.
Parameters
----------
api_doc_fd : file descriptor
A file descriptor of the API documentation page, containing the table
of contents with all the public API.
Yields
------
name : str
The name of the object (e.g. 'pandas.Series.str.upper').
func : function
The object itself. In most cases this will be a function or method,
but it can also be classes, properties, cython objects...
section : str
The name of the section in the API page where the object item is
located.
subsection : str
The name of the subsection in the API page where the object item is
located.
|
[
"Yield",
"information",
"about",
"all",
"public",
"API",
"items",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L158-L223
|
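A sketch of the parser's contract, feeding it a tiny RST fragment through `io.StringIO` (assuming `get_api_items` is imported from the validate_docstrings script; any iterable of lines works as `api_doc_fd`):

```python
import io

rst = io.StringIO("""
API reference
-------------

.. currentmodule:: pandas

.. autosummary::

   Series.head
""")
for name, func, section, subsection in get_api_items(rst):
    print(name, '|', section)  # pandas.Series.head | API reference
```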
20,406
|
pandas-dev/pandas
|
scripts/validate_docstrings.py
|
validate_one
|
def validate_one(func_name):
"""
Validate the docstring for the given func_name.
Parameters
----------
func_name : str
Name of the function whose docstring will be evaluated (e.g. 'pandas.read_csv').
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring.
"""
doc = Docstring(func_name)
errs, wrns, examples_errs = get_validation_data(doc)
return {'type': doc.type,
'docstring': doc.clean_doc,
'deprecated': doc.deprecated,
'file': doc.source_file_name,
'file_line': doc.source_file_def_line,
'github_link': doc.github_url,
'errors': errs,
'warnings': wrns,
'examples_errors': examples_errs}
|
python
|
def validate_one(func_name):
"""
Validate the docstring for the given func_name.
Parameters
----------
func_name : str
Name of the function whose docstring will be evaluated (e.g. 'pandas.read_csv').
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring.
"""
doc = Docstring(func_name)
errs, wrns, examples_errs = get_validation_data(doc)
return {'type': doc.type,
'docstring': doc.clean_doc,
'deprecated': doc.deprecated,
'file': doc.source_file_name,
'file_line': doc.source_file_def_line,
'github_link': doc.github_url,
'errors': errs,
'warnings': wrns,
'examples_errors': examples_errs}
|
[
"def",
"validate_one",
"(",
"func_name",
")",
":",
"doc",
"=",
"Docstring",
"(",
"func_name",
")",
"errs",
",",
"wrns",
",",
"examples_errs",
"=",
"get_validation_data",
"(",
"doc",
")",
"return",
"{",
"'type'",
":",
"doc",
".",
"type",
",",
"'docstring'",
":",
"doc",
".",
"clean_doc",
",",
"'deprecated'",
":",
"doc",
".",
"deprecated",
",",
"'file'",
":",
"doc",
".",
"source_file_name",
",",
"'file_line'",
":",
"doc",
".",
"source_file_def_line",
",",
"'github_link'",
":",
"doc",
".",
"github_url",
",",
"'errors'",
":",
"errs",
",",
"'warnings'",
":",
"wrns",
",",
"'examples_errors'",
":",
"examples_errs",
"}"
] |
Validate the docstring for the given func_name.
Parameters
----------
func_name : str
Name of the function whose docstring will be evaluated (e.g. 'pandas.read_csv').
Returns
-------
dict
A dictionary containing all the information obtained from validating
the docstring.
|
[
"Validate",
"the",
"docstring",
"for",
"the",
"given",
"func_name"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L788-L813
|
20,407
|
pandas-dev/pandas
|
scripts/validate_docstrings.py
|
validate_all
|
def validate_all(prefix, ignore_deprecated=False):
"""
Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated : bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information.
"""
result = {}
seen = {}
# functions from the API docs
api_doc_fnames = os.path.join(
BASE_PATH, 'doc', 'source', 'reference', '*.rst')
api_items = []
for api_doc_fname in glob.glob(api_doc_fnames):
with open(api_doc_fname) as f:
api_items += list(get_api_items(f))
for func_name, func_obj, section, subsection in api_items:
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info['deprecated']:
continue
result[func_name] = doc_info
shared_code_key = doc_info['file'], doc_info['file_line']
shared_code = seen.get(shared_code_key, '')
result[func_name].update({'in_api': True,
'section': section,
'subsection': subsection,
'shared_code_with': shared_code})
seen[shared_code_key] = func_name
# functions from introspecting Series, DataFrame and Panel
api_item_names = set(list(zip(*api_items))[0])
for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel):
for member in inspect.getmembers(class_):
func_name = 'pandas.{}.{}'.format(class_.__name__, member[0])
if (not member[0].startswith('_')
and func_name not in api_item_names):
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info['deprecated']:
continue
result[func_name] = doc_info
result[func_name]['in_api'] = False
return result
|
python
|
def validate_all(prefix, ignore_deprecated=False):
"""
Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated : bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information.
"""
result = {}
seen = {}
# functions from the API docs
api_doc_fnames = os.path.join(
BASE_PATH, 'doc', 'source', 'reference', '*.rst')
api_items = []
for api_doc_fname in glob.glob(api_doc_fnames):
with open(api_doc_fname) as f:
api_items += list(get_api_items(f))
for func_name, func_obj, section, subsection in api_items:
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info['deprecated']:
continue
result[func_name] = doc_info
shared_code_key = doc_info['file'], doc_info['file_line']
shared_code = seen.get(shared_code_key, '')
result[func_name].update({'in_api': True,
'section': section,
'subsection': subsection,
'shared_code_with': shared_code})
seen[shared_code_key] = func_name
# functions from introspecting Series, DataFrame and Panel
api_item_names = set(list(zip(*api_items))[0])
for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel):
for member in inspect.getmembers(class_):
func_name = 'pandas.{}.{}'.format(class_.__name__, member[0])
if (not member[0].startswith('_')
and func_name not in api_item_names):
if prefix and not func_name.startswith(prefix):
continue
doc_info = validate_one(func_name)
if ignore_deprecated and doc_info['deprecated']:
continue
result[func_name] = doc_info
result[func_name]['in_api'] = False
return result
|
[
"def",
"validate_all",
"(",
"prefix",
",",
"ignore_deprecated",
"=",
"False",
")",
":",
"result",
"=",
"{",
"}",
"seen",
"=",
"{",
"}",
"# functions from the API docs",
"api_doc_fnames",
"=",
"os",
".",
"path",
".",
"join",
"(",
"BASE_PATH",
",",
"'doc'",
",",
"'source'",
",",
"'reference'",
",",
"'*.rst'",
")",
"api_items",
"=",
"[",
"]",
"for",
"api_doc_fname",
"in",
"glob",
".",
"glob",
"(",
"api_doc_fnames",
")",
":",
"with",
"open",
"(",
"api_doc_fname",
")",
"as",
"f",
":",
"api_items",
"+=",
"list",
"(",
"get_api_items",
"(",
"f",
")",
")",
"for",
"func_name",
",",
"func_obj",
",",
"section",
",",
"subsection",
"in",
"api_items",
":",
"if",
"prefix",
"and",
"not",
"func_name",
".",
"startswith",
"(",
"prefix",
")",
":",
"continue",
"doc_info",
"=",
"validate_one",
"(",
"func_name",
")",
"if",
"ignore_deprecated",
"and",
"doc_info",
"[",
"'deprecated'",
"]",
":",
"continue",
"result",
"[",
"func_name",
"]",
"=",
"doc_info",
"shared_code_key",
"=",
"doc_info",
"[",
"'file'",
"]",
",",
"doc_info",
"[",
"'file_line'",
"]",
"shared_code",
"=",
"seen",
".",
"get",
"(",
"shared_code_key",
",",
"''",
")",
"result",
"[",
"func_name",
"]",
".",
"update",
"(",
"{",
"'in_api'",
":",
"True",
",",
"'section'",
":",
"section",
",",
"'subsection'",
":",
"subsection",
",",
"'shared_code_with'",
":",
"shared_code",
"}",
")",
"seen",
"[",
"shared_code_key",
"]",
"=",
"func_name",
"# functions from introspecting Series, DataFrame and Panel",
"api_item_names",
"=",
"set",
"(",
"list",
"(",
"zip",
"(",
"*",
"api_items",
")",
")",
"[",
"0",
"]",
")",
"for",
"class_",
"in",
"(",
"pandas",
".",
"Series",
",",
"pandas",
".",
"DataFrame",
",",
"pandas",
".",
"Panel",
")",
":",
"for",
"member",
"in",
"inspect",
".",
"getmembers",
"(",
"class_",
")",
":",
"func_name",
"=",
"'pandas.{}.{}'",
".",
"format",
"(",
"class_",
".",
"__name__",
",",
"member",
"[",
"0",
"]",
")",
"if",
"(",
"not",
"member",
"[",
"0",
"]",
".",
"startswith",
"(",
"'_'",
")",
"and",
"func_name",
"not",
"in",
"api_item_names",
")",
":",
"if",
"prefix",
"and",
"not",
"func_name",
".",
"startswith",
"(",
"prefix",
")",
":",
"continue",
"doc_info",
"=",
"validate_one",
"(",
"func_name",
")",
"if",
"ignore_deprecated",
"and",
"doc_info",
"[",
"'deprecated'",
"]",
":",
"continue",
"result",
"[",
"func_name",
"]",
"=",
"doc_info",
"result",
"[",
"func_name",
"]",
"[",
"'in_api'",
"]",
"=",
"False",
"return",
"result"
] |
Execute the validation of all docstrings, and return a dict with the
results.
Parameters
----------
prefix : str or None
If provided, only the docstrings that start with this pattern will be
validated. If None, all docstrings will be validated.
ignore_deprecated : bool, default False
If True, deprecated objects are ignored when validating docstrings.
Returns
-------
dict
A dictionary with an item for every function/method... containing
all the validation information.
|
[
"Execute",
"the",
"validation",
"of",
"all",
"docstrings",
"and",
"return",
"a",
"dict",
"with",
"the",
"results",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L816-L877
|
20,408
|
pandas-dev/pandas
|
scripts/validate_docstrings.py
|
Docstring._load_obj
|
def _load_obj(name):
"""
Import Python object from its name as string.
Parameters
----------
name : str
Object name to import (e.g. pandas.Series.str.upper)
Returns
-------
object
Python object that can be a class, method, function...
Examples
--------
>>> Docstring._load_obj('pandas.Series')
<class 'pandas.core.series.Series'>
"""
for maxsplit in range(1, name.count('.') + 1):
# TODO when py3 only replace by: module, *func_parts = ...
func_name_split = name.rsplit('.', maxsplit)
module = func_name_split[0]
func_parts = func_name_split[1:]
try:
obj = importlib.import_module(module)
except ImportError:
pass
else:
continue
if 'obj' not in locals():
raise ImportError('No module can be imported '
'from "{}"'.format(name))
for part in func_parts:
obj = getattr(obj, part)
return obj
|
python
|
def _load_obj(name):
"""
Import Python object from its name as string.
Parameters
----------
name : str
Object name to import (e.g. pandas.Series.str.upper)
Returns
-------
object
Python object that can be a class, method, function...
Examples
--------
>>> Docstring._load_obj('pandas.Series')
<class 'pandas.core.series.Series'>
"""
for maxsplit in range(1, name.count('.') + 1):
# TODO when py3 only replace by: module, *func_parts = ...
func_name_split = name.rsplit('.', maxsplit)
module = func_name_split[0]
func_parts = func_name_split[1:]
try:
obj = importlib.import_module(module)
except ImportError:
pass
else:
continue
if 'obj' not in locals():
raise ImportError('No module can be imported '
'from "{}"'.format(name))
for part in func_parts:
obj = getattr(obj, part)
return obj
|
[
"def",
"_load_obj",
"(",
"name",
")",
":",
"for",
"maxsplit",
"in",
"range",
"(",
"1",
",",
"name",
".",
"count",
"(",
"'.'",
")",
"+",
"1",
")",
":",
"# TODO when py3 only replace by: module, *func_parts = ...",
"func_name_split",
"=",
"name",
".",
"rsplit",
"(",
"'.'",
",",
"maxsplit",
")",
"module",
"=",
"func_name_split",
"[",
"0",
"]",
"func_parts",
"=",
"func_name_split",
"[",
"1",
":",
"]",
"try",
":",
"obj",
"=",
"importlib",
".",
"import_module",
"(",
"module",
")",
"except",
"ImportError",
":",
"pass",
"else",
":",
"continue",
"if",
"'obj'",
"not",
"in",
"locals",
"(",
")",
":",
"raise",
"ImportError",
"(",
"'No module can be imported '",
"'from \"{}\"'",
".",
"format",
"(",
"name",
")",
")",
"for",
"part",
"in",
"func_parts",
":",
"obj",
"=",
"getattr",
"(",
"obj",
",",
"part",
")",
"return",
"obj"
] |
Import Python object from its name as string.
Parameters
----------
name : str
Object name to import (e.g. pandas.Series.str.upper)
Returns
-------
object
Python object that can be a class, method, function...
Examples
--------
>>> Docstring._load_obj('pandas.Series')
<class 'pandas.core.series.Series'>
|
[
"Import",
"Python",
"object",
"from",
"its",
"name",
"as",
"string",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L240-L277
|
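Note that the `else: continue` above is redundant: the `except ImportError: pass` already falls through to the next iteration, so every split is tried and the shortest importable prefix wins. A tightened standalone sketch of the same resolution strategy, stopping at the first importable prefix (names here are illustrative, not the script's API):

```python
import importlib

def load_obj(name):
    obj, func_parts = None, []
    for maxsplit in range(1, name.count('.') + 1):
        module, *func_parts = name.rsplit('.', maxsplit)
        try:
            obj = importlib.import_module(module)
        except ImportError:
            continue  # try a shorter module prefix
        break
    if obj is None:
        raise ImportError('No module can be imported from "{}"'.format(name))
    for part in func_parts:
        obj = getattr(obj, part)
    return obj

print(load_obj('os.path.join'))  # <function join at 0x...>
```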
20,409
|
pandas-dev/pandas
|
scripts/validate_docstrings.py
|
Docstring._to_original_callable
|
def _to_original_callable(obj):
"""
Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
all cases, but it should help find some (properties...).
"""
while True:
if inspect.isfunction(obj) or inspect.isclass(obj):
f = inspect.getfile(obj)
if f.startswith('<') and f.endswith('>'):
return None
return obj
if inspect.ismethod(obj):
obj = obj.__func__
elif isinstance(obj, functools.partial):
obj = obj.func
elif isinstance(obj, property):
obj = obj.fget
else:
return None
|
python
|
def _to_original_callable(obj):
"""
Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
all cases, but it should help find some (properties...).
"""
while True:
if inspect.isfunction(obj) or inspect.isclass(obj):
f = inspect.getfile(obj)
if f.startswith('<') and f.endswith('>'):
return None
return obj
if inspect.ismethod(obj):
obj = obj.__func__
elif isinstance(obj, functools.partial):
obj = obj.func
elif isinstance(obj, property):
obj = obj.fget
else:
return None
|
[
"def",
"_to_original_callable",
"(",
"obj",
")",
":",
"while",
"True",
":",
"if",
"inspect",
".",
"isfunction",
"(",
"obj",
")",
"or",
"inspect",
".",
"isclass",
"(",
"obj",
")",
":",
"f",
"=",
"inspect",
".",
"getfile",
"(",
"obj",
")",
"if",
"f",
".",
"startswith",
"(",
"'<'",
")",
"and",
"f",
".",
"endswith",
"(",
"'>'",
")",
":",
"return",
"None",
"return",
"obj",
"if",
"inspect",
".",
"ismethod",
"(",
"obj",
")",
":",
"obj",
"=",
"obj",
".",
"__func__",
"elif",
"isinstance",
"(",
"obj",
",",
"functools",
".",
"partial",
")",
":",
"obj",
"=",
"obj",
".",
"func",
"elif",
"isinstance",
"(",
"obj",
",",
"property",
")",
":",
"obj",
"=",
"obj",
".",
"fget",
"else",
":",
"return",
"None"
] |
Find the Python object that contains the source code of the object.
This is useful to find the place in the source code (file and line
number) where a docstring is defined. It does not currently work for
all cases, but it should help find some (properties...).
|
[
"Find",
"the",
"Python",
"object",
"that",
"contains",
"the",
"source",
"code",
"of",
"the",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L280-L301
|
20,410
|
pandas-dev/pandas
|
scripts/validate_docstrings.py
|
Docstring.method_returns_something
|
def method_returns_something(self):
'''
Check if the docstring's method can return something.
Bare returns, returns of None, and returns inside nested functions are
disregarded.
Returns
-------
bool
Whether the docstrings method can return something.
'''
def get_returns_not_on_nested_functions(node):
returns = [node] if isinstance(node, ast.Return) else []
for child in ast.iter_child_nodes(node):
# Ignore nested functions and their subtrees.
if not isinstance(child, ast.FunctionDef):
child_returns = get_returns_not_on_nested_functions(child)
returns.extend(child_returns)
return returns
tree = ast.parse(self.method_source).body
if tree:
returns = get_returns_not_on_nested_functions(tree[0])
return_values = [r.value for r in returns]
# Replace NameConstant nodes valued None with None.
for i, v in enumerate(return_values):
if isinstance(v, ast.NameConstant) and v.value is None:
return_values[i] = None
return any(return_values)
else:
return False
|
python
|
def method_returns_something(self):
'''
Check if the docstring's method can return something.
Bare returns, returns of None, and returns inside nested functions are
disregarded.
Returns
-------
bool
Whether the docstrings method can return something.
'''
def get_returns_not_on_nested_functions(node):
returns = [node] if isinstance(node, ast.Return) else []
for child in ast.iter_child_nodes(node):
# Ignore nested functions and their subtrees.
if not isinstance(child, ast.FunctionDef):
child_returns = get_returns_not_on_nested_functions(child)
returns.extend(child_returns)
return returns
tree = ast.parse(self.method_source).body
if tree:
returns = get_returns_not_on_nested_functions(tree[0])
return_values = [r.value for r in returns]
# Replace NameConstant nodes valued None with None.
for i, v in enumerate(return_values):
if isinstance(v, ast.NameConstant) and v.value is None:
return_values[i] = None
return any(return_values)
else:
return False
|
[
"def",
"method_returns_something",
"(",
"self",
")",
":",
"def",
"get_returns_not_on_nested_functions",
"(",
"node",
")",
":",
"returns",
"=",
"[",
"node",
"]",
"if",
"isinstance",
"(",
"node",
",",
"ast",
".",
"Return",
")",
"else",
"[",
"]",
"for",
"child",
"in",
"ast",
".",
"iter_child_nodes",
"(",
"node",
")",
":",
"# Ignore nested functions and its subtrees.",
"if",
"not",
"isinstance",
"(",
"child",
",",
"ast",
".",
"FunctionDef",
")",
":",
"child_returns",
"=",
"get_returns_not_on_nested_functions",
"(",
"child",
")",
"returns",
".",
"extend",
"(",
"child_returns",
")",
"return",
"returns",
"tree",
"=",
"ast",
".",
"parse",
"(",
"self",
".",
"method_source",
")",
".",
"body",
"if",
"tree",
":",
"returns",
"=",
"get_returns_not_on_nested_functions",
"(",
"tree",
"[",
"0",
"]",
")",
"return_values",
"=",
"[",
"r",
".",
"value",
"for",
"r",
"in",
"returns",
"]",
"# Replace NameConstant nodes valued None for None.",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"return_values",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"ast",
".",
"NameConstant",
")",
"and",
"v",
".",
"value",
"is",
"None",
":",
"return_values",
"[",
"i",
"]",
"=",
"None",
"return",
"any",
"(",
"return_values",
")",
"else",
":",
"return",
"False"
] |
Check if the docstring's method can return something.
Bare returns, returns of None, and returns inside nested functions are
disregarded.
Returns
-------
bool
Whether the docstrings method can return something.
|
[
"Check",
"if",
"the",
"docstrings",
"method",
"can",
"return",
"something",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/validate_docstrings.py#L503-L535
|
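A minimal sketch of the nested-function-aware traversal, on a toy source string:

```python
import ast

src = """
def f(x):
    def helper():
        return 'ignored'  # inside a nested def: skipped
    if x:
        return x + 1
    return None
"""

def collect_returns(node):
    found = [node] if isinstance(node, ast.Return) else []
    for child in ast.iter_child_nodes(node):
        if not isinstance(child, ast.FunctionDef):  # prune nested defs
            found.extend(collect_returns(child))
    return found

returns = collect_returns(ast.parse(src).body[0])
print(len(returns))  # 2 -- `return x + 1` and `return None`; helper's is excluded
```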
20,411
|
pandas-dev/pandas
|
pandas/io/excel/_base.py
|
ExcelWriter._value_with_fmt
|
def _value_with_fmt(self, val):
"""Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime):
fmt = self.datetime_format
elif isinstance(val, date):
fmt = self.date_format
elif isinstance(val, timedelta):
val = val.total_seconds() / float(86400)
fmt = '0'
else:
val = compat.to_str(val)
return val, fmt
|
python
|
def _value_with_fmt(self, val):
"""Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime):
fmt = self.datetime_format
elif isinstance(val, date):
fmt = self.date_format
elif isinstance(val, timedelta):
val = val.total_seconds() / float(86400)
fmt = '0'
else:
val = compat.to_str(val)
return val, fmt
|
[
"def",
"_value_with_fmt",
"(",
"self",
",",
"val",
")",
":",
"fmt",
"=",
"None",
"if",
"is_integer",
"(",
"val",
")",
":",
"val",
"=",
"int",
"(",
"val",
")",
"elif",
"is_float",
"(",
"val",
")",
":",
"val",
"=",
"float",
"(",
"val",
")",
"elif",
"is_bool",
"(",
"val",
")",
":",
"val",
"=",
"bool",
"(",
"val",
")",
"elif",
"isinstance",
"(",
"val",
",",
"datetime",
")",
":",
"fmt",
"=",
"self",
".",
"datetime_format",
"elif",
"isinstance",
"(",
"val",
",",
"date",
")",
":",
"fmt",
"=",
"self",
".",
"date_format",
"elif",
"isinstance",
"(",
"val",
",",
"timedelta",
")",
":",
"val",
"=",
"val",
".",
"total_seconds",
"(",
")",
"/",
"float",
"(",
"86400",
")",
"fmt",
"=",
"'0'",
"else",
":",
"val",
"=",
"compat",
".",
"to_str",
"(",
"val",
")",
"return",
"val",
",",
"fmt"
] |
Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
|
[
"Convert",
"numpy",
"types",
"to",
"Python",
"types",
"for",
"the",
"Excel",
"writers",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_base.py#L675-L706
|
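The timedelta branch above converts to Excel's fractional-day representation; a quick check of the arithmetic:

```python
from datetime import timedelta

td = timedelta(hours=18)
print(td.total_seconds() / float(86400))  # 0.75 -- 18h as a fraction of a day
```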
20,412
|
pandas-dev/pandas
|
pandas/io/excel/_base.py
|
ExcelWriter.check_extension
|
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = ("Invalid extension for engine '{engine}': '{ext}'"
.format(engine=pprint_thing(cls.engine),
ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
|
python
|
def check_extension(cls, ext):
"""checks that path's extension against the Writer's supported
extensions. If it isn't supported, raises UnsupportedFiletypeError."""
if ext.startswith('.'):
ext = ext[1:]
if not any(ext in extension for extension in cls.supported_extensions):
msg = ("Invalid extension for engine '{engine}': '{ext}'"
.format(engine=pprint_thing(cls.engine),
ext=pprint_thing(ext)))
raise ValueError(msg)
else:
return True
|
[
"def",
"check_extension",
"(",
"cls",
",",
"ext",
")",
":",
"if",
"ext",
".",
"startswith",
"(",
"'.'",
")",
":",
"ext",
"=",
"ext",
"[",
"1",
":",
"]",
"if",
"not",
"any",
"(",
"ext",
"in",
"extension",
"for",
"extension",
"in",
"cls",
".",
"supported_extensions",
")",
":",
"msg",
"=",
"(",
"\"Invalid extension for engine '{engine}': '{ext}'\"",
".",
"format",
"(",
"engine",
"=",
"pprint_thing",
"(",
"cls",
".",
"engine",
")",
",",
"ext",
"=",
"pprint_thing",
"(",
"ext",
")",
")",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"else",
":",
"return",
"True"
] |
checks the path's extension against the Writer's supported
extensions. If it isn't supported, raises ValueError.
|
[
"checks",
"that",
"path",
"s",
"extension",
"against",
"the",
"Writer",
"s",
"supported",
"extensions",
".",
"If",
"it",
"isn",
"t",
"supported",
"raises",
"UnsupportedFiletypeError",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/excel/_base.py#L709-L720
|
20,413
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
_validate_where
|
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may be a string, an Expr, or a list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, str)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w
|
python
|
def _validate_where(w):
"""
Validate that the where statement is of the right type.
The type may be a string, an Expr, or a list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
"""
if not (isinstance(w, (Expr, str)) or is_list_like(w)):
raise TypeError("where must be passed as a string, Expr, "
"or list-like of Exprs")
return w
|
[
"def",
"_validate_where",
"(",
"w",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"w",
",",
"(",
"Expr",
",",
"str",
")",
")",
"or",
"is_list_like",
"(",
"w",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"where must be passed as a string, Expr, \"",
"\"or list-like of Exprs\"",
")",
"return",
"w"
] |
Validate that the where statement is of the right type.
The type may be a string, an Expr, or a list-like of Exprs.
Parameters
----------
w : String term expression, Expr, or list-like of Exprs.
Returns
-------
where : The original where clause if the check was successful.
Raises
------
TypeError : An invalid data type was passed in for w (e.g. dict).
|
[
"Validate",
"that",
"the",
"where",
"statement",
"is",
"of",
"the",
"right",
"type",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L460-L483
|
20,414
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
maybe_expression
|
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
|
python
|
def maybe_expression(s):
""" loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str):
return False
ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)
# make sure we have an op at least
return any(op in s for op in ops)
|
[
"def",
"maybe_expression",
"(",
"s",
")",
":",
"if",
"not",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"return",
"False",
"ops",
"=",
"ExprVisitor",
".",
"binary_ops",
"+",
"ExprVisitor",
".",
"unary_ops",
"+",
"(",
"'='",
",",
")",
"# make sure we have an op at least",
"return",
"any",
"(",
"op",
"in",
"s",
"for",
"op",
"in",
"ops",
")"
] |
loose checking if s is a pytables-acceptable expression
|
[
"loose",
"checking",
"if",
"s",
"is",
"a",
"pytables",
"-",
"acceptable",
"expression"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L598-L605
|
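A standalone illustration of the substring check (the ops tuple is abbreviated here; the real one comes from `ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',)`):

```python
ops = ('>', '<', '>=', '<=', '==', '!=', '&', '|', '~', '=')

def maybe_expression(s):
    # not an expression unless it is a string containing at least one op
    return isinstance(s, str) and any(op in s for op in ops)

print(maybe_expression('index > 5'))    # True
print(maybe_expression('just a name'))  # False
print(maybe_expression(42))             # False -- non-strings are rejected
```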
20,415
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
BinOp.conform
|
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
|
python
|
def conform(self, rhs):
""" inplace conform rhs """
if not is_list_like(rhs):
rhs = [rhs]
if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
|
[
"def",
"conform",
"(",
"self",
",",
"rhs",
")",
":",
"if",
"not",
"is_list_like",
"(",
"rhs",
")",
":",
"rhs",
"=",
"[",
"rhs",
"]",
"if",
"isinstance",
"(",
"rhs",
",",
"np",
".",
"ndarray",
")",
":",
"rhs",
"=",
"rhs",
".",
"ravel",
"(",
")",
"return",
"rhs"
] |
inplace conform rhs
|
[
"inplace",
"conform",
"rhs"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L132-L138
|
20,416
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
BinOp.generate
|
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
|
python
|
def generate(self, v):
""" create and return the op string for this TermValue """
val = v.tostring(self.encoding)
return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
|
[
"def",
"generate",
"(",
"self",
",",
"v",
")",
":",
"val",
"=",
"v",
".",
"tostring",
"(",
"self",
".",
"encoding",
")",
"return",
"\"({lhs} {op} {val})\"",
".",
"format",
"(",
"lhs",
"=",
"self",
".",
"lhs",
",",
"op",
"=",
"self",
".",
"op",
",",
"val",
"=",
"val",
")"
] |
create and return the op string for this TermValue
|
[
"create",
"and",
"return",
"the",
"op",
"string",
"for",
"this",
"TermValue"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L166-L169
|
20,417
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
BinOp.convert_value
|
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == 'datetime64' or kind == 'datetime':
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == 'timedelta64' or kind == 'timedelta':
v = Timedelta(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == 'category':
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# searchsorted returns 0 if v is the first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, 'integer')
elif kind == 'integer':
v = int(float(v))
return TermValue(v, v, kind)
elif kind == 'float':
v = float(v)
return TermValue(v, v, kind)
elif kind == 'bool':
if isinstance(v, str):
v = not v.strip().lower() in ['false', 'f', 'no',
'n', 'none', '0',
'[]', '{}', '']
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), 'string')
else:
raise TypeError("Cannot compare {v} of type {typ} to {kind} column"
.format(v=v, typ=type(v), kind=kind))
|
python
|
def convert_value(self, v):
""" convert the expression that is in the term to something that is
accepted by pytables """
def stringify(value):
if self.encoding is not None:
encoder = partial(pprint_thing_encoded,
encoding=self.encoding)
else:
encoder = pprint_thing
return encoder(value)
kind = _ensure_decoded(self.kind)
meta = _ensure_decoded(self.meta)
if kind == 'datetime64' or kind == 'datetime':
if isinstance(v, (int, float)):
v = stringify(v)
v = _ensure_decoded(v)
v = Timestamp(v)
if v.tz is not None:
v = v.tz_convert('UTC')
return TermValue(v, v.value, kind)
elif kind == 'timedelta64' or kind == 'timedelta':
v = Timedelta(v, unit='s').value
return TermValue(int(v), v, kind)
elif meta == 'category':
metadata = com.values_from_object(self.metadata)
result = metadata.searchsorted(v, side='left')
# searchsorted returns 0 if v is the first element or if v is not in metadata
# check that metadata contains v
if not result and v not in metadata:
result = -1
return TermValue(result, result, 'integer')
elif kind == 'integer':
v = int(float(v))
return TermValue(v, v, kind)
elif kind == 'float':
v = float(v)
return TermValue(v, v, kind)
elif kind == 'bool':
if isinstance(v, str):
v = not v.strip().lower() in ['false', 'f', 'no',
'n', 'none', '0',
'[]', '{}', '']
else:
v = bool(v)
return TermValue(v, v, kind)
elif isinstance(v, str):
# string quoting
return TermValue(v, stringify(v), 'string')
else:
raise TypeError("Cannot compare {v} of type {typ} to {kind} column"
.format(v=v, typ=type(v), kind=kind))
|
[
"def",
"convert_value",
"(",
"self",
",",
"v",
")",
":",
"def",
"stringify",
"(",
"value",
")",
":",
"if",
"self",
".",
"encoding",
"is",
"not",
"None",
":",
"encoder",
"=",
"partial",
"(",
"pprint_thing_encoded",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"else",
":",
"encoder",
"=",
"pprint_thing",
"return",
"encoder",
"(",
"value",
")",
"kind",
"=",
"_ensure_decoded",
"(",
"self",
".",
"kind",
")",
"meta",
"=",
"_ensure_decoded",
"(",
"self",
".",
"meta",
")",
"if",
"kind",
"==",
"'datetime64'",
"or",
"kind",
"==",
"'datetime'",
":",
"if",
"isinstance",
"(",
"v",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"v",
"=",
"stringify",
"(",
"v",
")",
"v",
"=",
"_ensure_decoded",
"(",
"v",
")",
"v",
"=",
"Timestamp",
"(",
"v",
")",
"if",
"v",
".",
"tz",
"is",
"not",
"None",
":",
"v",
"=",
"v",
".",
"tz_convert",
"(",
"'UTC'",
")",
"return",
"TermValue",
"(",
"v",
",",
"v",
".",
"value",
",",
"kind",
")",
"elif",
"kind",
"==",
"'timedelta64'",
"or",
"kind",
"==",
"'timedelta'",
":",
"v",
"=",
"Timedelta",
"(",
"v",
",",
"unit",
"=",
"'s'",
")",
".",
"value",
"return",
"TermValue",
"(",
"int",
"(",
"v",
")",
",",
"v",
",",
"kind",
")",
"elif",
"meta",
"==",
"'category'",
":",
"metadata",
"=",
"com",
".",
"values_from_object",
"(",
"self",
".",
"metadata",
")",
"result",
"=",
"metadata",
".",
"searchsorted",
"(",
"v",
",",
"side",
"=",
"'left'",
")",
"# result returns 0 if v is first element or if v is not in metadata",
"# check that metadata contains v",
"if",
"not",
"result",
"and",
"v",
"not",
"in",
"metadata",
":",
"result",
"=",
"-",
"1",
"return",
"TermValue",
"(",
"result",
",",
"result",
",",
"'integer'",
")",
"elif",
"kind",
"==",
"'integer'",
":",
"v",
"=",
"int",
"(",
"float",
"(",
"v",
")",
")",
"return",
"TermValue",
"(",
"v",
",",
"v",
",",
"kind",
")",
"elif",
"kind",
"==",
"'float'",
":",
"v",
"=",
"float",
"(",
"v",
")",
"return",
"TermValue",
"(",
"v",
",",
"v",
",",
"kind",
")",
"elif",
"kind",
"==",
"'bool'",
":",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"v",
"=",
"not",
"v",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"in",
"[",
"'false'",
",",
"'f'",
",",
"'no'",
",",
"'n'",
",",
"'none'",
",",
"'0'",
",",
"'[]'",
",",
"'{}'",
",",
"''",
"]",
"else",
":",
"v",
"=",
"bool",
"(",
"v",
")",
"return",
"TermValue",
"(",
"v",
",",
"v",
",",
"kind",
")",
"elif",
"isinstance",
"(",
"v",
",",
"str",
")",
":",
"# string quoting",
"return",
"TermValue",
"(",
"v",
",",
"stringify",
"(",
"v",
")",
",",
"'string'",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Cannot compare {v} of type {typ} to {kind} column\"",
".",
"format",
"(",
"v",
"=",
"v",
",",
"typ",
"=",
"type",
"(",
"v",
")",
",",
"kind",
"=",
"kind",
")",
")"
] |
convert the expression that is in the term to something that is
accepted by pytables
|
[
"convert",
"the",
"expression",
"that",
"is",
"in",
"the",
"term",
"to",
"something",
"that",
"is",
"accepted",
"by",
"pytables"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L171-L224
|
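The 'bool' branch above parses strings against a fixed falsey list; a small demonstration of that rule:

```python
falsey = ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', '']

for s in (' False ', 'yes', '0'):
    print(repr(s), '->', not s.strip().lower() in falsey)
# ' False ' -> False, 'yes' -> True, '0' -> False
```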
20,418
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
FilterBinOp.invert
|
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
|
python
|
def invert(self):
""" invert the filter """
if self.filter is not None:
f = list(self.filter)
f[1] = self.generate_filter_op(invert=True)
self.filter = tuple(f)
return self
|
[
"def",
"invert",
"(",
"self",
")",
":",
"if",
"self",
".",
"filter",
"is",
"not",
"None",
":",
"f",
"=",
"list",
"(",
"self",
".",
"filter",
")",
"f",
"[",
"1",
"]",
"=",
"self",
".",
"generate_filter_op",
"(",
"invert",
"=",
"True",
")",
"self",
".",
"filter",
"=",
"tuple",
"(",
"f",
")",
"return",
"self"
] |
invert the filter
|
[
"invert",
"the",
"filter"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L236-L242
|
20,419
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
Expr.evaluate
|
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid condition".format(expr=self.expr,
slf=self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid filter".format(expr=self.expr,
slf=self))
return self.condition, self.filter
|
python
|
def evaluate(self):
""" create and return the numexpr condition and filter """
try:
self.condition = self.terms.prune(ConditionBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid condition".format(expr=self.expr,
slf=self))
try:
self.filter = self.terms.prune(FilterBinOp)
except AttributeError:
raise ValueError("cannot process expression [{expr}], [{slf}] "
"is not a valid filter".format(expr=self.expr,
slf=self))
return self.condition, self.filter
|
[
"def",
"evaluate",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"condition",
"=",
"self",
".",
"terms",
".",
"prune",
"(",
"ConditionBinOp",
")",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"\"cannot process expression [{expr}], [{slf}] \"",
"\"is not a valid condition\"",
".",
"format",
"(",
"expr",
"=",
"self",
".",
"expr",
",",
"slf",
"=",
"self",
")",
")",
"try",
":",
"self",
".",
"filter",
"=",
"self",
".",
"terms",
".",
"prune",
"(",
"FilterBinOp",
")",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"\"cannot process expression [{expr}], [{slf}] \"",
"\"is not a valid filter\"",
".",
"format",
"(",
"expr",
"=",
"self",
".",
"expr",
",",
"slf",
"=",
"self",
")",
")",
"return",
"self",
".",
"condition",
",",
"self",
".",
"filter"
] |
create and return the numexpr condition and filter
|
[
"create",
"and",
"return",
"the",
"numexpr",
"condition",
"and",
"filter"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L556-L572
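These Expr objects are built when a where clause is passed to an HDFStore query; evaluate() is what splits the parsed terms into a numexpr condition and a row filter. A hedged end-to-end sketch (requires the optional tables dependency; the file name is illustrative):

import pandas as pd

df = pd.DataFrame({'A': range(10)})
with pd.HDFStore('demo.h5') as store:
    # 'table' format with data_columns makes column 'A' queryable.
    store.put('df', df, format='table', data_columns=True)
    # The where string is parsed into an Expr whose evaluate() yields
    # the condition/filter pair used to select matching rows.
    subset = store.select('df', where='A > 5')
print(subset)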
|
20,420
|
pandas-dev/pandas
|
pandas/core/computation/pytables.py
|
TermValue.tostring
|
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == 'string':
if encoding is not None:
return self.converted
return '"{converted}"'.format(converted=self.converted)
elif self.kind == 'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
|
python
|
def tostring(self, encoding):
""" quote the string if not encoded
else encode and return """
if self.kind == 'string':
if encoding is not None:
return self.converted
return '"{converted}"'.format(converted=self.converted)
elif self.kind == 'float':
# python 2 str(float) is not always
# round-trippable so use repr()
return repr(self.converted)
return self.converted
|
[
"def",
"tostring",
"(",
"self",
",",
"encoding",
")",
":",
"if",
"self",
".",
"kind",
"==",
"'string'",
":",
"if",
"encoding",
"is",
"not",
"None",
":",
"return",
"self",
".",
"converted",
"return",
"'\"{converted}\"'",
".",
"format",
"(",
"converted",
"=",
"self",
".",
"converted",
")",
"elif",
"self",
".",
"kind",
"==",
"'float'",
":",
"# python 2 str(float) is not always",
"# round-trippable so use repr()",
"return",
"repr",
"(",
"self",
".",
"converted",
")",
"return",
"self",
".",
"converted"
] |
quote the string if not encoded
else encode and return
|
[
"quote",
"the",
"string",
"if",
"not",
"encoded",
"else",
"encode",
"and",
"return"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/pytables.py#L584-L595
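A standalone sketch of the quoting rule, reimplemented here rather than importing the internal class (the function name is mine):

def term_to_string(converted, kind, encoding=None):
    # Mirrors TermValue.tostring: quote raw strings, repr() floats for
    # round-trip safety, and pass everything else through unchanged.
    if kind == 'string':
        if encoding is not None:
            return converted  # already encoded upstream
        return '"{converted}"'.format(converted=converted)
    elif kind == 'float':
        return repr(converted)
    return converted

assert term_to_string('foo', 'string') == '"foo"'
assert term_to_string(0.1, 'float') == '0.1'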
|
20,421
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_argmin_with_skipna
|
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
|
python
|
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
|
[
"def",
"validate_argmin_with_skipna",
"(",
"skipna",
",",
"args",
",",
"kwargs",
")",
":",
"skipna",
",",
"args",
"=",
"process_skipna",
"(",
"skipna",
",",
"args",
")",
"validate_argmin",
"(",
"args",
",",
"kwargs",
")",
"return",
"skipna"
] |
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
|
[
"If",
"Series",
".",
"argmin",
"is",
"called",
"via",
"the",
"numpy",
"library",
"the",
"third",
"parameter",
"in",
"its",
"signature",
"is",
"out",
"which",
"takes",
"either",
"an",
"ndarray",
"or",
"None",
"so",
"check",
"if",
"the",
"skipna",
"parameter",
"is",
"either",
"an",
"instance",
"of",
"ndarray",
"or",
"is",
"None",
"since",
"skipna",
"itself",
"should",
"be",
"a",
"boolean"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L77-L88
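The body of process_skipna is not shown above, so the following is an assumption based on the docstring: when numpy forwards its 'out' argument positionally, it can land in the skipna slot, and the shim pushes it back into args while restoring the pandas default. A sketch of that pattern:

import numpy as np

def process_skipna_sketch(skipna, args):
    # An ndarray or None in the skipna slot means numpy's 'out'
    # argument was routed here; move it into args and restore the
    # default skipna=True.
    if isinstance(skipna, np.ndarray) or skipna is None:
        args = (skipna,) + args
        skipna = True
    return skipna, args

print(process_skipna_sketch(None, ()))   # (True, (None,))
print(process_skipna_sketch(False, ()))  # (False, ()) - explicit value kept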
|
20,422
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_argmax_with_skipna
|
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
|
python
|
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
|
[
"def",
"validate_argmax_with_skipna",
"(",
"skipna",
",",
"args",
",",
"kwargs",
")",
":",
"skipna",
",",
"args",
"=",
"process_skipna",
"(",
"skipna",
",",
"args",
")",
"validate_argmax",
"(",
"args",
",",
"kwargs",
")",
"return",
"skipna"
] |
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
|
[
"If",
"Series",
".",
"argmax",
"is",
"called",
"via",
"the",
"numpy",
"library",
"the",
"third",
"parameter",
"in",
"its",
"signature",
"is",
"out",
"which",
"takes",
"either",
"an",
"ndarray",
"or",
"None",
"so",
"check",
"if",
"the",
"skipna",
"parameter",
"is",
"either",
"an",
"instance",
"of",
"ndarray",
"or",
"is",
"None",
"since",
"skipna",
"itself",
"should",
"be",
"a",
"boolean"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L91-L102
|
20,423
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_argsort_with_ascending
|
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
|
python
|
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
|
[
"def",
"validate_argsort_with_ascending",
"(",
"ascending",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"is_integer",
"(",
"ascending",
")",
"or",
"ascending",
"is",
"None",
":",
"args",
"=",
"(",
"ascending",
",",
")",
"+",
"args",
"ascending",
"=",
"True",
"validate_argsort_kind",
"(",
"args",
",",
"kwargs",
",",
"max_fname_arg_count",
"=",
"3",
")",
"return",
"ascending"
] |
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
|
[
"If",
"Categorical",
".",
"argsort",
"is",
"called",
"via",
"the",
"numpy",
"library",
"the",
"first",
"parameter",
"in",
"its",
"signature",
"is",
"axis",
"which",
"takes",
"either",
"an",
"integer",
"or",
"None",
"so",
"check",
"if",
"the",
"ascending",
"parameter",
"has",
"either",
"integer",
"type",
"or",
"is",
"None",
"since",
"ascending",
"itself",
"should",
"be",
"a",
"boolean"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L123-L137
|
20,424
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_clip_with_axis
|
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can take an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
|
python
|
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can take an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
|
[
"def",
"validate_clip_with_axis",
"(",
"axis",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"axis",
",",
"ndarray",
")",
":",
"args",
"=",
"(",
"axis",
",",
")",
"+",
"args",
"axis",
"=",
"None",
"validate_clip",
"(",
"args",
",",
"kwargs",
")",
"return",
"axis"
] |
If 'NDFrame.clip' is called via the numpy library, the third
parameter in its signature is 'out', which can take an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
|
[
"If",
"NDFrame",
".",
"clip",
"is",
"called",
"via",
"the",
"numpy",
"library",
"the",
"third",
"parameter",
"in",
"its",
"signature",
"is",
"out",
"which",
"can",
"take",
"an",
"ndarray",
"so",
"check",
"if",
"the",
"axis",
"parameter",
"is",
"an",
"instance",
"of",
"ndarray",
"since",
"axis",
"itself",
"should",
"either",
"be",
"an",
"integer",
"or",
"None"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L145-L158
|
20,425
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_cum_func_with_skipna
|
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
|
python
|
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
|
[
"def",
"validate_cum_func_with_skipna",
"(",
"skipna",
",",
"args",
",",
"kwargs",
",",
"name",
")",
":",
"if",
"not",
"is_bool",
"(",
"skipna",
")",
":",
"args",
"=",
"(",
"skipna",
",",
")",
"+",
"args",
"skipna",
"=",
"True",
"validate_cum_func",
"(",
"args",
",",
"kwargs",
",",
"fname",
"=",
"name",
")",
"return",
"skipna"
] |
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
|
[
"If",
"this",
"function",
"is",
"called",
"via",
"the",
"numpy",
"library",
"the",
"third",
"parameter",
"in",
"its",
"signature",
"is",
"dtype",
"which",
"takes",
"either",
"a",
"numpy",
"dtype",
"or",
"None",
"so",
"check",
"if",
"the",
"skipna",
"parameter",
"is",
"a",
"boolean",
"or",
"not"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L176-L188
|
20,426
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_take_with_convert
|
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method='both')
return convert
|
python
|
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method='both')
return convert
|
[
"def",
"validate_take_with_convert",
"(",
"convert",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"convert",
",",
"ndarray",
")",
"or",
"convert",
"is",
"None",
":",
"args",
"=",
"(",
"convert",
",",
")",
"+",
"args",
"convert",
"=",
"True",
"validate_take",
"(",
"args",
",",
"kwargs",
",",
"max_fname_arg_count",
"=",
"3",
",",
"method",
"=",
"'both'",
")",
"return",
"convert"
] |
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
|
[
"If",
"this",
"function",
"is",
"called",
"via",
"the",
"numpy",
"library",
"the",
"third",
"parameter",
"in",
"its",
"signature",
"is",
"axis",
"which",
"takes",
"either",
"an",
"ndarray",
"or",
"None",
"so",
"check",
"if",
"the",
"convert",
"parameter",
"is",
"either",
"an",
"instance",
"of",
"ndarray",
"or",
"is",
"None"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L269-L282
|
20,427
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_groupby_func
|
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)))
|
python
|
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)))
|
[
"def",
"validate_groupby_func",
"(",
"name",
",",
"args",
",",
"kwargs",
",",
"allowed",
"=",
"None",
")",
":",
"if",
"allowed",
"is",
"None",
":",
"allowed",
"=",
"[",
"]",
"kwargs",
"=",
"set",
"(",
"kwargs",
")",
"-",
"set",
"(",
"allowed",
")",
"if",
"len",
"(",
"args",
")",
"+",
"len",
"(",
"kwargs",
")",
">",
"0",
":",
"raise",
"UnsupportedFunctionCall",
"(",
"(",
"\"numpy operations are not valid \"",
"\"with groupby. Use .groupby(...).\"",
"\"{func}() instead\"",
".",
"format",
"(",
"func",
"=",
"name",
")",
")",
")"
] |
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
|
[
"args",
"and",
"kwargs",
"should",
"be",
"empty",
"except",
"for",
"allowed",
"kwargs",
"because",
"all",
"of",
"their",
"necessary",
"parameters",
"are",
"explicitly",
"listed",
"in",
"the",
"function",
"signature"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L349-L365
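The guard can be exercised directly; the import path below matches the file shown above but is an internal, version-dependent API:

from pandas.errors import UnsupportedFunctionCall
from pandas.compat.numpy.function import validate_groupby_func

# No stray arguments: passes silently.
validate_groupby_func('cumsum', args=(), kwargs={})

# A numpy-style kwarg such as dtype is rejected:
try:
    validate_groupby_func('cumsum', args=(), kwargs={'dtype': 'float64'})
except UnsupportedFunctionCall as err:
    print(err)  # numpy operations are not valid with groupby. ...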
|
20,428
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_resampler_func
|
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)))
else:
raise TypeError("too many arguments passed in")
|
python
|
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)))
else:
raise TypeError("too many arguments passed in")
|
[
"def",
"validate_resampler_func",
"(",
"method",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
"+",
"len",
"(",
"kwargs",
")",
">",
"0",
":",
"if",
"method",
"in",
"RESAMPLER_NUMPY_OPS",
":",
"raise",
"UnsupportedFunctionCall",
"(",
"(",
"\"numpy operations are not valid \"",
"\"with resample. Use .resample(...).\"",
"\"{func}() instead\"",
".",
"format",
"(",
"func",
"=",
"method",
")",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"too many arguments passed in\"",
")"
] |
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
|
[
"args",
"and",
"kwargs",
"should",
"be",
"empty",
"because",
"all",
"of",
"their",
"necessary",
"parameters",
"are",
"explicitly",
"listed",
"in",
"the",
"function",
"signature"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L372-L385
|
20,429
|
pandas-dev/pandas
|
pandas/compat/numpy/function.py
|
validate_minmax_axis
|
def validate_minmax_axis(axis):
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
"""
ndim = 1 # hard-coded for Index
if axis is None:
return
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError("`axis` must be fewer than the number of "
"dimensions ({ndim})".format(ndim=ndim))
|
python
|
def validate_minmax_axis(axis):
"""
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
"""
ndim = 1 # hard-coded for Index
if axis is None:
return
if axis >= ndim or (axis < 0 and ndim + axis < 0):
raise ValueError("`axis` must be fewer than the number of "
"dimensions ({ndim})".format(ndim=ndim))
|
[
"def",
"validate_minmax_axis",
"(",
"axis",
")",
":",
"ndim",
"=",
"1",
"# hard-coded for Index",
"if",
"axis",
"is",
"None",
":",
"return",
"if",
"axis",
">=",
"ndim",
"or",
"(",
"axis",
"<",
"0",
"and",
"ndim",
"+",
"axis",
"<",
"0",
")",
":",
"raise",
"ValueError",
"(",
"\"`axis` must be fewer than the number of \"",
"\"dimensions ({ndim})\"",
".",
"format",
"(",
"ndim",
"=",
"ndim",
")",
")"
] |
Ensure that the axis argument passed to min, max, argmin, or argmax is
zero or None, as otherwise it will be incorrectly ignored.
Parameters
----------
axis : int or None
Raises
------
ValueError
|
[
"Ensure",
"that",
"the",
"axis",
"argument",
"passed",
"to",
"min",
"max",
"argmin",
"or",
"argmax",
"is",
"zero",
"or",
"None",
"as",
"otherwise",
"it",
"will",
"be",
"incorrectly",
"ignored",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/compat/numpy/function.py#L388-L406
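Because ndim is hard-coded to 1, only 0, -1, and None survive; a quick check (same internal-import caveat as above):

from pandas.compat.numpy.function import validate_minmax_axis

validate_minmax_axis(None)  # fine: no axis requested
validate_minmax_axis(0)     # fine: the only axis of an Index
validate_minmax_axis(-1)    # fine: negative index of that axis
try:
    validate_minmax_axis(1)
except ValueError as err:
    print(err)  # the ValueError explains axis must stay below ndim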
|
20,430
|
pandas-dev/pandas
|
pandas/io/packers.py
|
read_msgpack
|
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string file path, bytes, or file-like object
encoding : Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : same type as object stored in file
"""
path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs))
if len(unpacked_obj) == 1:
return unpacked_obj[0]
if should_close:
try:
path_or_buf.close()
except IOError:
pass
return unpacked_obj
# see if we have an actual file
if isinstance(path_or_buf, str):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
if isinstance(path_or_buf, bytes):
# treat as a binary-like
fh = None
try:
fh = BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
elif hasattr(path_or_buf, 'read') and callable(path_or_buf.read):
# treat as a buffer like
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
|
python
|
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string file path, bytes, or file-like object
encoding : Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : same type as object stored in file
"""
path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs))
if len(unpacked_obj) == 1:
return unpacked_obj[0]
if should_close:
try:
path_or_buf.close()
except IOError:
pass
return unpacked_obj
# see if we have an actual file
if isinstance(path_or_buf, str):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
if isinstance(path_or_buf, bytes):
# treat as a binary-like
fh = None
try:
fh = BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
elif hasattr(path_or_buf, 'read') and callable(path_or_buf.read):
# treat as a buffer like
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
|
[
"def",
"read_msgpack",
"(",
"path_or_buf",
",",
"encoding",
"=",
"'utf-8'",
",",
"iterator",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"path_or_buf",
",",
"_",
",",
"_",
",",
"should_close",
"=",
"get_filepath_or_buffer",
"(",
"path_or_buf",
")",
"if",
"iterator",
":",
"return",
"Iterator",
"(",
"path_or_buf",
")",
"def",
"read",
"(",
"fh",
")",
":",
"unpacked_obj",
"=",
"list",
"(",
"unpack",
"(",
"fh",
",",
"encoding",
"=",
"encoding",
",",
"*",
"*",
"kwargs",
")",
")",
"if",
"len",
"(",
"unpacked_obj",
")",
"==",
"1",
":",
"return",
"unpacked_obj",
"[",
"0",
"]",
"if",
"should_close",
":",
"try",
":",
"path_or_buf",
".",
"close",
"(",
")",
"except",
"IOError",
":",
"pass",
"return",
"unpacked_obj",
"# see if we have an actual file",
"if",
"isinstance",
"(",
"path_or_buf",
",",
"str",
")",
":",
"try",
":",
"exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"path_or_buf",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"exists",
"=",
"False",
"if",
"exists",
":",
"with",
"open",
"(",
"path_or_buf",
",",
"'rb'",
")",
"as",
"fh",
":",
"return",
"read",
"(",
"fh",
")",
"if",
"isinstance",
"(",
"path_or_buf",
",",
"bytes",
")",
":",
"# treat as a binary-like",
"fh",
"=",
"None",
"try",
":",
"fh",
"=",
"BytesIO",
"(",
"path_or_buf",
")",
"return",
"read",
"(",
"fh",
")",
"finally",
":",
"if",
"fh",
"is",
"not",
"None",
":",
"fh",
".",
"close",
"(",
")",
"elif",
"hasattr",
"(",
"path_or_buf",
",",
"'read'",
")",
"and",
"callable",
"(",
"path_or_buf",
".",
"read",
")",
":",
"# treat as a buffer like",
"return",
"read",
"(",
"path_or_buf",
")",
"raise",
"ValueError",
"(",
"'path_or_buf needs to be a string file path or file-like'",
")"
] |
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string file path, bytes, or file-like object
encoding : Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : same type as object stored in file
|
[
"Load",
"msgpack",
"pandas",
"object",
"from",
"the",
"specified",
"file",
"path"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/packers.py#L160-L219
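A hedged round-trip sketch: to_msgpack/read_msgpack shipped only through pandas 0.25 and need the msgpack dependency; the file name is illustrative:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
df.to_msgpack('frame.msg')                  # write the experimental format
roundtripped = pd.read_msgpack('frame.msg')
assert roundtripped.equals(df)

# iterator=True returns an Iterator over the unpacked objects instead
# of reading everything eagerly:
for obj in pd.read_msgpack('frame.msg', iterator=True):
    print(type(obj))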
|
20,431
|
pandas-dev/pandas
|
pandas/io/packers.py
|
dtype_for
|
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
|
python
|
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
|
[
"def",
"dtype_for",
"(",
"t",
")",
":",
"if",
"t",
"in",
"dtype_dict",
":",
"return",
"dtype_dict",
"[",
"t",
"]",
"return",
"np",
".",
"typeDict",
".",
"get",
"(",
"t",
",",
"t",
")"
] |
return my dtype mapping, whether number or name
|
[
"return",
"my",
"dtype",
"mapping",
"whether",
"number",
"or",
"name"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/packers.py#L236-L240
|
20,432
|
pandas-dev/pandas
|
pandas/io/packers.py
|
c2f
|
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
|
python
|
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
|
[
"def",
"c2f",
"(",
"r",
",",
"i",
",",
"ctype_name",
")",
":",
"ftype",
"=",
"c2f_dict",
"[",
"ctype_name",
"]",
"return",
"np",
".",
"typeDict",
"[",
"ctype_name",
"]",
"(",
"ftype",
"(",
"r",
")",
"+",
"1j",
"*",
"ftype",
"(",
"i",
")",
")"
] |
Convert strings to complex number instance with specified numpy type.
|
[
"Convert",
"strings",
"to",
"complex",
"number",
"instance",
"with",
"specified",
"numpy",
"type",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/packers.py#L252-L258
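c2f_dict is not shown above, so the mapping in this sketch is an assumption (each complex dtype paired with its component float type); np.typeDict is also avoided because newer numpy removed it:

import numpy as np

c2f_dict = {'complex64': np.float32, 'complex128': np.float64}  # assumed shape
ctypes = {'complex64': np.complex64, 'complex128': np.complex128}

def c2f_sketch(r, i, ctype_name):
    # Parse each part at the matching float precision, then build
    # the complex scalar of the requested type.
    ftype = c2f_dict[ctype_name]
    return ctypes[ctype_name](ftype(r) + 1j * ftype(i))

print(c2f_sketch('1.5', '-2.0', 'complex128'))  # (1.5-2j)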
|
20,433
|
pandas-dev/pandas
|
pandas/io/packers.py
|
convert
|
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
|
python
|
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
|
[
"def",
"convert",
"(",
"values",
")",
":",
"dtype",
"=",
"values",
".",
"dtype",
"if",
"is_categorical_dtype",
"(",
"values",
")",
":",
"return",
"values",
"elif",
"is_object_dtype",
"(",
"dtype",
")",
":",
"return",
"values",
".",
"ravel",
"(",
")",
".",
"tolist",
"(",
")",
"if",
"needs_i8_conversion",
"(",
"dtype",
")",
":",
"values",
"=",
"values",
".",
"view",
"(",
"'i8'",
")",
"v",
"=",
"values",
".",
"ravel",
"(",
")",
"if",
"compressor",
"==",
"'zlib'",
":",
"_check_zlib",
"(",
")",
"# return string arrays like they are",
"if",
"dtype",
"==",
"np",
".",
"object_",
":",
"return",
"v",
".",
"tolist",
"(",
")",
"# convert to a bytes array",
"v",
"=",
"v",
".",
"tostring",
"(",
")",
"return",
"ExtType",
"(",
"0",
",",
"zlib",
".",
"compress",
"(",
"v",
")",
")",
"elif",
"compressor",
"==",
"'blosc'",
":",
"_check_blosc",
"(",
")",
"# return string arrays like they are",
"if",
"dtype",
"==",
"np",
".",
"object_",
":",
"return",
"v",
".",
"tolist",
"(",
")",
"# convert to a bytes array",
"v",
"=",
"v",
".",
"tostring",
"(",
")",
"return",
"ExtType",
"(",
"0",
",",
"blosc",
".",
"compress",
"(",
"v",
",",
"typesize",
"=",
"dtype",
".",
"itemsize",
")",
")",
"# ndarray (on original dtype)",
"return",
"ExtType",
"(",
"0",
",",
"v",
".",
"tostring",
"(",
")",
")"
] |
convert the numpy values to a list
|
[
"convert",
"the",
"numpy",
"values",
"to",
"a",
"list"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/packers.py#L261-L299
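Both compressed branches reduce to "raw ndarray bytes in, compressed payload out (wrapped in ExtType)". A minimal zlib round-trip showing the underlying idea, using tobytes, the modern spelling of tostring:

import zlib
import numpy as np

values = np.arange(5, dtype='int64')
payload = zlib.compress(values.tobytes())  # the bytes ExtType(0, ...) wraps

# Unpacking reverses the steps: decompress, then rebuild the array.
restored = np.frombuffer(zlib.decompress(payload), dtype='int64')
assert (restored == values).all()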
|
20,434
|
pandas-dev/pandas
|
pandas/io/packers.py
|
pack
|
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
|
python
|
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
|
[
"def",
"pack",
"(",
"o",
",",
"default",
"=",
"encode",
",",
"encoding",
"=",
"'utf-8'",
",",
"unicode_errors",
"=",
"'strict'",
",",
"use_single_float",
"=",
"False",
",",
"autoreset",
"=",
"1",
",",
"use_bin_type",
"=",
"1",
")",
":",
"return",
"Packer",
"(",
"default",
"=",
"default",
",",
"encoding",
"=",
"encoding",
",",
"unicode_errors",
"=",
"unicode_errors",
",",
"use_single_float",
"=",
"use_single_float",
",",
"autoreset",
"=",
"autoreset",
",",
"use_bin_type",
"=",
"use_bin_type",
")",
".",
"pack",
"(",
"o",
")"
] |
Pack an object and return the packed bytes.
|
[
"Pack",
"an",
"object",
"and",
"return",
"the",
"packed",
"bytes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/packers.py#L714-L725
|
20,435
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
read_json
|
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None,
convert_axes=None, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False, chunksize=None, compression='infer'):
"""
Convert a JSON string to pandas object.
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values', 'table'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
.. versionadded:: 0.23.0
'table' as an allowed value for the ``orient`` argument
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default None
If True, infer dtypes; if a dict of column to dtype, then use those;
if False, then don't infer dtypes at all, applies only to the data.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_axes : boolean, default None
Try to convert the axes to the proper dtypes.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_dates : boolean, default True
List of columns to parse for dates; if True, then try to parse
datelike columns. A column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
chunksize : integer, default None
Return JsonReader object for iteration.
See the `line-delimited json docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_
for more information on ``chunksize``.
This can only be passed if `lines=True`.
If this is None, the file will be read into memory all at once.
.. versionadded:: 0.21.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buf is a string ending in
'.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.21.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Notes
-----
Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
:class:`Index` name of `index` gets written with :func:`to_json`, the
subsequent read operation will incorrectly set the :class:`Index` name to
``None``. This is because `index` is also used by :func:`DataFrame.to_json`
to denote a missing :class:`Index` name, and the subsequent
:func:`read_json` operation cannot distinguish between the two. The same
limitation is encountered with a :class:`MultiIndex` and any names
beginning with ``'level_'``.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a DataFrame using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
if orient == 'table' and dtype:
raise ValueError("cannot pass both dtype and orient='table'")
if orient == 'table' and convert_axes:
raise ValueError("cannot pass both convert_axes and orient='table'")
if dtype is None and orient != 'table':
dtype = True
if convert_axes is None and orient != 'table':
convert_axes = True
compression = _infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression,
)
json_reader = JsonReader(
filepath_or_buffer, orient=orient, typ=typ, dtype=dtype,
convert_axes=convert_axes, convert_dates=convert_dates,
keep_default_dates=keep_default_dates, numpy=numpy,
precise_float=precise_float, date_unit=date_unit, encoding=encoding,
lines=lines, chunksize=chunksize, compression=compression,
)
if chunksize:
return json_reader
result = json_reader.read()
if should_close:
try:
filepath_or_buffer.close()
except: # noqa: flake8
pass
return result
|
python
|
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None,
convert_axes=None, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None, encoding=None,
lines=False, chunksize=None, compression='infer'):
"""
Convert a JSON string to pandas object.
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values', 'table'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
.. versionadded:: 0.23.0
'table' as an allowed value for the ``orient`` argument
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default None
If True, infer dtypes; if a dict of column to dtype, then use those;
if False, then don't infer dtypes at all, applies only to the data.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_axes : boolean, default None
Try to convert the axes to the proper dtypes.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_dates : boolean, default True
List of columns to parse for dates; if True, then try to parse
datelike columns. A column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
chunksize : integer, default None
Return JsonReader object for iteration.
See the `line-delimited json docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_
for more information on ``chunksize``.
This can only be passed if `lines=True`.
If this is None, the file will be read into memory all at once.
.. versionadded:: 0.21.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buf is a string ending in
'.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.21.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Notes
-----
Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
:class:`Index` name of `index` gets written with :func:`to_json`, the
subsequent read operation will incorrectly set the :class:`Index` name to
``None``. This is because `index` is also used by :func:`DataFrame.to_json`
to denote a missing :class:`Index` name, and the subsequent
:func:`read_json` operation cannot distinguish between the two. The same
limitation is encountered with a :class:`MultiIndex` and any names
beginning with ``'level_'``.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a DataFrame using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
if orient == 'table' and dtype:
raise ValueError("cannot pass both dtype and orient='table'")
if orient == 'table' and convert_axes:
raise ValueError("cannot pass both convert_axes and orient='table'")
if dtype is None and orient != 'table':
dtype = True
if convert_axes is None and orient != 'table':
convert_axes = True
compression = _infer_compression(path_or_buf, compression)
filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(
path_or_buf, encoding=encoding, compression=compression,
)
json_reader = JsonReader(
filepath_or_buffer, orient=orient, typ=typ, dtype=dtype,
convert_axes=convert_axes, convert_dates=convert_dates,
keep_default_dates=keep_default_dates, numpy=numpy,
precise_float=precise_float, date_unit=date_unit, encoding=encoding,
lines=lines, chunksize=chunksize, compression=compression,
)
if chunksize:
return json_reader
result = json_reader.read()
if should_close:
try:
filepath_or_buffer.close()
except: # noqa: flake8
pass
return result
|
[
"def",
"read_json",
"(",
"path_or_buf",
"=",
"None",
",",
"orient",
"=",
"None",
",",
"typ",
"=",
"'frame'",
",",
"dtype",
"=",
"None",
",",
"convert_axes",
"=",
"None",
",",
"convert_dates",
"=",
"True",
",",
"keep_default_dates",
"=",
"True",
",",
"numpy",
"=",
"False",
",",
"precise_float",
"=",
"False",
",",
"date_unit",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"lines",
"=",
"False",
",",
"chunksize",
"=",
"None",
",",
"compression",
"=",
"'infer'",
")",
":",
"if",
"orient",
"==",
"'table'",
"and",
"dtype",
":",
"raise",
"ValueError",
"(",
"\"cannot pass both dtype and orient='table'\"",
")",
"if",
"orient",
"==",
"'table'",
"and",
"convert_axes",
":",
"raise",
"ValueError",
"(",
"\"cannot pass both convert_axes and orient='table'\"",
")",
"if",
"dtype",
"is",
"None",
"and",
"orient",
"!=",
"'table'",
":",
"dtype",
"=",
"True",
"if",
"convert_axes",
"is",
"None",
"and",
"orient",
"!=",
"'table'",
":",
"convert_axes",
"=",
"True",
"compression",
"=",
"_infer_compression",
"(",
"path_or_buf",
",",
"compression",
")",
"filepath_or_buffer",
",",
"_",
",",
"compression",
",",
"should_close",
"=",
"get_filepath_or_buffer",
"(",
"path_or_buf",
",",
"encoding",
"=",
"encoding",
",",
"compression",
"=",
"compression",
",",
")",
"json_reader",
"=",
"JsonReader",
"(",
"filepath_or_buffer",
",",
"orient",
"=",
"orient",
",",
"typ",
"=",
"typ",
",",
"dtype",
"=",
"dtype",
",",
"convert_axes",
"=",
"convert_axes",
",",
"convert_dates",
"=",
"convert_dates",
",",
"keep_default_dates",
"=",
"keep_default_dates",
",",
"numpy",
"=",
"numpy",
",",
"precise_float",
"=",
"precise_float",
",",
"date_unit",
"=",
"date_unit",
",",
"encoding",
"=",
"encoding",
",",
"lines",
"=",
"lines",
",",
"chunksize",
"=",
"chunksize",
",",
"compression",
"=",
"compression",
",",
")",
"if",
"chunksize",
":",
"return",
"json_reader",
"result",
"=",
"json_reader",
".",
"read",
"(",
")",
"if",
"should_close",
":",
"try",
":",
"filepath_or_buffer",
".",
"close",
"(",
")",
"except",
":",
"# noqa: flake8",
"pass",
"return",
"result"
] |
Convert a JSON string to pandas object.
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be ``file://localhost/path/to/table.json``
orient : string,
Indication of expected JSON string format.
Compatible JSON strings can be produced by ``to_json()`` with a
corresponding orient value.
The set of possible orients is:
- ``'split'`` : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- ``'records'`` : list like
``[{column -> value}, ... , {column -> value}]``
- ``'index'`` : dict like ``{index -> {column -> value}}``
- ``'columns'`` : dict like ``{column -> {index -> value}}``
- ``'values'`` : just the values array
The allowed and default values depend on the value
of the `typ` parameter.
* when ``typ == 'series'``,
- allowed orients are ``{'split','records','index'}``
- default is ``'index'``
- The Series index must be unique for orient ``'index'``.
* when ``typ == 'frame'``,
- allowed orients are ``{'split','records','index',
'columns','values', 'table'}``
- default is ``'columns'``
- The DataFrame index must be unique for orients ``'index'`` and
``'columns'``.
- The DataFrame columns must be unique for orients ``'index'``,
``'columns'``, and ``'records'``.
.. versionadded:: 0.23.0
'table' as an allowed value for the ``orient`` argument
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default None
If True, infer dtypes; if a dict of column to dtype, then use those;
if False, then don't infer dtypes at all, applies only to the data.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_axes : boolean, default None
Try to convert the axes to the proper dtypes.
For all ``orient`` values except ``'table'``, default is True.
.. versionchanged:: 0.25.0
Not applicable for ``orient='table'``.
convert_dates : boolean, default True
List of columns to parse for dates; if True, then try to parse
datelike columns. A column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
encoding : str, default is 'utf-8'
The encoding to use to decode py3 bytes.
.. versionadded:: 0.19.0
lines : boolean, default False
Read the file as a json object per line.
.. versionadded:: 0.19.0
chunksize : integer, default None
Return JsonReader object for iteration.
See the `line-delimited json docs
<http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_
for more information on ``chunksize``.
This can only be passed if `lines=True`.
If this is None, the file will be read into memory all at once.
.. versionadded:: 0.21.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buf is a string ending in
'.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
.. versionadded:: 0.21.0
Returns
-------
result : Series or DataFrame, depending on the value of `typ`.
See Also
--------
DataFrame.to_json
Notes
-----
Specific to ``orient='table'``, if a :class:`DataFrame` with a literal
:class:`Index` name of `index` gets written with :func:`to_json`, the
subsequent read operation will incorrectly set the :class:`Index` name to
``None``. This is because `index` is also used by :func:`DataFrame.to_json`
to denote a missing :class:`Index` name, and the subsequent
:func:`read_json` operation cannot distinguish between the two. The same
limitation is encountered with a :class:`MultiIndex` and any names
beginning with ``'level_'``.
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
Encoding/decoding a DataFrame using ``'split'`` formatted JSON:
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
>>> pd.read_json(_, orient='split')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
>>> pd.read_json(_, orient='index')
col 1 col 2
row 1 a b
row 2 c d
Encoding/decoding a DataFrame using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> pd.read_json(_, orient='records')
col 1 col 2
0 a b
1 c d
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
|
[
"Convert",
"a",
"JSON",
"string",
"to",
"pandas",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L222-L450
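One combination the examples above do not show is line-delimited input read in chunks; a short sketch (the file name is illustrative):

import pandas as pd

# data.jsonl holds one JSON object per line, e.g. {"col 1": "a", "col 2": "b"}
reader = pd.read_json('data.jsonl', lines=True, chunksize=2)
for chunk in reader:  # each chunk is a DataFrame of up to 2 rows
    print(chunk.shape)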
|
20,436
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
FrameWriter._format_axes
|
def _format_axes(self):
"""
Try to format axes if they are datelike.
"""
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'{orient}'.".format(orient=self.orient))
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'{orient}'.".format(orient=self.orient))
|
python
|
def _format_axes(self):
"""
Try to format axes if they are datelike.
"""
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'{orient}'.".format(orient=self.orient))
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'{orient}'.".format(orient=self.orient))
|
[
"def",
"_format_axes",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"obj",
".",
"index",
".",
"is_unique",
"and",
"self",
".",
"orient",
"in",
"(",
"'index'",
",",
"'columns'",
")",
":",
"raise",
"ValueError",
"(",
"\"DataFrame index must be unique for orient=\"",
"\"'{orient}'.\"",
".",
"format",
"(",
"orient",
"=",
"self",
".",
"orient",
")",
")",
"if",
"not",
"self",
".",
"obj",
".",
"columns",
".",
"is_unique",
"and",
"self",
".",
"orient",
"in",
"(",
"'index'",
",",
"'columns'",
",",
"'records'",
")",
":",
"raise",
"ValueError",
"(",
"\"DataFrame columns must be unique for orient=\"",
"\"'{orient}'.\"",
".",
"format",
"(",
"orient",
"=",
"self",
".",
"orient",
")",
")"
] |
Try to format axes if they are datelike.
|
[
"Try",
"to",
"format",
"axes",
"if",
"they",
"are",
"datelike",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L138-L149
|
20,437
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
JsonReader._combine_lines
|
def _combine_lines(self, lines):
"""
Combines a list of JSON objects into one JSON object.
"""
lines = filter(None, map(lambda x: x.strip(), lines))
return '[' + ','.join(lines) + ']'
|
python
|
def _combine_lines(self, lines):
"""
Combines a list of JSON objects into one JSON object.
"""
lines = filter(None, map(lambda x: x.strip(), lines))
return '[' + ','.join(lines) + ']'
|
[
"def",
"_combine_lines",
"(",
"self",
",",
"lines",
")",
":",
"lines",
"=",
"filter",
"(",
"None",
",",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
")",
",",
"lines",
")",
")",
"return",
"'['",
"+",
"','",
".",
"join",
"(",
"lines",
")",
"+",
"']'"
] |
Combines a list of JSON objects into one JSON object.
|
[
"Combines",
"a",
"list",
"of",
"JSON",
"objects",
"into",
"one",
"JSON",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L534-L539
|
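A standalone sketch of the same idea, showing how blank lines are dropped before the pieces are wrapped into one JSON array:

def combine_lines(lines):
    # Strip whitespace, drop empty strings, then join into a JSON array.
    lines = filter(None, (x.strip() for x in lines))
    return '[' + ','.join(lines) + ']'

print(combine_lines(['{"a": 1}', '', '{"a": 2}']))
# [{"a": 1},{"a": 2}]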
20,438
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
JsonReader.read
|
def read(self):
"""
Read the whole JSON input into a pandas object.
"""
if self.lines and self.chunksize:
obj = concat(self)
elif self.lines:
data = to_str(self.data)
obj = self._get_object_parser(
self._combine_lines(data.split('\n'))
)
else:
obj = self._get_object_parser(self.data)
self.close()
return obj
|
python
|
def read(self):
"""
Read the whole JSON input into a pandas object.
"""
if self.lines and self.chunksize:
obj = concat(self)
elif self.lines:
data = to_str(self.data)
obj = self._get_object_parser(
self._combine_lines(data.split('\n'))
)
else:
obj = self._get_object_parser(self.data)
self.close()
return obj
|
[
"def",
"read",
"(",
"self",
")",
":",
"if",
"self",
".",
"lines",
"and",
"self",
".",
"chunksize",
":",
"obj",
"=",
"concat",
"(",
"self",
")",
"elif",
"self",
".",
"lines",
":",
"data",
"=",
"to_str",
"(",
"self",
".",
"data",
")",
"obj",
"=",
"self",
".",
"_get_object_parser",
"(",
"self",
".",
"_combine_lines",
"(",
"data",
".",
"split",
"(",
"'\\n'",
")",
")",
")",
"else",
":",
"obj",
"=",
"self",
".",
"_get_object_parser",
"(",
"self",
".",
"data",
")",
"self",
".",
"close",
"(",
")",
"return",
"obj"
] |
Read the whole JSON input into a pandas object.
|
[
"Read",
"the",
"whole",
"JSON",
"input",
"into",
"a",
"pandas",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L541-L556
|
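Both branches are reachable through the public ``pd.read_json``; a sketch of whole-file versus chunked reading (``chunksize`` requires ``lines=True``):

import pandas as pd
from io import StringIO

data = '{"a": 1}\n{"a": 2}\n{"a": 3}\n'

# lines=True without chunksize: one DataFrame for the whole input
print(len(pd.read_json(StringIO(data), lines=True)))  # 3

# lines=True with chunksize: a JsonReader yielding DataFrames
for chunk in pd.read_json(StringIO(data), lines=True, chunksize=2):
    print(chunk.shape)  # (2, 1) then (1, 1)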
20,439
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
JsonReader._get_object_parser
|
def _get_object_parser(self, json):
"""
Parses a json document into a pandas object.
"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient, "dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
"precise_float": self.precise_float, "date_unit": self.date_unit
}
obj = None
if typ == 'frame':
obj = FrameParser(json, **kwargs).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
kwargs['dtype'] = dtype
obj = SeriesParser(json, **kwargs).parse()
return obj
|
python
|
def _get_object_parser(self, json):
"""
Parses a json document into a pandas object.
"""
typ = self.typ
dtype = self.dtype
kwargs = {
"orient": self.orient, "dtype": self.dtype,
"convert_axes": self.convert_axes,
"convert_dates": self.convert_dates,
"keep_default_dates": self.keep_default_dates, "numpy": self.numpy,
"precise_float": self.precise_float, "date_unit": self.date_unit
}
obj = None
if typ == 'frame':
obj = FrameParser(json, **kwargs).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
kwargs['dtype'] = dtype
obj = SeriesParser(json, **kwargs).parse()
return obj
|
[
"def",
"_get_object_parser",
"(",
"self",
",",
"json",
")",
":",
"typ",
"=",
"self",
".",
"typ",
"dtype",
"=",
"self",
".",
"dtype",
"kwargs",
"=",
"{",
"\"orient\"",
":",
"self",
".",
"orient",
",",
"\"dtype\"",
":",
"self",
".",
"dtype",
",",
"\"convert_axes\"",
":",
"self",
".",
"convert_axes",
",",
"\"convert_dates\"",
":",
"self",
".",
"convert_dates",
",",
"\"keep_default_dates\"",
":",
"self",
".",
"keep_default_dates",
",",
"\"numpy\"",
":",
"self",
".",
"numpy",
",",
"\"precise_float\"",
":",
"self",
".",
"precise_float",
",",
"\"date_unit\"",
":",
"self",
".",
"date_unit",
"}",
"obj",
"=",
"None",
"if",
"typ",
"==",
"'frame'",
":",
"obj",
"=",
"FrameParser",
"(",
"json",
",",
"*",
"*",
"kwargs",
")",
".",
"parse",
"(",
")",
"if",
"typ",
"==",
"'series'",
"or",
"obj",
"is",
"None",
":",
"if",
"not",
"isinstance",
"(",
"dtype",
",",
"bool",
")",
":",
"kwargs",
"[",
"'dtype'",
"]",
"=",
"dtype",
"obj",
"=",
"SeriesParser",
"(",
"json",
",",
"*",
"*",
"kwargs",
")",
".",
"parse",
"(",
")",
"return",
"obj"
] |
Parses a json document into a pandas object.
|
[
"Parses",
"a",
"json",
"document",
"into",
"a",
"pandas",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L558-L580
|
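From the caller's side ``typ`` picks the parser, with the series parser as fallback when the frame parse yields ``None``; a small usage sketch:

import pandas as pd
from io import StringIO

ser = pd.read_json(StringIO('{"a": 1, "b": 2}'), typ='series')
print(type(ser).__name__)  # Series

df = pd.read_json(StringIO('{"col": {"row": 1}}'), typ='frame')
print(type(df).__name__)   # DataFrame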
20,440
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
Parser.check_keys_split
|
def check_keys_split(self, decoded):
"""
Checks that dict has only the appropriate keys for orient='split'.
"""
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError("JSON data had unexpected key(s): {bad_keys}"
.format(bad_keys=pprint_thing(bad_keys)))
|
python
|
def check_keys_split(self, decoded):
"""
Checks that dict has only the appropriate keys for orient='split'.
"""
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError("JSON data had unexpected key(s): {bad_keys}"
.format(bad_keys=pprint_thing(bad_keys)))
|
[
"def",
"check_keys_split",
"(",
"self",
",",
"decoded",
")",
":",
"bad_keys",
"=",
"set",
"(",
"decoded",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"set",
"(",
"self",
".",
"_split_keys",
")",
")",
"if",
"bad_keys",
":",
"bad_keys",
"=",
"\", \"",
".",
"join",
"(",
"bad_keys",
")",
"raise",
"ValueError",
"(",
"\"JSON data had unexpected key(s): {bad_keys}\"",
".",
"format",
"(",
"bad_keys",
"=",
"pprint_thing",
"(",
"bad_keys",
")",
")",
")"
] |
Checks that dict has only the appropriate keys for orient='split'.
|
[
"Checks",
"that",
"dict",
"has",
"only",
"the",
"appropriate",
"keys",
"for",
"orient",
"=",
"split",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L651-L659
|
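A self-contained sketch of the check, assuming the expected keys are ``columns``, ``index`` and ``data`` (the concrete ``_split_keys`` differ per parser class):

def check_keys_split(decoded, split_keys=('columns', 'index', 'data')):
    # Any key outside the expected set makes the payload invalid.
    bad_keys = set(decoded) - set(split_keys)
    if bad_keys:
        raise ValueError("JSON data had unexpected key(s): "
                         + ", ".join(sorted(bad_keys)))

check_keys_split({'columns': [], 'index': [], 'data': []})  # passes silently
# check_keys_split({'columns': [], 'oops': 1})  # would raise ValueError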
20,441
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
Parser._convert_axes
|
def _convert_axes(self):
"""
Try to convert axes.
"""
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
|
python
|
def _convert_axes(self):
"""
Try to convert axes.
"""
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
|
[
"def",
"_convert_axes",
"(",
"self",
")",
":",
"for",
"axis",
"in",
"self",
".",
"obj",
".",
"_AXIS_NUMBERS",
".",
"keys",
"(",
")",
":",
"new_axis",
",",
"result",
"=",
"self",
".",
"_try_convert_data",
"(",
"axis",
",",
"self",
".",
"obj",
".",
"_get_axis",
"(",
"axis",
")",
",",
"use_dtypes",
"=",
"False",
",",
"convert_dates",
"=",
"True",
")",
"if",
"result",
":",
"setattr",
"(",
"self",
".",
"obj",
",",
"axis",
",",
"new_axis",
")"
] |
Try to convert axes.
|
[
"Try",
"to",
"convert",
"axes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L678-L687
|
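A self-contained sketch of the try-and-replace pattern, with ``pd.to_datetime`` standing in for the multi-step ``_try_convert_data``:

import pandas as pd

def try_convert_datetime_axis(axis):
    # Return (converted, True) on success, (original, False) otherwise.
    try:
        return pd.to_datetime(axis), True
    except (ValueError, TypeError):
        return axis, False

df = pd.DataFrame({'x': [1, 2]}, index=['2013-01-01', '2013-01-02'])
new_index, ok = try_convert_datetime_axis(df.index)
if ok:
    df.index = new_index
print(df.index.dtype)  # datetime64[ns]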
20,442
|
pandas-dev/pandas
|
pandas/io/json/json.py
|
FrameParser._process_converter
|
def _process_converter(self, f, filt=None):
"""
Take a conversion function and possibly recreate the frame.
"""
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
|
python
|
def _process_converter(self, f, filt=None):
"""
Take a conversion function and possibly recreate the frame.
"""
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
|
[
"def",
"_process_converter",
"(",
"self",
",",
"f",
",",
"filt",
"=",
"None",
")",
":",
"if",
"filt",
"is",
"None",
":",
"filt",
"=",
"lambda",
"col",
",",
"c",
":",
"True",
"needs_new_obj",
"=",
"False",
"new_obj",
"=",
"dict",
"(",
")",
"for",
"i",
",",
"(",
"col",
",",
"c",
")",
"in",
"enumerate",
"(",
"self",
".",
"obj",
".",
"iteritems",
"(",
")",
")",
":",
"if",
"filt",
"(",
"col",
",",
"c",
")",
":",
"new_data",
",",
"result",
"=",
"f",
"(",
"col",
",",
"c",
")",
"if",
"result",
":",
"c",
"=",
"new_data",
"needs_new_obj",
"=",
"True",
"new_obj",
"[",
"i",
"]",
"=",
"c",
"if",
"needs_new_obj",
":",
"# possibly handle dup columns",
"new_obj",
"=",
"DataFrame",
"(",
"new_obj",
",",
"index",
"=",
"self",
".",
"obj",
".",
"index",
")",
"new_obj",
".",
"columns",
"=",
"self",
".",
"obj",
".",
"columns",
"self",
".",
"obj",
"=",
"new_obj"
] |
Take a conversion function and possibly recreate the frame.
|
[
"Take",
"a",
"conversion",
"function",
"and",
"possibly",
"recreate",
"the",
"frame",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/json/json.py#L904-L927
|
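A sketch of the per-column pattern with a hypothetical numeric converter; columns are keyed by position ``i`` so duplicate labels survive, and the original labels are restored at the end (``items`` is the current spelling of ``iteritems``):

import pandas as pd

def to_numeric_converter(col, values):
    # Hypothetical stand-in for f: returns (new_data, succeeded).
    try:
        return pd.to_numeric(values), True
    except (ValueError, TypeError):
        return values, False

df = pd.DataFrame({'a': ['1', '2'], 'b': ['x', 'y']})
cols = df.columns
new_obj, changed = {}, False
for i, (col, values) in enumerate(df.items()):
    new_data, ok = to_numeric_converter(col, values)
    if ok:
        values, changed = new_data, True
    new_obj[i] = values
if changed:
    df = pd.DataFrame(new_obj, index=df.index)
    df.columns = cols  # restore the (possibly duplicated) labels
print(df.dtypes)  # a: int64, b: object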
20,443
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
format_array
|
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right', decimal='.',
leading_space=None):
"""
Format an array for printing.
Parameters
----------
values
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional
Whether the array should be formatted with a leading space.
When the array is a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._format_native_types), we don't want the
leading space since it should be left-aligned.
Returns
-------
List[str]
"""
if is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_datetime64tz_dtype(values):
fmt_klass = Datetime64TZFormatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
elif is_extension_array_dtype(values.dtype):
fmt_klass = ExtensionArrayFormatter
elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format, formatter=formatter,
space=space, justify=justify, decimal=decimal,
leading_space=leading_space)
return fmt_obj.get_result()
|
python
|
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right', decimal='.',
leading_space=None):
"""
Format an array for printing.
Parameters
----------
values
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional
Whether the array should be formatted with a leading space.
When the array is a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._format_native_types), we don't want the
leading space since it should be left-aligned.
Returns
-------
List[str]
"""
if is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_datetime64tz_dtype(values):
fmt_klass = Datetime64TZFormatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
elif is_extension_array_dtype(values.dtype):
fmt_klass = ExtensionArrayFormatter
elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format, formatter=formatter,
space=space, justify=justify, decimal=decimal,
leading_space=leading_space)
return fmt_obj.get_result()
|
[
"def",
"format_array",
"(",
"values",
",",
"formatter",
",",
"float_format",
"=",
"None",
",",
"na_rep",
"=",
"'NaN'",
",",
"digits",
"=",
"None",
",",
"space",
"=",
"None",
",",
"justify",
"=",
"'right'",
",",
"decimal",
"=",
"'.'",
",",
"leading_space",
"=",
"None",
")",
":",
"if",
"is_datetime64_dtype",
"(",
"values",
".",
"dtype",
")",
":",
"fmt_klass",
"=",
"Datetime64Formatter",
"elif",
"is_datetime64tz_dtype",
"(",
"values",
")",
":",
"fmt_klass",
"=",
"Datetime64TZFormatter",
"elif",
"is_timedelta64_dtype",
"(",
"values",
".",
"dtype",
")",
":",
"fmt_klass",
"=",
"Timedelta64Formatter",
"elif",
"is_extension_array_dtype",
"(",
"values",
".",
"dtype",
")",
":",
"fmt_klass",
"=",
"ExtensionArrayFormatter",
"elif",
"is_float_dtype",
"(",
"values",
".",
"dtype",
")",
"or",
"is_complex_dtype",
"(",
"values",
".",
"dtype",
")",
":",
"fmt_klass",
"=",
"FloatArrayFormatter",
"elif",
"is_integer_dtype",
"(",
"values",
".",
"dtype",
")",
":",
"fmt_klass",
"=",
"IntArrayFormatter",
"else",
":",
"fmt_klass",
"=",
"GenericArrayFormatter",
"if",
"space",
"is",
"None",
":",
"space",
"=",
"get_option",
"(",
"\"display.column_space\"",
")",
"if",
"float_format",
"is",
"None",
":",
"float_format",
"=",
"get_option",
"(",
"\"display.float_format\"",
")",
"if",
"digits",
"is",
"None",
":",
"digits",
"=",
"get_option",
"(",
"\"display.precision\"",
")",
"fmt_obj",
"=",
"fmt_klass",
"(",
"values",
",",
"digits",
"=",
"digits",
",",
"na_rep",
"=",
"na_rep",
",",
"float_format",
"=",
"float_format",
",",
"formatter",
"=",
"formatter",
",",
"space",
"=",
"space",
",",
"justify",
"=",
"justify",
",",
"decimal",
"=",
"decimal",
",",
"leading_space",
"=",
"leading_space",
")",
"return",
"fmt_obj",
".",
"get_result",
"(",
")"
] |
Format an array for printing.
Parameters
----------
values
formatter
float_format
na_rep
digits
space
justify
decimal
leading_space : bool, optional
Whether the array should be formatted with a leading space.
When the array is a column of a Series or DataFrame, we do want
the leading space to pad between columns.
When formatting an Index subclass
(e.g. IntervalIndex._format_native_types), we don't want the
leading space since it should be left-aligned.
Returns
-------
List[str]
|
[
"Format",
"an",
"array",
"for",
"printing",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L853-L912
|
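A minimal sketch of the dtype-to-formatter dispatch alone, using plain NumPy predicates in place of the pandas introspection helpers:

import numpy as np

def pick_formatter(values):
    # First matching dtype predicate wins; generic is the fallback.
    dt = values.dtype
    if np.issubdtype(dt, np.datetime64):
        return 'Datetime64Formatter'
    if np.issubdtype(dt, np.timedelta64):
        return 'Timedelta64Formatter'
    if np.issubdtype(dt, np.floating) or np.issubdtype(dt, np.complexfloating):
        return 'FloatArrayFormatter'
    if np.issubdtype(dt, np.integer):
        return 'IntArrayFormatter'
    return 'GenericArrayFormatter'

print(pick_formatter(np.array([1.5, 2.5])))           # FloatArrayFormatter
print(pick_formatter(np.array(['x'], dtype=object)))  # GenericArrayFormatter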
20,444
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
format_percentiles
|
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid='ignore'):
if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \
or not np.all(percentiles <= 1):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = (percentiles.astype(int) == percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + '%' for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(np.log10(np.min(
np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
))).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + '%' for i in out]
|
python
|
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid='ignore'):
if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \
or not np.all(percentiles <= 1):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = (percentiles.astype(int) == percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + '%' for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(np.log10(np.min(
np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
))).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + '%' for i in out]
|
[
"def",
"format_percentiles",
"(",
"percentiles",
")",
":",
"percentiles",
"=",
"np",
".",
"asarray",
"(",
"percentiles",
")",
"# It checks for np.NaN as well",
"with",
"np",
".",
"errstate",
"(",
"invalid",
"=",
"'ignore'",
")",
":",
"if",
"not",
"is_numeric_dtype",
"(",
"percentiles",
")",
"or",
"not",
"np",
".",
"all",
"(",
"percentiles",
">=",
"0",
")",
"or",
"not",
"np",
".",
"all",
"(",
"percentiles",
"<=",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"percentiles should all be in the interval [0,1]\"",
")",
"percentiles",
"=",
"100",
"*",
"percentiles",
"int_idx",
"=",
"(",
"percentiles",
".",
"astype",
"(",
"int",
")",
"==",
"percentiles",
")",
"if",
"np",
".",
"all",
"(",
"int_idx",
")",
":",
"out",
"=",
"percentiles",
".",
"astype",
"(",
"int",
")",
".",
"astype",
"(",
"str",
")",
"return",
"[",
"i",
"+",
"'%'",
"for",
"i",
"in",
"out",
"]",
"unique_pcts",
"=",
"np",
".",
"unique",
"(",
"percentiles",
")",
"to_begin",
"=",
"unique_pcts",
"[",
"0",
"]",
"if",
"unique_pcts",
"[",
"0",
"]",
">",
"0",
"else",
"None",
"to_end",
"=",
"100",
"-",
"unique_pcts",
"[",
"-",
"1",
"]",
"if",
"unique_pcts",
"[",
"-",
"1",
"]",
"<",
"100",
"else",
"None",
"# Least precision that keeps percentiles unique after rounding",
"prec",
"=",
"-",
"np",
".",
"floor",
"(",
"np",
".",
"log10",
"(",
"np",
".",
"min",
"(",
"np",
".",
"ediff1d",
"(",
"unique_pcts",
",",
"to_begin",
"=",
"to_begin",
",",
"to_end",
"=",
"to_end",
")",
")",
")",
")",
".",
"astype",
"(",
"int",
")",
"prec",
"=",
"max",
"(",
"1",
",",
"prec",
")",
"out",
"=",
"np",
".",
"empty_like",
"(",
"percentiles",
",",
"dtype",
"=",
"object",
")",
"out",
"[",
"int_idx",
"]",
"=",
"percentiles",
"[",
"int_idx",
"]",
".",
"astype",
"(",
"int",
")",
".",
"astype",
"(",
"str",
")",
"out",
"[",
"~",
"int_idx",
"]",
"=",
"percentiles",
"[",
"~",
"int_idx",
"]",
".",
"round",
"(",
"prec",
")",
".",
"astype",
"(",
"str",
")",
"return",
"[",
"i",
"+",
"'%'",
"for",
"i",
"in",
"out",
"]"
] |
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
|
[
"Outputs",
"rounded",
"and",
"formatted",
"percentiles",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1208-L1268
|
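A worked check of the precision rule from the Notes: the smallest gap between adjacent scaled percentiles drives how many decimals survive rounding.

import numpy as np

pcts = np.array([0.01999, 0.02001, 0.5]) * 100  # [1.999, 2.001, 50.0]
gaps = np.ediff1d(np.unique(pcts),
                  to_begin=pcts.min(), to_end=100 - pcts.max())
prec = max(1, int(-np.floor(np.log10(gaps.min()))))
print(prec)  # 3, matching the output ['1.999%', '2.001%', '50%']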
20,445
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
_get_format_timedelta64
|
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(
consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = None
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{res}'".format(res=result)
return result
return _formatter
|
python
|
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(
consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = None
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{res}'".format(res=result)
return result
return _formatter
|
[
"def",
"_get_format_timedelta64",
"(",
"values",
",",
"nat_rep",
"=",
"'NaT'",
",",
"box",
"=",
"False",
")",
":",
"values_int",
"=",
"values",
".",
"astype",
"(",
"np",
".",
"int64",
")",
"consider_values",
"=",
"values_int",
"!=",
"iNaT",
"one_day_nanos",
"=",
"(",
"86400",
"*",
"1e9",
")",
"even_days",
"=",
"np",
".",
"logical_and",
"(",
"consider_values",
",",
"values_int",
"%",
"one_day_nanos",
"!=",
"0",
")",
".",
"sum",
"(",
")",
"==",
"0",
"all_sub_day",
"=",
"np",
".",
"logical_and",
"(",
"consider_values",
",",
"np",
".",
"abs",
"(",
"values_int",
")",
">=",
"one_day_nanos",
")",
".",
"sum",
"(",
")",
"==",
"0",
"if",
"even_days",
":",
"format",
"=",
"None",
"elif",
"all_sub_day",
":",
"format",
"=",
"'sub_day'",
"else",
":",
"format",
"=",
"'long'",
"def",
"_formatter",
"(",
"x",
")",
":",
"if",
"x",
"is",
"None",
"or",
"(",
"is_scalar",
"(",
"x",
")",
"and",
"isna",
"(",
"x",
")",
")",
":",
"return",
"nat_rep",
"if",
"not",
"isinstance",
"(",
"x",
",",
"Timedelta",
")",
":",
"x",
"=",
"Timedelta",
"(",
"x",
")",
"result",
"=",
"x",
".",
"_repr_base",
"(",
"format",
"=",
"format",
")",
"if",
"box",
":",
"result",
"=",
"\"'{res}'\"",
".",
"format",
"(",
"res",
"=",
"result",
")",
"return",
"result",
"return",
"_formatter"
] |
Return a formatter function for a range of timedeltas.
These will all have the same format argument
If box, then show the return in quotes
|
[
"Return",
"a",
"formatter",
"function",
"for",
"a",
"range",
"of",
"timedeltas",
".",
"These",
"will",
"all",
"have",
"the",
"same",
"format",
"argument"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1360-L1396
|
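A standalone sketch of the three-way format choice (NaT masking omitted); inputs are int64 nanoseconds:

import numpy as np

ONE_DAY_NS = 86400 * 10**9

def choose_format(values_ns):
    vals = np.asarray(values_ns, dtype=np.int64)
    if not (vals % ONE_DAY_NS).any():
        return None        # whole days only, e.g. '2 days'
    if (np.abs(vals) < ONE_DAY_NS).all():
        return 'sub_day'   # e.g. '01:30:00'
    return 'long'          # e.g. '1 days 01:30:00'

print(choose_format([2 * ONE_DAY_NS]))             # None
print(choose_format([5400 * 10**9]))               # sub_day
print(choose_format([ONE_DAY_NS + 5400 * 10**9]))  # long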
20,446
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
_trim_zeros_complex
|
def _trim_zeros_complex(str_complexes, na_rep='NaN'):
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
def separate_and_trim(str_complex, na_rep):
num_arr = str_complex.split('+')
return (_trim_zeros_float([num_arr[0]], na_rep) +
['+'] +
_trim_zeros_float([num_arr[1][:-1]], na_rep) +
['j'])
return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes]
|
python
|
def _trim_zeros_complex(str_complexes, na_rep='NaN'):
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
def separate_and_trim(str_complex, na_rep):
num_arr = str_complex.split('+')
return (_trim_zeros_float([num_arr[0]], na_rep) +
['+'] +
_trim_zeros_float([num_arr[1][:-1]], na_rep) +
['j'])
return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes]
|
[
"def",
"_trim_zeros_complex",
"(",
"str_complexes",
",",
"na_rep",
"=",
"'NaN'",
")",
":",
"def",
"separate_and_trim",
"(",
"str_complex",
",",
"na_rep",
")",
":",
"num_arr",
"=",
"str_complex",
".",
"split",
"(",
"'+'",
")",
"return",
"(",
"_trim_zeros_float",
"(",
"[",
"num_arr",
"[",
"0",
"]",
"]",
",",
"na_rep",
")",
"+",
"[",
"'+'",
"]",
"+",
"_trim_zeros_float",
"(",
"[",
"num_arr",
"[",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"]",
",",
"na_rep",
")",
"+",
"[",
"'j'",
"]",
")",
"return",
"[",
"''",
".",
"join",
"(",
"separate_and_trim",
"(",
"x",
",",
"na_rep",
")",
")",
"for",
"x",
"in",
"str_complexes",
"]"
] |
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
|
[
"Separates",
"the",
"real",
"and",
"imaginary",
"parts",
"from",
"the",
"complex",
"number",
"and",
"executes",
"the",
"_trim_zeros_float",
"method",
"on",
"each",
"of",
"those",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1427-L1439
|
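A quick per-value check of the split-trim-reassemble idea (like the helper above, this assumes a '+' separator between the parts):

def trim(part):
    # Per-value trim for illustration; the real helper trims in lockstep.
    if '.' in part:
        part = part.rstrip('0')
        if part.endswith('.'):
            part += '0'
    return part

real, imag = '1.500+2.250j'.split('+')
print(trim(real) + '+' + trim(imag[:-1]) + 'j')  # 1.5+2.25j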
20,447
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
_trim_zeros_float
|
def _trim_zeros_float(str_floats, na_rep='NaN'):
"""
Trims zeros, leaving just one after the decimal point if need be.
"""
trimmed = str_floats
def _is_number(x):
return (x != na_rep and not x.endswith('inf'))
def _cond(values):
finite = [x for x in values if _is_number(x)]
return (len(finite) > 0 and all(x.endswith('0') for x in finite) and
not (any(('e' in x) or ('E' in x) for x in finite)))
while _cond(trimmed):
trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
# leave one 0 after the decimal points if need be.
return [x + "0" if x.endswith('.') and _is_number(x) else x
for x in trimmed]
|
python
|
def _trim_zeros_float(str_floats, na_rep='NaN'):
"""
Trims zeros, leaving just one after the decimal point if need be.
"""
trimmed = str_floats
def _is_number(x):
return (x != na_rep and not x.endswith('inf'))
def _cond(values):
finite = [x for x in values if _is_number(x)]
return (len(finite) > 0 and all(x.endswith('0') for x in finite) and
not (any(('e' in x) or ('E' in x) for x in finite)))
while _cond(trimmed):
trimmed = [x[:-1] if _is_number(x) else x for x in trimmed]
# leave one 0 after the decimal points if need be.
return [x + "0" if x.endswith('.') and _is_number(x) else x
for x in trimmed]
|
[
"def",
"_trim_zeros_float",
"(",
"str_floats",
",",
"na_rep",
"=",
"'NaN'",
")",
":",
"trimmed",
"=",
"str_floats",
"def",
"_is_number",
"(",
"x",
")",
":",
"return",
"(",
"x",
"!=",
"na_rep",
"and",
"not",
"x",
".",
"endswith",
"(",
"'inf'",
")",
")",
"def",
"_cond",
"(",
"values",
")",
":",
"finite",
"=",
"[",
"x",
"for",
"x",
"in",
"values",
"if",
"_is_number",
"(",
"x",
")",
"]",
"return",
"(",
"len",
"(",
"finite",
")",
">",
"0",
"and",
"all",
"(",
"x",
".",
"endswith",
"(",
"'0'",
")",
"for",
"x",
"in",
"finite",
")",
"and",
"not",
"(",
"any",
"(",
"(",
"'e'",
"in",
"x",
")",
"or",
"(",
"'E'",
"in",
"x",
")",
"for",
"x",
"in",
"finite",
")",
")",
")",
"while",
"_cond",
"(",
"trimmed",
")",
":",
"trimmed",
"=",
"[",
"x",
"[",
":",
"-",
"1",
"]",
"if",
"_is_number",
"(",
"x",
")",
"else",
"x",
"for",
"x",
"in",
"trimmed",
"]",
"# leave one 0 after the decimal points if need be.",
"return",
"[",
"x",
"+",
"\"0\"",
"if",
"x",
".",
"endswith",
"(",
"'.'",
")",
"and",
"_is_number",
"(",
"x",
")",
"else",
"x",
"for",
"x",
"in",
"trimmed",
"]"
] |
Trims zeros, leaving just one after the decimal point if need be.
|
[
"Trims",
"zeros",
"leaving",
"just",
"one",
"before",
"the",
"decimal",
"points",
"if",
"need",
"be",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1442-L1461
|
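The lockstep behaviour is the point: a trailing zero is removed only while every string still ends in one, so a column keeps a uniform number of decimals. A standalone sketch (NaN/inf filtering reduced to the exponent check):

def trim_zeros_float(strs):
    trimmed = list(strs)
    while trimmed and all(x.endswith('0') and 'e' not in x.lower()
                          for x in trimmed):
        trimmed = [x[:-1] for x in trimmed]
    return [x + '0' if x.endswith('.') else x for x in trimmed]

print(trim_zeros_float(['1.500', '2.250']))  # ['1.50', '2.25']
print(trim_zeros_float(['1.500', '2.000']))  # ['1.5', '2.0']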
20,448
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
set_eng_float_format
|
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
|
python
|
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
|
[
"def",
"set_eng_float_format",
"(",
"accuracy",
"=",
"3",
",",
"use_eng_prefix",
"=",
"False",
")",
":",
"set_option",
"(",
"\"display.float_format\"",
",",
"EngFormatter",
"(",
"accuracy",
",",
"use_eng_prefix",
")",
")",
"set_option",
"(",
"\"display.column_space\"",
",",
"max",
"(",
"12",
",",
"accuracy",
"+",
"9",
")",
")"
] |
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the floating point.
See also EngFormatter.
|
[
"Alter",
"default",
"behavior",
"on",
"how",
"float",
"is",
"formatted",
"in",
"DataFrame",
".",
"Format",
"float",
"in",
"engineering",
"format",
".",
"By",
"accuracy",
"we",
"mean",
"the",
"number",
"of",
"decimal",
"digits",
"after",
"the",
"floating",
"point",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1570-L1580
|
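Usage sketch; the formatter is installed globally through the options system, so reset both options afterwards (exact rendering may vary slightly by version):

import pandas as pd

pd.set_eng_float_format(accuracy=3, use_eng_prefix=True)
print(pd.Series([123456.789]))  # rendered like 123.457k
pd.reset_option('display.float_format')
pd.reset_option('display.column_space')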
20,449
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
get_level_lengths
|
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values for each level.
sentinel : string, optional
Value indicating that no new index starts at that position.
Returns
-------
Returns a list of dicts, one per level: each maps the row position where
an index run starts (key) to the length of that run (value).
"""
if len(levels) == 0:
return []
control = [True] * len(levels[0])
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
|
python
|
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values for each level.
sentinel : string, optional
Value indicating that no new index starts at that position.
Returns
-------
Returns a list of dicts, one per level: each maps the row position where
an index run starts (key) to the length of that run (value).
"""
if len(levels) == 0:
return []
control = [True] * len(levels[0])
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result
|
[
"def",
"get_level_lengths",
"(",
"levels",
",",
"sentinel",
"=",
"''",
")",
":",
"if",
"len",
"(",
"levels",
")",
"==",
"0",
":",
"return",
"[",
"]",
"control",
"=",
"[",
"True",
"]",
"*",
"len",
"(",
"levels",
"[",
"0",
"]",
")",
"result",
"=",
"[",
"]",
"for",
"level",
"in",
"levels",
":",
"last_index",
"=",
"0",
"lengths",
"=",
"{",
"}",
"for",
"i",
",",
"key",
"in",
"enumerate",
"(",
"level",
")",
":",
"if",
"control",
"[",
"i",
"]",
"and",
"key",
"==",
"sentinel",
":",
"pass",
"else",
":",
"control",
"[",
"i",
"]",
"=",
"False",
"lengths",
"[",
"last_index",
"]",
"=",
"i",
"-",
"last_index",
"last_index",
"=",
"i",
"lengths",
"[",
"last_index",
"]",
"=",
"len",
"(",
"level",
")",
"-",
"last_index",
"result",
".",
"append",
"(",
"lengths",
")",
"return",
"result"
] |
For each index in each level the function returns lengths of indexes.
Parameters
----------
levels : list of lists
List of values for each level.
sentinel : string, optional
Value indicating that no new index starts at that position.
Returns
-------
Returns a list of dicts, one per level: each maps the row position where
an index run starts (key) to the length of that run (value).
|
[
"For",
"each",
"index",
"in",
"each",
"level",
"the",
"function",
"returns",
"lengths",
"of",
"indexes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1603-L1640
|
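A worked example, importing the helper from its home at this commit (it is internal, so the location may move between versions):

from pandas.io.formats.format import get_level_lengths

# '' (the sentinel) means "continuation of the cell above".
levels = [['a', '', '', ''],
          ['x', '', 'y', '']]
print(get_level_lengths(levels))
# [{0: 4}, {0: 2, 2: 2}]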
20,450
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
buffer_put_lines
|
def buffer_put_lines(buf, lines):
"""
Appends lines to a buffer.
Parameters
----------
buf
The buffer to write to
lines
The lines to append.
"""
if any(isinstance(x, str) for x in lines):
lines = [str(x) for x in lines]
buf.write('\n'.join(lines))
|
python
|
def buffer_put_lines(buf, lines):
"""
Appends lines to a buffer.
Parameters
----------
buf
The buffer to write to
lines
The lines to append.
"""
if any(isinstance(x, str) for x in lines):
lines = [str(x) for x in lines]
buf.write('\n'.join(lines))
|
[
"def",
"buffer_put_lines",
"(",
"buf",
",",
"lines",
")",
":",
"if",
"any",
"(",
"isinstance",
"(",
"x",
",",
"str",
")",
"for",
"x",
"in",
"lines",
")",
":",
"lines",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"lines",
"]",
"buf",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")"
] |
Appends lines to a buffer.
Parameters
----------
buf
The buffer to write to
lines
The lines to append.
|
[
"Appends",
"lines",
"to",
"a",
"buffer",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1643-L1656
|
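A self-contained equivalent; the ``str`` coercion guards mixed lists where some entries are not strings yet:

from io import StringIO

def put_lines(buf, lines):
    if any(isinstance(x, str) for x in lines):
        lines = [str(x) for x in lines]  # coerce the non-str stragglers
    buf.write('\n'.join(lines))

buf = StringIO()
put_lines(buf, ['total:', 42])
print(buf.getvalue())
# total:
# 42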
20,451
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
EastAsianTextAdjustment.len
|
def len(self, text):
"""
Calculate display width considering unicode East Asian Width
"""
if not isinstance(text, str):
return len(text)
return sum(self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width)
for c in text)
|
python
|
def len(self, text):
"""
Calculate display width considering unicode East Asian Width
"""
if not isinstance(text, str):
return len(text)
return sum(self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width)
for c in text)
|
[
"def",
"len",
"(",
"self",
",",
"text",
")",
":",
"if",
"not",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"return",
"len",
"(",
"text",
")",
"return",
"sum",
"(",
"self",
".",
"_EAW_MAP",
".",
"get",
"(",
"east_asian_width",
"(",
"c",
")",
",",
"self",
".",
"ambiguous_width",
")",
"for",
"c",
"in",
"text",
")"
] |
Calculate display width considering unicode East Asian Width
|
[
"Calculate",
"display",
"width",
"considering",
"unicode",
"East",
"Asian",
"Width"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L322-L330
|
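A standalone sketch using ``unicodedata`` directly: wide ('W') and fullwidth ('F') characters count as two terminal cells, with ambiguous width assumed to be one:

import unicodedata

EAW_WIDTHS = {'W': 2, 'F': 2}  # everything else counts as one cell

def display_len(text):
    return sum(EAW_WIDTHS.get(unicodedata.east_asian_width(c), 1)
               for c in text)

print(len('panda'), display_len('panda'))    # 5 5
print(len('パンダ'), display_len('パンダ'))  # 3 6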
20,452
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
FloatArrayFormatter._value_formatter
|
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
|
python
|
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
|
[
"def",
"_value_formatter",
"(",
"self",
",",
"float_format",
"=",
"None",
",",
"threshold",
"=",
"None",
")",
":",
"# the float_format parameter supersedes self.float_format",
"if",
"float_format",
"is",
"None",
":",
"float_format",
"=",
"self",
".",
"float_format",
"# we are going to compose different functions, to first convert to",
"# a string, then replace the decimal symbol, and finally chop according",
"# to the threshold",
"# when there is no float_format, we use str instead of '%g'",
"# because str(0.0) = '0.0' while '%g' % 0.0 = '0'",
"if",
"float_format",
":",
"def",
"base_formatter",
"(",
"v",
")",
":",
"return",
"float_format",
"(",
"value",
"=",
"v",
")",
"if",
"notna",
"(",
"v",
")",
"else",
"self",
".",
"na_rep",
"else",
":",
"def",
"base_formatter",
"(",
"v",
")",
":",
"return",
"str",
"(",
"v",
")",
"if",
"notna",
"(",
"v",
")",
"else",
"self",
".",
"na_rep",
"if",
"self",
".",
"decimal",
"!=",
"'.'",
":",
"def",
"decimal_formatter",
"(",
"v",
")",
":",
"return",
"base_formatter",
"(",
"v",
")",
".",
"replace",
"(",
"'.'",
",",
"self",
".",
"decimal",
",",
"1",
")",
"else",
":",
"decimal_formatter",
"=",
"base_formatter",
"if",
"threshold",
"is",
"None",
":",
"return",
"decimal_formatter",
"def",
"formatter",
"(",
"value",
")",
":",
"if",
"notna",
"(",
"value",
")",
":",
"if",
"abs",
"(",
"value",
")",
">",
"threshold",
":",
"return",
"decimal_formatter",
"(",
"value",
")",
"else",
":",
"return",
"decimal_formatter",
"(",
"0.0",
")",
"else",
":",
"return",
"self",
".",
"na_rep",
"return",
"formatter"
] |
Returns a function to be applied on each value to format it
|
[
"Returns",
"a",
"function",
"to",
"be",
"applied",
"on",
"each",
"value",
"to",
"format",
"it"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1015-L1054
|
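A pared-down sketch of the composition (NA handling dropped): stringify, swap the decimal symbol, then chop values at or below the threshold to 0.0:

def make_formatter(decimal=',', threshold=1e-6):
    def formatter(v):
        v = v if abs(v) > threshold else 0.0  # chop tiny values
        return str(v).replace('.', decimal, 1)
    return formatter

fmt = make_formatter()
print(fmt(1.5), fmt(1e-9))  # 1,5 0,0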
20,453
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
FloatArrayFormatter.get_result_as_array
|
def get_result_as_array(self):
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# default formatter leaves a space to the left when formatting
# floats, must be consistent for left-justifying NaNs (GH #25061)
if self.justify == 'left':
na_rep = ' ' + self.na_rep
else:
na_rep = self.na_rep
# separate the wheat from the chaff
values = self.values
is_complex = is_complex_dtype(values)
mask = isna(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
if is_complex:
return _trim_zeros_complex(values, na_rep)
else:
return _trim_zeros_float(values, na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None:
if self.fixed_width:
float_format = partial('{value: .{digits:d}f}'.format,
digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
# we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
# large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = partial('{value: .{digits:d}e}'.format,
digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
|
python
|
def get_result_as_array(self):
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# default formatter leaves a space to the left when formatting
# floats, must be consistent for left-justifying NaNs (GH #25061)
if self.justify == 'left':
na_rep = ' ' + self.na_rep
else:
na_rep = self.na_rep
# separate the wheat from the chaff
values = self.values
is_complex = is_complex_dtype(values)
mask = isna(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
if is_complex:
return _trim_zeros_complex(values, na_rep)
else:
return _trim_zeros_float(values, na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None:
if self.fixed_width:
float_format = partial('{value: .{digits:d}f}'.format,
digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
# we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
# large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = partial('{value: .{digits:d}e}'.format,
digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
|
[
"def",
"get_result_as_array",
"(",
"self",
")",
":",
"if",
"self",
".",
"formatter",
"is",
"not",
"None",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"formatter",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"values",
"]",
")",
"if",
"self",
".",
"fixed_width",
":",
"threshold",
"=",
"get_option",
"(",
"\"display.chop_threshold\"",
")",
"else",
":",
"threshold",
"=",
"None",
"# if we have a fixed_width, we'll need to try different float_format",
"def",
"format_values_with",
"(",
"float_format",
")",
":",
"formatter",
"=",
"self",
".",
"_value_formatter",
"(",
"float_format",
",",
"threshold",
")",
"# default formatter leaves a space to the left when formatting",
"# floats, must be consistent for left-justifying NaNs (GH #25061)",
"if",
"self",
".",
"justify",
"==",
"'left'",
":",
"na_rep",
"=",
"' '",
"+",
"self",
".",
"na_rep",
"else",
":",
"na_rep",
"=",
"self",
".",
"na_rep",
"# separate the wheat from the chaff",
"values",
"=",
"self",
".",
"values",
"is_complex",
"=",
"is_complex_dtype",
"(",
"values",
")",
"mask",
"=",
"isna",
"(",
"values",
")",
"if",
"hasattr",
"(",
"values",
",",
"'to_dense'",
")",
":",
"# sparse numpy ndarray",
"values",
"=",
"values",
".",
"to_dense",
"(",
")",
"values",
"=",
"np",
".",
"array",
"(",
"values",
",",
"dtype",
"=",
"'object'",
")",
"values",
"[",
"mask",
"]",
"=",
"na_rep",
"imask",
"=",
"(",
"~",
"mask",
")",
".",
"ravel",
"(",
")",
"values",
".",
"flat",
"[",
"imask",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"formatter",
"(",
"val",
")",
"for",
"val",
"in",
"values",
".",
"ravel",
"(",
")",
"[",
"imask",
"]",
"]",
")",
"if",
"self",
".",
"fixed_width",
":",
"if",
"is_complex",
":",
"return",
"_trim_zeros_complex",
"(",
"values",
",",
"na_rep",
")",
"else",
":",
"return",
"_trim_zeros_float",
"(",
"values",
",",
"na_rep",
")",
"return",
"values",
"# There is a special default string when we are fixed-width",
"# The default is otherwise to use str instead of a formatting string",
"if",
"self",
".",
"float_format",
"is",
"None",
":",
"if",
"self",
".",
"fixed_width",
":",
"float_format",
"=",
"partial",
"(",
"'{value: .{digits:d}f}'",
".",
"format",
",",
"digits",
"=",
"self",
".",
"digits",
")",
"else",
":",
"float_format",
"=",
"self",
".",
"float_format",
"else",
":",
"float_format",
"=",
"lambda",
"value",
":",
"self",
".",
"float_format",
"%",
"value",
"formatted_values",
"=",
"format_values_with",
"(",
"float_format",
")",
"if",
"not",
"self",
".",
"fixed_width",
":",
"return",
"formatted_values",
"# we need do convert to engineering format if some values are too small",
"# and would appear as 0, or if some values are too big and take too",
"# much space",
"if",
"len",
"(",
"formatted_values",
")",
">",
"0",
":",
"maxlen",
"=",
"max",
"(",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"formatted_values",
")",
"too_long",
"=",
"maxlen",
">",
"self",
".",
"digits",
"+",
"6",
"else",
":",
"too_long",
"=",
"False",
"with",
"np",
".",
"errstate",
"(",
"invalid",
"=",
"'ignore'",
")",
":",
"abs_vals",
"=",
"np",
".",
"abs",
"(",
"self",
".",
"values",
")",
"# this is pretty arbitrary for now",
"# large values: more that 8 characters including decimal symbol",
"# and first digit, hence > 1e6",
"has_large_values",
"=",
"(",
"abs_vals",
">",
"1e6",
")",
".",
"any",
"(",
")",
"has_small_values",
"=",
"(",
"(",
"abs_vals",
"<",
"10",
"**",
"(",
"-",
"self",
".",
"digits",
")",
")",
"&",
"(",
"abs_vals",
">",
"0",
")",
")",
".",
"any",
"(",
")",
"if",
"has_small_values",
"or",
"(",
"too_long",
"and",
"has_large_values",
")",
":",
"float_format",
"=",
"partial",
"(",
"'{value: .{digits:d}e}'",
".",
"format",
",",
"digits",
"=",
"self",
".",
"digits",
")",
"formatted_values",
"=",
"format_values_with",
"(",
"float_format",
")",
"return",
"formatted_values"
] |
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
|
[
"Returns",
"the",
"float",
"values",
"converted",
"into",
"strings",
"using",
"the",
"parameters",
"given",
"at",
"initialisation",
"as",
"a",
"numpy",
"array"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1056-L1141
|
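The small-value branch is visible through the public repr: values below ``10**-digits`` would otherwise print as zeros, so the rendering falls back to scientific notation (exact widths vary by version):

import pandas as pd

print(pd.Series([0.5, 1.5]))    # plain fixed-width rendering
print(pd.Series([1e-9, 2e-9]))  # falls back to e.g. 1.000000e-09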
20,454
|
pandas-dev/pandas
|
pandas/io/formats/format.py
|
Datetime64TZFormatter._format_strings
|
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values
|
python
|
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values
|
[
"def",
"_format_strings",
"(",
"self",
")",
":",
"values",
"=",
"self",
".",
"values",
".",
"astype",
"(",
"object",
")",
"is_dates_only",
"=",
"_is_dates_only",
"(",
"values",
")",
"formatter",
"=",
"(",
"self",
".",
"formatter",
"or",
"_get_format_datetime64",
"(",
"is_dates_only",
",",
"date_format",
"=",
"self",
".",
"date_format",
")",
")",
"fmt_values",
"=",
"[",
"formatter",
"(",
"x",
")",
"for",
"x",
"in",
"values",
"]",
"return",
"fmt_values"
] |
we by definition have a TZ
|
[
"we",
"by",
"definition",
"have",
"a",
"TZ"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/format.py#L1332-L1342
|
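Seen from the outside, tz-aware values go through the element-wise object path, so each timestamp keeps its UTC offset in the rendered string:

import pandas as pd

idx = pd.date_range('2013-01-01', periods=2, freq='D', tz='US/Eastern')
print(pd.Series(idx))
# 0   2013-01-01 00:00:00-05:00
# 1   2013-01-02 00:00:00-05:00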
20,455
|
pandas-dev/pandas
|
pandas/core/indexes/interval.py
|
_get_interval_closed_bounds
|
def _get_interval_closed_bounds(interval):
"""
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
"""
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
|
python
|
def _get_interval_closed_bounds(interval):
"""
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
"""
left, right = interval.left, interval.right
if interval.open_left:
left = _get_next_label(left)
if interval.open_right:
right = _get_prev_label(right)
return left, right
|
[
"def",
"_get_interval_closed_bounds",
"(",
"interval",
")",
":",
"left",
",",
"right",
"=",
"interval",
".",
"left",
",",
"interval",
".",
"right",
"if",
"interval",
".",
"open_left",
":",
"left",
"=",
"_get_next_label",
"(",
"left",
")",
"if",
"interval",
".",
"open_right",
":",
"right",
"=",
"_get_prev_label",
"(",
"right",
")",
"return",
"left",
",",
"right"
] |
Given an Interval or IntervalIndex, return the corresponding interval with
closed bounds.
|
[
"Given",
"an",
"Interval",
"or",
"IntervalIndex",
"return",
"the",
"corresponding",
"interval",
"with",
"closed",
"bounds",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/interval.py#L79-L89
|
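A sketch of the idea for integer endpoints, where the "next label" is simply +1 (the real ``_get_next_label``/``_get_prev_label`` also handle datetimes and floats):

import pandas as pd

iv = pd.Interval(0, 5, closed='right')  # (0, 5]
left = iv.left + 1 if iv.open_left else iv.left
right = iv.right - 1 if iv.open_right else iv.right
print(left, right)  # 1 5, i.e. the closed-bounds interval [1, 5]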
20,456
|
pandas-dev/pandas
|
pandas/core/indexes/interval.py
|
interval_range
|
def interval_range(start=None, end=None, periods=None, freq=None,
name=None, closed='right'):
"""
Return a fixed frequency IntervalIndex
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals
end : numeric or datetime-like, default None
Right bound for generating intervals
periods : integer, default None
Number of periods to generate
freq : numeric, string, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
name : string, default None
Name of the resulting IntervalIndex
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
rng : IntervalIndex
See Also
--------
IntervalIndex : An Index of intervals that are all closed on the same side.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``IntervalIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end``, inclusively.
To learn more about datetime-like frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Numeric ``start`` and ``end`` are supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]],
closed='right', dtype='interval[datetime64[ns]]')
The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]],
closed='right', dtype='interval[datetime64[ns]]')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.interval_range(start=0, end=6, periods=4)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right',
dtype='interval[float64]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
closed='both', dtype='interval[int64]')
"""
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com._any_none(periods, start, end):
freq = 1 if is_number(endpoint) else 'D'
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError('Of the four parameters: start, end, periods, and '
'freq, exactly three must be specified')
if not _is_valid_endpoint(start):
msg = 'start must be numeric or datetime-like, got {start}'
raise ValueError(msg.format(start=start))
elif not _is_valid_endpoint(end):
msg = 'end must be numeric or datetime-like, got {end}'
raise ValueError(msg.format(end=end))
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError:
raise ValueError('freq must be numeric or convertible to '
'DateOffset, got {freq}'.format(freq=freq))
# verify type compatibility
if not all([_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq)]):
raise TypeError("start, end, freq need to be type compatible")
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
if periods is not None:
periods += 1
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
if com._all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com._not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, 'int64')
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
|
python
|
def interval_range(start=None, end=None, periods=None, freq=None,
name=None, closed='right'):
"""
Return a fixed frequency IntervalIndex
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals
end : numeric or datetime-like, default None
Right bound for generating intervals
periods : integer, default None
Number of periods to generate
freq : numeric, string, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
name : string, default None
Name of the resulting IntervalIndex
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
rng : IntervalIndex
See Also
--------
IntervalIndex : An Index of intervals that are all closed on the same side.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``IntervalIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end``, inclusively.
To learn more about datetime-like frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Numeric ``start`` and ``end`` are supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]],
closed='right', dtype='interval[datetime64[ns]]')
The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]],
closed='right', dtype='interval[datetime64[ns]]')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.interval_range(start=0, end=6, periods=4)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right',
dtype='interval[float64]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
closed='both', dtype='interval[int64]')
"""
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com._any_none(periods, start, end):
freq = 1 if is_number(endpoint) else 'D'
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError('Of the four parameters: start, end, periods, and '
'freq, exactly three must be specified')
if not _is_valid_endpoint(start):
msg = 'start must be numeric or datetime-like, got {start}'
raise ValueError(msg.format(start=start))
elif not _is_valid_endpoint(end):
msg = 'end must be numeric or datetime-like, got {end}'
raise ValueError(msg.format(end=end))
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
msg = 'periods must be a number, got {periods}'
raise TypeError(msg.format(periods=periods))
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError:
raise ValueError('freq must be numeric or convertible to '
'DateOffset, got {freq}'.format(freq=freq))
# verify type compatibility
if not all([_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq)]):
raise TypeError("start, end, freq need to be type compatible")
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
if periods is not None:
periods += 1
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
if com._all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com._not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, 'int64')
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
|
[
"def",
"interval_range",
"(",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"periods",
"=",
"None",
",",
"freq",
"=",
"None",
",",
"name",
"=",
"None",
",",
"closed",
"=",
"'right'",
")",
":",
"start",
"=",
"com",
".",
"maybe_box_datetimelike",
"(",
"start",
")",
"end",
"=",
"com",
".",
"maybe_box_datetimelike",
"(",
"end",
")",
"endpoint",
"=",
"start",
"if",
"start",
"is",
"not",
"None",
"else",
"end",
"if",
"freq",
"is",
"None",
"and",
"com",
".",
"_any_none",
"(",
"periods",
",",
"start",
",",
"end",
")",
":",
"freq",
"=",
"1",
"if",
"is_number",
"(",
"endpoint",
")",
"else",
"'D'",
"if",
"com",
".",
"count_not_none",
"(",
"start",
",",
"end",
",",
"periods",
",",
"freq",
")",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'Of the four parameters: start, end, periods, and '",
"'freq, exactly three must be specified'",
")",
"if",
"not",
"_is_valid_endpoint",
"(",
"start",
")",
":",
"msg",
"=",
"'start must be numeric or datetime-like, got {start}'",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"start",
"=",
"start",
")",
")",
"elif",
"not",
"_is_valid_endpoint",
"(",
"end",
")",
":",
"msg",
"=",
"'end must be numeric or datetime-like, got {end}'",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"end",
"=",
"end",
")",
")",
"if",
"is_float",
"(",
"periods",
")",
":",
"periods",
"=",
"int",
"(",
"periods",
")",
"elif",
"not",
"is_integer",
"(",
"periods",
")",
"and",
"periods",
"is",
"not",
"None",
":",
"msg",
"=",
"'periods must be a number, got {periods}'",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"periods",
"=",
"periods",
")",
")",
"if",
"freq",
"is",
"not",
"None",
"and",
"not",
"is_number",
"(",
"freq",
")",
":",
"try",
":",
"freq",
"=",
"to_offset",
"(",
"freq",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'freq must be numeric or convertible to '",
"'DateOffset, got {freq}'",
".",
"format",
"(",
"freq",
"=",
"freq",
")",
")",
"# verify type compatibility",
"if",
"not",
"all",
"(",
"[",
"_is_type_compatible",
"(",
"start",
",",
"end",
")",
",",
"_is_type_compatible",
"(",
"start",
",",
"freq",
")",
",",
"_is_type_compatible",
"(",
"end",
",",
"freq",
")",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"start, end, freq need to be type compatible\"",
")",
"# +1 to convert interval count to breaks count (n breaks = n-1 intervals)",
"if",
"periods",
"is",
"not",
"None",
":",
"periods",
"+=",
"1",
"if",
"is_number",
"(",
"endpoint",
")",
":",
"# force consistency between start/end/freq (lower end if freq skips it)",
"if",
"com",
".",
"_all_not_none",
"(",
"start",
",",
"end",
",",
"freq",
")",
":",
"end",
"-=",
"(",
"end",
"-",
"start",
")",
"%",
"freq",
"# compute the period/start/end if unspecified (at most one)",
"if",
"periods",
"is",
"None",
":",
"periods",
"=",
"int",
"(",
"(",
"end",
"-",
"start",
")",
"//",
"freq",
")",
"+",
"1",
"elif",
"start",
"is",
"None",
":",
"start",
"=",
"end",
"-",
"(",
"periods",
"-",
"1",
")",
"*",
"freq",
"elif",
"end",
"is",
"None",
":",
"end",
"=",
"start",
"+",
"(",
"periods",
"-",
"1",
")",
"*",
"freq",
"breaks",
"=",
"np",
".",
"linspace",
"(",
"start",
",",
"end",
",",
"periods",
")",
"if",
"all",
"(",
"is_integer",
"(",
"x",
")",
"for",
"x",
"in",
"com",
".",
"_not_none",
"(",
"start",
",",
"end",
",",
"freq",
")",
")",
":",
"# np.linspace always produces float output",
"breaks",
"=",
"maybe_downcast_to_dtype",
"(",
"breaks",
",",
"'int64'",
")",
"else",
":",
"# delegate to the appropriate range function",
"if",
"isinstance",
"(",
"endpoint",
",",
"Timestamp",
")",
":",
"range_func",
"=",
"date_range",
"else",
":",
"range_func",
"=",
"timedelta_range",
"breaks",
"=",
"range_func",
"(",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"periods",
"=",
"periods",
",",
"freq",
"=",
"freq",
")",
"return",
"IntervalIndex",
".",
"from_breaks",
"(",
"breaks",
",",
"name",
"=",
"name",
",",
"closed",
"=",
"closed",
")"
] |
Return a fixed frequency IntervalIndex
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals
end : numeric or datetime-like, default None
Right bound for generating intervals
periods : integer, default None
Number of periods to generate
freq : numeric, string, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
name : string, default None
Name of the resulting IntervalIndex
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
rng : IntervalIndex
See Also
--------
IntervalIndex : An Index of intervals that are all closed on the same side.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``IntervalIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end``, inclusively.
To learn more about datetime-like frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Numeric ``start`` and ``end`` are supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]],
closed='right', dtype='interval[datetime64[ns]]')
The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]],
closed='right', dtype='interval[datetime64[ns]]')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.interval_range(start=0, end=6, periods=4)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right',
dtype='interval[float64]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
closed='both', dtype='interval[int64]')
|
[
"Return",
"a",
"fixed",
"frequency",
"IntervalIndex"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/interval.py#L1156-L1312
|
20,457
|
pandas-dev/pandas
|
pandas/io/formats/csvs.py
|
CSVFormatter.save
|
def save(self):
"""
Create the writer & save
"""
# GH21227 internal compression is not used when file-like passed.
if self.compression and hasattr(self.path_or_buf, 'write'):
msg = ("compression has no effect when passing file-like "
"object as input.")
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# when zip compression is called.
is_zip = isinstance(self.path_or_buf, ZipFile) or (
not hasattr(self.path_or_buf, 'write')
and self.compression == 'zip')
if is_zip:
# zipfile doesn't support writing string to archive. uses string
# buffer to receive csv writing and dump into zip compression
# file handle. GH21241, GH21118
f = StringIO()
close = False
elif hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding == 'ascii':
self.writer = csvlib.writer(f, **writer_kwargs)
else:
writer_kwargs['encoding'] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
self._save()
finally:
if is_zip:
# GH17778 handles zip compression separately.
buf = f.getvalue()
if hasattr(self.path_or_buf, 'write'):
self.path_or_buf.write(buf)
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
f.write(buf)
close = True
if close:
f.close()
for _fh in handles:
_fh.close()
|
python
|
def save(self):
"""
Create the writer & save
"""
# GH21227 internal compression is not used when file-like passed.
if self.compression and hasattr(self.path_or_buf, 'write'):
msg = ("compression has no effect when passing file-like "
"object as input.")
warnings.warn(msg, RuntimeWarning, stacklevel=2)
# when zip compression is called.
is_zip = isinstance(self.path_or_buf, ZipFile) or (
not hasattr(self.path_or_buf, 'write')
and self.compression == 'zip')
if is_zip:
# zipfile doesn't support writing string to archive. uses string
# buffer to receive csv writing and dump into zip compression
# file handle. GH21241, GH21118
f = StringIO()
close = False
elif hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if self.encoding == 'ascii':
self.writer = csvlib.writer(f, **writer_kwargs)
else:
writer_kwargs['encoding'] = self.encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
self._save()
finally:
if is_zip:
# GH17778 handles zip compression separately.
buf = f.getvalue()
if hasattr(self.path_or_buf, 'write'):
self.path_or_buf.write(buf)
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
f.write(buf)
close = True
if close:
f.close()
for _fh in handles:
_fh.close()
|
[
"def",
"save",
"(",
"self",
")",
":",
"# GH21227 internal compression is not used when file-like passed.",
"if",
"self",
".",
"compression",
"and",
"hasattr",
"(",
"self",
".",
"path_or_buf",
",",
"'write'",
")",
":",
"msg",
"=",
"(",
"\"compression has no effect when passing file-like \"",
"\"object as input.\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"RuntimeWarning",
",",
"stacklevel",
"=",
"2",
")",
"# when zip compression is called.",
"is_zip",
"=",
"isinstance",
"(",
"self",
".",
"path_or_buf",
",",
"ZipFile",
")",
"or",
"(",
"not",
"hasattr",
"(",
"self",
".",
"path_or_buf",
",",
"'write'",
")",
"and",
"self",
".",
"compression",
"==",
"'zip'",
")",
"if",
"is_zip",
":",
"# zipfile doesn't support writing string to archive. uses string",
"# buffer to receive csv writing and dump into zip compression",
"# file handle. GH21241, GH21118",
"f",
"=",
"StringIO",
"(",
")",
"close",
"=",
"False",
"elif",
"hasattr",
"(",
"self",
".",
"path_or_buf",
",",
"'write'",
")",
":",
"f",
"=",
"self",
".",
"path_or_buf",
"close",
"=",
"False",
"else",
":",
"f",
",",
"handles",
"=",
"_get_handle",
"(",
"self",
".",
"path_or_buf",
",",
"self",
".",
"mode",
",",
"encoding",
"=",
"self",
".",
"encoding",
",",
"compression",
"=",
"self",
".",
"compression",
")",
"close",
"=",
"True",
"try",
":",
"writer_kwargs",
"=",
"dict",
"(",
"lineterminator",
"=",
"self",
".",
"line_terminator",
",",
"delimiter",
"=",
"self",
".",
"sep",
",",
"quoting",
"=",
"self",
".",
"quoting",
",",
"doublequote",
"=",
"self",
".",
"doublequote",
",",
"escapechar",
"=",
"self",
".",
"escapechar",
",",
"quotechar",
"=",
"self",
".",
"quotechar",
")",
"if",
"self",
".",
"encoding",
"==",
"'ascii'",
":",
"self",
".",
"writer",
"=",
"csvlib",
".",
"writer",
"(",
"f",
",",
"*",
"*",
"writer_kwargs",
")",
"else",
":",
"writer_kwargs",
"[",
"'encoding'",
"]",
"=",
"self",
".",
"encoding",
"self",
".",
"writer",
"=",
"UnicodeWriter",
"(",
"f",
",",
"*",
"*",
"writer_kwargs",
")",
"self",
".",
"_save",
"(",
")",
"finally",
":",
"if",
"is_zip",
":",
"# GH17778 handles zip compression separately.",
"buf",
"=",
"f",
".",
"getvalue",
"(",
")",
"if",
"hasattr",
"(",
"self",
".",
"path_or_buf",
",",
"'write'",
")",
":",
"self",
".",
"path_or_buf",
".",
"write",
"(",
"buf",
")",
"else",
":",
"f",
",",
"handles",
"=",
"_get_handle",
"(",
"self",
".",
"path_or_buf",
",",
"self",
".",
"mode",
",",
"encoding",
"=",
"self",
".",
"encoding",
",",
"compression",
"=",
"self",
".",
"compression",
")",
"f",
".",
"write",
"(",
"buf",
")",
"close",
"=",
"True",
"if",
"close",
":",
"f",
".",
"close",
"(",
")",
"for",
"_fh",
"in",
"handles",
":",
"_fh",
".",
"close",
"(",
")"
] |
Create the writer & save
|
[
"Create",
"the",
"writer",
"&",
"save"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/csvs.py#L125-L184
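The zip branch above is reached through the public `to_csv` API; a small sketch (the file name is illustrative, and the warning behavior assumes the pandas version pinned by this commit):

import io
import warnings
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Path target: the CSV is buffered in a StringIO and written into
# a zip archive, per the is_zip branch above.
df.to_csv("out.csv.zip", compression="zip")

# File-like target: internal compression is skipped and a
# RuntimeWarning is emitted (GH21227).
buf = io.StringIO()
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    df.to_csv(buf, compression="zip")
print([str(w.message) for w in caught])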
|
20,458
|
pandas-dev/pandas
|
pandas/core/accessor.py
|
delegate_names
|
def delegate_names(delegate, accessors, typ, overwrite=False):
"""
Add delegated names to a class using a class decorator. This provides
an alternative usage to directly calling `_add_delegate_accessors`
below a class definition.
Parameters
----------
delegate : object
the class to get methods/properties & doc-strings
accessors : Sequence[str]
List of accessors to add
typ : {'property', 'method'}
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
Returns
-------
callable
A class decorator.
Examples
--------
@delegate_names(Categorical, ["categories", "ordered"], "property")
class CategoricalAccessor(PandasDelegate):
[...]
"""
def add_delegate_accessors(cls):
cls._add_delegate_accessors(delegate, accessors, typ,
overwrite=overwrite)
return cls
return add_delegate_accessors
|
python
|
def delegate_names(delegate, accessors, typ, overwrite=False):
"""
Add delegated names to a class using a class decorator. This provides
an alternative usage to directly calling `_add_delegate_accessors`
below a class definition.
Parameters
----------
delegate : object
the class to get methods/properties & doc-strings
accessors : Sequence[str]
List of accessors to add
typ : {'property', 'method'}
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
Returns
-------
callable
A class decorator.
Examples
--------
@delegate_names(Categorical, ["categories", "ordered"], "property")
class CategoricalAccessor(PandasDelegate):
[...]
"""
def add_delegate_accessors(cls):
cls._add_delegate_accessors(delegate, accessors, typ,
overwrite=overwrite)
return cls
return add_delegate_accessors
|
[
"def",
"delegate_names",
"(",
"delegate",
",",
"accessors",
",",
"typ",
",",
"overwrite",
"=",
"False",
")",
":",
"def",
"add_delegate_accessors",
"(",
"cls",
")",
":",
"cls",
".",
"_add_delegate_accessors",
"(",
"delegate",
",",
"accessors",
",",
"typ",
",",
"overwrite",
"=",
"overwrite",
")",
"return",
"cls",
"return",
"add_delegate_accessors"
] |
Add delegated names to a class using a class decorator. This provides
an alternative usage to directly calling `_add_delegate_accessors`
below a class definition.
Parameters
----------
delegate : object
the class to get methods/properties & doc-strings
accessors : Sequence[str]
List of accessors to add
typ : {'property', 'method'}
overwrite : boolean, default False
overwrite the method/property in the target class if it exists
Returns
-------
callable
A class decorator.
Examples
--------
@delegate_names(Categorical, ["categories", "ordered"], "property")
class CategoricalAccessor(PandasDelegate):
[...]
|
[
"Add",
"delegated",
"names",
"to",
"a",
"class",
"using",
"a",
"class",
"decorator",
".",
"This",
"provides",
"an",
"alternative",
"usage",
"to",
"directly",
"calling",
"_add_delegate_accessors",
"below",
"a",
"class",
"definition",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/accessor.py#L114-L146
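A self-contained sketch of the decorator in action; `Engine` and `EngineAccessor` are invented names, and only `_delegate_property_get` is overridden because a single property is delegated:

from pandas.core.accessor import PandasDelegate, delegate_names

class Engine:  # hypothetical delegate class, not part of pandas
    @property
    def horsepower(self):
        """Engine power in hp."""
        return 120

@delegate_names(Engine, ["horsepower"], typ="property")
class EngineAccessor(PandasDelegate):
    def __init__(self, engine):
        self._engine = engine

    def _delegate_property_get(self, name):
        return getattr(self._engine, name)

acc = EngineAccessor(Engine())
print(acc.horsepower)                # 120, routed via the delegate
print(type(acc).horsepower.__doc__)  # doc-string copied from Engine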
|
20,459
|
pandas-dev/pandas
|
pandas/core/accessor.py
|
PandasDelegate._add_delegate_accessors
|
def _add_delegate_accessors(cls, delegate, accessors, typ,
overwrite=False):
"""
Add accessors to cls from the delegate class.
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists.
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(fget=_getter, fset=_setter,
doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate, name).__doc__
return f
for name in accessors:
if typ == 'property':
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls, name, f)
|
python
|
def _add_delegate_accessors(cls, delegate, accessors, typ,
overwrite=False):
"""
Add accessors to cls from the delegate class.
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists.
"""
def _create_delegator_property(name):
def _getter(self):
return self._delegate_property_get(name)
def _setter(self, new_values):
return self._delegate_property_set(name, new_values)
_getter.__name__ = name
_setter.__name__ = name
return property(fget=_getter, fset=_setter,
doc=getattr(delegate, name).__doc__)
def _create_delegator_method(name):
def f(self, *args, **kwargs):
return self._delegate_method(name, *args, **kwargs)
f.__name__ = name
f.__doc__ = getattr(delegate, name).__doc__
return f
for name in accessors:
if typ == 'property':
f = _create_delegator_property(name)
else:
f = _create_delegator_method(name)
# don't overwrite existing methods/properties
if overwrite or not hasattr(cls, name):
setattr(cls, name, f)
|
[
"def",
"_add_delegate_accessors",
"(",
"cls",
",",
"delegate",
",",
"accessors",
",",
"typ",
",",
"overwrite",
"=",
"False",
")",
":",
"def",
"_create_delegator_property",
"(",
"name",
")",
":",
"def",
"_getter",
"(",
"self",
")",
":",
"return",
"self",
".",
"_delegate_property_get",
"(",
"name",
")",
"def",
"_setter",
"(",
"self",
",",
"new_values",
")",
":",
"return",
"self",
".",
"_delegate_property_set",
"(",
"name",
",",
"new_values",
")",
"_getter",
".",
"__name__",
"=",
"name",
"_setter",
".",
"__name__",
"=",
"name",
"return",
"property",
"(",
"fget",
"=",
"_getter",
",",
"fset",
"=",
"_setter",
",",
"doc",
"=",
"getattr",
"(",
"delegate",
",",
"name",
")",
".",
"__doc__",
")",
"def",
"_create_delegator_method",
"(",
"name",
")",
":",
"def",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_delegate_method",
"(",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"f",
".",
"__name__",
"=",
"name",
"f",
".",
"__doc__",
"=",
"getattr",
"(",
"delegate",
",",
"name",
")",
".",
"__doc__",
"return",
"f",
"for",
"name",
"in",
"accessors",
":",
"if",
"typ",
"==",
"'property'",
":",
"f",
"=",
"_create_delegator_property",
"(",
"name",
")",
"else",
":",
"f",
"=",
"_create_delegator_method",
"(",
"name",
")",
"# don't overwrite existing methods/properties",
"if",
"overwrite",
"or",
"not",
"hasattr",
"(",
"cls",
",",
"name",
")",
":",
"setattr",
"(",
"cls",
",",
"name",
",",
"f",
")"
] |
Add accessors to cls from the delegate class.
Parameters
----------
cls : the class to add the methods/properties to
delegate : the class to get methods/properties & doc-strings
accessors : string list of accessors to add
typ : 'property' or 'method'
overwrite : boolean, default False
overwrite the method/property in the target class if it exists.
|
[
"Add",
"accessors",
"to",
"cls",
"from",
"the",
"delegate",
"class",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/accessor.py#L63-L111
|
20,460
|
pandas-dev/pandas
|
pandas/core/computation/expressions.py
|
_can_use_numexpr
|
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
# check for dtype compatibility
dtypes = set()
for o in [a, b]:
if hasattr(o, 'get_dtype_counts'):
s = o.get_dtype_counts()
if len(s) > 1:
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
dtypes |= {o.dtype.name}
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
|
python
|
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
# check for dtype compatibility
dtypes = set()
for o in [a, b]:
if hasattr(o, 'get_dtype_counts'):
s = o.get_dtype_counts()
if len(s) > 1:
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
dtypes |= {o.dtype.name}
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
|
[
"def",
"_can_use_numexpr",
"(",
"op",
",",
"op_str",
",",
"a",
",",
"b",
",",
"dtype_check",
")",
":",
"if",
"op_str",
"is",
"not",
"None",
":",
"# required min elements (otherwise we are adding overhead)",
"if",
"np",
".",
"prod",
"(",
"a",
".",
"shape",
")",
">",
"_MIN_ELEMENTS",
":",
"# check for dtype compatibility",
"dtypes",
"=",
"set",
"(",
")",
"for",
"o",
"in",
"[",
"a",
",",
"b",
"]",
":",
"if",
"hasattr",
"(",
"o",
",",
"'get_dtype_counts'",
")",
":",
"s",
"=",
"o",
".",
"get_dtype_counts",
"(",
")",
"if",
"len",
"(",
"s",
")",
">",
"1",
":",
"return",
"False",
"dtypes",
"|=",
"set",
"(",
"s",
".",
"index",
")",
"elif",
"isinstance",
"(",
"o",
",",
"np",
".",
"ndarray",
")",
":",
"dtypes",
"|=",
"{",
"o",
".",
"dtype",
".",
"name",
"}",
"# allowed are a superset",
"if",
"not",
"len",
"(",
"dtypes",
")",
"or",
"_ALLOWED_DTYPES",
"[",
"dtype_check",
"]",
">=",
"dtypes",
":",
"return",
"True",
"return",
"False"
] |
return a boolean if we WILL be using numexpr
|
[
"return",
"a",
"boolean",
"if",
"we",
"WILL",
"be",
"using",
"numexpr"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expressions.py#L72-L94
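Informally: numexpr is used only for large, dtype-homogeneous operands whose dtypes all fall in the allowed set for the operation. A rough standalone rendering, where the threshold and `allowed` set are illustrative stand-ins for `_MIN_ELEMENTS` and `_ALLOWED_DTYPES`:

import numpy as np

MIN_ELEMENTS = 10_000                  # illustrative threshold
allowed = {"int64", "float64"}         # illustrative dtype whitelist

def can_use(a, b):
    if np.prod(a.shape) <= MIN_ELEMENTS:
        return False                   # too small: overhead dominates
    dtypes = {o.dtype.name for o in (a, b) if isinstance(o, np.ndarray)}
    # the allowed set must be a superset of the observed dtypes
    return not dtypes or allowed >= dtypes

a = np.ones(1_000_000)
print(can_use(a, a))                   # True: large, float64-only
print(can_use(a.astype(object), a))    # False: object dtype not allowed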
|
20,461
|
pandas-dev/pandas
|
pandas/core/computation/expressions.py
|
evaluate
|
def evaluate(op, op_str, a, b, use_numexpr=True,
**eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
----------
op : the actual operator
op_str : the string version of the op
a : left operand
b : right operand
use_numexpr : whether to try to use numexpr (default True)
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b, **eval_kwargs)
return _evaluate_standard(op, op_str, a, b)
|
python
|
def evaluate(op, op_str, a, b, use_numexpr=True,
**eval_kwargs):
""" evaluate and return the expression of the op on a and b
Parameters
----------
op : the actual operator
op_str : the string version of the op
a : left operand
b : right operand
use_numexpr : whether to try to use numexpr (default True)
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b, **eval_kwargs)
return _evaluate_standard(op, op_str, a, b)
|
[
"def",
"evaluate",
"(",
"op",
",",
"op_str",
",",
"a",
",",
"b",
",",
"use_numexpr",
"=",
"True",
",",
"*",
"*",
"eval_kwargs",
")",
":",
"use_numexpr",
"=",
"use_numexpr",
"and",
"_bool_arith_check",
"(",
"op_str",
",",
"a",
",",
"b",
")",
"if",
"use_numexpr",
":",
"return",
"_evaluate",
"(",
"op",
",",
"op_str",
",",
"a",
",",
"b",
",",
"*",
"*",
"eval_kwargs",
")",
"return",
"_evaluate_standard",
"(",
"op",
",",
"op_str",
",",
"a",
",",
"b",
")"
] |
evaluate and return the expression of the op on a and b
Parameters
----------
op : the actual operator
op_str : the string version of the op
a : left operand
b : right operand
use_numexpr : whether to try to use numexpr (default True)
|
[
"evaluate",
"and",
"return",
"the",
"expression",
"of",
"the",
"op",
"on",
"a",
"and",
"b"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expressions.py#L193-L210
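Usage is straightforward through the module; a small sketch with a plain addition (the call falls back to `op(a, b)` when numexpr is unavailable or the operands do not qualify):

import operator
import numpy as np
from pandas.core.computation import expressions as expr

a = np.arange(1_000_000, dtype="float64")
b = a + 1.0
# Dispatches to numexpr when installed and worthwhile,
# otherwise evaluates op(a, b) directly.
out = expr.evaluate(operator.add, "+", a, b)
print(out[:3])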
|
20,462
|
pandas-dev/pandas
|
pandas/core/computation/expressions.py
|
where
|
def where(cond, a, b, use_numexpr=True):
""" evaluate the where condition cond on a and b
Parameters
----------
cond : a boolean array
a : return if cond is True
b : return if cond is False
use_numexpr : whether to try to use numexpr (default True)
"""
if use_numexpr:
return _where(cond, a, b)
return _where_standard(cond, a, b)
|
python
|
def where(cond, a, b, use_numexpr=True):
""" evaluate the where condition cond on a and b
Parameters
----------
cond : a boolean array
a : return if cond is True
b : return if cond is False
use_numexpr : whether to try to use numexpr (default True)
"""
if use_numexpr:
return _where(cond, a, b)
return _where_standard(cond, a, b)
|
[
"def",
"where",
"(",
"cond",
",",
"a",
",",
"b",
",",
"use_numexpr",
"=",
"True",
")",
":",
"if",
"use_numexpr",
":",
"return",
"_where",
"(",
"cond",
",",
"a",
",",
"b",
")",
"return",
"_where_standard",
"(",
"cond",
",",
"a",
",",
"b",
")"
] |
evaluate the where condition cond on a and b
Parameters
----------
cond : a boolean array
a : return if cond is True
b : return if cond is False
use_numexpr : whether to try to use numexpr (default True)
|
[
"evaluate",
"the",
"where",
"condition",
"cond",
"on",
"a",
"and",
"b"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/expressions.py#L213-L227
|
20,463
|
pandas-dev/pandas
|
pandas/io/feather_format.py
|
to_feather
|
def to_feather(df, path):
"""
Write a DataFrame to the feather-format
Parameters
----------
df : DataFrame
path : string file path, or file-like object
"""
path = _stringify_path(path)
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
feather = _try_import()[0]
valid_types = {'string', 'unicode'}
# validate index
# --------------
# validate that we have only a default index
# raise on anything else as we don't serialize the index
    if not isinstance(df.index, Int64Index):
        raise ValueError("feather does not support serializing {} "
                         "for the index; you can .reset_index() "
                         "to make the index into column(s)".format(
                             type(df.index)))
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError("feather does not support serializing a "
"non-default index for the index; you "
"can .reset_index() to make the index "
"into column(s)")
if df.index.name is not None:
raise ValueError("feather does not serialize index meta-data on a "
"default index")
# validate columns
# ----------------
# must have value column names (strings only)
if df.columns.inferred_type not in valid_types:
raise ValueError("feather must have string column names")
feather.write_feather(df, path)
|
python
|
def to_feather(df, path):
"""
Write a DataFrame to the feather-format
Parameters
----------
df : DataFrame
path : string file path, or file-like object
"""
path = _stringify_path(path)
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
feather = _try_import()[0]
valid_types = {'string', 'unicode'}
# validate index
# --------------
# validate that we have only a default index
# raise on anything else as we don't serialize the index
    if not isinstance(df.index, Int64Index):
        raise ValueError("feather does not support serializing {} "
                         "for the index; you can .reset_index() "
                         "to make the index into column(s)".format(
                             type(df.index)))
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError("feather does not support serializing a "
"non-default index for the index; you "
"can .reset_index() to make the index "
"into column(s)")
if df.index.name is not None:
raise ValueError("feather does not serialize index meta-data on a "
"default index")
# validate columns
# ----------------
# must have value column names (strings only)
if df.columns.inferred_type not in valid_types:
raise ValueError("feather must have string column names")
feather.write_feather(df, path)
|
[
"def",
"to_feather",
"(",
"df",
",",
"path",
")",
":",
"path",
"=",
"_stringify_path",
"(",
"path",
")",
"if",
"not",
"isinstance",
"(",
"df",
",",
"DataFrame",
")",
":",
"raise",
"ValueError",
"(",
"\"feather only support IO with DataFrames\"",
")",
"feather",
"=",
"_try_import",
"(",
")",
"[",
"0",
"]",
"valid_types",
"=",
"{",
"'string'",
",",
"'unicode'",
"}",
"# validate index",
"# --------------",
"# validate that we have only a default index",
"# raise on anything else as we don't serialize the index",
"if",
"not",
"isinstance",
"(",
"df",
".",
"index",
",",
"Int64Index",
")",
":",
"raise",
"ValueError",
"(",
"\"feather does not support serializing {} \"",
"\"for the index; you can .reset_index()\"",
"\"to make the index into column(s)\"",
".",
"format",
"(",
"type",
"(",
"df",
".",
"index",
")",
")",
")",
"if",
"not",
"df",
".",
"index",
".",
"equals",
"(",
"RangeIndex",
".",
"from_range",
"(",
"range",
"(",
"len",
"(",
"df",
")",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"feather does not support serializing a \"",
"\"non-default index for the index; you \"",
"\"can .reset_index() to make the index \"",
"\"into column(s)\"",
")",
"if",
"df",
".",
"index",
".",
"name",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"feather does not serialize index meta-data on a \"",
"\"default index\"",
")",
"# validate columns",
"# ----------------",
"# must have value column names (strings only)",
"if",
"df",
".",
"columns",
".",
"inferred_type",
"not",
"in",
"valid_types",
":",
"raise",
"ValueError",
"(",
"\"feather must have string column names\"",
")",
"feather",
".",
"write_feather",
"(",
"df",
",",
"path",
")"
] |
Write a DataFrame to the feather-format
Parameters
----------
df : DataFrame
path : string file path, or file-like object
|
[
"Write",
"a",
"DataFrame",
"to",
"the",
"feather",
"-",
"format"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/feather_format.py#L36-L82
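Given the index validation above, a non-default or named index has to be materialized as a column before writing; a usage sketch (the file name is illustrative, and pyarrow/feather must be installed):

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]},
                  index=pd.Index([10, 20, 30], name="key"))

# A named, non-default index would be rejected by the checks above,
# so turn it into a regular column first.
df.reset_index().to_feather("data.feather")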
|
20,464
|
pandas-dev/pandas
|
pandas/io/feather_format.py
|
read_feather
|
def read_feather(path, columns=None, use_threads=True):
"""
Load a feather-format object from the file path
.. versionadded:: 0.20.0
Parameters
----------
path : string file path, or file-like object
columns : sequence, default None
If not provided, all columns are read
    .. versionadded:: 0.24.0
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame
    .. versionadded:: 0.21.0
    .. deprecated:: 0.24.0
use_threads : bool, default True
Whether to parallelize reading using multiple threads
    .. versionadded:: 0.24.0
Returns
-------
type of object stored in file
"""
feather, pyarrow = _try_import()
path = _stringify_path(path)
if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):
int_use_threads = int(use_threads)
if int_use_threads < 1:
int_use_threads = 1
return feather.read_feather(path, columns=columns,
nthreads=int_use_threads)
return feather.read_feather(path, columns=columns,
use_threads=bool(use_threads))
|
python
|
def read_feather(path, columns=None, use_threads=True):
"""
Load a feather-format object from the file path
.. versionadded:: 0.20.0
Parameters
----------
path : string file path, or file-like object
columns : sequence, default None
If not provided, all columns are read
    .. versionadded:: 0.24.0
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame
    .. versionadded:: 0.21.0
    .. deprecated:: 0.24.0
use_threads : bool, default True
Whether to parallelize reading using multiple threads
    .. versionadded:: 0.24.0
Returns
-------
type of object stored in file
"""
feather, pyarrow = _try_import()
path = _stringify_path(path)
if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'):
int_use_threads = int(use_threads)
if int_use_threads < 1:
int_use_threads = 1
return feather.read_feather(path, columns=columns,
nthreads=int_use_threads)
return feather.read_feather(path, columns=columns,
use_threads=bool(use_threads))
|
[
"def",
"read_feather",
"(",
"path",
",",
"columns",
"=",
"None",
",",
"use_threads",
"=",
"True",
")",
":",
"feather",
",",
"pyarrow",
"=",
"_try_import",
"(",
")",
"path",
"=",
"_stringify_path",
"(",
"path",
")",
"if",
"LooseVersion",
"(",
"pyarrow",
".",
"__version__",
")",
"<",
"LooseVersion",
"(",
"'0.11.0'",
")",
":",
"int_use_threads",
"=",
"int",
"(",
"use_threads",
")",
"if",
"int_use_threads",
"<",
"1",
":",
"int_use_threads",
"=",
"1",
"return",
"feather",
".",
"read_feather",
"(",
"path",
",",
"columns",
"=",
"columns",
",",
"nthreads",
"=",
"int_use_threads",
")",
"return",
"feather",
".",
"read_feather",
"(",
"path",
",",
"columns",
"=",
"columns",
",",
"use_threads",
"=",
"bool",
"(",
"use_threads",
")",
")"
] |
Load a feather-format object from the file path
.. versionadded:: 0.20.0
Parameters
----------
path : string file path, or file-like object
columns : sequence, default None
If not provided, all columns are read
    .. versionadded:: 0.24.0
nthreads : int, default 1
Number of CPU threads to use when reading to pandas.DataFrame
    .. versionadded:: 0.21.0
    .. deprecated:: 0.24.0
use_threads : bool, default True
Whether to parallelize reading using multiple threads
    .. versionadded:: 0.24.0
Returns
-------
type of object stored in file
|
[
"Load",
"a",
"feather",
"-",
"format",
"object",
"from",
"the",
"file",
"path"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/feather_format.py#L86-L125
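A round-trip sketch; `columns` limits what is materialized, and the pyarrow version gate above only changes how the threading option is forwarded (the file name is illustrative):

import pandas as pd

pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]}).to_feather("tmp.feather")

# Only column "a" is read back; use_threads maps to nthreads on
# pyarrow < 0.11.0, per the version check above.
df = pd.read_feather("tmp.feather", columns=["a"])
print(df)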
|
20,465
|
pandas-dev/pandas
|
pandas/core/arrays/_ranges.py
|
generate_regular_range
|
def generate_regular_range(start, end, periods, freq):
"""
Generate a range of dates with the spans between dates described by
the given `freq` DateOffset.
Parameters
----------
start : Timestamp or None
first point of produced date range
end : Timestamp or None
last point of produced date range
periods : int
number of periods in produced date range
freq : DateOffset
describes space between dates in produced date range
Returns
-------
values : ndarray[np.int64]
    nanosecond unix timestamps
tz : tzinfo or None
    timezone inferred from `start`/`end`, if any
"""
if isinstance(freq, Tick):
stride = freq.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = (b + (Timestamp(end).value - b) // stride * stride +
stride // 2 + 1)
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = _generate_range_overflow_safe(b, periods, stride, side='start')
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = _generate_range_overflow_safe(e, periods, stride, side='end')
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
with np.errstate(over="raise"):
# If the range is sufficiently large, np.arange may overflow
# and incorrectly return an empty array if not caught.
try:
values = np.arange(b, e, stride, dtype=np.int64)
except FloatingPointError:
xdr = [b]
while xdr[-1] != e:
xdr.append(xdr[-1] + stride)
values = np.array(xdr[:-1], dtype=np.int64)
else:
tz = None
# start and end should have the same timezone by this point
if start is not None:
tz = start.tz
elif end is not None:
tz = end.tz
xdr = generate_range(start=start, end=end,
periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
return values, tz
|
python
|
def generate_regular_range(start, end, periods, freq):
"""
Generate a range of dates with the spans between dates described by
the given `freq` DateOffset.
Parameters
----------
start : Timestamp or None
first point of produced date range
end : Timestamp or None
last point of produced date range
periods : int
number of periods in produced date range
freq : DateOffset
describes space between dates in produced date range
Returns
-------
values : ndarray[np.int64]
    nanosecond unix timestamps
tz : tzinfo or None
    timezone inferred from `start`/`end`, if any
"""
if isinstance(freq, Tick):
stride = freq.nanos
if periods is None:
b = Timestamp(start).value
# cannot just use e = Timestamp(end) + 1 because arange breaks when
# stride is too large, see GH10887
e = (b + (Timestamp(end).value - b) // stride * stride +
stride // 2 + 1)
# end.tz == start.tz by this point due to _generate implementation
tz = start.tz
elif start is not None:
b = Timestamp(start).value
e = _generate_range_overflow_safe(b, periods, stride, side='start')
tz = start.tz
elif end is not None:
e = Timestamp(end).value + stride
b = _generate_range_overflow_safe(e, periods, stride, side='end')
tz = end.tz
else:
raise ValueError("at least 'start' or 'end' should be specified "
"if a 'period' is given.")
with np.errstate(over="raise"):
# If the range is sufficiently large, np.arange may overflow
# and incorrectly return an empty array if not caught.
try:
values = np.arange(b, e, stride, dtype=np.int64)
except FloatingPointError:
xdr = [b]
while xdr[-1] != e:
xdr.append(xdr[-1] + stride)
values = np.array(xdr[:-1], dtype=np.int64)
else:
tz = None
# start and end should have the same timezone by this point
if start is not None:
tz = start.tz
elif end is not None:
tz = end.tz
xdr = generate_range(start=start, end=end,
periods=periods, offset=freq)
values = np.array([x.value for x in xdr], dtype=np.int64)
return values, tz
|
[
"def",
"generate_regular_range",
"(",
"start",
",",
"end",
",",
"periods",
",",
"freq",
")",
":",
"if",
"isinstance",
"(",
"freq",
",",
"Tick",
")",
":",
"stride",
"=",
"freq",
".",
"nanos",
"if",
"periods",
"is",
"None",
":",
"b",
"=",
"Timestamp",
"(",
"start",
")",
".",
"value",
"# cannot just use e = Timestamp(end) + 1 because arange breaks when",
"# stride is too large, see GH10887",
"e",
"=",
"(",
"b",
"+",
"(",
"Timestamp",
"(",
"end",
")",
".",
"value",
"-",
"b",
")",
"//",
"stride",
"*",
"stride",
"+",
"stride",
"//",
"2",
"+",
"1",
")",
"# end.tz == start.tz by this point due to _generate implementation",
"tz",
"=",
"start",
".",
"tz",
"elif",
"start",
"is",
"not",
"None",
":",
"b",
"=",
"Timestamp",
"(",
"start",
")",
".",
"value",
"e",
"=",
"_generate_range_overflow_safe",
"(",
"b",
",",
"periods",
",",
"stride",
",",
"side",
"=",
"'start'",
")",
"tz",
"=",
"start",
".",
"tz",
"elif",
"end",
"is",
"not",
"None",
":",
"e",
"=",
"Timestamp",
"(",
"end",
")",
".",
"value",
"+",
"stride",
"b",
"=",
"_generate_range_overflow_safe",
"(",
"e",
",",
"periods",
",",
"stride",
",",
"side",
"=",
"'end'",
")",
"tz",
"=",
"end",
".",
"tz",
"else",
":",
"raise",
"ValueError",
"(",
"\"at least 'start' or 'end' should be specified \"",
"\"if a 'period' is given.\"",
")",
"with",
"np",
".",
"errstate",
"(",
"over",
"=",
"\"raise\"",
")",
":",
"# If the range is sufficiently large, np.arange may overflow",
"# and incorrectly return an empty array if not caught.",
"try",
":",
"values",
"=",
"np",
".",
"arange",
"(",
"b",
",",
"e",
",",
"stride",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"except",
"FloatingPointError",
":",
"xdr",
"=",
"[",
"b",
"]",
"while",
"xdr",
"[",
"-",
"1",
"]",
"!=",
"e",
":",
"xdr",
".",
"append",
"(",
"xdr",
"[",
"-",
"1",
"]",
"+",
"stride",
")",
"values",
"=",
"np",
".",
"array",
"(",
"xdr",
"[",
":",
"-",
"1",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"else",
":",
"tz",
"=",
"None",
"# start and end should have the same timezone by this point",
"if",
"start",
"is",
"not",
"None",
":",
"tz",
"=",
"start",
".",
"tz",
"elif",
"end",
"is",
"not",
"None",
":",
"tz",
"=",
"end",
".",
"tz",
"xdr",
"=",
"generate_range",
"(",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"periods",
"=",
"periods",
",",
"offset",
"=",
"freq",
")",
"values",
"=",
"np",
".",
"array",
"(",
"[",
"x",
".",
"value",
"for",
"x",
"in",
"xdr",
"]",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"return",
"values",
",",
"tz"
] |
Generate a range of dates with the spans between dates described by
the given `freq` DateOffset.
Parameters
----------
start : Timestamp or None
first point of produced date range
end : Timestamp or None
last point of produced date range
periods : int
number of periods in produced date range
freq : DateOffset
describes space between dates in produced date range
Returns
-------
values : ndarray[np.int64]
    nanosecond unix timestamps
tz : tzinfo or None
    timezone inferred from `start`/`end`, if any
|
[
"Generate",
"a",
"range",
"of",
"dates",
"with",
"the",
"spans",
"between",
"dates",
"described",
"by",
"the",
"given",
"freq",
"DateOffset",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/_ranges.py#L13-L79
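For a `Tick` frequency the function reduces to int64 nanosecond arithmetic plus `np.arange`; a rough standalone rendering of the periods-is-None branch (timestamps are illustrative):

import numpy as np
import pandas as pd

start = pd.Timestamp("2019-01-01")
end = pd.Timestamp("2019-01-01 00:00:03")
stride = pd.Timedelta("1s").value          # 1_000_000_000 ns

b = start.value
# Snap `end` onto the stride grid, then step half a stride past it
# so np.arange includes the final point (mirrors the code above).
e = b + (end.value - b) // stride * stride + stride // 2 + 1
values = np.arange(b, e, stride, dtype=np.int64)
print(pd.to_datetime(values))
# four points, 2019-01-01 00:00:00 through 00:00:03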
|
20,466
|
pandas-dev/pandas
|
pandas/core/arrays/_ranges.py
|
_generate_range_overflow_safe
|
def _generate_range_overflow_safe(endpoint, periods, stride, side='start'):
"""
Calculate the second endpoint for passing to np.arange, checking
to avoid an integer overflow. Catch OverflowError and re-raise
as OutOfBoundsDatetime.
Parameters
----------
endpoint : int
nanosecond timestamp of the known endpoint of the desired range
periods : int
number of periods in the desired range
stride : int
nanoseconds between periods in the desired range
side : {'start', 'end'}
which end of the range `endpoint` refers to
Returns
-------
other_end : int
Raises
------
OutOfBoundsDatetime
"""
# GH#14187 raise instead of incorrectly wrapping around
assert side in ['start', 'end']
i64max = np.uint64(np.iinfo(np.int64).max)
msg = ('Cannot generate range with {side}={endpoint} and '
'periods={periods}'
.format(side=side, endpoint=endpoint, periods=periods))
with np.errstate(over="raise"):
# if periods * strides cannot be multiplied within the *uint64* bounds,
# we cannot salvage the operation by recursing, so raise
try:
addend = np.uint64(periods) * np.uint64(np.abs(stride))
except FloatingPointError:
raise OutOfBoundsDatetime(msg)
if np.abs(addend) <= i64max:
# relatively easy case without casting concerns
return _generate_range_overflow_safe_signed(
endpoint, periods, stride, side)
elif ((endpoint > 0 and side == 'start' and stride > 0) or
(endpoint < 0 and side == 'end' and stride > 0)):
# no chance of not-overflowing
raise OutOfBoundsDatetime(msg)
elif (side == 'end' and endpoint > i64max and endpoint - stride <= i64max):
# in _generate_regular_range we added `stride` thereby overflowing
# the bounds. Adjust to fix this.
return _generate_range_overflow_safe(endpoint - stride,
periods - 1, stride, side)
# split into smaller pieces
mid_periods = periods // 2
remaining = periods - mid_periods
assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
midpoint = _generate_range_overflow_safe(endpoint, mid_periods,
stride, side)
return _generate_range_overflow_safe(midpoint, remaining, stride, side)
|
python
|
def _generate_range_overflow_safe(endpoint, periods, stride, side='start'):
"""
Calculate the second endpoint for passing to np.arange, checking
to avoid an integer overflow. Catch OverflowError and re-raise
as OutOfBoundsDatetime.
Parameters
----------
endpoint : int
nanosecond timestamp of the known endpoint of the desired range
periods : int
number of periods in the desired range
stride : int
nanoseconds between periods in the desired range
side : {'start', 'end'}
which end of the range `endpoint` refers to
Returns
-------
other_end : int
Raises
------
OutOfBoundsDatetime
"""
# GH#14187 raise instead of incorrectly wrapping around
assert side in ['start', 'end']
i64max = np.uint64(np.iinfo(np.int64).max)
msg = ('Cannot generate range with {side}={endpoint} and '
'periods={periods}'
.format(side=side, endpoint=endpoint, periods=periods))
with np.errstate(over="raise"):
# if periods * strides cannot be multiplied within the *uint64* bounds,
# we cannot salvage the operation by recursing, so raise
try:
addend = np.uint64(periods) * np.uint64(np.abs(stride))
except FloatingPointError:
raise OutOfBoundsDatetime(msg)
if np.abs(addend) <= i64max:
# relatively easy case without casting concerns
return _generate_range_overflow_safe_signed(
endpoint, periods, stride, side)
elif ((endpoint > 0 and side == 'start' and stride > 0) or
(endpoint < 0 and side == 'end' and stride > 0)):
# no chance of not-overflowing
raise OutOfBoundsDatetime(msg)
elif (side == 'end' and endpoint > i64max and endpoint - stride <= i64max):
# in _generate_regular_range we added `stride` thereby overflowing
# the bounds. Adjust to fix this.
return _generate_range_overflow_safe(endpoint - stride,
periods - 1, stride, side)
# split into smaller pieces
mid_periods = periods // 2
remaining = periods - mid_periods
assert 0 < remaining < periods, (remaining, periods, endpoint, stride)
midpoint = _generate_range_overflow_safe(endpoint, mid_periods,
stride, side)
return _generate_range_overflow_safe(midpoint, remaining, stride, side)
|
[
"def",
"_generate_range_overflow_safe",
"(",
"endpoint",
",",
"periods",
",",
"stride",
",",
"side",
"=",
"'start'",
")",
":",
"# GH#14187 raise instead of incorrectly wrapping around",
"assert",
"side",
"in",
"[",
"'start'",
",",
"'end'",
"]",
"i64max",
"=",
"np",
".",
"uint64",
"(",
"np",
".",
"iinfo",
"(",
"np",
".",
"int64",
")",
".",
"max",
")",
"msg",
"=",
"(",
"'Cannot generate range with {side}={endpoint} and '",
"'periods={periods}'",
".",
"format",
"(",
"side",
"=",
"side",
",",
"endpoint",
"=",
"endpoint",
",",
"periods",
"=",
"periods",
")",
")",
"with",
"np",
".",
"errstate",
"(",
"over",
"=",
"\"raise\"",
")",
":",
"# if periods * strides cannot be multiplied within the *uint64* bounds,",
"# we cannot salvage the operation by recursing, so raise",
"try",
":",
"addend",
"=",
"np",
".",
"uint64",
"(",
"periods",
")",
"*",
"np",
".",
"uint64",
"(",
"np",
".",
"abs",
"(",
"stride",
")",
")",
"except",
"FloatingPointError",
":",
"raise",
"OutOfBoundsDatetime",
"(",
"msg",
")",
"if",
"np",
".",
"abs",
"(",
"addend",
")",
"<=",
"i64max",
":",
"# relatively easy case without casting concerns",
"return",
"_generate_range_overflow_safe_signed",
"(",
"endpoint",
",",
"periods",
",",
"stride",
",",
"side",
")",
"elif",
"(",
"(",
"endpoint",
">",
"0",
"and",
"side",
"==",
"'start'",
"and",
"stride",
">",
"0",
")",
"or",
"(",
"endpoint",
"<",
"0",
"and",
"side",
"==",
"'end'",
"and",
"stride",
">",
"0",
")",
")",
":",
"# no chance of not-overflowing",
"raise",
"OutOfBoundsDatetime",
"(",
"msg",
")",
"elif",
"(",
"side",
"==",
"'end'",
"and",
"endpoint",
">",
"i64max",
"and",
"endpoint",
"-",
"stride",
"<=",
"i64max",
")",
":",
"# in _generate_regular_range we added `stride` thereby overflowing",
"# the bounds. Adjust to fix this.",
"return",
"_generate_range_overflow_safe",
"(",
"endpoint",
"-",
"stride",
",",
"periods",
"-",
"1",
",",
"stride",
",",
"side",
")",
"# split into smaller pieces",
"mid_periods",
"=",
"periods",
"//",
"2",
"remaining",
"=",
"periods",
"-",
"mid_periods",
"assert",
"0",
"<",
"remaining",
"<",
"periods",
",",
"(",
"remaining",
",",
"periods",
",",
"endpoint",
",",
"stride",
")",
"midpoint",
"=",
"_generate_range_overflow_safe",
"(",
"endpoint",
",",
"mid_periods",
",",
"stride",
",",
"side",
")",
"return",
"_generate_range_overflow_safe",
"(",
"midpoint",
",",
"remaining",
",",
"stride",
",",
"side",
")"
] |
Calculate the second endpoint for passing to np.arange, checking
to avoid an integer overflow. Catch OverflowError and re-raise
as OutOfBoundsDatetime.
Parameters
----------
endpoint : int
nanosecond timestamp of the known endpoint of the desired range
periods : int
number of periods in the desired range
stride : int
nanoseconds between periods in the desired range
side : {'start', 'end'}
which end of the range `endpoint` refers to
Returns
-------
other_end : int
Raises
------
OutOfBoundsDatetime
|
[
"Calculate",
"the",
"second",
"endpoint",
"for",
"passing",
"to",
"np",
".",
"arange",
"checking",
"to",
"avoid",
"an",
"integer",
"overflow",
".",
"Catch",
"OverflowError",
"and",
"re",
"-",
"raise",
"as",
"OutOfBoundsDatetime",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/_ranges.py#L82-L146
|
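A note on the record above: the guard works because NumPy reports scalar integer overflow through the floating-point error state, so np.errstate(over="raise") turns a silent uint64 wraparound into a catchable FloatingPointError. A minimal sketch, with hypothetical magnitudes chosen only to trip the guard:

import numpy as np

# Hypothetical magnitudes: periods * stride exceeds the uint64 range.
periods, stride = 10 ** 15, 10 ** 5

i64max = np.uint64(np.iinfo(np.int64).max)
with np.errstate(over="raise"):
    try:
        addend = np.uint64(periods) * np.uint64(abs(stride))
        print("fits in int64:", addend <= i64max)
    except FloatingPointError:
        # This is the branch _generate_range_overflow_safe catches and
        # re-raises as OutOfBoundsDatetime.
        print("periods * stride overflows uint64")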
20,467
|
pandas-dev/pandas
|
pandas/_config/localization.py
|
set_locale
|
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""
Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if all(x is not None for x in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
|
python
|
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""
Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if all(x is not None for x in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
|
[
"def",
"set_locale",
"(",
"new_locale",
",",
"lc_var",
"=",
"locale",
".",
"LC_ALL",
")",
":",
"current_locale",
"=",
"locale",
".",
"getlocale",
"(",
")",
"try",
":",
"locale",
".",
"setlocale",
"(",
"lc_var",
",",
"new_locale",
")",
"normalized_locale",
"=",
"locale",
".",
"getlocale",
"(",
")",
"if",
"all",
"(",
"x",
"is",
"not",
"None",
"for",
"x",
"in",
"normalized_locale",
")",
":",
"yield",
"'.'",
".",
"join",
"(",
"normalized_locale",
")",
"else",
":",
"yield",
"new_locale",
"finally",
":",
"locale",
".",
"setlocale",
"(",
"lc_var",
",",
"current_locale",
")"
] |
Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
|
[
"Context",
"manager",
"for",
"temporarily",
"setting",
"a",
"locale",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/localization.py#L15-L44
|
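A usage sketch for the set_locale record above. It assumes the generator is wrapped with contextlib.contextmanager in the source module (the decorator is not captured in this record), and "en_US.UTF-8" is an assumed locale that may not exist on every system:

import locale

from pandas._config.localization import set_locale

try:
    with set_locale("en_US.UTF-8"):
        print(locale.getlocale())  # temporarily the requested locale
    print(locale.getlocale())      # prior locale restored on exit
except locale.Error:
    print("locale not installed on this system")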
20,468
|
pandas-dev/pandas
|
pandas/_config/localization.py
|
can_set_locale
|
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError, locale.Error):
# horrible name for an Exception subclass
return False
else:
return True
|
python
|
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError, locale.Error):
# horrible name for an Exception subclass
return False
else:
return True
|
[
"def",
"can_set_locale",
"(",
"lc",
",",
"lc_var",
"=",
"locale",
".",
"LC_ALL",
")",
":",
"try",
":",
"with",
"set_locale",
"(",
"lc",
",",
"lc_var",
"=",
"lc_var",
")",
":",
"pass",
"except",
"(",
"ValueError",
",",
"locale",
".",
"Error",
")",
":",
"# horrible name for a Exception subclass",
"return",
"False",
"else",
":",
"return",
"True"
] |
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
|
[
"Check",
"to",
"see",
"if",
"we",
"can",
"set",
"a",
"locale",
"and",
"subsequently",
"get",
"the",
"locale",
"without",
"raising",
"an",
"Exception",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/localization.py#L47-L72
|
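A short usage sketch for can_set_locale; results are platform-dependent, and "xx_XX.bogus" is a deliberately invalid placeholder:

from pandas._config.localization import can_set_locale

print(can_set_locale("en_US.UTF-8"))  # True where the locale is installed
print(can_set_locale("xx_XX.bogus"))  # False: setlocale raises locale.Error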
20,469
|
pandas-dev/pandas
|
pandas/_config/localization.py
|
_valid_locales
|
def _valid_locales(locales, normalize):
"""
Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : list of str
A list of candidate locale strings.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
|
python
|
def _valid_locales(locales, normalize):
"""
Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : list of str
A list of candidate locale strings.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
|
[
"def",
"_valid_locales",
"(",
"locales",
",",
"normalize",
")",
":",
"if",
"normalize",
":",
"normalizer",
"=",
"lambda",
"x",
":",
"locale",
".",
"normalize",
"(",
"x",
".",
"strip",
"(",
")",
")",
"else",
":",
"normalizer",
"=",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
")",
"return",
"list",
"(",
"filter",
"(",
"can_set_locale",
",",
"map",
"(",
"normalizer",
",",
"locales",
")",
")",
")"
] |
Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : list of str
A list of candidate locale strings.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
|
[
"Return",
"a",
"list",
"of",
"normalized",
"locales",
"that",
"do",
"not",
"throw",
"an",
"Exception",
"when",
"set",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/localization.py#L75-L97
|
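A sketch of _valid_locales in isolation. Callers pass an iterable of locale strings (a bare string would be mapped character by character); the candidate list below is hypothetical:

from pandas._config.localization import _valid_locales

candidates = ["en_US.UTF-8", "C", "xx_XX.bogus"]
# Entries that cannot actually be set are filtered out.
print(_valid_locales(candidates, normalize=False))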
20,470
|
pandas-dev/pandas
|
pandas/_config/localization.py
|
get_locales
|
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""
Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
out_locales.append(str(
x, encoding=options.display.encoding))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
|
python
|
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""
Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
out_locales.append(str(
x, encoding=options.display.encoding))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
|
[
"def",
"get_locales",
"(",
"prefix",
"=",
"None",
",",
"normalize",
"=",
"True",
",",
"locale_getter",
"=",
"_default_locale_getter",
")",
":",
"try",
":",
"raw_locales",
"=",
"locale_getter",
"(",
")",
"except",
"Exception",
":",
"return",
"None",
"try",
":",
"# raw_locales is \"\\n\" separated list of locales",
"# it may contain non-decodable parts, so split",
"# extract what we can and then rejoin.",
"raw_locales",
"=",
"raw_locales",
".",
"split",
"(",
"b'\\n'",
")",
"out_locales",
"=",
"[",
"]",
"for",
"x",
"in",
"raw_locales",
":",
"out_locales",
".",
"append",
"(",
"str",
"(",
"x",
",",
"encoding",
"=",
"options",
".",
"display",
".",
"encoding",
")",
")",
"except",
"TypeError",
":",
"pass",
"if",
"prefix",
"is",
"None",
":",
"return",
"_valid_locales",
"(",
"out_locales",
",",
"normalize",
")",
"pattern",
"=",
"re",
".",
"compile",
"(",
"'{prefix}.*'",
".",
"format",
"(",
"prefix",
"=",
"prefix",
")",
")",
"found",
"=",
"pattern",
".",
"findall",
"(",
"'\\n'",
".",
"join",
"(",
"out_locales",
")",
")",
"return",
"_valid_locales",
"(",
"found",
",",
"normalize",
")"
] |
Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
|
[
"Get",
"all",
"the",
"locales",
"that",
"are",
"available",
"on",
"the",
"system",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/localization.py#L109-L162
|
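Usage sketch for get_locales. The default locale_getter shells out to `locale -a` (not shown in this record), so the function returns None on systems that cannot enumerate locales; note it also assumes the getter returns bytes, since out_locales is never bound if the split raises TypeError first.

from pandas._config.localization import get_locales

# All English-language locales installed on this machine (None on e.g. Windows).
print(get_locales(prefix="en"))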
20,471
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
ensure_float
|
def ensure_float(arr):
"""
Ensure that an array object has a float dtype if possible.
Parameters
----------
arr : array-like
The array whose data type we want to enforce as float.
Returns
-------
float_arr : The original array cast to the float dtype if
possible. Otherwise, the original array is returned.
"""
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
|
python
|
def ensure_float(arr):
"""
Ensure that an array object has a float dtype if possible.
Parameters
----------
arr : array-like
The array whose data type we want to enforce as float.
Returns
-------
float_arr : The original array cast to the float dtype if
possible. Otherwise, the original array is returned.
"""
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
|
[
"def",
"ensure_float",
"(",
"arr",
")",
":",
"if",
"issubclass",
"(",
"arr",
".",
"dtype",
".",
"type",
",",
"(",
"np",
".",
"integer",
",",
"np",
".",
"bool_",
")",
")",
":",
"arr",
"=",
"arr",
".",
"astype",
"(",
"float",
")",
"return",
"arr"
] |
Ensure that an array object has a float dtype if possible.
Parameters
----------
arr : array-like
The array whose data type we want to enforce as float.
Returns
-------
float_arr : The original array cast to the float dtype if
possible. Otherwise, the original array is returned.
|
[
"Ensure",
"that",
"an",
"array",
"object",
"has",
"a",
"float",
"dtype",
"if",
"possible",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L40-L57
|
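A brief sketch of ensure_float's casting rule: integer and boolean arrays are promoted, everything else passes through untouched.

import numpy as np

from pandas.core.dtypes.common import ensure_float

print(ensure_float(np.array([1, 2, 3])).dtype)      # float64: ints are cast
print(ensure_float(np.array([True, False])).dtype)  # float64: bools are cast
print(ensure_float(np.array([1.5, 2.5])).dtype)     # float64 input returned as-is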
20,472
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
ensure_int64_or_float64
|
def ensure_int64_or_float64(arr, copy=False):
"""
Ensure that a dtype array of some integer dtype
has an int64 dtype if possible.
If it's not possible, potentially because of overflow,
convert the array to float64 instead.
Parameters
----------
arr : array-like
The array whose data type we want to enforce.
copy: boolean
Whether to copy the original array or reuse
it in place, if possible.
Returns
-------
out_arr : The input array cast as int64 if
possible without overflow.
Otherwise the input array cast to float64.
"""
try:
return arr.astype('int64', copy=copy, casting='safe')
except TypeError:
return arr.astype('float64', copy=copy)
|
python
|
def ensure_int64_or_float64(arr, copy=False):
"""
Ensure that a dtype array of some integer dtype
has an int64 dtype if possible.
If it's not possible, potentially because of overflow,
convert the array to float64 instead.
Parameters
----------
arr : array-like
The array whose data type we want to enforce.
copy: boolean
Whether to copy the original array or reuse
it in place, if possible.
Returns
-------
out_arr : The input array cast as int64 if
possible without overflow.
Otherwise the input array cast to float64.
"""
try:
return arr.astype('int64', copy=copy, casting='safe')
except TypeError:
return arr.astype('float64', copy=copy)
|
[
"def",
"ensure_int64_or_float64",
"(",
"arr",
",",
"copy",
"=",
"False",
")",
":",
"try",
":",
"return",
"arr",
".",
"astype",
"(",
"'int64'",
",",
"copy",
"=",
"copy",
",",
"casting",
"=",
"'safe'",
")",
"except",
"TypeError",
":",
"return",
"arr",
".",
"astype",
"(",
"'float64'",
",",
"copy",
"=",
"copy",
")"
] |
Ensure that a dtype array of some integer dtype
has an int64 dtype if possible.
If it's not possible, potentially because of overflow,
convert the array to float64 instead.
Parameters
----------
arr : array-like
The array whose data type we want to enforce.
copy: boolean
Whether to copy the original array or reuse
it in place, if possible.
Returns
-------
out_arr : The input array cast as int64 if
possible without overflow.
Otherwise the input array cast to float64.
|
[
"Ensure",
"that",
"an",
"dtype",
"array",
"of",
"some",
"integer",
"dtype",
"has",
"an",
"int64",
"dtype",
"if",
"possible",
"If",
"it",
"s",
"not",
"possible",
"potentially",
"because",
"of",
"overflow",
"convert",
"the",
"array",
"to",
"float64",
"instead",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L90-L114
|
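A sketch showing both branches of ensure_int64_or_float64: a safe cast succeeds, while uint64 -> int64 is never a safe cast, so any uint64 input triggers the float64 fallback.

import numpy as np

from pandas.core.dtypes.common import ensure_int64_or_float64

print(ensure_int64_or_float64(np.array([1, 2], dtype="int32")).dtype)      # int64
# astype(..., casting='safe') raises TypeError here, so we fall back.
print(ensure_int64_or_float64(np.array([2 ** 63], dtype="uint64")).dtype)  # float64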
20,473
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
classes_and_not_datetimelike
|
def classes_and_not_datetimelike(*klasses):
"""
evaluate if the tipo is a subclass of the klasses
and not a datetimelike
"""
return lambda tipo: (issubclass(tipo, klasses) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
|
python
|
def classes_and_not_datetimelike(*klasses):
"""
evaluate if the tipo is a subclass of the klasses
and not a datetimelike
"""
return lambda tipo: (issubclass(tipo, klasses) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
|
[
"def",
"classes_and_not_datetimelike",
"(",
"*",
"klasses",
")",
":",
"return",
"lambda",
"tipo",
":",
"(",
"issubclass",
"(",
"tipo",
",",
"klasses",
")",
"and",
"not",
"issubclass",
"(",
"tipo",
",",
"(",
"np",
".",
"datetime64",
",",
"np",
".",
"timedelta64",
")",
")",
")"
] |
evaluate if the tipo is a subclass of the klasses
and not a datetimelike
|
[
"evaluate",
"if",
"the",
"tipo",
"is",
"a",
"subclass",
"of",
"the",
"klasses",
"and",
"not",
"a",
"datetimelike"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L122-L128
|
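Why the datetimelike exclusion in the record above is needed: in NumPy's scalar hierarchy np.timedelta64 inherits from np.signedinteger, so a plain issubclass check against np.integer would match it.

import numpy as np

from pandas.core.dtypes.common import classes_and_not_datetimelike

print(issubclass(np.timedelta64, np.integer))  # True, perhaps surprisingly

is_int_like = classes_and_not_datetimelike(np.integer)
print(is_int_like(np.int64))        # True
print(is_int_like(np.timedelta64))  # False: excluded explicitly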
20,474
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_sparse
|
def is_sparse(arr):
"""
Check whether an array-like is a 1-D pandas sparse array.
Check that the one-dimensional array-like is a pandas sparse array.
Returns True if it is a pandas sparse array, not another type of
sparse array.
Parameters
----------
arr : array-like
Array-like to check.
Returns
-------
bool
Whether or not the array-like is a pandas sparse array.
See Also
--------
DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame.
Series.to_sparse : Convert Series to SparseSeries.
Series.to_dense : Return dense representation of a Series.
Examples
--------
Returns `True` if the parameter is a 1-D pandas sparse array.
>>> is_sparse(pd.SparseArray([0, 0, 1, 0]))
True
>>> is_sparse(pd.SparseSeries([0, 0, 1, 0]))
True
Returns `False` if the parameter is not sparse.
>>> is_sparse(np.array([0, 0, 1, 0]))
False
>>> is_sparse(pd.Series([0, 1, 0, 0]))
False
Returns `False` if the parameter is not a pandas sparse array.
>>> from scipy.sparse import bsr_matrix
>>> is_sparse(bsr_matrix([0, 1, 0, 0]))
False
Returns `False` if the parameter has more than one dimension.
>>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan],
...                         columns=['max_speed'],
...                         index=['falcon', 'parrot', 'lion', 'monkey'])
>>> is_sparse(df)
False
>>> is_sparse(df.max_speed)
True
"""
from pandas.core.arrays.sparse import SparseDtype
dtype = getattr(arr, 'dtype', arr)
return isinstance(dtype, SparseDtype)
|
python
|
def is_sparse(arr):
"""
Check whether an array-like is a 1-D pandas sparse array.
Check that the one-dimensional array-like is a pandas sparse array.
Returns True if it is a pandas sparse array, not another type of
sparse array.
Parameters
----------
arr : array-like
Array-like to check.
Returns
-------
bool
Whether or not the array-like is a pandas sparse array.
See Also
--------
DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame.
Series.to_sparse : Convert Series to SparseSeries.
Series.to_dense : Return dense representation of a Series.
Examples
--------
Returns `True` if the parameter is a 1-D pandas sparse array.
>>> is_sparse(pd.SparseArray([0, 0, 1, 0]))
True
>>> is_sparse(pd.SparseSeries([0, 0, 1, 0]))
True
Returns `False` if the parameter is not sparse.
>>> is_sparse(np.array([0, 0, 1, 0]))
False
>>> is_sparse(pd.Series([0, 1, 0, 0]))
False
Returns `False` if the parameter is not a pandas sparse array.
>>> from scipy.sparse import bsr_matrix
>>> is_sparse(bsr_matrix([0, 1, 0, 0]))
False
Returns `False` if the parameter has more than one dimension.
>>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan],
...                         columns=['max_speed'],
...                         index=['falcon', 'parrot', 'lion', 'monkey'])
>>> is_sparse(df)
False
>>> is_sparse(df.max_speed)
True
"""
from pandas.core.arrays.sparse import SparseDtype
dtype = getattr(arr, 'dtype', arr)
return isinstance(dtype, SparseDtype)
|
[
"def",
"is_sparse",
"(",
"arr",
")",
":",
"from",
"pandas",
".",
"core",
".",
"arrays",
".",
"sparse",
"import",
"SparseDtype",
"dtype",
"=",
"getattr",
"(",
"arr",
",",
"'dtype'",
",",
"arr",
")",
"return",
"isinstance",
"(",
"dtype",
",",
"SparseDtype",
")"
] |
Check whether an array-like is a 1-D pandas sparse array.
Check that the one-dimensional array-like is a pandas sparse array.
Returns True if it is a pandas sparse array, not another type of
sparse array.
Parameters
----------
arr : array-like
Array-like to check.
Returns
-------
bool
Whether or not the array-like is a pandas sparse array.
See Also
--------
DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame.
Series.to_sparse : Convert Series to SparseSeries.
Series.to_dense : Return dense representation of a Series.
Examples
--------
Returns `True` if the parameter is a 1-D pandas sparse array.
>>> is_sparse(pd.SparseArray([0, 0, 1, 0]))
True
>>> is_sparse(pd.SparseSeries([0, 0, 1, 0]))
True
Returns `False` if the parameter is not sparse.
>>> is_sparse(np.array([0, 0, 1, 0]))
False
>>> is_sparse(pd.Series([0, 1, 0, 0]))
False
Returns `False` if the parameter is not a pandas sparse array.
>>> from scipy.sparse import bsr_matrix
>>> is_sparse(bsr_matrix([0, 1, 0, 0]))
False
Returns `False` if the parameter has more than one dimension.
>>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan],
...                         columns=['max_speed'],
...                         index=['falcon', 'parrot', 'lion', 'monkey'])
>>> is_sparse(df)
False
>>> is_sparse(df.max_speed)
True
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"is",
"a",
"1",
"-",
"D",
"pandas",
"sparse",
"array",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L161-L220
|
20,475
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_scipy_sparse
|
def is_scipy_sparse(arr):
"""
Check whether an array-like is a scipy.sparse.spmatrix instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a scipy.sparse.spmatrix instance.
Notes
-----
If scipy is not installed, this function will always return False.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
True
>>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))
False
>>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
False
"""
global _is_scipy_sparse
if _is_scipy_sparse is None:
try:
from scipy.sparse import issparse as _is_scipy_sparse
except ImportError:
_is_scipy_sparse = lambda _: False
return _is_scipy_sparse(arr)
|
python
|
def is_scipy_sparse(arr):
"""
Check whether an array-like is a scipy.sparse.spmatrix instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a scipy.sparse.spmatrix instance.
Notes
-----
If scipy is not installed, this function will always return False.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
True
>>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))
False
>>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
False
"""
global _is_scipy_sparse
if _is_scipy_sparse is None:
try:
from scipy.sparse import issparse as _is_scipy_sparse
except ImportError:
_is_scipy_sparse = lambda _: False
return _is_scipy_sparse(arr)
|
[
"def",
"is_scipy_sparse",
"(",
"arr",
")",
":",
"global",
"_is_scipy_sparse",
"if",
"_is_scipy_sparse",
"is",
"None",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"issparse",
"as",
"_is_scipy_sparse",
"except",
"ImportError",
":",
"_is_scipy_sparse",
"=",
"lambda",
"_",
":",
"False",
"return",
"_is_scipy_sparse",
"(",
"arr",
")"
] |
Check whether an array-like is a scipy.sparse.spmatrix instance.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a scipy.sparse.spmatrix instance.
Notes
-----
If scipy is not installed, this function will always return False.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> is_scipy_sparse(bsr_matrix([1, 2, 3]))
True
>>> is_scipy_sparse(pd.SparseArray([1, 2, 3]))
False
>>> is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
False
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"is",
"a",
"scipy",
".",
"sparse",
".",
"spmatrix",
"instance",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L223-L260
|
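The record above uses a memoized lazy import: the scipy probe is resolved once, on first call, and an ImportError is cached as a constant-False predicate so the import cost is paid at most once. A generic sketch of the same pattern with illustrative names (not part of pandas):

_checker = None

def uses_optional_dep(obj):
    """Hypothetical predicate backed by an optional dependency."""
    global _checker
    if _checker is None:
        try:
            # The import statement binds the module-level name because of
            # the `global` declaration above.
            from scipy.sparse import issparse as _checker
        except ImportError:
            _checker = lambda _: False  # cache the failure; never retry
    return _checker(obj)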
20,476
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_offsetlike
|
def is_offsetlike(arr_or_obj):
"""
Check if obj or all elements of list-like is DateOffset
Parameters
----------
arr_or_obj : object
Returns
-------
boolean
Whether the object is a DateOffset or listlike of DateOffsets
Examples
--------
>>> is_offsetlike(pd.DateOffset(days=1))
True
>>> is_offsetlike('offset')
False
>>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()])
True
>>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()]))
False
"""
if isinstance(arr_or_obj, ABCDateOffset):
return True
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
is_object_dtype(arr_or_obj)):
return all(isinstance(x, ABCDateOffset) for x in arr_or_obj)
return False
|
python
|
def is_offsetlike(arr_or_obj):
"""
Check if obj or all elements of list-like is DateOffset
Parameters
----------
arr_or_obj : object
Returns
-------
boolean
Whether the object is a DateOffset or listlike of DateOffsets
Examples
--------
>>> is_offsetlike(pd.DateOffset(days=1))
True
>>> is_offsetlike('offset')
False
>>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()])
True
>>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()]))
False
"""
if isinstance(arr_or_obj, ABCDateOffset):
return True
elif (is_list_like(arr_or_obj) and len(arr_or_obj) and
is_object_dtype(arr_or_obj)):
return all(isinstance(x, ABCDateOffset) for x in arr_or_obj)
return False
|
[
"def",
"is_offsetlike",
"(",
"arr_or_obj",
")",
":",
"if",
"isinstance",
"(",
"arr_or_obj",
",",
"ABCDateOffset",
")",
":",
"return",
"True",
"elif",
"(",
"is_list_like",
"(",
"arr_or_obj",
")",
"and",
"len",
"(",
"arr_or_obj",
")",
"and",
"is_object_dtype",
"(",
"arr_or_obj",
")",
")",
":",
"return",
"all",
"(",
"isinstance",
"(",
"x",
",",
"ABCDateOffset",
")",
"for",
"x",
"in",
"arr_or_obj",
")",
"return",
"False"
] |
Check if obj or all elements of list-like is DateOffset
Parameters
----------
arr_or_obj : object
Returns
-------
boolean
Whether the object is a DateOffset or listlike of DateOffsets
Examples
--------
>>> is_offsetlike(pd.DateOffset(days=1))
True
>>> is_offsetlike('offset')
False
>>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()])
True
>>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()]))
False
|
[
"Check",
"if",
"obj",
"or",
"all",
"elements",
"of",
"list",
"-",
"like",
"is",
"DateOffset"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L343-L372
|
20,477
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_period
|
def is_period(arr):
"""
Check whether an array-like is a periodical index.
.. deprecated:: 0.24.0
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical index.
Examples
--------
>>> is_period([1, 2, 3])
False
>>> is_period(pd.Index([1, 2, 3]))
False
>>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
warnings.warn("'is_period' is deprecated and will be removed in a future "
"version. Use 'is_period_dtype' or is_period_arraylike' "
"instead.", FutureWarning, stacklevel=2)
return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)
|
python
|
def is_period(arr):
"""
Check whether an array-like is a periodical index.
.. deprecated:: 0.24.0
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical index.
Examples
--------
>>> is_period([1, 2, 3])
False
>>> is_period(pd.Index([1, 2, 3]))
False
>>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
warnings.warn("'is_period' is deprecated and will be removed in a future "
"version. Use 'is_period_dtype' or is_period_arraylike' "
"instead.", FutureWarning, stacklevel=2)
return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)
|
[
"def",
"is_period",
"(",
"arr",
")",
":",
"warnings",
".",
"warn",
"(",
"\"'is_period' is deprecated and will be removed in a future \"",
"\"version. Use 'is_period_dtype' or is_period_arraylike' \"",
"\"instead.\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"isinstance",
"(",
"arr",
",",
"ABCPeriodIndex",
")",
"or",
"is_period_arraylike",
"(",
"arr",
")"
] |
Check whether an array-like is a periodical index.
.. deprecated:: 0.24.0
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical index.
Examples
--------
>>> is_period([1, 2, 3])
False
>>> is_period(pd.Index([1, 2, 3]))
False
>>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"is",
"a",
"periodical",
"index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L375-L405
|
20,478
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_string_dtype
|
def is_string_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the string dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
"""
# TODO: gh-15585: consider making the checks stricter.
def condition(dtype):
return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)
return _is_dtype(arr_or_dtype, condition)
|
python
|
def is_string_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of the string dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
"""
# TODO: gh-15585: consider making the checks stricter.
def condition(dtype):
return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype)
return _is_dtype(arr_or_dtype, condition)
|
[
"def",
"is_string_dtype",
"(",
"arr_or_dtype",
")",
":",
"# TODO: gh-15585: consider making the checks stricter.",
"def",
"condition",
"(",
"dtype",
")",
":",
"return",
"dtype",
".",
"kind",
"in",
"(",
"'O'",
",",
"'S'",
",",
"'U'",
")",
"and",
"not",
"is_period_dtype",
"(",
"dtype",
")",
"return",
"_is_dtype",
"(",
"arr_or_dtype",
",",
"condition",
")"
] |
Check whether the provided array or dtype is of the string dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of the string dtype.
Examples
--------
>>> is_string_dtype(str)
True
>>> is_string_dtype(object)
True
>>> is_string_dtype(int)
False
>>>
>>> is_string_dtype(np.array(['a', 'b']))
True
>>> is_string_dtype(pd.Series([1, 2]))
False
|
[
"Check",
"whether",
"the",
"provided",
"array",
"or",
"dtype",
"is",
"of",
"the",
"string",
"dtype",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L611-L643
|
20,479
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_period_arraylike
|
def is_period_arraylike(arr):
"""
Check whether an array-like is a periodical array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical array-like or
PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return is_period_dtype(arr.dtype)
return getattr(arr, 'inferred_type', None) == 'period'
|
python
|
def is_period_arraylike(arr):
"""
Check whether an array-like is a periodical array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical array-like or
PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
"""
if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return is_period_dtype(arr.dtype)
return getattr(arr, 'inferred_type', None) == 'period'
|
[
"def",
"is_period_arraylike",
"(",
"arr",
")",
":",
"if",
"isinstance",
"(",
"arr",
",",
"(",
"ABCPeriodIndex",
",",
"ABCPeriodArray",
")",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"arr",
",",
"(",
"np",
".",
"ndarray",
",",
"ABCSeries",
")",
")",
":",
"return",
"is_period_dtype",
"(",
"arr",
".",
"dtype",
")",
"return",
"getattr",
"(",
"arr",
",",
"'inferred_type'",
",",
"None",
")",
"==",
"'period'"
] |
Check whether an array-like is a periodical array-like or PeriodIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a periodical array-like or
PeriodIndex instance.
Examples
--------
>>> is_period_arraylike([1, 2, 3])
False
>>> is_period_arraylike(pd.Index([1, 2, 3]))
False
>>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
True
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"is",
"a",
"periodical",
"array",
"-",
"like",
"or",
"PeriodIndex",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L646-L675
|
20,480
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_datetime_arraylike
|
def is_datetime_arraylike(arr):
"""
Check whether an array-like is a datetime array-like or DatetimeIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a datetime array-like or
DatetimeIndex.
Examples
--------
>>> is_datetime_arraylike([1, 2, 3])
False
>>> is_datetime_arraylike(pd.Index([1, 2, 3]))
False
>>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
True
"""
if isinstance(arr, ABCDatetimeIndex):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return (is_object_dtype(arr.dtype)
and lib.infer_dtype(arr, skipna=False) == 'datetime')
return getattr(arr, 'inferred_type', None) == 'datetime'
|
python
|
def is_datetime_arraylike(arr):
"""
Check whether an array-like is a datetime array-like or DatetimeIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a datetime array-like or
DatetimeIndex.
Examples
--------
>>> is_datetime_arraylike([1, 2, 3])
False
>>> is_datetime_arraylike(pd.Index([1, 2, 3]))
False
>>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
True
"""
if isinstance(arr, ABCDatetimeIndex):
return True
elif isinstance(arr, (np.ndarray, ABCSeries)):
return (is_object_dtype(arr.dtype)
and lib.infer_dtype(arr, skipna=False) == 'datetime')
return getattr(arr, 'inferred_type', None) == 'datetime'
|
[
"def",
"is_datetime_arraylike",
"(",
"arr",
")",
":",
"if",
"isinstance",
"(",
"arr",
",",
"ABCDatetimeIndex",
")",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"arr",
",",
"(",
"np",
".",
"ndarray",
",",
"ABCSeries",
")",
")",
":",
"return",
"(",
"is_object_dtype",
"(",
"arr",
".",
"dtype",
")",
"and",
"lib",
".",
"infer_dtype",
"(",
"arr",
",",
"skipna",
"=",
"False",
")",
"==",
"'datetime'",
")",
"return",
"getattr",
"(",
"arr",
",",
"'inferred_type'",
",",
"None",
")",
"==",
"'datetime'"
] |
Check whether an array-like is a datetime array-like or DatetimeIndex.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a datetime array-like or
DatetimeIndex.
Examples
--------
>>> is_datetime_arraylike([1, 2, 3])
False
>>> is_datetime_arraylike(pd.Index([1, 2, 3]))
False
>>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
True
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"is",
"a",
"datetime",
"array",
"-",
"like",
"or",
"DatetimeIndex",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L678-L708
|
20,481
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_datetimelike
|
def is_datetimelike(arr):
"""
Check whether an array-like is a datetime-like array-like.
Acceptable datetime-like objects are (but not limited to) datetime
indices, periodic indices, and timedelta indices.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a datetime-like array-like.
Examples
--------
>>> is_datetimelike([1, 2, 3])
False
>>> is_datetimelike(pd.Index([1, 2, 3]))
False
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
True
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> is_datetimelike(pd.PeriodIndex([], freq="A"))
True
>>> is_datetimelike(np.array([], dtype=np.datetime64))
True
>>> is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetimelike(s)
True
"""
return (is_datetime64_dtype(arr) or is_datetime64tz_dtype(arr) or
is_timedelta64_dtype(arr) or
isinstance(arr, ABCPeriodIndex))
|
python
|
def is_datetimelike(arr):
"""
Check whether an array-like is a datetime-like array-like.
Acceptable datetime-like objects are (but not limited to) datetime
indices, periodic indices, and timedelta indices.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a datetime-like array-like.
Examples
--------
>>> is_datetimelike([1, 2, 3])
False
>>> is_datetimelike(pd.Index([1, 2, 3]))
False
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
True
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> is_datetimelike(pd.PeriodIndex([], freq="A"))
True
>>> is_datetimelike(np.array([], dtype=np.datetime64))
True
>>> is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetimelike(s)
True
"""
return (is_datetime64_dtype(arr) or is_datetime64tz_dtype(arr) or
is_timedelta64_dtype(arr) or
isinstance(arr, ABCPeriodIndex))
|
[
"def",
"is_datetimelike",
"(",
"arr",
")",
":",
"return",
"(",
"is_datetime64_dtype",
"(",
"arr",
")",
"or",
"is_datetime64tz_dtype",
"(",
"arr",
")",
"or",
"is_timedelta64_dtype",
"(",
"arr",
")",
"or",
"isinstance",
"(",
"arr",
",",
"ABCPeriodIndex",
")",
")"
] |
Check whether an array-like is a datetime-like array-like.
Acceptable datetime-like objects are (but not limited to) datetime
indices, periodic indices, and timedelta indices.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is a datetime-like array-like.
Examples
--------
>>> is_datetimelike([1, 2, 3])
False
>>> is_datetimelike(pd.Index([1, 2, 3]))
False
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
True
>>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>> is_datetimelike(pd.PeriodIndex([], freq="A"))
True
>>> is_datetimelike(np.array([], dtype=np.datetime64))
True
>>> is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_datetimelike(s)
True
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"is",
"a",
"datetime",
"-",
"like",
"array",
"-",
"like",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L711-L753
|
20,482
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_dtype_equal
|
def is_dtype_equal(source, target):
"""
Check if two dtypes are equal.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean
Whether or not the two dtypes are equal.
Examples
--------
>>> is_dtype_equal(int, float)
False
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(object, "category")
False
>>> is_dtype_equal(CategoricalDtype(), "category")
True
>>> is_dtype_equal(DatetimeTZDtype(), "datetime64")
False
"""
try:
source = _get_dtype(source)
target = _get_dtype(target)
return source == target
except (TypeError, AttributeError):
# invalid comparison
# object == category will hit this
return False
|
python
|
def is_dtype_equal(source, target):
"""
Check if two dtypes are equal.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean
Whether or not the two dtypes are equal.
Examples
--------
>>> is_dtype_equal(int, float)
False
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(object, "category")
False
>>> is_dtype_equal(CategoricalDtype(), "category")
True
>>> is_dtype_equal(DatetimeTZDtype(), "datetime64")
False
"""
try:
source = _get_dtype(source)
target = _get_dtype(target)
return source == target
except (TypeError, AttributeError):
# invalid comparison
# object == category will hit this
return False
|
[
"def",
"is_dtype_equal",
"(",
"source",
",",
"target",
")",
":",
"try",
":",
"source",
"=",
"_get_dtype",
"(",
"source",
")",
"target",
"=",
"_get_dtype",
"(",
"target",
")",
"return",
"source",
"==",
"target",
"except",
"(",
"TypeError",
",",
"AttributeError",
")",
":",
"# invalid comparison",
"# object == category will hit this",
"return",
"False"
] |
Check if two dtypes are equal.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean
Whether or not the two dtypes are equal.
Examples
--------
>>> is_dtype_equal(int, float)
False
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(object, "category")
False
>>> is_dtype_equal(CategoricalDtype(), "category")
True
>>> is_dtype_equal(DatetimeTZDtype(), "datetime64")
False
|
[
"Check",
"if",
"two",
"dtypes",
"are",
"equal",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L756-L792
|
20,483
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_dtype_union_equal
|
def is_dtype_union_equal(source, target):
"""
Check whether two arrays have compatible dtypes to do a union.
numpy types are checked with ``is_dtype_equal``. Extension types are
checked separately.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean
Whether or not the two dtypes are equal.
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c']))
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c'], ordered=True))
False
"""
source = _get_dtype(source)
target = _get_dtype(target)
if is_categorical_dtype(source) and is_categorical_dtype(target):
# ordered False for both
return source.ordered is target.ordered
return is_dtype_equal(source, target)
|
python
|
def is_dtype_union_equal(source, target):
"""
Check whether two arrays have compatible dtypes to do a union.
numpy types are checked with ``is_dtype_equal``. Extension types are
checked separately.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean
Whether or not the two dtypes are equal.
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c']))
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c'], ordered=True))
False
"""
source = _get_dtype(source)
target = _get_dtype(target)
if is_categorical_dtype(source) and is_categorical_dtype(target):
# ordered False for both
return source.ordered is target.ordered
return is_dtype_equal(source, target)
|
[
"def",
"is_dtype_union_equal",
"(",
"source",
",",
"target",
")",
":",
"source",
"=",
"_get_dtype",
"(",
"source",
")",
"target",
"=",
"_get_dtype",
"(",
"target",
")",
"if",
"is_categorical_dtype",
"(",
"source",
")",
"and",
"is_categorical_dtype",
"(",
"target",
")",
":",
"# ordered False for both",
"return",
"source",
".",
"ordered",
"is",
"target",
".",
"ordered",
"return",
"is_dtype_equal",
"(",
"source",
",",
"target",
")"
] |
Check whether two arrays have compatible dtypes to do a union.
numpy types are checked with ``is_dtype_equal``. Extension types are
checked separately.
Parameters
----------
source : The first dtype to compare
target : The second dtype to compare
Returns
-------
boolean
Whether or not the two dtypes are equal.
>>> is_dtype_equal("int", int)
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c']))
True
>>> is_dtype_equal(CategoricalDtype(['a', 'b'],
... CategoricalDtype(['b', 'c'], ordered=True))
False
|
[
"Check",
"whether",
"two",
"arrays",
"have",
"compatible",
"dtypes",
"to",
"do",
"a",
"union",
".",
"numpy",
"types",
"are",
"checked",
"with",
"is_dtype_equal",
".",
"Extension",
"types",
"are",
"checked",
"separately",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L795-L827
|
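A runnable companion to the doctest above, showing the categorical special case (imports assumed from pandas.api.types and pandas.core.dtypes.common):

from pandas.api.types import CategoricalDtype
from pandas.core.dtypes.common import is_dtype_union_equal

# Unordered categoricals with different categories are union-compatible...
print(is_dtype_union_equal(CategoricalDtype(['a', 'b']),
                           CategoricalDtype(['b', 'c'])))                # True
# ...but an ordered/unordered mismatch is not.
print(is_dtype_union_equal(CategoricalDtype(['a', 'b']),
                           CategoricalDtype(['b', 'c'], ordered=True)))  # False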
20,484
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_numeric_v_string_like
|
def is_numeric_v_string_like(a, b):
"""
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
and scalar string-likes.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether or not we are comparing a string-like object to a numeric array.
Examples
--------
>>> is_numeric_v_string_like(1, 1)
False
>>> is_numeric_v_string_like("foo", "foo")
False
>>> is_numeric_v_string_like(1, "foo") # non-array numeric
False
>>> is_numeric_v_string_like(np.array([1]), "foo")
True
>>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check
True
>>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
True
>>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
True
>>> is_numeric_v_string_like(np.array([1]), np.array([2]))
False
>>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
False
"""
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
is_a_numeric_array = is_a_array and is_numeric_dtype(a)
is_b_numeric_array = is_b_array and is_numeric_dtype(b)
is_a_string_array = is_a_array and is_string_like_dtype(a)
is_b_string_array = is_b_array and is_string_like_dtype(b)
is_a_scalar_string_like = not is_a_array and is_string_like(a)
is_b_scalar_string_like = not is_b_array and is_string_like(b)
return ((is_a_numeric_array and is_b_scalar_string_like) or
(is_b_numeric_array and is_a_scalar_string_like) or
(is_a_numeric_array and is_b_string_array) or
(is_b_numeric_array and is_a_string_array))
|
python
|
def is_numeric_v_string_like(a, b):
"""
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
and scalar string-likes.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether or not we are comparing a string-like object to a numeric array.
Examples
--------
>>> is_numeric_v_string_like(1, 1)
False
>>> is_numeric_v_string_like("foo", "foo")
False
>>> is_numeric_v_string_like(1, "foo") # non-array numeric
False
>>> is_numeric_v_string_like(np.array([1]), "foo")
True
>>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check
True
>>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
True
>>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
True
>>> is_numeric_v_string_like(np.array([1]), np.array([2]))
False
>>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
False
"""
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
is_a_numeric_array = is_a_array and is_numeric_dtype(a)
is_b_numeric_array = is_b_array and is_numeric_dtype(b)
is_a_string_array = is_a_array and is_string_like_dtype(a)
is_b_string_array = is_b_array and is_string_like_dtype(b)
is_a_scalar_string_like = not is_a_array and is_string_like(a)
is_b_scalar_string_like = not is_b_array and is_string_like(b)
return ((is_a_numeric_array and is_b_scalar_string_like) or
(is_b_numeric_array and is_a_scalar_string_like) or
(is_a_numeric_array and is_b_string_array) or
(is_b_numeric_array and is_a_string_array))
|
[
"def",
"is_numeric_v_string_like",
"(",
"a",
",",
"b",
")",
":",
"is_a_array",
"=",
"isinstance",
"(",
"a",
",",
"np",
".",
"ndarray",
")",
"is_b_array",
"=",
"isinstance",
"(",
"b",
",",
"np",
".",
"ndarray",
")",
"is_a_numeric_array",
"=",
"is_a_array",
"and",
"is_numeric_dtype",
"(",
"a",
")",
"is_b_numeric_array",
"=",
"is_b_array",
"and",
"is_numeric_dtype",
"(",
"b",
")",
"is_a_string_array",
"=",
"is_a_array",
"and",
"is_string_like_dtype",
"(",
"a",
")",
"is_b_string_array",
"=",
"is_b_array",
"and",
"is_string_like_dtype",
"(",
"b",
")",
"is_a_scalar_string_like",
"=",
"not",
"is_a_array",
"and",
"is_string_like",
"(",
"a",
")",
"is_b_scalar_string_like",
"=",
"not",
"is_b_array",
"and",
"is_string_like",
"(",
"b",
")",
"return",
"(",
"(",
"is_a_numeric_array",
"and",
"is_b_scalar_string_like",
")",
"or",
"(",
"is_b_numeric_array",
"and",
"is_a_scalar_string_like",
")",
"or",
"(",
"is_a_numeric_array",
"and",
"is_b_string_array",
")",
"or",
"(",
"is_b_numeric_array",
"and",
"is_a_string_array",
")",
")"
] |
Check if we are comparing a string-like object to a numeric ndarray.
NumPy doesn't like to compare such objects, especially numeric arrays
and scalar string-likes.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether or not we are comparing a string-like object to a numeric array.
Examples
--------
>>> is_numeric_v_string_like(1, 1)
False
>>> is_numeric_v_string_like("foo", "foo")
False
>>> is_numeric_v_string_like(1, "foo") # non-array numeric
False
>>> is_numeric_v_string_like(np.array([1]), "foo")
True
>>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check
True
>>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
True
>>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
True
>>> is_numeric_v_string_like(np.array([1]), np.array([2]))
False
>>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))
False
|
[
"Check",
"if",
"we",
"are",
"comparing",
"a",
"string",
"-",
"like",
"object",
"to",
"a",
"numeric",
"ndarray",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1280-L1335
|
20,485
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_datetimelike_v_numeric
|
def is_datetimelike_v_numeric(a, b):
"""
Check if we are comparing a datetime-like object to a numeric object.
By "numeric," we mean an object that is either of an int or float dtype.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether or not we are comparing a datetime-like to a numeric object.
Examples
--------
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_numeric(1, 1)
False
>>> is_datetimelike_v_numeric(dt, dt)
False
>>> is_datetimelike_v_numeric(1, dt)
True
>>> is_datetimelike_v_numeric(dt, 1) # symmetric check
True
>>> is_datetimelike_v_numeric(np.array([dt]), 1)
True
>>> is_datetimelike_v_numeric(np.array([1]), dt)
True
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
True
>>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))
False
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
def is_numeric(x):
"""
Check if an object has a numeric dtype (i.e. integer or float).
"""
return is_integer_dtype(x) or is_float_dtype(x)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_numeric(b)) or
(is_datetimelike(b) and is_numeric(a)))
|
python
|
def is_datetimelike_v_numeric(a, b):
"""
Check if we are comparing a datetime-like object to a numeric object.
By "numeric," we mean an object that is either of an int or float dtype.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether we are comparing a datetime-like to a numeric object.
Examples
--------
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_numeric(1, 1)
False
>>> is_datetimelike_v_numeric(dt, dt)
False
>>> is_datetimelike_v_numeric(1, dt)
True
>>> is_datetimelike_v_numeric(dt, 1) # symmetric check
True
>>> is_datetimelike_v_numeric(np.array([dt]), 1)
True
>>> is_datetimelike_v_numeric(np.array([1]), dt)
True
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
True
>>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))
False
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
def is_numeric(x):
"""
Check if an object has a numeric dtype (i.e. integer or float).
"""
return is_integer_dtype(x) or is_float_dtype(x)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_numeric(b)) or
(is_datetimelike(b) and is_numeric(a)))
|
[
"def",
"is_datetimelike_v_numeric",
"(",
"a",
",",
"b",
")",
":",
"if",
"not",
"hasattr",
"(",
"a",
",",
"'dtype'",
")",
":",
"a",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"if",
"not",
"hasattr",
"(",
"b",
",",
"'dtype'",
")",
":",
"b",
"=",
"np",
".",
"asarray",
"(",
"b",
")",
"def",
"is_numeric",
"(",
"x",
")",
":",
"\"\"\"\n Check if an object has a numeric dtype (i.e. integer or float).\n \"\"\"",
"return",
"is_integer_dtype",
"(",
"x",
")",
"or",
"is_float_dtype",
"(",
"x",
")",
"is_datetimelike",
"=",
"needs_i8_conversion",
"return",
"(",
"(",
"is_datetimelike",
"(",
"a",
")",
"and",
"is_numeric",
"(",
"b",
")",
")",
"or",
"(",
"is_datetimelike",
"(",
"b",
")",
"and",
"is_numeric",
"(",
"a",
")",
")",
")"
] |
Check if we are comparing a datetime-like object to a numeric object.
By "numeric," we mean an object that is either of an int or float dtype.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether we are comparing a datetime-like to a numeric object.
Examples
--------
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_numeric(1, 1)
False
>>> is_datetimelike_v_numeric(dt, dt)
False
>>> is_datetimelike_v_numeric(1, dt)
True
>>> is_datetimelike_v_numeric(dt, 1) # symmetric check
True
>>> is_datetimelike_v_numeric(np.array([dt]), 1)
True
>>> is_datetimelike_v_numeric(np.array([1]), dt)
True
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
True
>>> is_datetimelike_v_numeric(np.array([1]), np.array([2]))
False
>>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
False
|
[
"Check",
"if",
"we",
"are",
"comparing",
"a",
"datetime",
"-",
"like",
"object",
"to",
"a",
"numeric",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1338-L1393
|
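A hedged sketch of how the is_datetimelike_v_numeric check above is typically used: as a guard that rejects meaningless comparisons before any int64-level work (internal import path as of this commit).
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_datetimelike_v_numeric  # internal module
dates = pd.Series(pd.date_range("2017-01-01", periods=3))
nums = np.array([1, 2, 3])
if is_datetimelike_v_numeric(dates, nums):
    # pandas internals raise at this point rather than compare
    # nanosecond int64 values against plain integers
    print("cannot compare datetime-like values with numeric values")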
20,486
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_datetimelike_v_object
|
def is_datetimelike_v_object(a, b):
"""
Check if we are comparing a datetime-like object to an object instance.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether we are comparing a datetime-like to an object instance.
Examples
--------
>>> obj = object()
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_object(obj, obj)
False
>>> is_datetimelike_v_object(dt, dt)
False
>>> is_datetimelike_v_object(obj, dt)
True
>>> is_datetimelike_v_object(dt, obj) # symmetric check
True
>>> is_datetimelike_v_object(np.array([dt]), obj)
True
>>> is_datetimelike_v_object(np.array([obj]), dt)
True
>>> is_datetimelike_v_object(np.array([dt]), np.array([obj]))
True
>>> is_datetimelike_v_object(np.array([obj]), np.array([obj]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([1]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_object_dtype(b)) or
(is_datetimelike(b) and is_object_dtype(a)))
|
python
|
def is_datetimelike_v_object(a, b):
"""
Check if we are comparing a datetime-like object to an object instance.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether we are comparing a datetime-like to an object instance.
Examples
--------
>>> obj = object()
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_object(obj, obj)
False
>>> is_datetimelike_v_object(dt, dt)
False
>>> is_datetimelike_v_object(obj, dt)
True
>>> is_datetimelike_v_object(dt, obj) # symmetric check
True
>>> is_datetimelike_v_object(np.array([dt]), obj)
True
>>> is_datetimelike_v_object(np.array([obj]), dt)
True
>>> is_datetimelike_v_object(np.array([dt]), np.array([obj]))
True
>>> is_datetimelike_v_object(np.array([obj]), np.array([obj]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([1]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([dt]))
False
"""
if not hasattr(a, 'dtype'):
a = np.asarray(a)
if not hasattr(b, 'dtype'):
b = np.asarray(b)
is_datetimelike = needs_i8_conversion
return ((is_datetimelike(a) and is_object_dtype(b)) or
(is_datetimelike(b) and is_object_dtype(a)))
|
[
"def",
"is_datetimelike_v_object",
"(",
"a",
",",
"b",
")",
":",
"if",
"not",
"hasattr",
"(",
"a",
",",
"'dtype'",
")",
":",
"a",
"=",
"np",
".",
"asarray",
"(",
"a",
")",
"if",
"not",
"hasattr",
"(",
"b",
",",
"'dtype'",
")",
":",
"b",
"=",
"np",
".",
"asarray",
"(",
"b",
")",
"is_datetimelike",
"=",
"needs_i8_conversion",
"return",
"(",
"(",
"is_datetimelike",
"(",
"a",
")",
"and",
"is_object_dtype",
"(",
"b",
")",
")",
"or",
"(",
"is_datetimelike",
"(",
"b",
")",
"and",
"is_object_dtype",
"(",
"a",
")",
")",
")"
] |
Check if we are comparing a datetime-like object to an object instance.
Parameters
----------
a : array-like, scalar
The first object to check.
b : array-like, scalar
The second object to check.
Returns
-------
boolean
Whether we are comparing a datetime-like to an object instance.
Examples
--------
>>> obj = object()
>>> dt = np.datetime64(pd.datetime(2017, 1, 1))
>>>
>>> is_datetimelike_v_object(obj, obj)
False
>>> is_datetimelike_v_object(dt, dt)
False
>>> is_datetimelike_v_object(obj, dt)
True
>>> is_datetimelike_v_object(dt, obj) # symmetric check
True
>>> is_datetimelike_v_object(np.array([dt]), obj)
True
>>> is_datetimelike_v_object(np.array([obj]), dt)
True
>>> is_datetimelike_v_object(np.array([dt]), np.array([obj]))
True
>>> is_datetimelike_v_object(np.array([obj]), np.array([obj]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([1]))
False
>>> is_datetimelike_v_object(np.array([dt]), np.array([dt]))
False
|
[
"Check",
"if",
"we",
"are",
"comparing",
"a",
"datetime",
"-",
"like",
"object",
"to",
"an",
"object",
"instance",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1396-L1446
|
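A small sketch for is_datetimelike_v_object, assuming the internal import path: an object array may mix timestamps with arbitrary values, so such comparisons must take an elementwise path.
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_datetimelike_v_object  # internal module
dti = pd.DatetimeIndex(["2017-01-01", "2017-01-02"])
mixed = np.array([pd.Timestamp("2017-01-01"), "not a date"], dtype=object)
print(is_datetimelike_v_object(dti, mixed))  # True  -> elementwise path needed
print(is_datetimelike_v_object(dti, dti))    # False -> both sides datetime-like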
20,487
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
needs_i8_conversion
|
def needs_i8_conversion(arr_or_dtype):
"""
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
or dtype is of a datetime-like dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
"""
if arr_or_dtype is None:
return False
return (is_datetime_or_timedelta_dtype(arr_or_dtype) or
is_datetime64tz_dtype(arr_or_dtype) or
is_period_dtype(arr_or_dtype))
|
python
|
def needs_i8_conversion(arr_or_dtype):
"""
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
or dtype is of a datetime-like dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
"""
if arr_or_dtype is None:
return False
return (is_datetime_or_timedelta_dtype(arr_or_dtype) or
is_datetime64tz_dtype(arr_or_dtype) or
is_period_dtype(arr_or_dtype))
|
[
"def",
"needs_i8_conversion",
"(",
"arr_or_dtype",
")",
":",
"if",
"arr_or_dtype",
"is",
"None",
":",
"return",
"False",
"return",
"(",
"is_datetime_or_timedelta_dtype",
"(",
"arr_or_dtype",
")",
"or",
"is_datetime64tz_dtype",
"(",
"arr_or_dtype",
")",
"or",
"is_period_dtype",
"(",
"arr_or_dtype",
")",
")"
] |
Check whether the array or dtype should be converted to int64.
An array-like or dtype "needs" such a conversion if the array-like
or dtype is of a datetime-like dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype should be converted to int64.
Examples
--------
>>> needs_i8_conversion(str)
False
>>> needs_i8_conversion(np.int64)
False
>>> needs_i8_conversion(np.datetime64)
True
>>> needs_i8_conversion(np.array(['a', 'b']))
False
>>> needs_i8_conversion(pd.Series([1, 2]))
False
>>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
True
>>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
|
[
"Check",
"whether",
"the",
"array",
"or",
"dtype",
"should",
"be",
"converted",
"to",
"int64",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1449-L1488
|
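A sketch of the pattern needs_i8_conversion supports: datetime-like data is routed through its int64 (nanosecond) representation before numeric algorithms run. The to_i8 helper is hypothetical and assumes an Index-like input exposing .asi8.
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import needs_i8_conversion  # internal module
def to_i8(values):  # hypothetical helper
    if needs_i8_conversion(values):
        # DatetimeIndex/TimedeltaIndex/PeriodIndex expose an int64 view via .asi8
        return values.asi8
    return np.asarray(values)
to_i8(pd.date_range("2019-01-01", periods=2))
# array([1546300800000000000, 1546387200000000000])
to_i8(pd.Index([1, 2]))  # array([1, 2])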
20,488
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_bool_dtype
|
def is_bool_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
except TypeError:
return False
if isinstance(arr_or_dtype, CategoricalDtype):
arr_or_dtype = arr_or_dtype.categories
# now we use the special definition for Index
if isinstance(arr_or_dtype, ABCIndexClass):
# TODO(jreback)
# we don't have a boolean Index class
# so it's object, we need to infer to
# guess this
return (arr_or_dtype.is_object and
arr_or_dtype.inferred_type == 'boolean')
elif is_extension_array_dtype(arr_or_dtype):
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return dtype._is_boolean
return issubclass(dtype.type, np.bool_)
|
python
|
def is_bool_dtype(arr_or_dtype):
"""
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True
"""
if arr_or_dtype is None:
return False
try:
dtype = _get_dtype(arr_or_dtype)
except TypeError:
return False
if isinstance(arr_or_dtype, CategoricalDtype):
arr_or_dtype = arr_or_dtype.categories
# now we use the special definition for Index
if isinstance(arr_or_dtype, ABCIndexClass):
# TODO(jreback)
# we don't have a boolean Index class
# so it's object, we need to infer to
# guess this
return (arr_or_dtype.is_object and
arr_or_dtype.inferred_type == 'boolean')
elif is_extension_array_dtype(arr_or_dtype):
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return dtype._is_boolean
return issubclass(dtype.type, np.bool_)
|
[
"def",
"is_bool_dtype",
"(",
"arr_or_dtype",
")",
":",
"if",
"arr_or_dtype",
"is",
"None",
":",
"return",
"False",
"try",
":",
"dtype",
"=",
"_get_dtype",
"(",
"arr_or_dtype",
")",
"except",
"TypeError",
":",
"return",
"False",
"if",
"isinstance",
"(",
"arr_or_dtype",
",",
"CategoricalDtype",
")",
":",
"arr_or_dtype",
"=",
"arr_or_dtype",
".",
"categories",
"# now we use the special definition for Index",
"if",
"isinstance",
"(",
"arr_or_dtype",
",",
"ABCIndexClass",
")",
":",
"# TODO(jreback)",
"# we don't have a boolean Index class",
"# so its object, we need to infer to",
"# guess this",
"return",
"(",
"arr_or_dtype",
".",
"is_object",
"and",
"arr_or_dtype",
".",
"inferred_type",
"==",
"'boolean'",
")",
"elif",
"is_extension_array_dtype",
"(",
"arr_or_dtype",
")",
":",
"dtype",
"=",
"getattr",
"(",
"arr_or_dtype",
",",
"'dtype'",
",",
"arr_or_dtype",
")",
"return",
"dtype",
".",
"_is_boolean",
"return",
"issubclass",
"(",
"dtype",
".",
"type",
",",
"np",
".",
"bool_",
")"
] |
Check whether the provided array or dtype is of a boolean dtype.
Parameters
----------
arr_or_dtype : array-like
The array or dtype to check.
Returns
-------
boolean
Whether or not the array or dtype is of a boolean dtype.
Notes
-----
An ExtensionArray is considered boolean when the ``_is_boolean``
attribute is set to True.
Examples
--------
>>> is_bool_dtype(str)
False
>>> is_bool_dtype(int)
False
>>> is_bool_dtype(bool)
True
>>> is_bool_dtype(np.bool)
True
>>> is_bool_dtype(np.array(['a', 'b']))
False
>>> is_bool_dtype(pd.Series([1, 2]))
False
>>> is_bool_dtype(np.array([True, False]))
True
>>> is_bool_dtype(pd.Categorical([True, False]))
True
>>> is_bool_dtype(pd.SparseArray([True, False]))
True
|
[
"Check",
"whether",
"the",
"provided",
"array",
"or",
"dtype",
"is",
"of",
"a",
"boolean",
"dtype",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1600-L1663
|
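A usage sketch for is_bool_dtype via its public pandas.api.types re-export; validate_mask is a hypothetical helper showing the natural guard before boolean indexing.
import numpy as np
import pandas as pd
from pandas.api.types import is_bool_dtype
def validate_mask(mask):  # hypothetical helper
    # Only genuinely boolean data may serve as a mask; integer 0/1
    # arrays are rejected rather than silently coerced.
    if not is_bool_dtype(mask):
        raise TypeError("mask must be boolean, got {}".format(
            getattr(mask, "dtype", type(mask))))
    return np.asarray(mask)
validate_mask(pd.Series([True, False]))  # array([ True, False])
# validate_mask(np.array([1, 0]))        # would raise TypeError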
20,489
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_extension_type
|
def is_extension_type(arr):
"""
Check whether an array-like is of a pandas extension class instance.
Extension classes include categoricals, pandas sparse objects (i.e.
classes represented within the pandas library and not ones external
to it like scipy sparse matrices), and datetime-like arrays.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is of a pandas extension class instance.
Examples
--------
>>> is_extension_type([1, 2, 3])
False
>>> is_extension_type(np.array([1, 2, 3]))
False
>>>
>>> cat = pd.Categorical([1, 2, 3])
>>>
>>> is_extension_type(cat)
True
>>> is_extension_type(pd.Series(cat))
True
>>> is_extension_type(pd.SparseArray([1, 2, 3]))
True
>>> is_extension_type(pd.SparseSeries([1, 2, 3]))
True
>>>
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_extension_type(s)
True
"""
if is_categorical(arr):
return True
elif is_sparse(arr):
return True
elif is_datetime64tz_dtype(arr):
return True
return False
|
python
|
def is_extension_type(arr):
"""
Check whether an array-like is of a pandas extension class instance.
Extension classes include categoricals, pandas sparse objects (i.e.
classes represented within the pandas library and not ones external
to it like scipy sparse matrices), and datetime-like arrays.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is of a pandas extension class instance.
Examples
--------
>>> is_extension_type([1, 2, 3])
False
>>> is_extension_type(np.array([1, 2, 3]))
False
>>>
>>> cat = pd.Categorical([1, 2, 3])
>>>
>>> is_extension_type(cat)
True
>>> is_extension_type(pd.Series(cat))
True
>>> is_extension_type(pd.SparseArray([1, 2, 3]))
True
>>> is_extension_type(pd.SparseSeries([1, 2, 3]))
True
>>>
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_extension_type(s)
True
"""
if is_categorical(arr):
return True
elif is_sparse(arr):
return True
elif is_datetime64tz_dtype(arr):
return True
return False
|
[
"def",
"is_extension_type",
"(",
"arr",
")",
":",
"if",
"is_categorical",
"(",
"arr",
")",
":",
"return",
"True",
"elif",
"is_sparse",
"(",
"arr",
")",
":",
"return",
"True",
"elif",
"is_datetime64tz_dtype",
"(",
"arr",
")",
":",
"return",
"True",
"return",
"False"
] |
Check whether an array-like is of a pandas extension class instance.
Extension classes include categoricals, pandas sparse objects (i.e.
classes represented within the pandas library and not ones external
to it like scipy sparse matrices), and datetime-like arrays.
Parameters
----------
arr : array-like
The array-like to check.
Returns
-------
boolean
Whether or not the array-like is of a pandas extension class instance.
Examples
--------
>>> is_extension_type([1, 2, 3])
False
>>> is_extension_type(np.array([1, 2, 3]))
False
>>>
>>> cat = pd.Categorical([1, 2, 3])
>>>
>>> is_extension_type(cat)
True
>>> is_extension_type(pd.Series(cat))
True
>>> is_extension_type(pd.SparseArray([1, 2, 3]))
True
>>> is_extension_type(pd.SparseSeries([1, 2, 3]))
True
>>>
>>> from scipy.sparse import bsr_matrix
>>> is_extension_type(bsr_matrix([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3]))
False
>>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
True
>>>
>>> dtype = DatetimeTZDtype("ns", tz="US/Eastern")
>>> s = pd.Series([], dtype=dtype)
>>> is_extension_type(s)
True
|
[
"Check",
"whether",
"an",
"array",
"-",
"like",
"is",
"of",
"a",
"pandas",
"extension",
"class",
"instance",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1666-L1722
|
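A sketch contrasting is_extension_type with the broader is_extension_array_dtype from the next record: the former covers only the older internal trio (categorical, sparse, tz-aware datetimes), while the latter also recognizes registered third-party ExtensionArrays. Both are re-exported from pandas.api.types.
import pandas as pd
from pandas.api.types import is_extension_type, is_extension_array_dtype
cat = pd.Categorical(["a", "b"])
tz_idx = pd.DatetimeIndex(["2017-01-01"], tz="US/Eastern")
print(is_extension_type(cat), is_extension_array_dtype(cat))        # True True
print(is_extension_type(tz_idx), is_extension_array_dtype(tz_idx))  # True True
print(is_extension_type(pd.Series([1, 2])))                         # False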
20,490
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
is_extension_array_dtype
|
def is_extension_array_dtype(arr_or_dtype):
"""
Check if an object is a pandas extension array type.
See the :ref:`User Guide <extending.extension-types>` for more.
Parameters
----------
arr_or_dtype : object
For array-like input, the ``.dtype`` attribute will
be extracted.
Returns
-------
bool
Whether the `arr_or_dtype` is an extension array type.
Notes
-----
This checks whether an object implements the pandas extension
array interface. In pandas, this includes:
* Categorical
* Sparse
* Interval
* Period
* DatetimeArray
* TimedeltaArray
Third-party libraries may implement arrays or types satisfying
this interface as well.
Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
>>> arr = pd.Categorical(['a', 'b'])
>>> is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
True
>>> arr = np.array(['a', 'b'])
>>> is_extension_array_dtype(arr.dtype)
False
"""
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return (isinstance(dtype, ExtensionDtype) or
registry.find(dtype) is not None)
|
python
|
def is_extension_array_dtype(arr_or_dtype):
"""
Check if an object is a pandas extension array type.
See the :ref:`User Guide <extending.extension-types>` for more.
Parameters
----------
arr_or_dtype : object
For array-like input, the ``.dtype`` attribute will
be extracted.
Returns
-------
bool
Whether the `arr_or_dtype` is an extension array type.
Notes
-----
This checks whether an object implements the pandas extension
array interface. In pandas, this includes:
* Categorical
* Sparse
* Interval
* Period
* DatetimeArray
* TimedeltaArray
Third-party libraries may implement arrays or types satisfying
this interface as well.
Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
>>> arr = pd.Categorical(['a', 'b'])
>>> is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
True
>>> arr = np.array(['a', 'b'])
>>> is_extension_array_dtype(arr.dtype)
False
"""
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype)
return (isinstance(dtype, ExtensionDtype) or
registry.find(dtype) is not None)
|
[
"def",
"is_extension_array_dtype",
"(",
"arr_or_dtype",
")",
":",
"dtype",
"=",
"getattr",
"(",
"arr_or_dtype",
",",
"'dtype'",
",",
"arr_or_dtype",
")",
"return",
"(",
"isinstance",
"(",
"dtype",
",",
"ExtensionDtype",
")",
"or",
"registry",
".",
"find",
"(",
"dtype",
")",
"is",
"not",
"None",
")"
] |
Check if an object is a pandas extension array type.
See the :ref:`User Guide <extending.extension-types>` for more.
Parameters
----------
arr_or_dtype : object
For array-like input, the ``.dtype`` attribute will
be extracted.
Returns
-------
bool
Whether the `arr_or_dtype` is an extension array type.
Notes
-----
This checks whether an object implements the pandas extension
array interface. In pandas, this includes:
* Categorical
* Sparse
* Interval
* Period
* DatetimeArray
* TimedeltaArray
Third-party libraries may implement arrays or types satisfying
this interface as well.
Examples
--------
>>> from pandas.api.types import is_extension_array_dtype
>>> arr = pd.Categorical(['a', 'b'])
>>> is_extension_array_dtype(arr)
True
>>> is_extension_array_dtype(arr.dtype)
True
>>> arr = np.array(['a', 'b'])
>>> is_extension_array_dtype(arr.dtype)
False
|
[
"Check",
"if",
"an",
"object",
"is",
"a",
"pandas",
"extension",
"array",
"type",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1725-L1772
|
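A minimal dispatch sketch for is_extension_array_dtype: extension arrays carry their own missing-value semantics, so callers branch to the backing array before falling back to a plain ndarray. values_for_algos is a hypothetical helper.
import numpy as np
import pandas as pd
from pandas.api.types import is_extension_array_dtype
def values_for_algos(obj):  # hypothetical helper
    if is_extension_array_dtype(obj):
        # Series.array returns the backing ExtensionArray (pandas >= 0.24)
        return obj.array if isinstance(obj, pd.Series) else obj
    return np.asarray(obj)
values_for_algos(pd.Series(pd.Categorical(["a", "b"])))  # -> Categorical
values_for_algos(pd.Series([1.0, 2.0]))                  # -> float64 ndarray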
20,491
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
_get_dtype
|
def _get_dtype(arr_or_dtype):
"""
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
Returns
-------
obj_dtype : The extracted dtype instance from the
passed in array or dtype object.
Raises
------
TypeError : The passed in object is None.
"""
if arr_or_dtype is None:
raise TypeError("Cannot deduce dtype from null object")
# fastpath
elif isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
# if we have an array-like
elif hasattr(arr_or_dtype, 'dtype'):
arr_or_dtype = arr_or_dtype.dtype
return pandas_dtype(arr_or_dtype)
|
python
|
def _get_dtype(arr_or_dtype):
"""
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
Returns
-------
obj_dtype : The extracted dtype instance from the
passed in array or dtype object.
Raises
------
TypeError : The passed in object is None.
"""
if arr_or_dtype is None:
raise TypeError("Cannot deduce dtype from null object")
# fastpath
elif isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
elif isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
# if we have an array-like
elif hasattr(arr_or_dtype, 'dtype'):
arr_or_dtype = arr_or_dtype.dtype
return pandas_dtype(arr_or_dtype)
|
[
"def",
"_get_dtype",
"(",
"arr_or_dtype",
")",
":",
"if",
"arr_or_dtype",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"Cannot deduce dtype from null object\"",
")",
"# fastpath",
"elif",
"isinstance",
"(",
"arr_or_dtype",
",",
"np",
".",
"dtype",
")",
":",
"return",
"arr_or_dtype",
"elif",
"isinstance",
"(",
"arr_or_dtype",
",",
"type",
")",
":",
"return",
"np",
".",
"dtype",
"(",
"arr_or_dtype",
")",
"# if we have an array-like",
"elif",
"hasattr",
"(",
"arr_or_dtype",
",",
"'dtype'",
")",
":",
"arr_or_dtype",
"=",
"arr_or_dtype",
".",
"dtype",
"return",
"pandas_dtype",
"(",
"arr_or_dtype",
")"
] |
Get the dtype instance associated with an array
or dtype object.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
Returns
-------
obj_dtype : The extracted dtype instance from the
passed in array or dtype object.
Raises
------
TypeError : The passed in object is None.
|
[
"Get",
"the",
"dtype",
"instance",
"associated",
"with",
"an",
"array",
"or",
"dtype",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1833-L1866
|
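A quick sketch of the normalization _get_dtype performs; it is a private helper at this commit, so the import path is internal and may change.
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import _get_dtype  # private helper
_get_dtype(np.dtype("int64"))       # dtype('int64')   (fastpath)
_get_dtype(np.int64)                # dtype('int64')   (from a type)
_get_dtype(pd.Series([1, 2, 3]))    # dtype('int64')   (via .dtype)
_get_dtype("datetime64[ns, UTC]")   # DatetimeTZDtype  (via pandas_dtype)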
20,492
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
infer_dtype_from_object
|
def infer_dtype_from_object(dtype):
"""
Get a numpy dtype.type-style object for a dtype object.
This method also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object.
"""
if isinstance(dtype, type) and issubclass(dtype, np.generic):
# Type object from a dtype
return dtype
elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):
# dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# Should still pass if we don't have a date-like
pass
return dtype.type
try:
dtype = pandas_dtype(dtype)
except TypeError:
pass
if is_extension_array_dtype(dtype):
return dtype.type
elif isinstance(dtype, str):
# TODO(jreback)
# should deprecate these
if dtype in ['datetimetz', 'datetime64tz']:
return DatetimeTZDtype.type
elif dtype in ['period']:
raise NotImplementedError
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
try:
return infer_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
# TypeError handles the float16 type code of 'e'
# further handle internal types
pass
return infer_dtype_from_object(np.dtype(dtype))
|
python
|
def infer_dtype_from_object(dtype):
"""
Get a numpy dtype.type-style object for a dtype object.
This method also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object.
"""
if isinstance(dtype, type) and issubclass(dtype, np.generic):
# Type object from a dtype
return dtype
elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):
# dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# Should still pass if we don't have a date-like
pass
return dtype.type
try:
dtype = pandas_dtype(dtype)
except TypeError:
pass
if is_extension_array_dtype(dtype):
return dtype.type
elif isinstance(dtype, str):
# TODO(jreback)
# should deprecate these
if dtype in ['datetimetz', 'datetime64tz']:
return DatetimeTZDtype.type
elif dtype in ['period']:
raise NotImplementedError
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
try:
return infer_dtype_from_object(getattr(np, dtype))
except (AttributeError, TypeError):
# Handles cases like _get_dtype(int) i.e.,
# Python objects that are valid dtypes
# (unlike user-defined types, in general)
#
# TypeError handles the float16 type code of 'e'
# further handle internal types
pass
return infer_dtype_from_object(np.dtype(dtype))
|
[
"def",
"infer_dtype_from_object",
"(",
"dtype",
")",
":",
"if",
"isinstance",
"(",
"dtype",
",",
"type",
")",
"and",
"issubclass",
"(",
"dtype",
",",
"np",
".",
"generic",
")",
":",
"# Type object from a dtype",
"return",
"dtype",
"elif",
"isinstance",
"(",
"dtype",
",",
"(",
"np",
".",
"dtype",
",",
"PandasExtensionDtype",
",",
"ExtensionDtype",
")",
")",
":",
"# dtype object",
"try",
":",
"_validate_date_like_dtype",
"(",
"dtype",
")",
"except",
"TypeError",
":",
"# Should still pass if we don't have a date-like",
"pass",
"return",
"dtype",
".",
"type",
"try",
":",
"dtype",
"=",
"pandas_dtype",
"(",
"dtype",
")",
"except",
"TypeError",
":",
"pass",
"if",
"is_extension_array_dtype",
"(",
"dtype",
")",
":",
"return",
"dtype",
".",
"type",
"elif",
"isinstance",
"(",
"dtype",
",",
"str",
")",
":",
"# TODO(jreback)",
"# should deprecate these",
"if",
"dtype",
"in",
"[",
"'datetimetz'",
",",
"'datetime64tz'",
"]",
":",
"return",
"DatetimeTZDtype",
".",
"type",
"elif",
"dtype",
"in",
"[",
"'period'",
"]",
":",
"raise",
"NotImplementedError",
"if",
"dtype",
"==",
"'datetime'",
"or",
"dtype",
"==",
"'timedelta'",
":",
"dtype",
"+=",
"'64'",
"try",
":",
"return",
"infer_dtype_from_object",
"(",
"getattr",
"(",
"np",
",",
"dtype",
")",
")",
"except",
"(",
"AttributeError",
",",
"TypeError",
")",
":",
"# Handles cases like _get_dtype(int) i.e.,",
"# Python objects that are valid dtypes",
"# (unlike user-defined types, in general)",
"#",
"# TypeError handles the float16 type code of 'e'",
"# further handle internal types",
"pass",
"return",
"infer_dtype_from_object",
"(",
"np",
".",
"dtype",
"(",
"dtype",
")",
")"
] |
Get a numpy dtype.type-style object for a dtype object.
This method also includes handling of the datetime64[ns] and
datetime64[ns, TZ] objects.
If no dtype can be found, we return ``object``.
Parameters
----------
dtype : dtype, type
The dtype object whose numpy dtype.type-style
object we want to extract.
Returns
-------
dtype_object : The extracted numpy dtype.type-style object.
|
[
"Get",
"a",
"numpy",
"dtype",
".",
"type",
"-",
"style",
"object",
"for",
"a",
"dtype",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1916-L1977
|
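A sketch of infer_dtype_from_object in action; at this commit it backs DataFrame.select_dtypes, mapping user-facing inputs to numpy-style type objects comparable with issubclass. The result for plain int is platform-dependent and noted as an assumption.
import numpy as np
from pandas.core.dtypes.common import infer_dtype_from_object  # internal module
infer_dtype_from_object("datetime")  # numpy.datetime64 ('datetime' -> 'datetime64')
infer_dtype_from_object(int)         # numpy.int64 on most platforms (default int)
infer_dtype_from_object("category")  # the CategoricalDtype type object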
20,493
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
_validate_date_like_dtype
|
def _validate_date_like_dtype(dtype):
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be casted to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('{error}'.format(error=e))
if typ != 'generic' and typ != 'ns':
msg = '{name!r} is too specific of a frequency, try passing {type!r}'
raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))
|
python
|
def _validate_date_like_dtype(dtype):
"""
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be casted to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
frequency provided is too specific)
"""
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('{error}'.format(error=e))
if typ != 'generic' and typ != 'ns':
msg = '{name!r} is too specific of a frequency, try passing {type!r}'
raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))
|
[
"def",
"_validate_date_like_dtype",
"(",
"dtype",
")",
":",
"try",
":",
"typ",
"=",
"np",
".",
"datetime_data",
"(",
"dtype",
")",
"[",
"0",
"]",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"TypeError",
"(",
"'{error}'",
".",
"format",
"(",
"error",
"=",
"e",
")",
")",
"if",
"typ",
"!=",
"'generic'",
"and",
"typ",
"!=",
"'ns'",
":",
"msg",
"=",
"'{name!r} is too specific of a frequency, try passing {type!r}'",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"name",
"=",
"dtype",
".",
"name",
",",
"type",
"=",
"dtype",
".",
"type",
".",
"__name__",
")",
")"
] |
Check whether the dtype is a date-like dtype. Raises an error if invalid.
Parameters
----------
dtype : dtype, type
The dtype to check.
Raises
------
TypeError : The dtype could not be casted to a date-like dtype.
ValueError : The dtype is an illegal date-like dtype (e.g. the
frequency provided is too specific)
|
[
"Check",
"whether",
"the",
"dtype",
"is",
"a",
"date",
"-",
"like",
"dtype",
".",
"Raises",
"an",
"error",
"if",
"invalid",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1980-L2002
|
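A short sketch exercising _validate_date_like_dtype (a private helper): pandas supports only the generic and nanosecond datetime64 precisions, so anything more specific is rejected.
import numpy as np
from pandas.core.dtypes.common import _validate_date_like_dtype  # private helper
_validate_date_like_dtype(np.dtype("datetime64[ns]"))  # passes: 'ns' precision
try:
    _validate_date_like_dtype(np.dtype("datetime64[us]"))
except ValueError as err:
    print(err)  # microseconds are "too specific of a frequency"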
20,494
|
pandas-dev/pandas
|
pandas/core/dtypes/common.py
|
pandas_dtype
|
def pandas_dtype(dtype):
"""
Convert input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object to be converted
Returns
-------
np.dtype or a pandas dtype
Raises
------
TypeError if not a dtype
"""
# short-circuit
if isinstance(dtype, np.ndarray):
return dtype.dtype
elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):
return dtype
# registered extension types
result = registry.find(dtype)
if result is not None:
return result
# try a numpy dtype
# raise a consistent TypeError if failed
try:
npdtype = np.dtype(dtype)
except Exception:
# we don't want to force a repr of the non-string
if not isinstance(dtype, str):
raise TypeError("data type not understood")
raise TypeError("data type '{}' not understood".format(
dtype))
# Any invalid dtype (such as pd.Timestamp) should raise an error.
# np.dtype(invalid_type).kind = 'O' for such objects. However, this will
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O']:
# check hashability to avoid errors/DeprecationWarning when we get
# here and `dtype` is an array
return npdtype
elif npdtype.kind == 'O':
raise TypeError("dtype '{}' not understood".format(dtype))
return npdtype
|
python
|
def pandas_dtype(dtype):
"""
Convert input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object to be converted
Returns
-------
np.dtype or a pandas dtype
Raises
------
TypeError if not a dtype
"""
# short-circuit
if isinstance(dtype, np.ndarray):
return dtype.dtype
elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)):
return dtype
# registered extension types
result = registry.find(dtype)
if result is not None:
return result
# try a numpy dtype
# raise a consistent TypeError if failed
try:
npdtype = np.dtype(dtype)
except Exception:
# we don't want to force a repr of the non-string
if not isinstance(dtype, str):
raise TypeError("data type not understood")
raise TypeError("data type '{}' not understood".format(
dtype))
# Any invalid dtype (such as pd.Timestamp) should raise an error.
# np.dtype(invalid_type).kind = 'O' for such objects. However, this will
# also catch some valid dtypes such as object, np.object_ and 'object'
# which we safeguard against by catching them earlier and returning
# np.dtype(valid_dtype) before this condition is evaluated.
if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O']:
# check hashability to avoid errors/DeprecationWarning when we get
# here and `dtype` is an array
return npdtype
elif npdtype.kind == 'O':
raise TypeError("dtype '{}' not understood".format(dtype))
return npdtype
|
[
"def",
"pandas_dtype",
"(",
"dtype",
")",
":",
"# short-circuit",
"if",
"isinstance",
"(",
"dtype",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"dtype",
".",
"dtype",
"elif",
"isinstance",
"(",
"dtype",
",",
"(",
"np",
".",
"dtype",
",",
"PandasExtensionDtype",
",",
"ExtensionDtype",
")",
")",
":",
"return",
"dtype",
"# registered extension types",
"result",
"=",
"registry",
".",
"find",
"(",
"dtype",
")",
"if",
"result",
"is",
"not",
"None",
":",
"return",
"result",
"# try a numpy dtype",
"# raise a consistent TypeError if failed",
"try",
":",
"npdtype",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"except",
"Exception",
":",
"# we don't want to force a repr of the non-string",
"if",
"not",
"isinstance",
"(",
"dtype",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"data type not understood\"",
")",
"raise",
"TypeError",
"(",
"\"data type '{}' not understood\"",
".",
"format",
"(",
"dtype",
")",
")",
"# Any invalid dtype (such as pd.Timestamp) should raise an error.",
"# np.dtype(invalid_type).kind = 0 for such objects. However, this will",
"# also catch some valid dtypes such as object, np.object_ and 'object'",
"# which we safeguard against by catching them earlier and returning",
"# np.dtype(valid_dtype) before this condition is evaluated.",
"if",
"is_hashable",
"(",
"dtype",
")",
"and",
"dtype",
"in",
"[",
"object",
",",
"np",
".",
"object_",
",",
"'object'",
",",
"'O'",
"]",
":",
"# check hashability to avoid errors/DeprecationWarning when we get",
"# here and `dtype` is an array",
"return",
"npdtype",
"elif",
"npdtype",
".",
"kind",
"==",
"'O'",
":",
"raise",
"TypeError",
"(",
"\"dtype '{}' not understood\"",
".",
"format",
"(",
"dtype",
")",
")",
"return",
"npdtype"
] |
Convert input into a pandas only dtype object or a numpy dtype object.
Parameters
----------
dtype : object to be converted
Returns
-------
np.dtype or a pandas dtype
Raises
------
TypeError if not a dtype
|
[
"Convert",
"input",
"into",
"a",
"pandas",
"only",
"dtype",
"object",
"or",
"a",
"numpy",
"dtype",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L2005-L2055
|
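A usage sketch for pandas_dtype via its public pandas.api.types re-export: strings, numpy types, and registered extension names all normalize to dtype instances, while object-kind fallbacks such as pd.Timestamp are rejected.
import pandas as pd
from pandas.api.types import pandas_dtype
pandas_dtype("int64")                # dtype('int64')
pandas_dtype("category")             # CategoricalDtype (registered extension)
pandas_dtype("datetime64[ns, UTC]")  # DatetimeTZDtype('ns', 'UTC')
try:
    pandas_dtype(pd.Timestamp)       # a class, not a dtype
except TypeError as err:
    print(err)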
20,495
|
pandas-dev/pandas
|
pandas/core/reshape/merge.py
|
_groupby_and_merge
|
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in right
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except KeyError:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
|
python
|
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in right
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except KeyError:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
|
[
"def",
"_groupby_and_merge",
"(",
"by",
",",
"on",
",",
"left",
",",
"right",
",",
"_merge_pieces",
",",
"check_duplicates",
"=",
"True",
")",
":",
"pieces",
"=",
"[",
"]",
"if",
"not",
"isinstance",
"(",
"by",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"by",
"=",
"[",
"by",
"]",
"lby",
"=",
"left",
".",
"groupby",
"(",
"by",
",",
"sort",
"=",
"False",
")",
"# if we can groupby the rhs",
"# then we can get vastly better perf",
"try",
":",
"# we will check & remove duplicates if indicated",
"if",
"check_duplicates",
":",
"if",
"on",
"is",
"None",
":",
"on",
"=",
"[",
"]",
"elif",
"not",
"isinstance",
"(",
"on",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"on",
"=",
"[",
"on",
"]",
"if",
"right",
".",
"duplicated",
"(",
"by",
"+",
"on",
")",
".",
"any",
"(",
")",
":",
"right",
"=",
"right",
".",
"drop_duplicates",
"(",
"by",
"+",
"on",
",",
"keep",
"=",
"'last'",
")",
"rby",
"=",
"right",
".",
"groupby",
"(",
"by",
",",
"sort",
"=",
"False",
")",
"except",
"KeyError",
":",
"rby",
"=",
"None",
"for",
"key",
",",
"lhs",
"in",
"lby",
":",
"if",
"rby",
"is",
"None",
":",
"rhs",
"=",
"right",
"else",
":",
"try",
":",
"rhs",
"=",
"right",
".",
"take",
"(",
"rby",
".",
"indices",
"[",
"key",
"]",
")",
"except",
"KeyError",
":",
"# key doesn't exist in left",
"lcols",
"=",
"lhs",
".",
"columns",
".",
"tolist",
"(",
")",
"cols",
"=",
"lcols",
"+",
"[",
"r",
"for",
"r",
"in",
"right",
".",
"columns",
"if",
"r",
"not",
"in",
"set",
"(",
"lcols",
")",
"]",
"merged",
"=",
"lhs",
".",
"reindex",
"(",
"columns",
"=",
"cols",
")",
"merged",
".",
"index",
"=",
"range",
"(",
"len",
"(",
"merged",
")",
")",
"pieces",
".",
"append",
"(",
"merged",
")",
"continue",
"merged",
"=",
"_merge_pieces",
"(",
"lhs",
",",
"rhs",
")",
"# make sure join keys are in the merged",
"# TODO, should _merge_pieces do this?",
"for",
"k",
"in",
"by",
":",
"try",
":",
"if",
"k",
"in",
"merged",
":",
"merged",
"[",
"k",
"]",
"=",
"key",
"except",
"KeyError",
":",
"pass",
"pieces",
".",
"append",
"(",
"merged",
")",
"# preserve the original order",
"# if we have a missing piece this can be reset",
"from",
"pandas",
".",
"core",
".",
"reshape",
".",
"concat",
"import",
"concat",
"result",
"=",
"concat",
"(",
"pieces",
",",
"ignore_index",
"=",
"True",
")",
"result",
"=",
"result",
".",
"reindex",
"(",
"columns",
"=",
"pieces",
"[",
"0",
"]",
".",
"columns",
",",
"copy",
"=",
"False",
")",
"return",
"result",
",",
"lby"
] |
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
|
[
"groupby",
"&",
"merge",
";",
"we",
"are",
"always",
"performing",
"a",
"left",
"-",
"by",
"type",
"operation"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/merge.py#L54-L128
|
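A hedged illustration of where _groupby_and_merge is reached from the public API: at this commit, pd.merge_ordered with left_by routes through it, merging each left-side group against the right frame separately.
import pandas as pd
left = pd.DataFrame({"group": ["g1", "g1", "g2"],
                     "key": ["a", "c", "a"],
                     "lvalue": [1, 2, 3]})
right = pd.DataFrame({"key": ["a", "b"], "rvalue": [10, 20]})
# each 'group' value in `left` is merged against `right` on 'key'
pd.merge_ordered(left, right, on="key", left_by="group")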
20,496
|
pandas-dev/pandas
|
pandas/core/reshape/merge.py
|
merge_asof
|
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True,
direction='backward'):
"""Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore, this must be a numeric column,
such as datetimelike, integer, or float. Either 'on' or 'left_on'/'right_on'
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : integer or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than)
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
.. versionadded:: 0.20.0
Returns
-------
merged : DataFrame
See Also
--------
merge
merge_ordered
Examples
--------
>>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
... 'right_val': [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on='a', direction='forward')
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on='a', direction='nearest')
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},
... index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world time-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
by=by, left_by=left_by, right_by=right_by,
suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction)
return op.get_result()
|
python
|
def merge_asof(left, right, on=None,
left_on=None, right_on=None,
left_index=False, right_index=False,
by=None, left_by=None, right_by=None,
suffixes=('_x', '_y'),
tolerance=None,
allow_exact_matches=True,
direction='backward'):
"""Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore, this must be a numeric column,
such as datetimelike, integer, or float. Either 'on' or 'left_on'/'right_on'
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : integer or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than)
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
.. versionadded:: 0.20.0
Returns
-------
merged : DataFrame
See Also
--------
merge
merge_ordered
Examples
--------
>>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
... 'right_val': [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on='a', direction='forward')
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on='a', direction='nearest')
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},
... index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world time-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
"""
op = _AsOfMerge(left, right,
on=on, left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index,
by=by, left_by=left_by, right_by=right_by,
suffixes=suffixes,
how='asof', tolerance=tolerance,
allow_exact_matches=allow_exact_matches,
direction=direction)
return op.get_result()
|
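The core of the default 'backward' search above can be sketched in plain
NumPy. This is not pandas's actual implementation (merge_asof dispatches to
compiled Cython routines and also handles 'by' groups, tolerance, and
allow_exact_matches); asof_backward is a hypothetical helper shown only to
make the search rule concrete.

import numpy as np
import pandas as pd

def asof_backward(left_on, right_on):
    # position of the last right key <= each left key; right_on must be
    # sorted ascending, mirroring merge_asof's sortedness requirement
    idx = np.searchsorted(right_on, left_on, side='right') - 1
    return idx  # -1 marks "no match on the right"

left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7], 'right_val': [1, 2, 3, 6, 7]})
pos = asof_backward(left['a'].to_numpy(), right['a'].to_numpy())
vals = right['right_val'].to_numpy().astype(float)[pos]
vals[pos == -1] = np.nan  # unmatched rows become NaN, as in the examples
print(left.assign(right_val=vals))  # right_val: 1.0, 3.0, 7.0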
[
"def",
"merge_asof",
"(",
"left",
",",
"right",
",",
"on",
"=",
"None",
",",
"left_on",
"=",
"None",
",",
"right_on",
"=",
"None",
",",
"left_index",
"=",
"False",
",",
"right_index",
"=",
"False",
",",
"by",
"=",
"None",
",",
"left_by",
"=",
"None",
",",
"right_by",
"=",
"None",
",",
"suffixes",
"=",
"(",
"'_x'",
",",
"'_y'",
")",
",",
"tolerance",
"=",
"None",
",",
"allow_exact_matches",
"=",
"True",
",",
"direction",
"=",
"'backward'",
")",
":",
"op",
"=",
"_AsOfMerge",
"(",
"left",
",",
"right",
",",
"on",
"=",
"on",
",",
"left_on",
"=",
"left_on",
",",
"right_on",
"=",
"right_on",
",",
"left_index",
"=",
"left_index",
",",
"right_index",
"=",
"right_index",
",",
"by",
"=",
"by",
",",
"left_by",
"=",
"left_by",
",",
"right_by",
"=",
"right_by",
",",
"suffixes",
"=",
"suffixes",
",",
"how",
"=",
"'asof'",
",",
"tolerance",
"=",
"tolerance",
",",
"allow_exact_matches",
"=",
"allow_exact_matches",
",",
"direction",
"=",
"direction",
")",
"return",
"op",
".",
"get_result",
"(",
")"
] |
Perform an asof merge. This is similar to a left-join except that we
match on nearest key rather than equal keys.
Both DataFrames must be sorted by the key.
For each row in the left DataFrame:
- A "backward" search selects the last row in the right DataFrame whose
'on' key is less than or equal to the left's key.
- A "forward" search selects the first row in the right DataFrame whose
'on' key is greater than or equal to the left's key.
- A "nearest" search selects the row in the right DataFrame whose 'on'
key is closest in absolute distance to the left's key.
The default is "backward" and is compatible in versions below 0.20.0.
The direction parameter was added in version 0.20.0 and introduces
"forward" and "nearest".
Optionally match on equivalent keys with 'by' before searching with 'on'.
.. versionadded:: 0.19.0
Parameters
----------
left : DataFrame
right : DataFrame
on : label
Field name to join on. Must be found in both DataFrames.
The data MUST be ordered. Furthermore, this must be a numeric column,
such as datetimelike, integer, or float. Either 'on' or left_on/right_on
must be given.
left_on : label
Field name to join on in left DataFrame.
right_on : label
Field name to join on in right DataFrame.
left_index : boolean
Use the index of the left DataFrame as the join key.
.. versionadded:: 0.19.2
right_index : boolean
Use the index of the right DataFrame as the join key.
.. versionadded:: 0.19.2
by : column name or list of column names
Match on these columns before performing merge operation.
left_by : column name
Field names to match on in the left DataFrame.
.. versionadded:: 0.19.2
right_by : column name
Field names to match on in the right DataFrame.
.. versionadded:: 0.19.2
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively.
tolerance : integer or Timedelta, optional, default None
Select asof tolerance within this range; must be compatible
with the merge index.
allow_exact_matches : boolean, default True
- If True, allow matching with the same 'on' value
(i.e. less-than-or-equal-to / greater-than-or-equal-to)
- If False, don't match the same 'on' value
(i.e., strictly less-than / strictly greater-than)
direction : 'backward' (default), 'forward', or 'nearest'
Whether to search for prior, subsequent, or closest matches.
.. versionadded:: 0.20.0
Returns
-------
merged : DataFrame
See Also
--------
merge
merge_ordered
Examples
--------
>>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
>>> left
a left_val
0 1 a
1 5 b
2 10 c
>>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
... 'right_val': [1, 2, 3, 6, 7]})
>>> right
a right_val
0 1 1
1 2 2
2 3 3
3 6 6
4 7 7
>>> pd.merge_asof(left, right, on='a')
a left_val right_val
0 1 a 1
1 5 b 3
2 10 c 7
>>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
a left_val right_val
0 1 a NaN
1 5 b 3.0
2 10 c 7.0
>>> pd.merge_asof(left, right, on='a', direction='forward')
a left_val right_val
0 1 a 1.0
1 5 b 6.0
2 10 c NaN
>>> pd.merge_asof(left, right, on='a', direction='nearest')
a left_val right_val
0 1 a 1
1 5 b 6
2 10 c 7
We can use indexed DataFrames as well.
>>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10])
>>> left
left_val
1 a
5 b
10 c
>>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]},
... index=[1, 2, 3, 6, 7])
>>> right
right_val
1 1
2 2
3 3
6 6
7 7
>>> pd.merge_asof(left, right, left_index=True, right_index=True)
left_val right_val
1 a 1
5 b 3
10 c 7
Here is a real-world time-series example
>>> quotes
time ticker bid ask
0 2016-05-25 13:30:00.023 GOOG 720.50 720.93
1 2016-05-25 13:30:00.023 MSFT 51.95 51.96
2 2016-05-25 13:30:00.030 MSFT 51.97 51.98
3 2016-05-25 13:30:00.041 MSFT 51.99 52.00
4 2016-05-25 13:30:00.048 GOOG 720.50 720.93
5 2016-05-25 13:30:00.049 AAPL 97.99 98.01
6 2016-05-25 13:30:00.072 GOOG 720.50 720.88
7 2016-05-25 13:30:00.075 MSFT 52.01 52.03
>>> trades
time ticker price quantity
0 2016-05-25 13:30:00.023 MSFT 51.95 75
1 2016-05-25 13:30:00.038 MSFT 51.95 155
2 2016-05-25 13:30:00.048 GOOG 720.77 100
3 2016-05-25 13:30:00.048 GOOG 720.92 100
4 2016-05-25 13:30:00.048 AAPL 98.00 100
By default we are taking the asof of the quotes
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker')
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 2ms between the quote time and the trade time
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('2ms'))
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96
1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN
2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93
3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
We only asof within 10ms between the quote time and the trade time
and we exclude exact matches on time. However *prior* data will
propagate forward
>>> pd.merge_asof(trades, quotes,
... on='time',
... by='ticker',
... tolerance=pd.Timedelta('10ms'),
... allow_exact_matches=False)
time ticker price quantity bid ask
0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN
1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98
2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN
3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN
4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN
|
[
"Perform",
"an",
"asof",
"merge",
".",
"This",
"is",
"similar",
"to",
"a",
"left",
"-",
"join",
"except",
"that",
"we",
"match",
"on",
"nearest",
"key",
"rather",
"than",
"equal",
"keys",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/merge.py#L235-L467
|
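For the 'nearest' direction, the search compares the closest right key on
each side of the insertion point. Again this is only an illustrative sketch
under the same assumptions (sorted keys, no 'by' groups, no tolerance);
asof_nearest is a hypothetical name, and merge_asof's exact tie-breaking
rules may differ.

import numpy as np

def asof_nearest(left_on, right_on):
    # index of the right key closest in absolute distance to each left
    # key; right_on must be sorted ascending
    left_on = np.asarray(left_on, dtype=float)
    right_on = np.asarray(right_on, dtype=float)
    hi = np.clip(np.searchsorted(right_on, left_on, side='left'),
                 0, len(right_on) - 1)
    lo = np.clip(hi - 1, 0, len(right_on) - 1)
    pick_lo = np.abs(left_on - right_on[lo]) <= np.abs(right_on[hi] - left_on)
    return np.where(pick_lo, lo, hi)

print(asof_nearest([1, 5, 10], [1, 2, 3, 6, 7]))  # [0 3 4] -> keys 1, 6, 7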
20,497
|
pandas-dev/pandas
|
pandas/core/reshape/merge.py
|
_MergeOperation._maybe_restore_index_levels
|
def _maybe_restore_index_levels(self, result):
"""
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
"""
names_to_restore = []
for name, left_key, right_key in zip(self.join_names,
self.left_on,
self.right_on):
if (self.orig_left._is_level_reference(left_key) and
self.orig_right._is_level_reference(right_key) and
name not in result.index.names):
names_to_restore.append(name)
if names_to_restore:
result.set_index(names_to_restore, inplace=True)
|
python
|
def _maybe_restore_index_levels(self, result):
"""
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
"""
names_to_restore = []
for name, left_key, right_key in zip(self.join_names,
self.left_on,
self.right_on):
if (self.orig_left._is_level_reference(left_key) and
self.orig_right._is_level_reference(right_key) and
name not in result.index.names):
names_to_restore.append(name)
if names_to_restore:
result.set_index(names_to_restore, inplace=True)
|
[
"def",
"_maybe_restore_index_levels",
"(",
"self",
",",
"result",
")",
":",
"names_to_restore",
"=",
"[",
"]",
"for",
"name",
",",
"left_key",
",",
"right_key",
"in",
"zip",
"(",
"self",
".",
"join_names",
",",
"self",
".",
"left_on",
",",
"self",
".",
"right_on",
")",
":",
"if",
"(",
"self",
".",
"orig_left",
".",
"_is_level_reference",
"(",
"left_key",
")",
"and",
"self",
".",
"orig_right",
".",
"_is_level_reference",
"(",
"right_key",
")",
"and",
"name",
"not",
"in",
"result",
".",
"index",
".",
"names",
")",
":",
"names_to_restore",
".",
"append",
"(",
"name",
")",
"if",
"names_to_restore",
":",
"result",
".",
"set_index",
"(",
"names_to_restore",
",",
"inplace",
"=",
"True",
")"
] |
Restore index levels specified as `on` parameters
Here we check for cases where `self.left_on` and `self.right_on` pairs
each reference an index level in their respective DataFrames. The
joined columns corresponding to these pairs are then restored to the
index of `result`.
**Note:** This method has side effects. It modifies `result` in-place
Parameters
----------
result: DataFrame
merge result
Returns
-------
None
|
[
"Restore",
"index",
"levels",
"specified",
"as",
"on",
"parameters"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/merge.py#L619-L650
|
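A small demonstration of the behavior this helper produces. It assumes a
pandas version (0.23+) in which merge's 'on' may name index levels; the
frames here are made up for illustration.

import pandas as pd

left = pd.DataFrame({'v1': [10, 20]},
                    index=pd.Index(['a', 'b'], name='key'))
right = pd.DataFrame({'v2': [1, 2]},
                     index=pd.Index(['a', 'b'], name='key'))

# 'key' is an index level on both sides, so after the column-based join
# _maybe_restore_index_levels moves it back into the result's index
out = pd.merge(left, right, on='key')
print(out.index.names)  # ['key'], rather than leaving 'key' as a column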
20,498
|
pandas-dev/pandas
|
pandas/core/reshape/merge.py
|
_MergeOperation._create_join_index
|
def _create_join_index(self, index, other_index, indexer,
other_indexer, how='left'):
"""
Create a join index by rearranging one index to match another
Parameters
----------
index: Index being rearranged
other_index: Index used to supply values not found in index
indexer: how to rearrange index
how: replacement is only necessary if indexer based on other_index
Returns
-------
join_index
"""
join_index = index.take(indexer)
if (self.how in (how, 'outer') and
not isinstance(other_index, MultiIndex)):
# if final index requires values in other_index but not target
# index, indexer may hold missing (-1) values, causing Index.take
# to take the final value in target index
mask = indexer == -1
if np.any(mask):
# if values missing (-1) from target index,
# take from other_index instead
join_list = join_index.to_numpy()
other_list = other_index.take(other_indexer).to_numpy()
join_list[mask] = other_list[mask]
join_index = Index(join_list, dtype=join_index.dtype,
name=join_index.name)
return join_index
|
python
|
def _create_join_index(self, index, other_index, indexer,
other_indexer, how='left'):
"""
Create a join index by rearranging one index to match another
Parameters
----------
index: Index being rearranged
other_index: Index used to supply values not found in index
indexer: how to rearrange index
how: replacement is only necessary if indexer based on other_index
Returns
-------
join_index
"""
join_index = index.take(indexer)
if (self.how in (how, 'outer') and
not isinstance(other_index, MultiIndex)):
# if final index requires values in other_index but not target
# index, indexer may hold missing (-1) values, causing Index.take
# to take the final value in target index
mask = indexer == -1
if np.any(mask):
# if values missing (-1) from target index,
# take from other_index instead
join_list = join_index.to_numpy()
other_list = other_index.take(other_indexer).to_numpy()
join_list[mask] = other_list[mask]
join_index = Index(join_list, dtype=join_index.dtype,
name=join_index.name)
return join_index
|
[
"def",
"_create_join_index",
"(",
"self",
",",
"index",
",",
"other_index",
",",
"indexer",
",",
"other_indexer",
",",
"how",
"=",
"'left'",
")",
":",
"join_index",
"=",
"index",
".",
"take",
"(",
"indexer",
")",
"if",
"(",
"self",
".",
"how",
"in",
"(",
"how",
",",
"'outer'",
")",
"and",
"not",
"isinstance",
"(",
"other_index",
",",
"MultiIndex",
")",
")",
":",
"# if final index requires values in other_index but not target",
"# index, indexer may hold missing (-1) values, causing Index.take",
"# to take the final value in target index",
"mask",
"=",
"indexer",
"==",
"-",
"1",
"if",
"np",
".",
"any",
"(",
"mask",
")",
":",
"# if values missing (-1) from target index,",
"# take from other_index instead",
"join_list",
"=",
"join_index",
".",
"to_numpy",
"(",
")",
"other_list",
"=",
"other_index",
".",
"take",
"(",
"other_indexer",
")",
".",
"to_numpy",
"(",
")",
"join_list",
"[",
"mask",
"]",
"=",
"other_list",
"[",
"mask",
"]",
"join_index",
"=",
"Index",
"(",
"join_list",
",",
"dtype",
"=",
"join_index",
".",
"dtype",
",",
"name",
"=",
"join_index",
".",
"name",
")",
"return",
"join_index"
] |
Create a join index by rearranging one index to match another
Parameters
----------
index: Index being rearranged
other_index: Index used to supply values not found in index
indexer: how to rearrange index
how: replacement is only necessary if indexer based on other_index
Returns
-------
join_index
|
[
"Create",
"a",
"join",
"index",
"by",
"rearranging",
"one",
"index",
"to",
"match",
"another"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/merge.py#L790-L821
|
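The mask-and-fill step can be seen in isolation with made-up indexers. The
indexer values below are hypothetical; in practice they come from the join
algorithm, where -1 means "no match in this index".

import numpy as np
import pandas as pd

index = pd.Index([10, 20, 30], name='k')
other_index = pd.Index([100, 200, 300, 400])
indexer = np.array([0, -1, 2])       # -1: row has no match in `index`
other_indexer = np.array([1, 3, 0])  # positions taken from the other side

join_index = index.take(indexer)     # NB: plain take wraps -1 to the end
mask = indexer == -1
if mask.any():
    values = join_index.to_numpy()
    values[mask] = other_index.take(other_indexer).to_numpy()[mask]
    join_index = pd.Index(values, name=index.name)

print(list(join_index))  # [10, 400, 30] -- the -1 slot filled from other_index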
20,499
|
pandas-dev/pandas
|
pandas/core/dtypes/base.py
|
_DtypeOpsMixin.is_dtype
|
def is_dtype(cls, dtype):
"""Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
is_dtype : bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, 'dtype', dtype)
if isinstance(dtype, (ABCSeries, ABCIndexClass,
ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
|
python
|
def is_dtype(cls, dtype):
"""Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
is_dtype : bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, 'dtype', dtype)
if isinstance(dtype, (ABCSeries, ABCIndexClass,
ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
|
[
"def",
"is_dtype",
"(",
"cls",
",",
"dtype",
")",
":",
"dtype",
"=",
"getattr",
"(",
"dtype",
",",
"'dtype'",
",",
"dtype",
")",
"if",
"isinstance",
"(",
"dtype",
",",
"(",
"ABCSeries",
",",
"ABCIndexClass",
",",
"ABCDataFrame",
",",
"np",
".",
"dtype",
")",
")",
":",
"# https://github.com/pandas-dev/pandas/issues/22960",
"# avoid passing data to `construct_from_string`. This could",
"# cause a FutureWarning from numpy about failing elementwise",
"# comparison from, e.g., comparing DataFrame == 'category'.",
"return",
"False",
"elif",
"dtype",
"is",
"None",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"dtype",
",",
"cls",
")",
":",
"return",
"True",
"try",
":",
"return",
"cls",
".",
"construct_from_string",
"(",
"dtype",
")",
"is",
"not",
"None",
"except",
"TypeError",
":",
"return",
"False"
] |
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
is_dtype : bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
|
[
"Check",
"if",
"we",
"match",
"dtype",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/base.py#L75-L113
|
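To see each branch of the default is_dtype, a minimal ExtensionDtype
subclass suffices. MyDtype is a hypothetical dtype written only for this
sketch; it assumes pandas.api.extensions.ExtensionDtype, whose
construct_from_string is expected to raise TypeError on unrecognized
strings.

import numpy as np
from pandas.api.extensions import ExtensionDtype

class MyDtype(ExtensionDtype):
    name = 'my_dtype'
    type = object  # scalar type associated with this dtype

    @classmethod
    def construct_from_string(cls, string):
        if string == cls.name:
            return cls()
        raise TypeError("Cannot construct a 'MyDtype' from '{}'".format(string))

MyDtype.is_dtype(MyDtype())        # True:  isinstance(dtype, cls)
MyDtype.is_dtype('my_dtype')       # True:  construct_from_string succeeds
MyDtype.is_dtype('int64')          # False: TypeError is caught
MyDtype.is_dtype(np.dtype('i8'))   # False: np.dtype short-circuits early
MyDtype.is_dtype(None)             # False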