partition | func_name | docstring | path | code | repo | language | url | sha |
|---|---|---|---|---|---|---|---|---|
train | _return_parsed_timezone_results | Return results from array_strptime if a %z or %Z directive was passed.

Parameters
----------
result : ndarray
    int64 date representations of the dates
timezones : ndarray
    pytz timezone objects
box : boolean
    True boxes result as an Index-like, False returns an ndarray
tz : object
    None or pytz timezone object
name : string, default None
    Name for a DatetimeIndex

Returns
-------
tz_result : ndarray of parsed dates with timezone
    - Index-like if box=True
    - ndarray of Timestamps if box=False | pandas/core/tools/datetimes.py | def _return_parsed_timezone_results(result, timezones, box, tz, name):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
box : boolean
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : ndarray of parsed dates with timezone
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
if tz is not None:
raise ValueError("Cannot pass a tz argument when "
"parsing strings with timezone "
"information.")
tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone
in zip(result, timezones)])
if box:
from pandas import Index
return Index(tz_results, name=name)
return tz_results | def _return_parsed_timezone_results(result, timezones, box, tz, name):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
box : boolean
True boxes result as an Index-like, False returns an ndarray
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : ndarray of parsed dates with timezone
Returns:
- Index-like if box=True
- ndarray of Timestamps if box=False
"""
if tz is not None:
raise ValueError("Cannot pass a tz argument when "
"parsing strings with timezone "
"information.")
tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone
in zip(result, timezones)])
if box:
from pandas import Index
return Index(tz_results, name=name)
return tz_results | [
"Return",
"results",
"from",
"array_strptime",
"if",
"a",
"%z",
"or",
"%Z",
"directive",
"was",
"passed",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L97-L132 | [
"def",
"_return_parsed_timezone_results",
"(",
"result",
",",
"timezones",
",",
"box",
",",
"tz",
",",
"name",
")",
":",
"if",
"tz",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"Cannot pass a tz argument when \"",
"\"parsing strings with timezone \"",
"\"information.\"",
")",
"tz_results",
"=",
"np",
".",
"array",
"(",
"[",
"Timestamp",
"(",
"res",
")",
".",
"tz_localize",
"(",
"zone",
")",
"for",
"res",
",",
"zone",
"in",
"zip",
"(",
"result",
",",
"timezones",
")",
"]",
")",
"if",
"box",
":",
"from",
"pandas",
"import",
"Index",
"return",
"Index",
"(",
"tz_results",
",",
"name",
"=",
"name",
")",
"return",
"tz_results"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
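To make the %z / %Z branch above concrete, here is a small usage sketch (editorial, not part of the dataset row; it assumes pandas 0.24+, where `to_datetime` accepts offset directives in `format`):

```python
import pandas as pd

# Offset-bearing strings take the array_strptime path and are handed to
# _return_parsed_timezone_results, so the values come back tz-aware.
idx = pd.to_datetime(["2019-01-01 00:00 -0500", "2019-01-02 00:00 -0500"],
                     format="%Y-%m-%d %H:%M %z")
print(idx[0])  # Timestamp('2019-01-01 00:00:00-0500', ...)

# Per the guard at the top of the function, combining an explicit tz
# (utc=True) with %z-bearing strings raises ValueError in this version.
```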
train | _convert_listlike_datetimes | Helper function for to_datetime. Performs the conversions of 1D listlike
of dates

Parameters
----------
arg : list, tuple, ndarray, Series, Index
    date to be parsed
box : boolean
    True boxes result as an Index-like, False returns an ndarray
name : object
    None or string for the Index name
tz : object
    None or 'utc'
unit : string
    None or string of the frequency of the passed data
errors : string
    error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
infer_datetime_format : boolean
    inferring format behavior from to_datetime
dayfirst : boolean
    dayfirst parsing behavior from to_datetime
yearfirst : boolean
    yearfirst parsing behavior from to_datetime
exact : boolean
    exact format matching behavior from to_datetime

Returns
-------
ndarray of parsed dates
    - Index-like if box=True
    - ndarray of Timestamps if box=False | pandas/core/tools/datetimes.py | def _convert_listlike_datetimes(arg, box, format, name=None, tz=None,
                                unit=None, errors=None,
                                infer_datetime_format=None, dayfirst=None,
                                yearfirst=None, exact=None):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be parsed
    box : boolean
        True boxes result as an Index-like, False returns an ndarray
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handling behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime

    Returns
    -------
    ndarray of parsed dates
        - Index-like if box=True
        - ndarray of Timestamps if box=False
    """
    from pandas import DatetimeIndex
    from pandas.core.arrays import DatetimeArray
    from pandas.core.arrays.datetimes import (
        maybe_convert_dtype, objects_to_datetime64ns)

    if isinstance(arg, (list, tuple)):
        arg = np.array(arg, dtype='O')

    # these are shortcutable
    if is_datetime64tz_dtype(arg):
        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == 'utc':
            arg = arg.tz_convert(None).tz_localize(tz)
        return arg

    elif is_datetime64_ns_dtype(arg):
        if box and not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        return arg

    elif unit is not None:
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, 'values', arg)
        result = tslib.array_with_unit_to_datetime(arg, unit,
                                                   errors=errors)
        if box:
            if errors == 'ignore':
                from pandas import Index
                result = Index(result, name=name)
                # GH 23758: We may still need to localize the result with tz
                try:
                    return result.tz_localize(tz)
                except AttributeError:
                    return result

            return DatetimeIndex(result, tz=tz, name=name)
        return result

    elif getattr(arg, 'ndim', 1) > 1:
        raise TypeError('arg must be a string, datetime, list, tuple, '
                        '1-d array, or Series')

    # warn if passing timedelta64, raise for PeriodDtype
    # NB: this must come after unit transformation
    orig_arg = arg
    arg, _ = maybe_convert_dtype(arg, copy=False)

    arg = ensure_object(arg)
    require_iso8601 = False

    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)

    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes the process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None

    tz_parsed = None
    result = None

    if format is not None:
        try:
            # shortcut formatting here
            if format == '%Y%m%d':
                try:
                    # pass orig_arg as float-dtype may have been converted to
                    # datetime64[ns]
                    orig_arg = ensure_object(orig_arg)
                    result = _attempt_YYYYMMDD(orig_arg, errors=errors)
                except (ValueError, TypeError, tslibs.OutOfBoundsDatetime):
                    raise ValueError("cannot convert the input to "
                                     "'%Y%m%d' date format")

            # fallback
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors)
                    if '%Z' in format or '%z' in format:
                        return _return_parsed_timezone_results(
                            result, timezones, box, tz, name)
                except tslibs.OutOfBoundsDatetime:
                    if errors == 'raise':
                        raise
                    elif errors == 'coerce':
                        result = np.empty(arg.shape, dtype='M8[ns]')
                        iresult = result.view('i8')
                        iresult.fill(tslibs.iNaT)
                    else:
                        result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == 'raise':
                            raise
                        elif errors == 'coerce':
                            result = np.empty(arg.shape, dtype='M8[ns]')
                            iresult = result.view('i8')
                            iresult.fill(tslibs.iNaT)
                        else:
                            result = arg
        except ValueError as e:
            # Fallback to try to convert datetime objects if timezone-aware
            # datetime objects are found without passing `utc=True`
            try:
                values, tz = conversion.datetime_to_datetime64(arg)
                return DatetimeIndex._simple_new(values, name=name, tz=tz)
            except (ValueError, TypeError):
                raise e

    if result is None:
        assert format is None or infer_datetime_format
        utc = tz == 'utc'
        result, tz_parsed = objects_to_datetime64ns(
            arg, dayfirst=dayfirst, yearfirst=yearfirst,
            utc=utc, errors=errors, require_iso8601=require_iso8601,
            allow_object=True)

    if tz_parsed is not None:
        if box:
            # We can take a shortcut since the datetime64 numpy array
            # is in UTC
            return DatetimeIndex._simple_new(result, name=name,
                                             tz=tz_parsed)
        else:
            # Convert the datetime64 numpy array to a numpy array
            # of datetime objects
            result = [Timestamp(ts, tz=tz_parsed).to_pydatetime()
                      for ts in result]
            return np.array(result, dtype=object)

    if box:
        # Ensure we return an Index in all cases where box=True
        if is_datetime64_dtype(result):
            return DatetimeIndex(result, tz=tz, name=name)
        elif is_object_dtype(result):
            # e.g. an Index of datetime objects
            from pandas import Index
            return Index(result, name=name)
    return result | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L135-L326 | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
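A brief sketch (editorial; the numeric values are taken from the `to_datetime` docstring further down) of two of the branches dispatched above, the `unit` shortcut and the ISO8601 fast path:

```python
import pandas as pd

# unit branch: numeric input interpreted as seconds since the unix epoch
pd.to_datetime([1490195805, 1490195806], unit="s")
# -> DatetimeIndex(['2017-03-22 15:16:45', '2017-03-22 15:16:46'], ...)

# ISO8601-formatted strings hit the special fast path noted in the
# comments above, bypassing format inference
pd.to_datetime(["2019-01-01T00:00:00", "2019-06-01T12:30:00"])

# unit and format are mutually exclusive in this helper:
# pd.to_datetime([1], unit="s", format="%Y")  # -> ValueError
```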
train | _adjust_to_origin | Helper function for to_datetime.
Adjust input argument to the specified origin

Parameters
----------
arg : list, tuple, ndarray, Series, Index
    date to be adjusted
origin : 'julian' or Timestamp
    origin offset for the arg
unit : string
    passed unit from to_datetime, must be 'D'

Returns
-------
ndarray or scalar of adjusted date(s) | pandas/core/tools/datetimes.py | def _adjust_to_origin(arg, origin, unit):
"""
Helper function for to_datetime.
Adjust input argument to the specified origin
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be adjusted
origin : 'julian' or Timestamp
origin offset for the arg
unit : string
passed unit from to_datetime, must be 'D'
Returns
-------
ndarray or scalar of adjusted date(s)
"""
if origin == 'julian':
original = arg
j0 = Timestamp(0).to_julian_date()
if unit != 'D':
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = arg - j0
except TypeError:
raise ValueError("incompatible 'arg' type for given "
"'origin'='julian'")
# premptively check this for a nice range
j_max = Timestamp.max.to_julian_date() - j0
j_min = Timestamp.min.to_julian_date() - j0
if np.any(arg > j_max) or np.any(arg < j_min):
raise tslibs.OutOfBoundsDatetime(
"{original} is Out of Bounds for "
"origin='julian'".format(original=original))
else:
# arg must be numeric
if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
is_numeric_dtype(np.asarray(arg))):
raise ValueError(
"'{arg}' is not compatible with origin='{origin}'; "
"it must be numeric with a unit specified ".format(
arg=arg,
origin=origin))
# we are going to offset back to unix / epoch time
try:
offset = Timestamp(origin)
except tslibs.OutOfBoundsDatetime:
raise tslibs.OutOfBoundsDatetime(
"origin {origin} is Out of Bounds".format(origin=origin))
except ValueError:
raise ValueError("origin {origin} cannot be converted "
"to a Timestamp".format(origin=origin))
if offset.tz is not None:
raise ValueError(
"origin offset {} must be tz-naive".format(offset))
offset -= Timestamp(0)
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
offset = offset // tslibs.Timedelta(1, unit=unit)
# scalars & ndarray-like can handle the addition
if is_list_like(arg) and not isinstance(
arg, (ABCSeries, ABCIndexClass, np.ndarray)):
arg = np.asarray(arg)
arg = arg + offset
return arg | def _adjust_to_origin(arg, origin, unit):
"""
Helper function for to_datetime.
Adjust input argument to the specified origin
Parameters
----------
arg : list, tuple, ndarray, Series, Index
date to be adjusted
origin : 'julian' or Timestamp
origin offset for the arg
unit : string
passed unit from to_datetime, must be 'D'
Returns
-------
ndarray or scalar of adjusted date(s)
"""
if origin == 'julian':
original = arg
j0 = Timestamp(0).to_julian_date()
if unit != 'D':
raise ValueError("unit must be 'D' for origin='julian'")
try:
arg = arg - j0
except TypeError:
raise ValueError("incompatible 'arg' type for given "
"'origin'='julian'")
# premptively check this for a nice range
j_max = Timestamp.max.to_julian_date() - j0
j_min = Timestamp.min.to_julian_date() - j0
if np.any(arg > j_max) or np.any(arg < j_min):
raise tslibs.OutOfBoundsDatetime(
"{original} is Out of Bounds for "
"origin='julian'".format(original=original))
else:
# arg must be numeric
if not ((is_scalar(arg) and (is_integer(arg) or is_float(arg))) or
is_numeric_dtype(np.asarray(arg))):
raise ValueError(
"'{arg}' is not compatible with origin='{origin}'; "
"it must be numeric with a unit specified ".format(
arg=arg,
origin=origin))
# we are going to offset back to unix / epoch time
try:
offset = Timestamp(origin)
except tslibs.OutOfBoundsDatetime:
raise tslibs.OutOfBoundsDatetime(
"origin {origin} is Out of Bounds".format(origin=origin))
except ValueError:
raise ValueError("origin {origin} cannot be converted "
"to a Timestamp".format(origin=origin))
if offset.tz is not None:
raise ValueError(
"origin offset {} must be tz-naive".format(offset))
offset -= Timestamp(0)
# convert the offset to the unit of the arg
# this should be lossless in terms of precision
offset = offset // tslibs.Timedelta(1, unit=unit)
# scalars & ndarray-like can handle the addition
if is_list_like(arg) and not isinstance(
arg, (ABCSeries, ABCIndexClass, np.ndarray)):
arg = np.asarray(arg)
arg = arg + offset
return arg | [
"Helper",
"function",
"for",
"to_datetime",
".",
"Adjust",
"input",
"argument",
"to",
"the",
"specified",
"origin"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L329-L399 | [
"def",
"_adjust_to_origin",
"(",
"arg",
",",
"origin",
",",
"unit",
")",
":",
"if",
"origin",
"==",
"'julian'",
":",
"original",
"=",
"arg",
"j0",
"=",
"Timestamp",
"(",
"0",
")",
".",
"to_julian_date",
"(",
")",
"if",
"unit",
"!=",
"'D'",
":",
"raise",
"ValueError",
"(",
"\"unit must be 'D' for origin='julian'\"",
")",
"try",
":",
"arg",
"=",
"arg",
"-",
"j0",
"except",
"TypeError",
":",
"raise",
"ValueError",
"(",
"\"incompatible 'arg' type for given \"",
"\"'origin'='julian'\"",
")",
"# premptively check this for a nice range",
"j_max",
"=",
"Timestamp",
".",
"max",
".",
"to_julian_date",
"(",
")",
"-",
"j0",
"j_min",
"=",
"Timestamp",
".",
"min",
".",
"to_julian_date",
"(",
")",
"-",
"j0",
"if",
"np",
".",
"any",
"(",
"arg",
">",
"j_max",
")",
"or",
"np",
".",
"any",
"(",
"arg",
"<",
"j_min",
")",
":",
"raise",
"tslibs",
".",
"OutOfBoundsDatetime",
"(",
"\"{original} is Out of Bounds for \"",
"\"origin='julian'\"",
".",
"format",
"(",
"original",
"=",
"original",
")",
")",
"else",
":",
"# arg must be numeric",
"if",
"not",
"(",
"(",
"is_scalar",
"(",
"arg",
")",
"and",
"(",
"is_integer",
"(",
"arg",
")",
"or",
"is_float",
"(",
"arg",
")",
")",
")",
"or",
"is_numeric_dtype",
"(",
"np",
".",
"asarray",
"(",
"arg",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"'{arg}' is not compatible with origin='{origin}'; \"",
"\"it must be numeric with a unit specified \"",
".",
"format",
"(",
"arg",
"=",
"arg",
",",
"origin",
"=",
"origin",
")",
")",
"# we are going to offset back to unix / epoch time",
"try",
":",
"offset",
"=",
"Timestamp",
"(",
"origin",
")",
"except",
"tslibs",
".",
"OutOfBoundsDatetime",
":",
"raise",
"tslibs",
".",
"OutOfBoundsDatetime",
"(",
"\"origin {origin} is Out of Bounds\"",
".",
"format",
"(",
"origin",
"=",
"origin",
")",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"origin {origin} cannot be converted \"",
"\"to a Timestamp\"",
".",
"format",
"(",
"origin",
"=",
"origin",
")",
")",
"if",
"offset",
".",
"tz",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"origin offset {} must be tz-naive\"",
".",
"format",
"(",
"offset",
")",
")",
"offset",
"-=",
"Timestamp",
"(",
"0",
")",
"# convert the offset to the unit of the arg",
"# this should be lossless in terms of precision",
"offset",
"=",
"offset",
"//",
"tslibs",
".",
"Timedelta",
"(",
"1",
",",
"unit",
"=",
"unit",
")",
"# scalars & ndarray-like can handle the addition",
"if",
"is_list_like",
"(",
"arg",
")",
"and",
"not",
"isinstance",
"(",
"arg",
",",
"(",
"ABCSeries",
",",
"ABCIndexClass",
",",
"np",
".",
"ndarray",
")",
")",
":",
"arg",
"=",
"np",
".",
"asarray",
"(",
"arg",
")",
"arg",
"=",
"arg",
"+",
"offset",
"return",
"arg"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
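An editorial sketch of the two origin modes handled above (the Julian-day arithmetic follows from `Timestamp(0).to_julian_date()` being 2440587.5, so Julian day 2451545 lands on 2000-01-01 12:00):

```python
import pandas as pd

# origin='julian' requires unit='D'; Julian day 2451545 is the J2000 epoch
pd.to_datetime(2451545, unit="D", origin="julian")
# -> Timestamp('2000-01-01 12:00:00')

# any Timestamp-convertible origin shifts the epoch instead
pd.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01"))
# -> DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], ...)

# a tz-aware origin is rejected: "origin offset ... must be tz-naive"
```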
train | to_datetime | Convert argument to datetime.

Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series

    .. versionadded:: 0.18.1

       or DataFrame/dict-like

errors : {'ignore', 'raise', 'coerce'}, default 'raise'
    - If 'raise', then invalid parsing will raise an exception
    - If 'coerce', then invalid parsing will be set as NaT
    - If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
    Specify a date parse order if `arg` is str or its list-likes.
    If True, parses dates with the day first, eg 10/11/12 is parsed as
    2012-11-10.
    Warning: dayfirst=True is not strict, but will prefer to parse
    with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
    Specify a date parse order if `arg` is str or its list-likes.

    - If True parses dates with the year first, eg 10/11/12 is parsed as
      2010-11-12.
    - If both dayfirst and yearfirst are True, yearfirst is preceded (same
      as dateutil).

    Warning: yearfirst=True is not strict, but will prefer to parse
    with year first (this is a known bug, based on dateutil behavior).

    .. versionadded:: 0.16.1

utc : boolean, default None
    Return UTC DatetimeIndex if True (converting any tz-aware
    datetime.datetime objects as well).
box : boolean, default True
    - If True returns a DatetimeIndex or Index-like object
    - If False returns ndarray of values.

    .. deprecated:: 0.25.0
       Use :meth:`.to_numpy` or :meth:`Timestamp.to_datetime64`
       instead to get an ndarray of values or numpy.datetime64,
       respectively.

format : string, default None
    strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
    all the way up to nanoseconds.
    See strftime documentation for more information on choices:
    https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
exact : boolean, True by default
    - If True, require an exact format match.
    - If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
    unit of the arg (D, s, ms, us, ns), denoting the unit of the passed
    integer or float number. This will be based off the origin.
    Example: with unit='ms' and origin='unix' (the default), this
    would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
    If True and no `format` is given, attempt to infer the format of the
    datetime strings, and if it can be inferred, switch to a faster
    method of parsing them. In some cases this can increase the parsing
    speed by ~5-10x.
origin : scalar, default is 'unix'
    Define the reference date. The numeric values would be parsed as number
    of units (defined by `unit`) since this reference date.

    - If 'unix' (or POSIX) time; origin is set to 1970-01-01.
    - If 'julian', unit must be 'D', and origin is set to beginning of
      Julian Calendar. Julian day number 0 is assigned to the day starting
      at noon on January 1, 4713 BC.
    - If Timestamp convertible, origin is set to Timestamp identified by
      origin.

    .. versionadded:: 0.20.0

cache : boolean, default False
    If True, use a cache of unique, converted dates to apply the datetime
    conversion. May produce significant speed-up when parsing duplicate
    date strings, especially ones with timezone offsets.

    .. versionadded:: 0.23.0

Returns
-------
ret : datetime if parsing succeeded.
    Return type depends on input:

    - list-like: DatetimeIndex
    - Series: Series of datetime64 dtype
    - scalar: Timestamp

    In case when it is not possible to return designated types (e.g. when
    any element of input is before Timestamp.min or after Timestamp.max)
    return will have datetime.datetime type (or corresponding
    array/Series).

See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_timedelta : Convert argument to timedelta.

Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same.

>>> df = pd.DataFrame({'year': [2015, 2016],
...                    'month': [2, 3],
...                    'day': [4, 5]})
>>> pd.to_datetime(df)
0   2015-02-04
1   2016-03-05
dtype: datetime64[ns]

If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.

Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.

>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT

Passing infer_datetime_format=True can often speed up parsing
if the input is not exactly in ISO8601 format but is in a regular format.

>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
>>> s.head()
0    3/11/2000
1    3/12/2000
2    3/13/2000
3    3/11/2000
4    3/12/2000
dtype: object

>>> %timeit pd.to_datetime(s, infer_datetime_format=True)
100 loops, best of 3: 10.4 ms per loop

>>> %timeit pd.to_datetime(s, infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop

Using a unix epoch time

>>> pd.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> pd.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')

.. warning:: For float arg, precision rounding might happen. To prevent
    unexpected behavior use a fixed-width exact type.

Using a non-unix epoch origin

>>> pd.to_datetime([1, 2, 3], unit='D',
...                origin=pd.Timestamp('1960-01-01'))
0   1960-01-02
1   1960-01-03
2   1960-01-04 | pandas/core/tools/datetimes.py | def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
                utc=None, box=True, format=None, exact=True,
                unit=None, infer_datetime_format=False, origin='unix',
                cache=False):
    """
    Convert argument to datetime.

    Parameters
    ----------
    arg : integer, float, string, datetime, list, tuple, 1-d array, Series

        .. versionadded:: 0.18.1

           or DataFrame/dict-like

    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    dayfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.
        If True, parses dates with the day first, eg 10/11/12 is parsed as
        2012-11-10.
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug, based on dateutil behavior).
    yearfirst : boolean, default False
        Specify a date parse order if `arg` is str or its list-likes.

        - If True parses dates with the year first, eg 10/11/12 is parsed as
          2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst is preceded (same
          as dateutil).

        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).

        .. versionadded:: 0.16.1

    utc : boolean, default None
        Return UTC DatetimeIndex if True (converting any tz-aware
        datetime.datetime objects as well).
    box : boolean, default True
        - If True returns a DatetimeIndex or Index-like object
        - If False returns ndarray of values.

        .. deprecated:: 0.25.0
           Use :meth:`.to_numpy` or :meth:`Timestamp.to_datetime64`
           instead to get an ndarray of values or numpy.datetime64,
           respectively.

    format : string, default None
        strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
        all the way up to nanoseconds.
        See strftime documentation for more information on choices:
        https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
    exact : boolean, True by default
        - If True, require an exact format match.
        - If False, allow the format to match anywhere in the target string.
    unit : string, default 'ns'
        unit of the arg (D, s, ms, us, ns), denoting the unit of the passed
        integer or float number. This will be based off the origin.
        Example: with unit='ms' and origin='unix' (the default), this
        would calculate the number of milliseconds to the unix epoch start.
    infer_datetime_format : boolean, default False
        If True and no `format` is given, attempt to infer the format of the
        datetime strings, and if it can be inferred, switch to a faster
        method of parsing them. In some cases this can increase the parsing
        speed by ~5-10x.
    origin : scalar, default is 'unix'
        Define the reference date. The numeric values would be parsed as number
        of units (defined by `unit`) since this reference date.

        - If 'unix' (or POSIX) time; origin is set to 1970-01-01.
        - If 'julian', unit must be 'D', and origin is set to beginning of
          Julian Calendar. Julian day number 0 is assigned to the day starting
          at noon on January 1, 4713 BC.
        - If Timestamp convertible, origin is set to Timestamp identified by
          origin.

        .. versionadded:: 0.20.0

    cache : boolean, default False
        If True, use a cache of unique, converted dates to apply the datetime
        conversion. May produce significant speed-up when parsing duplicate
        date strings, especially ones with timezone offsets.

        .. versionadded:: 0.23.0

    Returns
    -------
    ret : datetime if parsing succeeded.
        Return type depends on input:

        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp

        In case when it is not possible to return designated types (e.g. when
        any element of input is before Timestamp.min or after Timestamp.max)
        return will have datetime.datetime type (or corresponding
        array/Series).

    See Also
    --------
    DataFrame.astype : Cast argument to a specified dtype.
    to_timedelta : Convert argument to timedelta.

    Examples
    --------
    Assembling a datetime from multiple columns of a DataFrame. The keys can be
    common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns'] or plurals of the same.

    >>> df = pd.DataFrame({'year': [2015, 2016],
    ...                    'month': [2, 3],
    ...                    'day': [4, 5]})
    >>> pd.to_datetime(df)
    0   2015-02-04
    1   2016-03-05
    dtype: datetime64[ns]

    If a date does not meet the `timestamp limitations
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html
    #timeseries-timestamp-limits>`_, passing errors='ignore'
    will return the original input instead of raising any exception.

    Passing errors='coerce' will force an out-of-bounds date to NaT,
    in addition to forcing non-dates (or non-parseable dates) to NaT.

    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
    datetime.datetime(1300, 1, 1, 0, 0)
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
    NaT

    Passing infer_datetime_format=True can often speed up parsing
    if the input is not exactly in ISO8601 format but is in a regular format.

    >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
    >>> s.head()
    0    3/11/2000
    1    3/12/2000
    2    3/13/2000
    3    3/11/2000
    4    3/12/2000
    dtype: object

    >>> %timeit pd.to_datetime(s, infer_datetime_format=True)
    100 loops, best of 3: 10.4 ms per loop

    >>> %timeit pd.to_datetime(s, infer_datetime_format=False)
    1 loop, best of 3: 471 ms per loop

    Using a unix epoch time

    >>> pd.to_datetime(1490195805, unit='s')
    Timestamp('2017-03-22 15:16:45')
    >>> pd.to_datetime(1490195805433502912, unit='ns')
    Timestamp('2017-03-22 15:16:45.433502912')

    .. warning:: For float arg, precision rounding might happen. To prevent
        unexpected behavior use a fixed-width exact type.

    Using a non-unix epoch origin

    >>> pd.to_datetime([1, 2, 3], unit='D',
    ...                origin=pd.Timestamp('1960-01-01'))
    0   1960-01-02
    1   1960-01-03
    2   1960-01-04
    """
    if arg is None:
        return None

    if origin != 'unix':
        arg = _adjust_to_origin(arg, origin, unit)

    tz = 'utc' if utc else None
    convert_listlike = partial(_convert_listlike_datetimes, tz=tz, unit=unit,
                               dayfirst=dayfirst, yearfirst=yearfirst,
                               errors=errors, exact=exact,
                               infer_datetime_format=infer_datetime_format)

    if isinstance(arg, Timestamp):
        result = arg
        if tz is not None:
            if arg.tz is not None:
                result = result.tz_convert(tz)
            else:
                result = result.tz_localize(tz)
    elif isinstance(arg, ABCSeries):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = arg.map(cache_array)
        else:
            values = convert_listlike(arg._values, True, format)
            result = arg._constructor(values, index=arg.index, name=arg.name)
    elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
        result = _assemble_from_unit_mappings(arg, errors, box, tz)
    elif isinstance(arg, ABCIndexClass):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box, errors,
                                            name=arg.name)
        else:
            convert_listlike = partial(convert_listlike, name=arg.name)
            result = convert_listlike(arg, box, format)
    elif is_list_like(arg):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, box, errors)
        else:
            result = convert_listlike(arg, box, format)
    else:
        result = convert_listlike(np.array([arg]), box, format)[0]

    return result | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L403-L622 | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
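Two behaviors worth seeing side by side (editorial sketch; both follow directly from the docstring above):

```python
import pandas as pd

# errors='coerce' maps unparseable entries to NaT instead of raising
pd.to_datetime(["2019-01-01", "not a date"], errors="coerce")
# -> DatetimeIndex(['2019-01-01', 'NaT'], dtype='datetime64[ns]', freq=None)

# cache=True converts the unique strings once and maps them back, which
# pays off when the same date strings repeat many times
many = ["3/11/2000", "3/12/2000"] * 10000
idx = pd.to_datetime(many, cache=True)
```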
train | _assemble_from_unit_mappings | assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing

Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
    - If 'raise', then invalid parsing will raise an exception
    - If 'coerce', then invalid parsing will be set as NaT
    - If 'ignore', then invalid parsing will return the input
box : boolean
    - If True, return a DatetimeIndex
    - If False, return an array
tz : None or 'utc'

Returns
-------
Series | pandas/core/tools/datetimes.py | def _assemble_from_unit_mappings(arg, errors, box, tz):
"""
assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
box : boolean
- If True, return a DatetimeIndex
- If False, return an array
tz : None or 'utc'
Returns
-------
Series
"""
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if not arg.columns.is_unique:
raise ValueError("cannot assemble with duplicate keys")
# replace passed unit with _unit_map
def f(value):
if value in _unit_map:
return _unit_map[value]
# m is case significant
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for k, v in unit.items()}
# we require at least Ymd
required = ['year', 'month', 'day']
req = sorted(list(set(required) - set(unit_rev.keys())))
if len(req):
raise ValueError("to assemble mappings requires at least that "
"[year, month, day] be specified: [{required}] "
"is missing".format(required=','.join(req)))
# keys we don't recognize
excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
"[{excess}]".format(excess=','.join(excess)))
def coerce(values):
# we allow coercion to if errors allows
values = to_numeric(values, errors=errors)
# prevent overflow in case of int8 or int16
if is_integer_dtype(values):
values = values.astype('int64', copy=False)
return values
values = (coerce(arg[unit_rev['year']]) * 10000 +
coerce(arg[unit_rev['month']]) * 100 +
coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors, utc=tz)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the "
"datetimes: {error}".format(error=e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if value is not None and value in arg:
try:
values += to_timedelta(coerce(arg[value]),
unit=u,
errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the datetimes [{value}]: "
"{error}".format(value=value, error=e))
if not box:
return values.values
return values | def _assemble_from_unit_mappings(arg, errors, box, tz):
"""
assemble the unit specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
box : boolean
- If True, return a DatetimeIndex
- If False, return an array
tz : None or 'utc'
Returns
-------
Series
"""
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if not arg.columns.is_unique:
raise ValueError("cannot assemble with duplicate keys")
# replace passed unit with _unit_map
def f(value):
if value in _unit_map:
return _unit_map[value]
# m is case significant
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for k, v in unit.items()}
# we require at least Ymd
required = ['year', 'month', 'day']
req = sorted(list(set(required) - set(unit_rev.keys())))
if len(req):
raise ValueError("to assemble mappings requires at least that "
"[year, month, day] be specified: [{required}] "
"is missing".format(required=','.join(req)))
# keys we don't recognize
excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
"[{excess}]".format(excess=','.join(excess)))
def coerce(values):
# we allow coercion if errors allows
values = to_numeric(values, errors=errors)
# prevent overflow in case of int8 or int16
if is_integer_dtype(values):
values = values.astype('int64', copy=False)
return values
values = (coerce(arg[unit_rev['year']]) * 10000 +
coerce(arg[unit_rev['month']]) * 100 +
coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors, utc=tz)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the "
"datetimes: {error}".format(error=e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if value is not None and value in arg:
try:
values += to_timedelta(coerce(arg[value]),
unit=u,
errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the datetimes [{value}]: "
"{error}".format(value=value, error=e))
if not box:
return values.values
return values | [
"assemble",
"the",
"unit",
"specified",
"fields",
"from",
"the",
"arg",
"(",
"DataFrame",
")",
"Return",
"a",
"Series",
"for",
"actual",
"parsing"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L650-L737 | [
"def",
"_assemble_from_unit_mappings",
"(",
"arg",
",",
"errors",
",",
"box",
",",
"tz",
")",
":",
"from",
"pandas",
"import",
"to_timedelta",
",",
"to_numeric",
",",
"DataFrame",
"arg",
"=",
"DataFrame",
"(",
"arg",
")",
"if",
"not",
"arg",
".",
"columns",
".",
"is_unique",
":",
"raise",
"ValueError",
"(",
"\"cannot assemble with duplicate keys\"",
")",
"# replace passed unit with _unit_map",
"def",
"f",
"(",
"value",
")",
":",
"if",
"value",
"in",
"_unit_map",
":",
"return",
"_unit_map",
"[",
"value",
"]",
"# m is case significant",
"if",
"value",
".",
"lower",
"(",
")",
"in",
"_unit_map",
":",
"return",
"_unit_map",
"[",
"value",
".",
"lower",
"(",
")",
"]",
"return",
"value",
"unit",
"=",
"{",
"k",
":",
"f",
"(",
"k",
")",
"for",
"k",
"in",
"arg",
".",
"keys",
"(",
")",
"}",
"unit_rev",
"=",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"unit",
".",
"items",
"(",
")",
"}",
"# we require at least Ymd",
"required",
"=",
"[",
"'year'",
",",
"'month'",
",",
"'day'",
"]",
"req",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"required",
")",
"-",
"set",
"(",
"unit_rev",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"len",
"(",
"req",
")",
":",
"raise",
"ValueError",
"(",
"\"to assemble mappings requires at least that \"",
"\"[year, month, day] be specified: [{required}] \"",
"\"is missing\"",
".",
"format",
"(",
"required",
"=",
"','",
".",
"join",
"(",
"req",
")",
")",
")",
"# keys we don't recognize",
"excess",
"=",
"sorted",
"(",
"list",
"(",
"set",
"(",
"unit_rev",
".",
"keys",
"(",
")",
")",
"-",
"set",
"(",
"_unit_map",
".",
"values",
"(",
")",
")",
")",
")",
"if",
"len",
"(",
"excess",
")",
":",
"raise",
"ValueError",
"(",
"\"extra keys have been passed \"",
"\"to the datetime assemblage: \"",
"\"[{excess}]\"",
".",
"format",
"(",
"excess",
"=",
"','",
".",
"join",
"(",
"excess",
")",
")",
")",
"def",
"coerce",
"(",
"values",
")",
":",
"# we allow coercion to if errors allows",
"values",
"=",
"to_numeric",
"(",
"values",
",",
"errors",
"=",
"errors",
")",
"# prevent overflow in case of int8 or int16",
"if",
"is_integer_dtype",
"(",
"values",
")",
":",
"values",
"=",
"values",
".",
"astype",
"(",
"'int64'",
",",
"copy",
"=",
"False",
")",
"return",
"values",
"values",
"=",
"(",
"coerce",
"(",
"arg",
"[",
"unit_rev",
"[",
"'year'",
"]",
"]",
")",
"*",
"10000",
"+",
"coerce",
"(",
"arg",
"[",
"unit_rev",
"[",
"'month'",
"]",
"]",
")",
"*",
"100",
"+",
"coerce",
"(",
"arg",
"[",
"unit_rev",
"[",
"'day'",
"]",
"]",
")",
")",
"try",
":",
"values",
"=",
"to_datetime",
"(",
"values",
",",
"format",
"=",
"'%Y%m%d'",
",",
"errors",
"=",
"errors",
",",
"utc",
"=",
"tz",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"cannot assemble the \"",
"\"datetimes: {error}\"",
".",
"format",
"(",
"error",
"=",
"e",
")",
")",
"for",
"u",
"in",
"[",
"'h'",
",",
"'m'",
",",
"'s'",
",",
"'ms'",
",",
"'us'",
",",
"'ns'",
"]",
":",
"value",
"=",
"unit_rev",
".",
"get",
"(",
"u",
")",
"if",
"value",
"is",
"not",
"None",
"and",
"value",
"in",
"arg",
":",
"try",
":",
"values",
"+=",
"to_timedelta",
"(",
"coerce",
"(",
"arg",
"[",
"value",
"]",
")",
",",
"unit",
"=",
"u",
",",
"errors",
"=",
"errors",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"\"cannot assemble the datetimes [{value}]: \"",
"\"{error}\"",
".",
"format",
"(",
"value",
"=",
"value",
",",
"error",
"=",
"e",
")",
")",
"if",
"not",
"box",
":",
"return",
"values",
".",
"values",
"return",
"values"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
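A minimal usage sketch for the row above. `_assemble_from_unit_mappings` is the private helper behind `pd.to_datetime` on a DataFrame of unit columns, so the public call below exercises the same path; the column names and printed output are illustrative.

import pandas as pd

# year/month/day are required; 'hour' maps to the 'h' unit and is added
# as a timedelta after the '%Y%m%d' base is parsed
df = pd.DataFrame({'year': [2015, 2016],
                   'month': [2, 3],
                   'day': [4, 5],
                   'hour': [10, 11]})
print(pd.to_datetime(df))
# 0   2015-02-04 10:00:00
# 1   2016-03-05 11:00:00
# dtype: datetime64[ns]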
train | _attempt_YYYYMMDD | try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is passed in as an object dtype, but could really be ints/strings
with NaN-like values or floats (e.g. with NaN)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce' | pandas/core/tools/datetimes.py | def _attempt_YYYYMMDD(arg, errors):
"""
try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is passed in as an object dtype, but could really be ints/strings
with NaN-like values or floats (e.g. with NaN)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce'
"""
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day(carg / 10000,
carg / 100 % 100,
carg % 100)
return tslib.array_to_datetime(parsed, errors=errors)[0]
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[~mask] = tslibs.iNaT
masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
result[mask] = masked_result.astype('M8[ns]')
return result
# try intlike / strings that are ints
try:
return calc(arg.astype(np.int64))
except ValueError:
pass
# a float with actual np.nan
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except ValueError:
pass
# string with NaN-like
try:
mask = ~algorithms.isin(arg, list(tslib.nat_strings))
return calc_with_mask(arg, mask)
except ValueError:
pass
return None | def _attempt_YYYYMMDD(arg, errors):
"""
try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
arg is passed in as an object dtype, but could really be ints/strings
with NaN-like values or floats (e.g. with NaN)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce'
"""
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
parsed = parsing.try_parse_year_month_day(carg / 10000,
carg / 100 % 100,
carg % 100)
return tslib.array_to_datetime(parsed, errors=errors)[0]
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[~mask] = tslibs.iNaT
masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
result[mask] = masked_result.astype('M8[ns]')
return result
# try intlike / strings that are ints
try:
return calc(arg.astype(np.int64))
except ValueError:
pass
# a float with actual np.nan
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notna(carg))
except ValueError:
pass
# string with NaN-like
try:
mask = ~algorithms.isin(arg, list(tslib.nat_strings))
return calc_with_mask(arg, mask)
except ValueError:
pass
return None | [
"try",
"to",
"parse",
"the",
"YYYYMMDD",
"/",
"%Y%m%d",
"format",
"try",
"to",
"deal",
"with",
"NaT",
"-",
"like",
"arg",
"is",
"a",
"passed",
"in",
"as",
"an",
"object",
"dtype",
"but",
"could",
"really",
"be",
"ints",
"/",
"strings",
"with",
"nan",
"-",
"like",
"/",
"or",
"floats",
"(",
"e",
".",
"g",
".",
"with",
"nan",
")"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L740-L789 | [
"def",
"_attempt_YYYYMMDD",
"(",
"arg",
",",
"errors",
")",
":",
"def",
"calc",
"(",
"carg",
")",
":",
"# calculate the actual result",
"carg",
"=",
"carg",
".",
"astype",
"(",
"object",
")",
"parsed",
"=",
"parsing",
".",
"try_parse_year_month_day",
"(",
"carg",
"/",
"10000",
",",
"carg",
"/",
"100",
"%",
"100",
",",
"carg",
"%",
"100",
")",
"return",
"tslib",
".",
"array_to_datetime",
"(",
"parsed",
",",
"errors",
"=",
"errors",
")",
"[",
"0",
"]",
"def",
"calc_with_mask",
"(",
"carg",
",",
"mask",
")",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"carg",
".",
"shape",
",",
"dtype",
"=",
"'M8[ns]'",
")",
"iresult",
"=",
"result",
".",
"view",
"(",
"'i8'",
")",
"iresult",
"[",
"~",
"mask",
"]",
"=",
"tslibs",
".",
"iNaT",
"masked_result",
"=",
"calc",
"(",
"carg",
"[",
"mask",
"]",
".",
"astype",
"(",
"np",
".",
"float64",
")",
".",
"astype",
"(",
"np",
".",
"int64",
")",
")",
"result",
"[",
"mask",
"]",
"=",
"masked_result",
".",
"astype",
"(",
"'M8[ns]'",
")",
"return",
"result",
"# try intlike / strings that are ints",
"try",
":",
"return",
"calc",
"(",
"arg",
".",
"astype",
"(",
"np",
".",
"int64",
")",
")",
"except",
"ValueError",
":",
"pass",
"# a float with actual np.nan",
"try",
":",
"carg",
"=",
"arg",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"return",
"calc_with_mask",
"(",
"carg",
",",
"notna",
"(",
"carg",
")",
")",
"except",
"ValueError",
":",
"pass",
"# string with NaN-like",
"try",
":",
"mask",
"=",
"~",
"algorithms",
".",
"isin",
"(",
"arg",
",",
"list",
"(",
"tslib",
".",
"nat_strings",
")",
")",
"return",
"calc_with_mask",
"(",
"arg",
",",
"mask",
")",
"except",
"ValueError",
":",
"pass",
"return",
"None"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
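A sketch of the fast path this helper serves: in this pandas version `pd.to_datetime(..., format='%Y%m%d')` dispatches to `_attempt_YYYYMMDD`, whose int branch fails on the NaN below, so the float branch masks it to NaT. The input values are illustrative.

import numpy as np
import pandas as pd

arr = np.array([20150204, 20160305, np.nan], dtype=object)
print(pd.to_datetime(arr, format='%Y%m%d', errors='coerce'))
# DatetimeIndex(['2015-02-04', '2016-03-05', 'NaT'], dtype='datetime64[ns]', freq=None)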
train | to_time | Parse time strings to time objects using fixed strptime formats ("%H:%M",
"%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
Use infer_time_format if all the strings are in the same format to speed
up conversion.
Parameters
----------
arg : string in time format, datetime.time, list, tuple, 1-d array, Series
format : str, default None
Format used to convert arg into a time object. If None, fixed formats
are used.
infer_time_format: bool, default False
Infer the time format based on the first non-NaN element. If all
strings are in the same format, this will speed up conversion.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as None
- If 'ignore', then invalid parsing will return the input
Returns
-------
datetime.time | pandas/core/tools/datetimes.py | def to_time(arg, format=None, infer_time_format=False, errors='raise'):
"""
Parse time strings to time objects using fixed strptime formats ("%H:%M",
"%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
Use infer_time_format if all the strings are in the same format to speed
up conversion.
Parameters
----------
arg : string in time format, datetime.time, list, tuple, 1-d array, Series
format : str, default None
Format used to convert arg into a time object. If None, fixed formats
are used.
infer_time_format: bool, default False
Infer the time format based on the first non-NaN element. If all
strings are in the same format, this will speed up conversion.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as None
- If 'ignore', then invalid parsing will return the input
Returns
-------
datetime.time
"""
def _convert_listlike(arg, format):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, datetime, list, tuple, '
'1-d array, or Series')
arg = ensure_object(arg)
if infer_time_format and format is None:
format = _guess_time_format_for_array(arg)
times = []
if format is not None:
for element in arg:
try:
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if errors == 'raise':
msg = ("Cannot convert {element} to a time with given "
"format {format}").format(element=element,
format=format)
raise ValueError(msg)
elif errors == 'ignore':
return arg
else:
times.append(None)
else:
formats = _time_formats[:]
format_found = False
for element in arg:
time_object = None
for time_format in formats:
try:
time_object = datetime.strptime(element,
time_format).time()
if not format_found:
# Put the found format in front
fmt = formats.pop(formats.index(time_format))
formats.insert(0, fmt)
format_found = True
break
except (ValueError, TypeError):
continue
if time_object is not None:
times.append(time_object)
elif errors == 'raise':
raise ValueError("Cannot convert arg {arg} to "
"a time".format(arg=arg))
elif errors == 'ignore':
return arg
else:
times.append(None)
return times
if arg is None:
return arg
elif isinstance(arg, time):
return arg
elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, format)
return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
return _convert_listlike(arg, format)
return _convert_listlike(np.array([arg]), format)[0] | def to_time(arg, format=None, infer_time_format=False, errors='raise'):
"""
Parse time strings to time objects using fixed strptime formats ("%H:%M",
"%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
Use infer_time_format if all the strings are in the same format to speed
up conversion.
Parameters
----------
arg : string in time format, datetime.time, list, tuple, 1-d array, Series
format : str, default None
Format used to convert arg into a time object. If None, fixed formats
are used.
infer_time_format: bool, default False
Infer the time format based on the first non-NaN element. If all
strings are in the same format, this will speed up conversion.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as None
- If 'ignore', then invalid parsing will return the input
Returns
-------
datetime.time
"""
def _convert_listlike(arg, format):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, datetime, list, tuple, '
'1-d array, or Series')
arg = ensure_object(arg)
if infer_time_format and format is None:
format = _guess_time_format_for_array(arg)
times = []
if format is not None:
for element in arg:
try:
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if errors == 'raise':
msg = ("Cannot convert {element} to a time with given "
"format {format}").format(element=element,
format=format)
raise ValueError(msg)
elif errors == 'ignore':
return arg
else:
times.append(None)
else:
formats = _time_formats[:]
format_found = False
for element in arg:
time_object = None
for time_format in formats:
try:
time_object = datetime.strptime(element,
time_format).time()
if not format_found:
# Put the found format in front
fmt = formats.pop(formats.index(time_format))
formats.insert(0, fmt)
format_found = True
break
except (ValueError, TypeError):
continue
if time_object is not None:
times.append(time_object)
elif errors == 'raise':
raise ValueError("Cannot convert arg {arg} to "
"a time".format(arg=arg))
elif errors == 'ignore':
return arg
else:
times.append(None)
return times
if arg is None:
return arg
elif isinstance(arg, time):
return arg
elif isinstance(arg, ABCSeries):
values = _convert_listlike(arg._values, format)
return arg._constructor(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
return _convert_listlike(arg, format)
return _convert_listlike(np.array([arg]), format)[0] | [
"Parse",
"time",
"strings",
"to",
"time",
"objects",
"using",
"fixed",
"strptime",
"formats",
"(",
"%H",
":",
"%M",
"%H%M",
"%I",
":",
"%M%p",
"%I%M%p",
"%H",
":",
"%M",
":",
"%S",
"%H%M%S",
"%I",
":",
"%M",
":",
"%S%p",
"%I%M%S%p",
")"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/tools/datetimes.py#L812-L911 | [
"def",
"to_time",
"(",
"arg",
",",
"format",
"=",
"None",
",",
"infer_time_format",
"=",
"False",
",",
"errors",
"=",
"'raise'",
")",
":",
"def",
"_convert_listlike",
"(",
"arg",
",",
"format",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"arg",
"=",
"np",
".",
"array",
"(",
"arg",
",",
"dtype",
"=",
"'O'",
")",
"elif",
"getattr",
"(",
"arg",
",",
"'ndim'",
",",
"1",
")",
">",
"1",
":",
"raise",
"TypeError",
"(",
"'arg must be a string, datetime, list, tuple, '",
"'1-d array, or Series'",
")",
"arg",
"=",
"ensure_object",
"(",
"arg",
")",
"if",
"infer_time_format",
"and",
"format",
"is",
"None",
":",
"format",
"=",
"_guess_time_format_for_array",
"(",
"arg",
")",
"times",
"=",
"[",
"]",
"if",
"format",
"is",
"not",
"None",
":",
"for",
"element",
"in",
"arg",
":",
"try",
":",
"times",
".",
"append",
"(",
"datetime",
".",
"strptime",
"(",
"element",
",",
"format",
")",
".",
"time",
"(",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"if",
"errors",
"==",
"'raise'",
":",
"msg",
"=",
"(",
"\"Cannot convert {element} to a time with given \"",
"\"format {format}\"",
")",
".",
"format",
"(",
"element",
"=",
"element",
",",
"format",
"=",
"format",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"elif",
"errors",
"==",
"'ignore'",
":",
"return",
"arg",
"else",
":",
"times",
".",
"append",
"(",
"None",
")",
"else",
":",
"formats",
"=",
"_time_formats",
"[",
":",
"]",
"format_found",
"=",
"False",
"for",
"element",
"in",
"arg",
":",
"time_object",
"=",
"None",
"for",
"time_format",
"in",
"formats",
":",
"try",
":",
"time_object",
"=",
"datetime",
".",
"strptime",
"(",
"element",
",",
"time_format",
")",
".",
"time",
"(",
")",
"if",
"not",
"format_found",
":",
"# Put the found format in front",
"fmt",
"=",
"formats",
".",
"pop",
"(",
"formats",
".",
"index",
"(",
"time_format",
")",
")",
"formats",
".",
"insert",
"(",
"0",
",",
"fmt",
")",
"format_found",
"=",
"True",
"break",
"except",
"(",
"ValueError",
",",
"TypeError",
")",
":",
"continue",
"if",
"time_object",
"is",
"not",
"None",
":",
"times",
".",
"append",
"(",
"time_object",
")",
"elif",
"errors",
"==",
"'raise'",
":",
"raise",
"ValueError",
"(",
"\"Cannot convert arg {arg} to \"",
"\"a time\"",
".",
"format",
"(",
"arg",
"=",
"arg",
")",
")",
"elif",
"errors",
"==",
"'ignore'",
":",
"return",
"arg",
"else",
":",
"times",
".",
"append",
"(",
"None",
")",
"return",
"times",
"if",
"arg",
"is",
"None",
":",
"return",
"arg",
"elif",
"isinstance",
"(",
"arg",
",",
"time",
")",
":",
"return",
"arg",
"elif",
"isinstance",
"(",
"arg",
",",
"ABCSeries",
")",
":",
"values",
"=",
"_convert_listlike",
"(",
"arg",
".",
"_values",
",",
"format",
")",
"return",
"arg",
".",
"_constructor",
"(",
"values",
",",
"index",
"=",
"arg",
".",
"index",
",",
"name",
"=",
"arg",
".",
"name",
")",
"elif",
"isinstance",
"(",
"arg",
",",
"ABCIndexClass",
")",
":",
"return",
"_convert_listlike",
"(",
"arg",
",",
"format",
")",
"elif",
"is_list_like",
"(",
"arg",
")",
":",
"return",
"_convert_listlike",
"(",
"arg",
",",
"format",
")",
"return",
"_convert_listlike",
"(",
"np",
".",
"array",
"(",
"[",
"arg",
"]",
")",
",",
"format",
")",
"[",
"0",
"]"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
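A hedged usage sketch of `to_time`. It is an internal helper in this version, so the import path mirrors the row's `path` field rather than a public API.

from pandas.core.tools.datetimes import to_time

# no format given: the fixed strptime formats are tried in turn
print(to_time('14:30'))  # 14:30:00
# infer_time_format guesses '%H:%M:%S' from the first element, so the
# second string fails that format and errors='coerce' maps it to None
print(to_time(['04:15:30', '9:20am'], infer_time_format=True, errors='coerce'))
# [datetime.time(4, 15, 30), None]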
train | deprecate | Return a new function that emits a deprecation warning on use.
To use this method for a deprecated function, another function
`alternative` with the same signature must exist. The deprecated
function will emit a deprecation warning, and in the docstring
it will contain the deprecation directive with the provided version
so it can be detected for future removal.
Parameters
----------
name : str
Name of function to deprecate.
alternative : func
Function to use instead.
version : str
Version of pandas in which the method has been deprecated.
alt_name : str, optional
Name to use in preference to alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
The message to display in the warning.
Default is '{name} is deprecated, use {alt_name} instead'. | pandas/util/_decorators.py | def deprecate(name, alternative, version, alt_name=None,
klass=None, stacklevel=2, msg=None):
"""
Return a new function that emits a deprecation warning on use.
To use this method for a deprecated function, another function
`alternative` with the same signature must exist. The deprecated
function will emit a deprecation warning, and in the docstring
it will contain the deprecation directive with the provided version
so it can be detected for future removal.
Parameters
----------
name : str
Name of function to deprecate.
alternative : func
Function to use instead.
version : str
Version of pandas in which the method has been deprecated.
alt_name : str, optional
Name to use in preference to alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
The message to display in the warning.
Default is '{name} is deprecated, use {alt_name} instead'.
"""
alt_name = alt_name or alternative.__name__
klass = klass or FutureWarning
warning_msg = msg or '{} is deprecated, use {} instead'.format(name,
alt_name)
@wraps(alternative)
def wrapper(*args, **kwargs):
warnings.warn(warning_msg, klass, stacklevel=stacklevel)
return alternative(*args, **kwargs)
# adding deprecated directive to the docstring
msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
doc_error_msg = ('deprecate needs a correctly formatted docstring in '
'the target function (should have a one liner short '
'summary, and opening quotes should be in their own '
'line). Found:\n{}'.format(alternative.__doc__))
# when python is running in optimized mode (i.e. `-OO`), docstrings are
# removed, so we check that a docstring with correct formatting is used
# but we allow empty docstrings
if alternative.__doc__:
if alternative.__doc__.count('\n') < 3:
raise AssertionError(doc_error_msg)
empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3)
if empty1 or empty2 and not summary:
raise AssertionError(doc_error_msg)
wrapper.__doc__ = dedent("""
{summary}
.. deprecated:: {depr_version}
{depr_msg}
{rest_of_docstring}""").format(summary=summary.strip(),
depr_version=version,
depr_msg=msg,
rest_of_docstring=dedent(doc))
return wrapper | def deprecate(name, alternative, version, alt_name=None,
klass=None, stacklevel=2, msg=None):
"""
Return a new function that emits a deprecation warning on use.
To use this method for a deprecated function, another function
`alternative` with the same signature must exist. The deprecated
function will emit a deprecation warning, and in the docstring
it will contain the deprecation directive with the provided version
so it can be detected for future removal.
Parameters
----------
name : str
Name of function to deprecate.
alternative : func
Function to use instead.
version : str
Version of pandas in which the method has been deprecated.
alt_name : str, optional
Name to use in preference to alternative.__name__.
klass : Warning, default FutureWarning
stacklevel : int, default 2
msg : str
The message to display in the warning.
Default is '{name} is deprecated, use {alt_name} instead'.
"""
alt_name = alt_name or alternative.__name__
klass = klass or FutureWarning
warning_msg = msg or '{} is deprecated, use {} instead'.format(name,
alt_name)
@wraps(alternative)
def wrapper(*args, **kwargs):
warnings.warn(warning_msg, klass, stacklevel=stacklevel)
return alternative(*args, **kwargs)
# adding deprecated directive to the docstring
msg = msg or 'Use `{alt_name}` instead.'.format(alt_name=alt_name)
doc_error_msg = ('deprecate needs a correctly formatted docstring in '
'the target function (should have a one liner short '
'summary, and opening quotes should be in their own '
'line). Found:\n{}'.format(alternative.__doc__))
# when python is running in optimized mode (i.e. `-OO`), docstrings are
# removed, so we check that a docstring with correct formatting is used
# but we allow empty docstrings
if alternative.__doc__:
if alternative.__doc__.count('\n') < 3:
raise AssertionError(doc_error_msg)
empty1, summary, empty2, doc = alternative.__doc__.split('\n', 3)
if empty1 or empty2 and not summary:
raise AssertionError(doc_error_msg)
wrapper.__doc__ = dedent("""
{summary}
.. deprecated:: {depr_version}
{depr_msg}
{rest_of_docstring}""").format(summary=summary.strip(),
depr_version=version,
depr_msg=msg,
rest_of_docstring=dedent(doc))
return wrapper | [
"Return",
"a",
"new",
"function",
"that",
"emits",
"a",
"deprecation",
"warning",
"on",
"use",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_decorators.py#L9-L74 | [
"def",
"deprecate",
"(",
"name",
",",
"alternative",
",",
"version",
",",
"alt_name",
"=",
"None",
",",
"klass",
"=",
"None",
",",
"stacklevel",
"=",
"2",
",",
"msg",
"=",
"None",
")",
":",
"alt_name",
"=",
"alt_name",
"or",
"alternative",
".",
"__name__",
"klass",
"=",
"klass",
"or",
"FutureWarning",
"warning_msg",
"=",
"msg",
"or",
"'{} is deprecated, use {} instead'",
".",
"format",
"(",
"name",
",",
"alt_name",
")",
"@",
"wraps",
"(",
"alternative",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"warning_msg",
",",
"klass",
",",
"stacklevel",
"=",
"stacklevel",
")",
"return",
"alternative",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# adding deprecated directive to the docstring",
"msg",
"=",
"msg",
"or",
"'Use `{alt_name}` instead.'",
".",
"format",
"(",
"alt_name",
"=",
"alt_name",
")",
"doc_error_msg",
"=",
"(",
"'deprecate needs a correctly formatted docstring in '",
"'the target function (should have a one liner short '",
"'summary, and opening quotes should be in their own '",
"'line). Found:\\n{}'",
".",
"format",
"(",
"alternative",
".",
"__doc__",
")",
")",
"# when python is running in optimized mode (i.e. `-OO`), docstrings are",
"# removed, so we check that a docstring with correct formatting is used",
"# but we allow empty docstrings",
"if",
"alternative",
".",
"__doc__",
":",
"if",
"alternative",
".",
"__doc__",
".",
"count",
"(",
"'\\n'",
")",
"<",
"3",
":",
"raise",
"AssertionError",
"(",
"doc_error_msg",
")",
"empty1",
",",
"summary",
",",
"empty2",
",",
"doc",
"=",
"alternative",
".",
"__doc__",
".",
"split",
"(",
"'\\n'",
",",
"3",
")",
"if",
"empty1",
"or",
"empty2",
"and",
"not",
"summary",
":",
"raise",
"AssertionError",
"(",
"doc_error_msg",
")",
"wrapper",
".",
"__doc__",
"=",
"dedent",
"(",
"\"\"\"\n {summary}\n\n .. deprecated:: {depr_version}\n {depr_msg}\n\n {rest_of_docstring}\"\"\"",
")",
".",
"format",
"(",
"summary",
"=",
"summary",
".",
"strip",
"(",
")",
",",
"depr_version",
"=",
"version",
",",
"depr_msg",
"=",
"msg",
",",
"rest_of_docstring",
"=",
"dedent",
"(",
"doc",
")",
")",
"return",
"wrapper"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
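A runnable sketch of `deprecate`. `new_sum`/`old_sum` are illustrative names; the replacement's docstring follows the blank-line/one-line-summary layout the helper asserts on.

import warnings
from pandas.util._decorators import deprecate

def new_sum(values):
    """
    Sum the values.

    Replacement for the deprecated name.
    """
    return sum(values)

old_sum = deprecate('old_sum', new_sum, '0.21.0')

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter('always')
    assert old_sum([1, 2, 3]) == 6                        # still works
    assert 'old_sum is deprecated' in str(w[-1].message)  # but warns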
train | deprecate_kwarg | Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated, use 'columns' instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: the old='yes' keyword is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version. Please take steps to stop the use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version. Please take steps to stop the use of 'cols'
should raise warning | pandas/util/_decorators.py | def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
"""
Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated, use 'columns' instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: the old='yes' keyword is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version. Please take steps to stop the use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
"""
if mapping is not None and not hasattr(mapping, 'get') and \
not callable(mapping):
raise TypeError("mapping from old to new argument values "
"must be dict or callable!")
def _deprecate_kwarg(func):
@wraps(func)
def wrapper(*args, **kwargs):
old_arg_value = kwargs.pop(old_arg_name, None)
if new_arg_name is None and old_arg_value is not None:
msg = (
"the '{old_name}' keyword is deprecated and will be "
"removed in a future version. "
"Please take steps to stop the use of '{old_name}'"
).format(old_name=old_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
kwargs[old_arg_name] = old_arg_value
return func(*args, **kwargs)
if old_arg_value is not None:
if mapping is not None:
if hasattr(mapping, 'get'):
new_arg_value = mapping.get(old_arg_value,
old_arg_value)
else:
new_arg_value = mapping(old_arg_value)
msg = ("the {old_name}={old_val!r} keyword is deprecated, "
"use {new_name}={new_val!r} instead"
).format(old_name=old_arg_name,
old_val=old_arg_value,
new_name=new_arg_name,
new_val=new_arg_value)
else:
new_arg_value = old_arg_value
msg = ("the '{old_name}' keyword is deprecated, "
"use '{new_name}' instead"
).format(old_name=old_arg_name,
new_name=new_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
if kwargs.get(new_arg_name, None) is not None:
msg = ("Can only specify '{old_name}' or '{new_name}', "
"not both").format(old_name=old_arg_name,
new_name=new_arg_name)
raise TypeError(msg)
else:
kwargs[new_arg_name] = new_arg_value
return func(*args, **kwargs)
return wrapper
return _deprecate_kwarg | def deprecate_kwarg(old_arg_name, new_arg_name, mapping=None, stacklevel=2):
"""
Decorator to deprecate a keyword argument of a function.
Parameters
----------
old_arg_name : str
Name of argument in function to deprecate
new_arg_name : str or None
Name of preferred argument in function. Use None to raise warning that
``old_arg_name`` keyword is deprecated.
mapping : dict or callable
If mapping is present, use it to translate old arguments to
new arguments. A callable must do its own value checking;
values not found in a dict will be forwarded unchanged.
Examples
--------
The following deprecates 'cols', using 'columns' instead
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
... def f(columns=''):
... print(columns)
...
>>> f(columns='should work ok')
should work ok
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated, use 'columns' instead
warnings.warn(msg, FutureWarning)
should raise warning
>>> f(cols='should error', columns="can\'t pass both")
TypeError: Can only specify 'cols' or 'columns', not both
>>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
... def f(new=False):
... print('yes!' if new else 'no!')
...
>>> f(old='yes')
FutureWarning: the old='yes' keyword is deprecated, use new=True instead
warnings.warn(msg, FutureWarning)
yes!
To raise a warning that a keyword will be removed entirely in the future
>>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
... def f(cols='', another_param=''):
... print(cols)
...
>>> f(cols='should raise warning')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version. Please take steps to stop the use of 'cols'
should raise warning
>>> f(another_param='should not raise warning')
should not raise warning
>>> f(cols='should raise warning', another_param='')
FutureWarning: the 'cols' keyword is deprecated and will be removed in a
future version please takes steps to stop use of 'cols'
should raise warning
"""
if mapping is not None and not hasattr(mapping, 'get') and \
not callable(mapping):
raise TypeError("mapping from old to new argument values "
"must be dict or callable!")
def _deprecate_kwarg(func):
@wraps(func)
def wrapper(*args, **kwargs):
old_arg_value = kwargs.pop(old_arg_name, None)
if new_arg_name is None and old_arg_value is not None:
msg = (
"the '{old_name}' keyword is deprecated and will be "
"removed in a future version. "
"Please take steps to stop the use of '{old_name}'"
).format(old_name=old_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
kwargs[old_arg_name] = old_arg_value
return func(*args, **kwargs)
if old_arg_value is not None:
if mapping is not None:
if hasattr(mapping, 'get'):
new_arg_value = mapping.get(old_arg_value,
old_arg_value)
else:
new_arg_value = mapping(old_arg_value)
msg = ("the {old_name}={old_val!r} keyword is deprecated, "
"use {new_name}={new_val!r} instead"
).format(old_name=old_arg_name,
old_val=old_arg_value,
new_name=new_arg_name,
new_val=new_arg_value)
else:
new_arg_value = old_arg_value
msg = ("the '{old_name}' keyword is deprecated, "
"use '{new_name}' instead"
).format(old_name=old_arg_name,
new_name=new_arg_name)
warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
if kwargs.get(new_arg_name, None) is not None:
msg = ("Can only specify '{old_name}' or '{new_name}', "
"not both").format(old_name=old_arg_name,
new_name=new_arg_name)
raise TypeError(msg)
else:
kwargs[new_arg_name] = new_arg_value
return func(*args, **kwargs)
return wrapper
return _deprecate_kwarg | [
"Decorator",
"to",
"deprecate",
"a",
"keyword",
"argument",
"of",
"a",
"function",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_decorators.py#L77-L190 | [
"def",
"deprecate_kwarg",
"(",
"old_arg_name",
",",
"new_arg_name",
",",
"mapping",
"=",
"None",
",",
"stacklevel",
"=",
"2",
")",
":",
"if",
"mapping",
"is",
"not",
"None",
"and",
"not",
"hasattr",
"(",
"mapping",
",",
"'get'",
")",
"and",
"not",
"callable",
"(",
"mapping",
")",
":",
"raise",
"TypeError",
"(",
"\"mapping from old to new argument values \"",
"\"must be dict or callable!\"",
")",
"def",
"_deprecate_kwarg",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"old_arg_value",
"=",
"kwargs",
".",
"pop",
"(",
"old_arg_name",
",",
"None",
")",
"if",
"new_arg_name",
"is",
"None",
"and",
"old_arg_value",
"is",
"not",
"None",
":",
"msg",
"=",
"(",
"\"the '{old_name}' keyword is deprecated and will be \"",
"\"removed in a future version. \"",
"\"Please take steps to stop the use of '{old_name}'\"",
")",
".",
"format",
"(",
"old_name",
"=",
"old_arg_name",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"stacklevel",
")",
"kwargs",
"[",
"old_arg_name",
"]",
"=",
"old_arg_value",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"old_arg_value",
"is",
"not",
"None",
":",
"if",
"mapping",
"is",
"not",
"None",
":",
"if",
"hasattr",
"(",
"mapping",
",",
"'get'",
")",
":",
"new_arg_value",
"=",
"mapping",
".",
"get",
"(",
"old_arg_value",
",",
"old_arg_value",
")",
"else",
":",
"new_arg_value",
"=",
"mapping",
"(",
"old_arg_value",
")",
"msg",
"=",
"(",
"\"the {old_name}={old_val!r} keyword is deprecated, \"",
"\"use {new_name}={new_val!r} instead\"",
")",
".",
"format",
"(",
"old_name",
"=",
"old_arg_name",
",",
"old_val",
"=",
"old_arg_value",
",",
"new_name",
"=",
"new_arg_name",
",",
"new_val",
"=",
"new_arg_value",
")",
"else",
":",
"new_arg_value",
"=",
"old_arg_value",
"msg",
"=",
"(",
"\"the '{old_name}' keyword is deprecated, \"",
"\"use '{new_name}' instead\"",
")",
".",
"format",
"(",
"old_name",
"=",
"old_arg_name",
",",
"new_name",
"=",
"new_arg_name",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"stacklevel",
")",
"if",
"kwargs",
".",
"get",
"(",
"new_arg_name",
",",
"None",
")",
"is",
"not",
"None",
":",
"msg",
"=",
"(",
"\"Can only specify '{old_name}' or '{new_name}', \"",
"\"not both\"",
")",
".",
"format",
"(",
"old_name",
"=",
"old_arg_name",
",",
"new_name",
"=",
"new_arg_name",
")",
"raise",
"TypeError",
"(",
"msg",
")",
"else",
":",
"kwargs",
"[",
"new_arg_name",
"]",
"=",
"new_arg_value",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"_deprecate_kwarg"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
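A compact sketch exercising the dict-``mapping`` branch of `deprecate_kwarg`; the decorated function is illustrative.

import warnings
from pandas.util._decorators import deprecate_kwarg

@deprecate_kwarg('old', 'new', mapping={'yes': True, 'no': False})
def flag(new=False):
    return new

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter('always')
    assert flag(old='yes') is True                    # 'yes' -> True via mapping
    assert issubclass(w[-1].category, FutureWarning)  # deprecation warned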
train | make_signature | Returns a tuple containing the paramenter list with defaults
and parameter list.
Examples
--------
>>> def f(a, b, c=2):
... return a * b * c
>>> print(make_signature(f))
(['a', 'b', 'c=2'], ['a', 'b', 'c']) | pandas/util/_decorators.py | def make_signature(func):
"""
Returns a tuple containing the parameter list with defaults
and parameter list.
Examples
--------
>>> def f(a, b, c=2):
... return a * b * c
>>> print(make_signature(f))
(['a', 'b', 'c=2'], ['a', 'b', 'c'])
"""
spec = inspect.getfullargspec(func)
if spec.defaults is None:
n_wo_defaults = len(spec.args)
defaults = ('',) * n_wo_defaults
else:
n_wo_defaults = len(spec.args) - len(spec.defaults)
defaults = ('',) * n_wo_defaults + tuple(spec.defaults)
args = []
for var, default in zip(spec.args, defaults):
args.append(var if default == '' else var + '=' + repr(default))
if spec.varargs:
args.append('*' + spec.varargs)
if spec.varkw:
args.append('**' + spec.varkw)
return args, spec.args | def make_signature(func):
"""
Returns a tuple containing the parameter list with defaults
and parameter list.
Examples
--------
>>> def f(a, b, c=2):
... return a * b * c
>>> print(make_signature(f))
(['a', 'b', 'c=2'], ['a', 'b', 'c'])
"""
spec = inspect.getfullargspec(func)
if spec.defaults is None:
n_wo_defaults = len(spec.args)
defaults = ('',) * n_wo_defaults
else:
n_wo_defaults = len(spec.args) - len(spec.defaults)
defaults = ('',) * n_wo_defaults + tuple(spec.defaults)
args = []
for var, default in zip(spec.args, defaults):
args.append(var if default == '' else var + '=' + repr(default))
if spec.varargs:
args.append('*' + spec.varargs)
if spec.varkw:
args.append('**' + spec.varkw)
return args, spec.args | [
"Returns",
"a",
"tuple",
"containing",
"the",
"paramenter",
"list",
"with",
"defaults",
"and",
"parameter",
"list",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_decorators.py#L324-L351 | [
"def",
"make_signature",
"(",
"func",
")",
":",
"spec",
"=",
"inspect",
".",
"getfullargspec",
"(",
"func",
")",
"if",
"spec",
".",
"defaults",
"is",
"None",
":",
"n_wo_defaults",
"=",
"len",
"(",
"spec",
".",
"args",
")",
"defaults",
"=",
"(",
"''",
",",
")",
"*",
"n_wo_defaults",
"else",
":",
"n_wo_defaults",
"=",
"len",
"(",
"spec",
".",
"args",
")",
"-",
"len",
"(",
"spec",
".",
"defaults",
")",
"defaults",
"=",
"(",
"''",
",",
")",
"*",
"n_wo_defaults",
"+",
"tuple",
"(",
"spec",
".",
"defaults",
")",
"args",
"=",
"[",
"]",
"for",
"var",
",",
"default",
"in",
"zip",
"(",
"spec",
".",
"args",
",",
"defaults",
")",
":",
"args",
".",
"append",
"(",
"var",
"if",
"default",
"==",
"''",
"else",
"var",
"+",
"'='",
"+",
"repr",
"(",
"default",
")",
")",
"if",
"spec",
".",
"varargs",
":",
"args",
".",
"append",
"(",
"'*'",
"+",
"spec",
".",
"varargs",
")",
"if",
"spec",
".",
"varkw",
":",
"args",
".",
"append",
"(",
"'**'",
"+",
"spec",
".",
"varkw",
")",
"return",
"args",
",",
"spec",
".",
"args"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
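A quick sketch of the return shape, including the varargs/varkw handling:

from pandas.util._decorators import make_signature

def g(a, b=1, *args, **kwargs):
    pass

print(make_signature(g))
# (['a', 'b=1', '*args', '**kwargs'], ['a', 'b'])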
train | period_range | Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
start : string or period-like, default None
Left bound for generating periods
end : string or period-like, default None
Right bound for generating periods
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : string, default None
Name of the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
'2017-06', '2017-07', '2017-08', '2017-09', '2017-10',
'2017-11', '2017-12', '2018-01'],
dtype='period[M]', freq='M')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]', freq='M') | pandas/core/indexes/period.py | def period_range(start=None, end=None, periods=None, freq=None, name=None):
"""
Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
start : string or period-like, default None
Left bound for generating periods
end : string or period-like, default None
Right bound for generating periods
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : string, default None
Name of the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
'2017-06', '2017-07', '2017-08', '2017-09', '2017-10',
'2017-11', '2017-12', '2018-01'],
dtype='period[M]', freq='M')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]', freq='M')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
if freq is None and (not isinstance(start, Period)
and not isinstance(end, Period)):
freq = 'D'
data, freq = PeriodArray._generate_range(start, end, periods, freq,
fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name) | def period_range(start=None, end=None, periods=None, freq=None, name=None):
"""
Return a fixed frequency PeriodIndex, with day (calendar) as the default
frequency
Parameters
----------
start : string or period-like, default None
Left bound for generating periods
end : string or period-like, default None
Right bound for generating periods
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, optional
Frequency alias. By default the freq is taken from `start` or `end`
if those are Period objects. Otherwise, the default is ``"D"`` for
daily frequency.
name : string, default None
Name of the resulting PeriodIndex
Returns
-------
prng : PeriodIndex
Notes
-----
Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
must be specified.
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.period_range(start='2017-01-01', end='2018-01-01', freq='M')
PeriodIndex(['2017-01', '2017-02', '2017-03', '2017-04', '2017-05',
'2017-06', '2017-07', '2017-08', '2017-09', '2017-10',
'2017-11', '2017-12', '2018-01'],
dtype='period[M]', freq='M')
If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor
endpoints for a ``PeriodIndex`` with frequency matching that of the
``period_range`` constructor.
>>> pd.period_range(start=pd.Period('2017Q1', freq='Q'),
... end=pd.Period('2017Q2', freq='Q'), freq='M')
PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'],
dtype='period[M]', freq='M')
"""
if com.count_not_none(start, end, periods) != 2:
raise ValueError('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
if freq is None and (not isinstance(start, Period)
and not isinstance(end, Period)):
freq = 'D'
data, freq = PeriodArray._generate_range(start, end, periods, freq,
fields={})
data = PeriodArray(data, freq=freq)
return PeriodIndex(data, name=name) | [
"Return",
"a",
"fixed",
"frequency",
"PeriodIndex",
"with",
"day",
"(",
"calendar",
")",
"as",
"the",
"default",
"frequency"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/period.py#L903-L964 | [
"def",
"period_range",
"(",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"periods",
"=",
"None",
",",
"freq",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"com",
".",
"count_not_none",
"(",
"start",
",",
"end",
",",
"periods",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Of the three parameters: start, end, and periods, '",
"'exactly two must be specified'",
")",
"if",
"freq",
"is",
"None",
"and",
"(",
"not",
"isinstance",
"(",
"start",
",",
"Period",
")",
"and",
"not",
"isinstance",
"(",
"end",
",",
"Period",
")",
")",
":",
"freq",
"=",
"'D'",
"data",
",",
"freq",
"=",
"PeriodArray",
".",
"_generate_range",
"(",
"start",
",",
"end",
",",
"periods",
",",
"freq",
",",
"fields",
"=",
"{",
"}",
")",
"data",
"=",
"PeriodArray",
"(",
"data",
",",
"freq",
"=",
"freq",
")",
"return",
"PeriodIndex",
"(",
"data",
",",
"name",
"=",
"name",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
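A public-API sketch for `period_range`; exactly two of ``start``, ``end``, and ``periods`` may be given, so ``end`` is omitted here.

import pandas as pd

print(pd.period_range(start='2017-01', periods=3, freq='M'))
# PeriodIndex(['2017-01', '2017-02', '2017-03'], dtype='period[M]', freq='M')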
train | RangeIndex.from_range | Create RangeIndex from a range object. | pandas/core/indexes/range.py | def from_range(cls, data, name=None, dtype=None, **kwargs):
""" Create RangeIndex from a range object. """
if not isinstance(data, range):
raise TypeError(
'{0}(...) must be called with object coercible to a '
'range, {1} was passed'.format(cls.__name__, repr(data)))
start, stop, step = data.start, data.stop, data.step
return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs) | def from_range(cls, data, name=None, dtype=None, **kwargs):
""" Create RangeIndex from a range object. """
if not isinstance(data, range):
raise TypeError(
'{0}(...) must be called with object coercible to a '
'range, {1} was passed'.format(cls.__name__, repr(data)))
start, stop, step = data.start, data.stop, data.step
return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs) | [
"Create",
"RangeIndex",
"from",
"a",
"range",
"object",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L128-L136 | [
"def",
"from_range",
"(",
"cls",
",",
"data",
",",
"name",
"=",
"None",
",",
"dtype",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"range",
")",
":",
"raise",
"TypeError",
"(",
"'{0}(...) must be called with object coercible to a '",
"'range, {1} was passed'",
".",
"format",
"(",
"cls",
".",
"__name__",
",",
"repr",
"(",
"data",
")",
")",
")",
"start",
",",
"stop",
",",
"step",
"=",
"data",
".",
"start",
",",
"data",
".",
"stop",
",",
"data",
".",
"step",
"return",
"RangeIndex",
"(",
"start",
",",
"stop",
",",
"step",
",",
"dtype",
"=",
"dtype",
",",
"name",
"=",
"name",
",",
"*",
"*",
"kwargs",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
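A sketch of the classmethod above, building a `RangeIndex` from a built-in `range`:

import pandas as pd

idx = pd.RangeIndex.from_range(range(1, 10, 2), name='odds')
print(idx)
# RangeIndex(start=1, stop=10, step=2, name='odds')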
train | RangeIndex._format_attrs | Return a list of tuples of the (attr, formatted_value) | pandas/core/indexes/range.py | def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
return attrs | def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
return attrs | [
"Return",
"a",
"list",
"of",
"tuples",
"of",
"the",
"(",
"attr",
"formatted_value",
")"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L200-L207 | [
"def",
"_format_attrs",
"(",
"self",
")",
":",
"attrs",
"=",
"self",
".",
"_get_data_as_items",
"(",
")",
"if",
"self",
".",
"name",
"is",
"not",
"None",
":",
"attrs",
".",
"append",
"(",
"(",
"'name'",
",",
"ibase",
".",
"default_pprint",
"(",
"self",
".",
"name",
")",
")",
")",
"return",
"attrs"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
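A sketch of this private hook; the printed tuples assume `_get_data_as_items` yields the start/stop/step pairs consumed by the `Index` repr machinery.

import pandas as pd

idx = pd.RangeIndex(0, 6, 2, name='x')
print(idx._format_attrs())
# [('start', 0), ('stop', 6), ('step', 2), ('name', "'x'")]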
train | RangeIndex.min | The minimum value of the RangeIndex | pandas/core/indexes/range.py | def min(self, axis=None, skipna=True):
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('min') | def min(self, axis=None, skipna=True):
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('min') | [
"The",
"minimum",
"value",
"of",
"the",
"RangeIndex"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L325-L328 | [
"def",
"min",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
")",
":",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"return",
"self",
".",
"_minmax",
"(",
"'min'",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
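Because the bounds are known analytically, `min` needs no scan of materialized values; a one-line sketch:

import pandas as pd

print(pd.RangeIndex(10, 0, -2).min())  # 2 (elements are 10, 8, 6, 4, 2)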
train | RangeIndex.max | The maximum value of the RangeIndex | pandas/core/indexes/range.py | def max(self, axis=None, skipna=True):
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('max') | def max(self, axis=None, skipna=True):
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('max') | [
"The",
"maximum",
"value",
"of",
"the",
"RangeIndex"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L330-L333 | [
"def",
"max",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"skipna",
"=",
"True",
")",
":",
"nv",
".",
"validate_minmax_axis",
"(",
"axis",
")",
"return",
"self",
".",
"_minmax",
"(",
"'max'",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
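The matching sketch for `max` on the same decreasing range:

import pandas as pd

print(pd.RangeIndex(10, 0, -2).max())  # 10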
train | RangeIndex.argsort | Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort | pandas/core/indexes/range.py | def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1) | def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1) | [
"Returns",
"the",
"indices",
"that",
"would",
"sort",
"the",
"index",
"and",
"its",
"underlying",
"data",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L335-L353 | [
"def",
"argsort",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_argsort",
"(",
"args",
",",
"kwargs",
")",
"if",
"self",
".",
"_step",
">",
"0",
":",
"return",
"np",
".",
"arange",
"(",
"len",
"(",
"self",
")",
")",
"else",
":",
"return",
"np",
".",
"arange",
"(",
"len",
"(",
"self",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
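A sketch showing both branches of `argsort`: an increasing range sorts to the identity permutation, a decreasing range to its reverse.

import pandas as pd

print(pd.RangeIndex(0, 5).argsort())      # [0 1 2 3 4]
print(pd.RangeIndex(5, 0, -1).argsort())  # [4 3 2 1 0]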
train | RangeIndex.equals | Determines if two Index objects contain the same elements. | pandas/core/indexes/range.py | def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
ls = len(self)
lo = len(other)
return (ls == lo == 0 or
ls == lo == 1 and
self._start == other._start or
ls == lo and
self._start == other._start and
self._step == other._step)
return super().equals(other) | def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
ls = len(self)
lo = len(other)
return (ls == lo == 0 or
ls == lo == 1 and
self._start == other._start or
ls == lo and
self._start == other._start and
self._step == other._step)
return super().equals(other) | [
"Determines",
"if",
"two",
"Index",
"objects",
"contain",
"the",
"same",
"elements",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L355-L369 | [
"def",
"equals",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"RangeIndex",
")",
":",
"ls",
"=",
"len",
"(",
"self",
")",
"lo",
"=",
"len",
"(",
"other",
")",
"return",
"(",
"ls",
"==",
"lo",
"==",
"0",
"or",
"ls",
"==",
"lo",
"==",
"1",
"and",
"self",
".",
"_start",
"==",
"other",
".",
"_start",
"or",
"ls",
"==",
"lo",
"and",
"self",
".",
"_start",
"==",
"other",
".",
"_start",
"and",
"self",
".",
"_step",
"==",
"other",
".",
"_step",
")",
"return",
"super",
"(",
")",
".",
"equals",
"(",
"other",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
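A sketch of the equality branches above: two RangeIndexes with different parametrizations can still be equal, and comparison against a non-range index falls back to the element-wise base implementation (assumes pandas is importable):

import pandas as pd

a = pd.RangeIndex(0, 4, 3)                 # elements [0, 3]
print(a.equals(pd.RangeIndex(0, 6, 3)))    # True: same length, start and step
print(a.equals(pd.Index([0, 3])))          # True: element-wise fallback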
train | RangeIndex.intersection | Form the intersection of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Sort the resulting index if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
Returns
-------
intersection : Index | pandas/core/indexes/range.py | def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Sort the resulting index if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
Returns
-------
intersection : Index
"""
self._validate_sort_keyword(sort)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, RangeIndex):
return super().intersection(other, sort=sort)
if not len(self) or not len(other):
return RangeIndex._simple_new(None)
first = self[::-1] if self._step < 0 else self
second = other[::-1] if other._step < 0 else other
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first._start, second._start)
int_high = min(first._stop, second._stop)
if int_high <= int_low:
return RangeIndex._simple_new(None)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = first._extended_gcd(first._step, second._step)
# check whether element sets intersect
if (first._start - second._start) % gcd:
return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first._start + (second._start - first._start) * \
first._step // gcd * s
new_step = first._step * second._step // gcd
new_index = RangeIndex._simple_new(tmp_start, int_high, new_step)
# adjust index to limiting interval
new_index._start = new_index._min_fitting_element(int_low)
if (self._step < 0 and other._step < 0) is not (new_index._step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index | def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Sort the resulting index if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
Returns
-------
intersection : Index
"""
self._validate_sort_keyword(sort)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, RangeIndex):
return super().intersection(other, sort=sort)
if not len(self) or not len(other):
return RangeIndex._simple_new(None)
first = self[::-1] if self._step < 0 else self
second = other[::-1] if other._step < 0 else other
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first._start, second._start)
int_high = min(first._stop, second._stop)
if int_high <= int_low:
return RangeIndex._simple_new(None)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = first._extended_gcd(first._step, second._step)
# check whether element sets intersect
if (first._start - second._start) % gcd:
return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first._start + (second._start - first._start) * \
first._step // gcd * s
new_step = first._step * second._step // gcd
new_index = RangeIndex._simple_new(tmp_start, int_high, new_step)
# adjust index to limiting interval
new_index._start = new_index._min_fitting_element(int_low)
if (self._step < 0 and other._step < 0) is not (new_index._step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index | [
"Form",
"the",
"intersection",
"of",
"two",
"Index",
"objects",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L371-L437 | [
"def",
"intersection",
"(",
"self",
",",
"other",
",",
"sort",
"=",
"False",
")",
":",
"self",
".",
"_validate_sort_keyword",
"(",
"sort",
")",
"if",
"self",
".",
"equals",
"(",
"other",
")",
":",
"return",
"self",
".",
"_get_reconciled_name_object",
"(",
"other",
")",
"if",
"not",
"isinstance",
"(",
"other",
",",
"RangeIndex",
")",
":",
"return",
"super",
"(",
")",
".",
"intersection",
"(",
"other",
",",
"sort",
"=",
"sort",
")",
"if",
"not",
"len",
"(",
"self",
")",
"or",
"not",
"len",
"(",
"other",
")",
":",
"return",
"RangeIndex",
".",
"_simple_new",
"(",
"None",
")",
"first",
"=",
"self",
"[",
":",
":",
"-",
"1",
"]",
"if",
"self",
".",
"_step",
"<",
"0",
"else",
"self",
"second",
"=",
"other",
"[",
":",
":",
"-",
"1",
"]",
"if",
"other",
".",
"_step",
"<",
"0",
"else",
"other",
"# check whether intervals intersect",
"# deals with in- and decreasing ranges",
"int_low",
"=",
"max",
"(",
"first",
".",
"_start",
",",
"second",
".",
"_start",
")",
"int_high",
"=",
"min",
"(",
"first",
".",
"_stop",
",",
"second",
".",
"_stop",
")",
"if",
"int_high",
"<=",
"int_low",
":",
"return",
"RangeIndex",
".",
"_simple_new",
"(",
"None",
")",
"# Method hint: linear Diophantine equation",
"# solve intersection problem",
"# performance hint: for identical step sizes, could use",
"# cheaper alternative",
"gcd",
",",
"s",
",",
"t",
"=",
"first",
".",
"_extended_gcd",
"(",
"first",
".",
"_step",
",",
"second",
".",
"_step",
")",
"# check whether element sets intersect",
"if",
"(",
"first",
".",
"_start",
"-",
"second",
".",
"_start",
")",
"%",
"gcd",
":",
"return",
"RangeIndex",
".",
"_simple_new",
"(",
"None",
")",
"# calculate parameters for the RangeIndex describing the",
"# intersection disregarding the lower bounds",
"tmp_start",
"=",
"first",
".",
"_start",
"+",
"(",
"second",
".",
"_start",
"-",
"first",
".",
"_start",
")",
"*",
"first",
".",
"_step",
"//",
"gcd",
"*",
"s",
"new_step",
"=",
"first",
".",
"_step",
"*",
"second",
".",
"_step",
"//",
"gcd",
"new_index",
"=",
"RangeIndex",
".",
"_simple_new",
"(",
"tmp_start",
",",
"int_high",
",",
"new_step",
")",
"# adjust index to limiting interval",
"new_index",
".",
"_start",
"=",
"new_index",
".",
"_min_fitting_element",
"(",
"int_low",
")",
"if",
"(",
"self",
".",
"_step",
"<",
"0",
"and",
"other",
".",
"_step",
"<",
"0",
")",
"is",
"not",
"(",
"new_index",
".",
"_step",
"<",
"0",
")",
":",
"new_index",
"=",
"new_index",
"[",
":",
":",
"-",
"1",
"]",
"if",
"sort",
"is",
"None",
":",
"new_index",
"=",
"new_index",
".",
"sort_values",
"(",
")",
"return",
"new_index"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
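To make the linear-Diophantine reasoning above concrete, here is a self-contained sketch for built-in range objects; range_intersection is a hypothetical helper, not part of pandas, and it assumes positive steps:

from math import gcd

def range_intersection(r1, r2):
    # Two arithmetic progressions share elements iff the difference of
    # their starts is divisible by gcd(step1, step2) (Bezout).
    g = gcd(r1.step, r2.step)
    if (r2.start - r1.start) % g:
        return range(0)               # the element sets never meet
    step = r1.step * r2.step // g     # lcm of the two steps
    lo = max(r1.start, r2.start)
    hi = min(r1.stop, r2.stop)
    # smallest common element >= lo, found by brute force here instead
    # of the closed-form solution used in the pandas method
    for x in range(lo, min(hi, lo + step)):
        if (x - r1.start) % r1.step == 0 and (x - r2.start) % r2.step == 0:
            return range(x, hi, step)
    return range(0)

print(list(range_intersection(range(0, 30, 4), range(6, 30, 6))))  # [12, 24]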
train | RangeIndex._min_fitting_element | Returns the smallest element greater than or equal to the limit | pandas/core/indexes/range.py | def _min_fitting_element(self, lower_limit):
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self._start) // abs(self._step))
return self._start + abs(self._step) * no_steps | def _min_fitting_element(self, lower_limit):
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self._start) // abs(self._step))
return self._start + abs(self._step) * no_steps | [
"Returns",
"the",
"smallest",
"element",
"greater",
"than",
"or",
"equal",
"to",
"the",
"limit"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L439-L442 | [
"def",
"_min_fitting_element",
"(",
"self",
",",
"lower_limit",
")",
":",
"no_steps",
"=",
"-",
"(",
"-",
"(",
"lower_limit",
"-",
"self",
".",
"_start",
")",
"//",
"abs",
"(",
"self",
".",
"_step",
")",
")",
"return",
"self",
".",
"_start",
"+",
"abs",
"(",
"self",
".",
"_step",
")",
"*",
"no_steps"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | RangeIndex._max_fitting_element | Returns the largest element smaller than or equal to the limit | pandas/core/indexes/range.py | def _max_fitting_element(self, upper_limit):
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self._start) // abs(self._step)
return self._start + abs(self._step) * no_steps | def _max_fitting_element(self, upper_limit):
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self._start) // abs(self._step)
return self._start + abs(self._step) * no_steps | [
"Returns",
"the",
"largest",
"element",
"smaller",
"than",
"or",
"equal",
"to",
"the",
"limit"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L444-L447 | [
"def",
"_max_fitting_element",
"(",
"self",
",",
"upper_limit",
")",
":",
"no_steps",
"=",
"(",
"upper_limit",
"-",
"self",
".",
"_start",
")",
"//",
"abs",
"(",
"self",
".",
"_step",
")",
"return",
"self",
".",
"_start",
"+",
"abs",
"(",
"self",
".",
"_step",
")",
"*",
"no_steps"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
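A worked example for the two fitting-element helpers above; -(-a // b) is ceiling division and a // b is floor division, so no pandas is needed to check the arithmetic:

start, step = 2, 3                        # elements 2, 5, 8, 11, ...
limit = 7
ceil_steps = -(-(limit - start) // step)  # ceil(5 / 3) == 2
floor_steps = (limit - start) // step     # floor(5 / 3) == 1
print(start + step * ceil_steps)          # 8: smallest element >= 7
print(start + step * floor_steps)         # 5: largest element <= 7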
train | RangeIndex._extended_gcd | Extended Euclidean algorithm to solve Bezout's identity:
a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t | pandas/core/indexes/range.py | def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithm to solve Bezout's identity:
a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t | def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithm to solve Bezout's identity:
a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t | [
"Extended",
"Euclidean",
"algorithms",
"to",
"solve",
"Bezout",
"s",
"identity",
":",
"a",
"*",
"x",
"+",
"b",
"*",
"y",
"=",
"gcd",
"(",
"x",
"y",
")",
"Finds",
"one",
"particular",
"solution",
"for",
"x",
"y",
":",
"s",
"t",
"Returns",
":",
"gcd",
"s",
"t"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L449-L464 | [
"def",
"_extended_gcd",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"s",
",",
"old_s",
"=",
"0",
",",
"1",
"t",
",",
"old_t",
"=",
"1",
",",
"0",
"r",
",",
"old_r",
"=",
"b",
",",
"a",
"while",
"r",
":",
"quotient",
"=",
"old_r",
"//",
"r",
"old_r",
",",
"r",
"=",
"r",
",",
"old_r",
"-",
"quotient",
"*",
"r",
"old_s",
",",
"s",
"=",
"s",
",",
"old_s",
"-",
"quotient",
"*",
"s",
"old_t",
",",
"t",
"=",
"t",
",",
"old_t",
"-",
"quotient",
"*",
"t",
"return",
"old_r",
",",
"old_s",
",",
"old_t"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
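A sanity check of the returned Bezout coefficients, as a standalone copy of the routine (hypothetical free function, not the method itself):

def extended_gcd(a, b):
    # Iterative extended Euclid, same structure as the method above.
    s, old_s = 0, 1
    t, old_t = 1, 0
    r, old_r = b, a
    while r:
        quotient = old_r // r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s
        old_t, t = t, old_t - quotient * t
    return old_r, old_s, old_t

g, s, t = extended_gcd(12, 42)
print(g, s, t)            # 6 -3 1
print(12 * s + 42 * t)    # 6, i.e. a*s + b*t == gcd(a, b)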
train | RangeIndex.union | Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index | pandas/core/indexes/range.py | def union(self, other, sort=None):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other) or len(self) == 0:
return super().union(other, sort=sort)
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self._start, self._step
end_s = self._start + self._step * (len(self) - 1)
start_o, step_o = other._start, other._step
end_o = other._start + other._step * (len(other) - 1)
if self._step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other._step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self._start - other._start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if ((start_s - start_o) % step_s == 0 and
(start_s - end_o) <= step_s and
(start_o - end_s) <= step_s):
return RangeIndex(start_r, end_r + step_s, step_s)
if ((step_s % 2 == 0) and
(abs(start_s - start_o) <= step_s / 2) and
(abs(end_s - end_o) <= step_s / 2)):
return RangeIndex(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if ((start_o - start_s) % step_s == 0 and
(start_o + step_s >= start_s) and
(end_o - step_s <= end_s)):
return RangeIndex(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if ((start_s - start_o) % step_o == 0 and
(start_s + step_o >= start_o) and
(end_s - step_o <= end_o)):
return RangeIndex(start_r, end_r + step_o, step_o)
return self._int64index.union(other, sort=sort) | def union(self, other, sort=None):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other) or len(self) == 0:
return super().union(other, sort=sort)
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self._start, self._step
end_s = self._start + self._step * (len(self) - 1)
start_o, step_o = other._start, other._step
end_o = other._start + other._step * (len(other) - 1)
if self._step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other._step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self._start - other._start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if ((start_s - start_o) % step_s == 0 and
(start_s - end_o) <= step_s and
(start_o - end_s) <= step_s):
return RangeIndex(start_r, end_r + step_s, step_s)
if ((step_s % 2 == 0) and
(abs(start_s - start_o) <= step_s / 2) and
(abs(end_s - end_o) <= step_s / 2)):
return RangeIndex(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if ((start_o - start_s) % step_s == 0 and
(start_o + step_s >= start_s) and
(end_o - step_s <= end_s)):
return RangeIndex(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if ((start_s - start_o) % step_o == 0 and
(start_s + step_o >= start_o) and
(end_s - step_o <= end_o)):
return RangeIndex(start_r, end_r + step_o, step_o)
return self._int64index.union(other, sort=sort) | [
"Form",
"the",
"union",
"of",
"two",
"Index",
"objects",
"and",
"sorts",
"if",
"possible"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L466-L527 | [
"def",
"union",
"(",
"self",
",",
"other",
",",
"sort",
"=",
"None",
")",
":",
"self",
".",
"_assert_can_do_setop",
"(",
"other",
")",
"if",
"len",
"(",
"other",
")",
"==",
"0",
"or",
"self",
".",
"equals",
"(",
"other",
")",
"or",
"len",
"(",
"self",
")",
"==",
"0",
":",
"return",
"super",
"(",
")",
".",
"union",
"(",
"other",
",",
"sort",
"=",
"sort",
")",
"if",
"isinstance",
"(",
"other",
",",
"RangeIndex",
")",
"and",
"sort",
"is",
"None",
":",
"start_s",
",",
"step_s",
"=",
"self",
".",
"_start",
",",
"self",
".",
"_step",
"end_s",
"=",
"self",
".",
"_start",
"+",
"self",
".",
"_step",
"*",
"(",
"len",
"(",
"self",
")",
"-",
"1",
")",
"start_o",
",",
"step_o",
"=",
"other",
".",
"_start",
",",
"other",
".",
"_step",
"end_o",
"=",
"other",
".",
"_start",
"+",
"other",
".",
"_step",
"*",
"(",
"len",
"(",
"other",
")",
"-",
"1",
")",
"if",
"self",
".",
"_step",
"<",
"0",
":",
"start_s",
",",
"step_s",
",",
"end_s",
"=",
"end_s",
",",
"-",
"step_s",
",",
"start_s",
"if",
"other",
".",
"_step",
"<",
"0",
":",
"start_o",
",",
"step_o",
",",
"end_o",
"=",
"end_o",
",",
"-",
"step_o",
",",
"start_o",
"if",
"len",
"(",
"self",
")",
"==",
"1",
"and",
"len",
"(",
"other",
")",
"==",
"1",
":",
"step_s",
"=",
"step_o",
"=",
"abs",
"(",
"self",
".",
"_start",
"-",
"other",
".",
"_start",
")",
"elif",
"len",
"(",
"self",
")",
"==",
"1",
":",
"step_s",
"=",
"step_o",
"elif",
"len",
"(",
"other",
")",
"==",
"1",
":",
"step_o",
"=",
"step_s",
"start_r",
"=",
"min",
"(",
"start_s",
",",
"start_o",
")",
"end_r",
"=",
"max",
"(",
"end_s",
",",
"end_o",
")",
"if",
"step_o",
"==",
"step_s",
":",
"if",
"(",
"(",
"start_s",
"-",
"start_o",
")",
"%",
"step_s",
"==",
"0",
"and",
"(",
"start_s",
"-",
"end_o",
")",
"<=",
"step_s",
"and",
"(",
"start_o",
"-",
"end_s",
")",
"<=",
"step_s",
")",
":",
"return",
"RangeIndex",
"(",
"start_r",
",",
"end_r",
"+",
"step_s",
",",
"step_s",
")",
"if",
"(",
"(",
"step_s",
"%",
"2",
"==",
"0",
")",
"and",
"(",
"abs",
"(",
"start_s",
"-",
"start_o",
")",
"<=",
"step_s",
"/",
"2",
")",
"and",
"(",
"abs",
"(",
"end_s",
"-",
"end_o",
")",
"<=",
"step_s",
"/",
"2",
")",
")",
":",
"return",
"RangeIndex",
"(",
"start_r",
",",
"end_r",
"+",
"step_s",
"/",
"2",
",",
"step_s",
"/",
"2",
")",
"elif",
"step_o",
"%",
"step_s",
"==",
"0",
":",
"if",
"(",
"(",
"start_o",
"-",
"start_s",
")",
"%",
"step_s",
"==",
"0",
"and",
"(",
"start_o",
"+",
"step_s",
">=",
"start_s",
")",
"and",
"(",
"end_o",
"-",
"step_s",
"<=",
"end_s",
")",
")",
":",
"return",
"RangeIndex",
"(",
"start_r",
",",
"end_r",
"+",
"step_s",
",",
"step_s",
")",
"elif",
"step_s",
"%",
"step_o",
"==",
"0",
":",
"if",
"(",
"(",
"start_s",
"-",
"start_o",
")",
"%",
"step_o",
"==",
"0",
"and",
"(",
"start_s",
"+",
"step_o",
">=",
"start_o",
")",
"and",
"(",
"end_s",
"-",
"step_o",
"<=",
"end_o",
")",
")",
":",
"return",
"RangeIndex",
"(",
"start_r",
",",
"end_r",
"+",
"step_o",
",",
"step_o",
")",
"return",
"self",
".",
"_int64index",
".",
"union",
"(",
"other",
",",
"sort",
"=",
"sort",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
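A usage sketch (assumes pandas is importable; printed reprs vary by version): two interleaved ranges with the same step merge back into one RangeIndex, while incompatible steps fall through to the Int64Index path at the end of the method:

import pandas as pd

a = pd.RangeIndex(0, 10, 2)              # 0, 2, 4, 6, 8
b = pd.RangeIndex(1, 10, 2)              # 1, 3, 5, 7, 9
print(a.union(b))                        # RangeIndex(start=0, stop=10, step=1)
print(a.union(pd.RangeIndex(0, 7, 3)))   # steps 2 and 3 do not divide each
                                         # other, so a plain integer index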
train | RangeIndex._add_numeric_methods_binary | add in numeric methods, specialized to RangeIndex | pandas/core/indexes/range.py | def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
def _make_evaluate_binop(op, step=False):
"""
Parameters
----------
op : callable that accepts 2 parms
perform the binary op
step : callable, optional, default to False
op to apply to the step parm if not None
if False, use the existing step
"""
def _evaluate_numeric_binop(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
other = self._validate_for_numeric_binop(other, op)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all='ignore'):
rstep = step(left._step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left._step
with np.errstate(all='ignore'):
rstart = op(left._start, right)
rstop = op(left._stop, right)
result = RangeIndex(rstart,
rstop,
rstep,
**attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in
[rstart, rstop, rstep]):
result = result.astype('float64')
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(_evaluate_numeric_binop, name, cls)
cls.__add__ = _make_evaluate_binop(operator.add)
cls.__radd__ = _make_evaluate_binop(ops.radd)
cls.__sub__ = _make_evaluate_binop(operator.sub)
cls.__rsub__ = _make_evaluate_binop(ops.rsub)
cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul)
cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul)
cls.__truediv__ = _make_evaluate_binop(operator.truediv,
step=operator.truediv)
cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv,
step=ops.rtruediv) | def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
def _make_evaluate_binop(op, step=False):
"""
Parameters
----------
op : callable that accepts 2 parms
perform the binary op
step : callable, optional, default to False
op to apply to the step parm if not None
if False, use the existing step
"""
def _evaluate_numeric_binop(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
other = self._validate_for_numeric_binop(other, op)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all='ignore'):
rstep = step(left._step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left._step
with np.errstate(all='ignore'):
rstart = op(left._start, right)
rstop = op(left._stop, right)
result = RangeIndex(rstart,
rstop,
rstep,
**attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in
[rstart, rstop, rstep]):
result = result.astype('float64')
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(_evaluate_numeric_binop, name, cls)
cls.__add__ = _make_evaluate_binop(operator.add)
cls.__radd__ = _make_evaluate_binop(ops.radd)
cls.__sub__ = _make_evaluate_binop(operator.sub)
cls.__rsub__ = _make_evaluate_binop(ops.rsub)
cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul)
cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul)
cls.__truediv__ = _make_evaluate_binop(operator.truediv,
step=operator.truediv)
cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv,
step=ops.rtruediv) | [
"add",
"in",
"numeric",
"methods",
"specialized",
"to",
"RangeIndex"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/range.py#L644-L727 | [
"def",
"_add_numeric_methods_binary",
"(",
"cls",
")",
":",
"def",
"_make_evaluate_binop",
"(",
"op",
",",
"step",
"=",
"False",
")",
":",
"\"\"\"\n Parameters\n ----------\n op : callable that accepts 2 parms\n perform the binary op\n step : callable, optional, default to False\n op to apply to the step parm if not None\n if False, use the existing step\n \"\"\"",
"def",
"_evaluate_numeric_binop",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"(",
"ABCSeries",
",",
"ABCDataFrame",
")",
")",
":",
"return",
"NotImplemented",
"elif",
"isinstance",
"(",
"other",
",",
"ABCTimedeltaIndex",
")",
":",
"# Defer to TimedeltaIndex implementation",
"return",
"NotImplemented",
"elif",
"isinstance",
"(",
"other",
",",
"(",
"timedelta",
",",
"np",
".",
"timedelta64",
")",
")",
":",
"# GH#19333 is_integer evaluated True on timedelta64,",
"# so we need to catch these explicitly",
"return",
"op",
"(",
"self",
".",
"_int64index",
",",
"other",
")",
"elif",
"is_timedelta64_dtype",
"(",
"other",
")",
":",
"# Must be an np.ndarray; GH#22390",
"return",
"op",
"(",
"self",
".",
"_int64index",
",",
"other",
")",
"other",
"=",
"self",
".",
"_validate_for_numeric_binop",
"(",
"other",
",",
"op",
")",
"attrs",
"=",
"self",
".",
"_get_attributes_dict",
"(",
")",
"attrs",
"=",
"self",
".",
"_maybe_update_attributes",
"(",
"attrs",
")",
"left",
",",
"right",
"=",
"self",
",",
"other",
"try",
":",
"# apply if we have an override",
"if",
"step",
":",
"with",
"np",
".",
"errstate",
"(",
"all",
"=",
"'ignore'",
")",
":",
"rstep",
"=",
"step",
"(",
"left",
".",
"_step",
",",
"right",
")",
"# we don't have a representable op",
"# so return a base index",
"if",
"not",
"is_integer",
"(",
"rstep",
")",
"or",
"not",
"rstep",
":",
"raise",
"ValueError",
"else",
":",
"rstep",
"=",
"left",
".",
"_step",
"with",
"np",
".",
"errstate",
"(",
"all",
"=",
"'ignore'",
")",
":",
"rstart",
"=",
"op",
"(",
"left",
".",
"_start",
",",
"right",
")",
"rstop",
"=",
"op",
"(",
"left",
".",
"_stop",
",",
"right",
")",
"result",
"=",
"RangeIndex",
"(",
"rstart",
",",
"rstop",
",",
"rstep",
",",
"*",
"*",
"attrs",
")",
"# for compat with numpy / Int64Index",
"# even if we can represent as a RangeIndex, return",
"# as a Float64Index if we have float-like descriptors",
"if",
"not",
"all",
"(",
"is_integer",
"(",
"x",
")",
"for",
"x",
"in",
"[",
"rstart",
",",
"rstop",
",",
"rstep",
"]",
")",
":",
"result",
"=",
"result",
".",
"astype",
"(",
"'float64'",
")",
"return",
"result",
"except",
"(",
"ValueError",
",",
"TypeError",
",",
"ZeroDivisionError",
")",
":",
"# Defer to Int64Index implementation",
"return",
"op",
"(",
"self",
".",
"_int64index",
",",
"other",
")",
"# TODO: Do attrs get handled reliably?",
"name",
"=",
"'__{name}__'",
".",
"format",
"(",
"name",
"=",
"op",
".",
"__name__",
")",
"return",
"compat",
".",
"set_function_name",
"(",
"_evaluate_numeric_binop",
",",
"name",
",",
"cls",
")",
"cls",
".",
"__add__",
"=",
"_make_evaluate_binop",
"(",
"operator",
".",
"add",
")",
"cls",
".",
"__radd__",
"=",
"_make_evaluate_binop",
"(",
"ops",
".",
"radd",
")",
"cls",
".",
"__sub__",
"=",
"_make_evaluate_binop",
"(",
"operator",
".",
"sub",
")",
"cls",
".",
"__rsub__",
"=",
"_make_evaluate_binop",
"(",
"ops",
".",
"rsub",
")",
"cls",
".",
"__mul__",
"=",
"_make_evaluate_binop",
"(",
"operator",
".",
"mul",
",",
"step",
"=",
"operator",
".",
"mul",
")",
"cls",
".",
"__rmul__",
"=",
"_make_evaluate_binop",
"(",
"ops",
".",
"rmul",
",",
"step",
"=",
"ops",
".",
"rmul",
")",
"cls",
".",
"__truediv__",
"=",
"_make_evaluate_binop",
"(",
"operator",
".",
"truediv",
",",
"step",
"=",
"operator",
".",
"truediv",
")",
"cls",
".",
"__rtruediv__",
"=",
"_make_evaluate_binop",
"(",
"ops",
".",
"rtruediv",
",",
"step",
"=",
"ops",
".",
"rtruediv",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
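A sketch of the operators wired up above (assumes pandas is importable): additive ops shift start/stop, multiplicative ops also override the step, and any float descriptor falls back to a float index:

import pandas as pd

idx = pd.RangeIndex(0, 5)   # 0..4
print(idx + 3)              # RangeIndex(start=3, stop=8, step=1)
print(idx * 2)              # RangeIndex(start=0, stop=10, step=2)
print(idx / 2)              # step 0.5 is not an integer: float64 index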
train | PandasArray.to_numpy | Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
ndarray | pandas/core/arrays/numpy_.py | def to_numpy(self, dtype=None, copy=False):
"""
Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
ndarray
"""
result = np.asarray(self._ndarray, dtype=dtype)
if copy and result is self._ndarray:
result = result.copy()
return result | def to_numpy(self, dtype=None, copy=False):
"""
Convert the PandasArray to a :class:`numpy.ndarray`.
By default, this requires no coercion or copying of data.
Parameters
----------
dtype : numpy.dtype
The NumPy dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to copy the underlying data.
Returns
-------
ndarray
"""
result = np.asarray(self._ndarray, dtype=dtype)
if copy and result is self._ndarray:
result = result.copy()
return result | [
"Convert",
"the",
"PandasArray",
"to",
"a",
":",
"class",
":",
"numpy",
".",
"ndarray",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/numpy_.py#L388-L409 | [
"def",
"to_numpy",
"(",
"self",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"result",
"=",
"np",
".",
"asarray",
"(",
"self",
".",
"_ndarray",
",",
"dtype",
"=",
"dtype",
")",
"if",
"copy",
"and",
"result",
"is",
"self",
".",
"_ndarray",
":",
"result",
"=",
"result",
".",
"copy",
"(",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
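A sketch of the copy semantics above: with no dtype change, np.asarray hands back the wrapped ndarray itself, so only copy=True forces a new buffer. Assumes a pandas version where Series.array yields this numpy-backed array:

import pandas as pd

arr = pd.Series([1, 2, 3]).array   # numpy-backed extension array
view = arr.to_numpy()              # same underlying buffer, no copy
copied = arr.to_numpy(copy=True)   # forced copy
copied[0] = 99
print(view[0], copied[0])          # 1 99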
train | adjoin | Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Parameters
----------
space : int
number of spaces for padding
lists : str
list of str which are being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling. | pandas/io/formats/printing.py | def adjoin(space, *lists, **kwargs):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Parameters
----------
space : int
number of spaces for padding
lists : str
list of str which are being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop('strlen', len)
justfunc = kwargs.pop('justfunc', justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode='left')
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n') | def adjoin(space, *lists, **kwargs):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
Parameters
----------
space : int
number of spaces for padding
lists : str
list of str which are being joined
strlen : callable
function used to calculate the length of each str. Needed for unicode
handling.
justfunc : callable
function used to justify str. Needed for unicode handling.
"""
strlen = kwargs.pop('strlen', len)
justfunc = kwargs.pop('justfunc', justify)
out_lines = []
newLists = []
lengths = [max(map(strlen, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = justfunc(lst, lengths[i], mode='left')
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n') | [
"Glues",
"together",
"two",
"sets",
"of",
"strings",
"using",
"the",
"amount",
"of",
"space",
"requested",
".",
"The",
"idea",
"is",
"to",
"prettify",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L12-L44 | [
"def",
"adjoin",
"(",
"space",
",",
"*",
"lists",
",",
"*",
"*",
"kwargs",
")",
":",
"strlen",
"=",
"kwargs",
".",
"pop",
"(",
"'strlen'",
",",
"len",
")",
"justfunc",
"=",
"kwargs",
".",
"pop",
"(",
"'justfunc'",
",",
"justify",
")",
"out_lines",
"=",
"[",
"]",
"newLists",
"=",
"[",
"]",
"lengths",
"=",
"[",
"max",
"(",
"map",
"(",
"strlen",
",",
"x",
")",
")",
"+",
"space",
"for",
"x",
"in",
"lists",
"[",
":",
"-",
"1",
"]",
"]",
"# not the last one",
"lengths",
".",
"append",
"(",
"max",
"(",
"map",
"(",
"len",
",",
"lists",
"[",
"-",
"1",
"]",
")",
")",
")",
"maxLen",
"=",
"max",
"(",
"map",
"(",
"len",
",",
"lists",
")",
")",
"for",
"i",
",",
"lst",
"in",
"enumerate",
"(",
"lists",
")",
":",
"nl",
"=",
"justfunc",
"(",
"lst",
",",
"lengths",
"[",
"i",
"]",
",",
"mode",
"=",
"'left'",
")",
"nl",
".",
"extend",
"(",
"[",
"' '",
"*",
"lengths",
"[",
"i",
"]",
"]",
"*",
"(",
"maxLen",
"-",
"len",
"(",
"lst",
")",
")",
")",
"newLists",
".",
"append",
"(",
"nl",
")",
"toJoin",
"=",
"zip",
"(",
"*",
"newLists",
")",
"for",
"lines",
"in",
"toJoin",
":",
"out_lines",
".",
"append",
"(",
"_join_unicode",
"(",
"lines",
")",
")",
"return",
"_join_unicode",
"(",
"out_lines",
",",
"sep",
"=",
"'\\n'",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
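An illustrative call (internal API; the import path follows the dataset's path column and may move between versions):

from pandas.io.formats.printing import adjoin

print(adjoin(2, ['a', 'bb'], ['ccc', 'd']))
# a   ccc
# bb  d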
train | justify | Perform ljust, center, rjust against string or list-like | pandas/io/formats/printing.py | def justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts] | def justify(texts, max_len, mode='right'):
"""
Perform ljust, center, rjust against string or list-like
"""
if mode == 'left':
return [x.ljust(max_len) for x in texts]
elif mode == 'center':
return [x.center(max_len) for x in texts]
else:
return [x.rjust(max_len) for x in texts] | [
"Perform",
"ljust",
"center",
"rjust",
"against",
"string",
"or",
"list",
"-",
"like"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L47-L56 | [
"def",
"justify",
"(",
"texts",
",",
"max_len",
",",
"mode",
"=",
"'right'",
")",
":",
"if",
"mode",
"==",
"'left'",
":",
"return",
"[",
"x",
".",
"ljust",
"(",
"max_len",
")",
"for",
"x",
"in",
"texts",
"]",
"elif",
"mode",
"==",
"'center'",
":",
"return",
"[",
"x",
".",
"center",
"(",
"max_len",
")",
"for",
"x",
"in",
"texts",
"]",
"else",
":",
"return",
"[",
"x",
".",
"rjust",
"(",
"max_len",
")",
"for",
"x",
"in",
"texts",
"]"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
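A quick check of the three modes (same internal module as above):

from pandas.io.formats.printing import justify

print(justify(['a', 'bb'], 5, mode='left'))     # ['a    ', 'bb   ']
print(justify(['a', 'bb'], 5, mode='center'))   # ['  a  ', ' bb  ']
print(justify(['a', 'bb'], 5))                  # ['    a', '   bb']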
train | _pprint_seq | internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
bounds length of printed sequence, depending on options | pandas/io/formats/printing.py | def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = "{{{body}}}"
else:
fmt = "[{body}]" if hasattr(seq, '__setitem__') else "({body})"
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
s = iter(seq)
# handle sets, no slicing
r = [pprint_thing(next(s),
_nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
for i in range(min(nitems, len(seq)))]
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt.format(body=body) | def _pprint_seq(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = "{{{body}}}"
else:
fmt = "[{body}]" if hasattr(seq, '__setitem__') else "({body})"
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
s = iter(seq)
# handle sets, no slicing
r = [pprint_thing(next(s),
_nest_lvl + 1, max_seq_items=max_seq_items, **kwds)
for i in range(min(nitems, len(seq)))]
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt.format(body=body) | [
"internal",
".",
"pprinter",
"for",
"iterables",
".",
"you",
"should",
"probably",
"use",
"pprint_thing",
"()",
"rather",
"then",
"calling",
"this",
"directly",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L92-L121 | [
"def",
"_pprint_seq",
"(",
"seq",
",",
"_nest_lvl",
"=",
"0",
",",
"max_seq_items",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"isinstance",
"(",
"seq",
",",
"set",
")",
":",
"fmt",
"=",
"\"{{{body}}}\"",
"else",
":",
"fmt",
"=",
"\"[{body}]\"",
"if",
"hasattr",
"(",
"seq",
",",
"'__setitem__'",
")",
"else",
"\"({body})\"",
"if",
"max_seq_items",
"is",
"False",
":",
"nitems",
"=",
"len",
"(",
"seq",
")",
"else",
":",
"nitems",
"=",
"max_seq_items",
"or",
"get_option",
"(",
"\"max_seq_items\"",
")",
"or",
"len",
"(",
"seq",
")",
"s",
"=",
"iter",
"(",
"seq",
")",
"# handle sets, no slicing",
"r",
"=",
"[",
"pprint_thing",
"(",
"next",
"(",
"s",
")",
",",
"_nest_lvl",
"+",
"1",
",",
"max_seq_items",
"=",
"max_seq_items",
",",
"*",
"*",
"kwds",
")",
"for",
"i",
"in",
"range",
"(",
"min",
"(",
"nitems",
",",
"len",
"(",
"seq",
")",
")",
")",
"]",
"body",
"=",
"\", \"",
".",
"join",
"(",
"r",
")",
"if",
"nitems",
"<",
"len",
"(",
"seq",
")",
":",
"body",
"+=",
"\", ...\"",
"elif",
"isinstance",
"(",
"seq",
",",
"tuple",
")",
"and",
"len",
"(",
"seq",
")",
"==",
"1",
":",
"body",
"+=",
"','",
"return",
"fmt",
".",
"format",
"(",
"body",
"=",
"body",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
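The truncation is easiest to see through the public wrapper pprint_thing, which appears later in this dataset and dispatches here for sequences:

from pandas.io.formats.printing import pprint_thing

print(pprint_thing(list(range(10)), max_seq_items=3))   # [0, 1, 2, ...]
print(pprint_thing((1,), max_seq_items=3))              # (1,)  note the
                                                        # one-tuple comma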
train | _pprint_dict | internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly. | pandas/io/formats/printing.py | def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather than calling this directly.
"""
fmt = "{{{things}}}"
pairs = []
pfmt = "{key}: {val}"
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(
pfmt.format(
key=pprint_thing(k, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds),
val=pprint_thing(v, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt.format(things=", ".join(pairs) + ", ...")
else:
return fmt.format(things=", ".join(pairs)) | def _pprint_dict(seq, _nest_lvl=0, max_seq_items=None, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
rather then calling this directly.
"""
fmt = "{{{things}}}"
pairs = []
pfmt = "{key}: {val}"
if max_seq_items is False:
nitems = len(seq)
else:
nitems = max_seq_items or get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(
pfmt.format(
key=pprint_thing(k, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds),
val=pprint_thing(v, _nest_lvl + 1,
max_seq_items=max_seq_items, **kwds)))
if nitems < len(seq):
return fmt.format(things=", ".join(pairs) + ", ...")
else:
return fmt.format(things=", ".join(pairs)) | [
"internal",
".",
"pprinter",
"for",
"iterables",
".",
"you",
"should",
"probably",
"use",
"pprint_thing",
"()",
"rather",
"then",
"calling",
"this",
"directly",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L124-L150 | [
"def",
"_pprint_dict",
"(",
"seq",
",",
"_nest_lvl",
"=",
"0",
",",
"max_seq_items",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"fmt",
"=",
"\"{{{things}}}\"",
"pairs",
"=",
"[",
"]",
"pfmt",
"=",
"\"{key}: {val}\"",
"if",
"max_seq_items",
"is",
"False",
":",
"nitems",
"=",
"len",
"(",
"seq",
")",
"else",
":",
"nitems",
"=",
"max_seq_items",
"or",
"get_option",
"(",
"\"max_seq_items\"",
")",
"or",
"len",
"(",
"seq",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"seq",
".",
"items",
"(",
")",
")",
"[",
":",
"nitems",
"]",
":",
"pairs",
".",
"append",
"(",
"pfmt",
".",
"format",
"(",
"key",
"=",
"pprint_thing",
"(",
"k",
",",
"_nest_lvl",
"+",
"1",
",",
"max_seq_items",
"=",
"max_seq_items",
",",
"*",
"*",
"kwds",
")",
",",
"val",
"=",
"pprint_thing",
"(",
"v",
",",
"_nest_lvl",
"+",
"1",
",",
"max_seq_items",
"=",
"max_seq_items",
",",
"*",
"*",
"kwds",
")",
")",
")",
"if",
"nitems",
"<",
"len",
"(",
"seq",
")",
":",
"return",
"fmt",
".",
"format",
"(",
"things",
"=",
"\", \"",
".",
"join",
"(",
"pairs",
")",
"+",
"\", ...\"",
")",
"else",
":",
"return",
"fmt",
".",
"format",
"(",
"things",
"=",
"\", \"",
".",
"join",
"(",
"pairs",
")",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
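The same idea for mappings: keys and values are rendered recursively, with strings quoted, and max_seq_items caps the number of pairs shown:

from pandas.io.formats.printing import pprint_thing

print(pprint_thing({'a': 1, 'b': 2}))                    # {'a': 1, 'b': 2}
print(pprint_thing({'a': 1, 'b': 2}, max_seq_items=1))   # {'a': 1, ...}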
train | pprint_thing | This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with _pprint_seq, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replace or add to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode str | pandas/io/formats/printing.py | def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False, max_seq_items=None):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with _pprint_seq, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replace or add to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode str
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = str(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return str(result)
if hasattr(thing, '__next__'):
return str(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
max_seq_items=max_seq_items)
elif (is_sequence(thing) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings,
max_seq_items=max_seq_items)
elif isinstance(thing, str) and quote_strings:
result = "'{thing}'".format(thing=as_escaped_unicode(thing))
else:
result = as_escaped_unicode(thing)
return str(result) | def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False, max_seq_items=None):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
with _pprint_seq, this argument is used to keep track of the
current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
Whether the input escape characters replace or add to the defaults
max_seq_items : False, int, default None
Pass thru to other pretty printers to limit sequence printing
Returns
-------
result - unicode str
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = str(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t', '\n': r'\n', '\r': r'\r', }
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return str(result)
if hasattr(thing, '__next__'):
return str(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True,
max_seq_items=max_seq_items)
elif (is_sequence(thing) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings,
max_seq_items=max_seq_items)
elif isinstance(thing, str) and quote_strings:
result = "'{thing}'".format(thing=as_escaped_unicode(thing))
else:
result = as_escaped_unicode(thing)
return str(result) | [
"This",
"function",
"is",
"the",
"sanctioned",
"way",
"of",
"converting",
"objects",
"to",
"a",
"unicode",
"representation",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L153-L223 | [
"def",
"pprint_thing",
"(",
"thing",
",",
"_nest_lvl",
"=",
"0",
",",
"escape_chars",
"=",
"None",
",",
"default_escapes",
"=",
"False",
",",
"quote_strings",
"=",
"False",
",",
"max_seq_items",
"=",
"None",
")",
":",
"def",
"as_escaped_unicode",
"(",
"thing",
",",
"escape_chars",
"=",
"escape_chars",
")",
":",
"# Unicode is fine, else we try to decode using utf-8 and 'replace'",
"# if that's not it either, we have no way of knowing and the user",
"# should deal with it himself.",
"try",
":",
"result",
"=",
"str",
"(",
"thing",
")",
"# we should try this first",
"except",
"UnicodeDecodeError",
":",
"# either utf-8 or we replace errors",
"result",
"=",
"str",
"(",
"thing",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"\"replace\"",
")",
"translate",
"=",
"{",
"'\\t'",
":",
"r'\\t'",
",",
"'\\n'",
":",
"r'\\n'",
",",
"'\\r'",
":",
"r'\\r'",
",",
"}",
"if",
"isinstance",
"(",
"escape_chars",
",",
"dict",
")",
":",
"if",
"default_escapes",
":",
"translate",
".",
"update",
"(",
"escape_chars",
")",
"else",
":",
"translate",
"=",
"escape_chars",
"escape_chars",
"=",
"list",
"(",
"escape_chars",
".",
"keys",
"(",
")",
")",
"else",
":",
"escape_chars",
"=",
"escape_chars",
"or",
"tuple",
"(",
")",
"for",
"c",
"in",
"escape_chars",
":",
"result",
"=",
"result",
".",
"replace",
"(",
"c",
",",
"translate",
"[",
"c",
"]",
")",
"return",
"str",
"(",
"result",
")",
"if",
"hasattr",
"(",
"thing",
",",
"'__next__'",
")",
":",
"return",
"str",
"(",
"thing",
")",
"elif",
"(",
"isinstance",
"(",
"thing",
",",
"dict",
")",
"and",
"_nest_lvl",
"<",
"get_option",
"(",
"\"display.pprint_nest_depth\"",
")",
")",
":",
"result",
"=",
"_pprint_dict",
"(",
"thing",
",",
"_nest_lvl",
",",
"quote_strings",
"=",
"True",
",",
"max_seq_items",
"=",
"max_seq_items",
")",
"elif",
"(",
"is_sequence",
"(",
"thing",
")",
"and",
"_nest_lvl",
"<",
"get_option",
"(",
"\"display.pprint_nest_depth\"",
")",
")",
":",
"result",
"=",
"_pprint_seq",
"(",
"thing",
",",
"_nest_lvl",
",",
"escape_chars",
"=",
"escape_chars",
",",
"quote_strings",
"=",
"quote_strings",
",",
"max_seq_items",
"=",
"max_seq_items",
")",
"elif",
"isinstance",
"(",
"thing",
",",
"str",
")",
"and",
"quote_strings",
":",
"result",
"=",
"\"'{thing}'\"",
".",
"format",
"(",
"thing",
"=",
"as_escaped_unicode",
"(",
"thing",
")",
")",
"else",
":",
"result",
"=",
"as_escaped_unicode",
"(",
"thing",
")",
"return",
"str",
"(",
"result",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
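An end-to-end sketch of the escaping and quoting options documented above:

from pandas.io.formats.printing import pprint_thing

print(pprint_thing('a\tb'))                        # tab comes through as-is
print(pprint_thing('a\tb', escape_chars=['\t']))   # a\tb (backslash + t)
print(pprint_thing('x', quote_strings=True))       # 'x'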
train | format_object_summary | Return the formatted obj as a unicode string
Parameters
----------
obj : object
must be iterable and support __getitem__
formatter : callable
string formatter for an element
is_justify : boolean
should justify the display
name : name, optional
defaults to the class name of the obj
indent_for_name : bool, default True
Whether subsequent lines should be indented to
align with the name.
Returns
-------
summary string | pandas/io/formats/printing.py | def format_object_summary(obj, formatter, is_justify=True, name=None,
indent_for_name=True):
"""
Return the formatted obj as a unicode string
Parameters
----------
obj : object
must be iterable and support __getitem__
formatter : callable
string formatter for an element
is_justify : boolean
should justify the display
name : name, optional
defaults to the class name of the obj
indent_for_name : bool, default True
Whether subsequent lines should be indented to
align with the name.
Returns
-------
summary string
"""
from pandas.io.formats.console import get_console_size
from pandas.io.formats.format import _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
if name is None:
name = obj.__class__.__name__
if indent_for_name:
name_len = len(name)
space1 = "\n%s" % (' ' * (name_len + 1))
space2 = "\n%s" % (' ' * (name_len + 2))
else:
space1 = "\n"
space2 = "\n " # space for the opening '['
n = len(obj)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode eastern asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max(adj.len(x) for x in values)
else:
return 0
close = ', '
if n == 0:
summary = '[]{}'.format(close)
elif n == 1:
first = formatter(obj[0])
summary = '[{}]{}'.format(first, close)
elif n == 2:
first = formatter(obj[0])
last = formatter(obj[-1])
summary = '[{}, {}]{}'.format(first, last, close)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in obj[:n]]
tail = [formatter(x) for x in obj[-n:]]
else:
head = []
tail = [formatter(x) for x in obj]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
# right now close is either '' or ', '
# Now we want to include the ']', but not the maybe space.
close = ']' + close.rstrip(' ')
summary += close
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary | def format_object_summary(obj, formatter, is_justify=True, name=None,
indent_for_name=True):
"""
Return the formatted obj as a unicode string
Parameters
----------
obj : object
must be iterable and support __getitem__
formatter : callable
string formatter for an element
is_justify : boolean
should justify the display
name : name, optional
defaults to the class name of the obj
indent_for_name : bool, default True
Whether subsequent lines should be indented to
align with the name.
Returns
-------
summary string
"""
from pandas.io.formats.console import get_console_size
from pandas.io.formats.format import _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
if name is None:
name = obj.__class__.__name__
if indent_for_name:
name_len = len(name)
space1 = "\n%s" % (' ' * (name_len + 1))
space2 = "\n%s" % (' ' * (name_len + 2))
else:
space1 = "\n"
space2 = "\n " # space for the opening '['
n = len(obj)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode eastern asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max(adj.len(x) for x in values)
else:
return 0
close = ', '
if n == 0:
summary = '[]{}'.format(close)
elif n == 1:
first = formatter(obj[0])
summary = '[{}]{}'.format(first, close)
elif n == 2:
first = formatter(obj[0])
last = formatter(obj[-1])
summary = '[{}, {}]{}'.format(first, last, close)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in obj[:n]]
tail = [formatter(x) for x in obj[-n:]]
else:
head = []
tail = [formatter(x) for x in obj]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
# right now close is either '' or ', '
# Now we want to include the ']', but not the maybe space.
close = ']' + close.rstrip(' ')
summary += close
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary | [
"Return",
"the",
"formatted",
"obj",
"as",
"a",
"unicode",
"string"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L267-L402 | [
"def",
"format_object_summary",
"(",
"obj",
",",
"formatter",
",",
"is_justify",
"=",
"True",
",",
"name",
"=",
"None",
",",
"indent_for_name",
"=",
"True",
")",
":",
"from",
"pandas",
".",
"io",
".",
"formats",
".",
"console",
"import",
"get_console_size",
"from",
"pandas",
".",
"io",
".",
"formats",
".",
"format",
"import",
"_get_adjustment",
"display_width",
",",
"_",
"=",
"get_console_size",
"(",
")",
"if",
"display_width",
"is",
"None",
":",
"display_width",
"=",
"get_option",
"(",
"'display.width'",
")",
"or",
"80",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"obj",
".",
"__class__",
".",
"__name__",
"if",
"indent_for_name",
":",
"name_len",
"=",
"len",
"(",
"name",
")",
"space1",
"=",
"\"\\n%s\"",
"%",
"(",
"' '",
"*",
"(",
"name_len",
"+",
"1",
")",
")",
"space2",
"=",
"\"\\n%s\"",
"%",
"(",
"' '",
"*",
"(",
"name_len",
"+",
"2",
")",
")",
"else",
":",
"space1",
"=",
"\"\\n\"",
"space2",
"=",
"\"\\n \"",
"# space for the opening '['",
"n",
"=",
"len",
"(",
"obj",
")",
"sep",
"=",
"','",
"max_seq_items",
"=",
"get_option",
"(",
"'display.max_seq_items'",
")",
"or",
"n",
"# are we a truncated display",
"is_truncated",
"=",
"n",
">",
"max_seq_items",
"# adj can optionally handle unicode eastern asian width",
"adj",
"=",
"_get_adjustment",
"(",
")",
"def",
"_extend_line",
"(",
"s",
",",
"line",
",",
"value",
",",
"display_width",
",",
"next_line_prefix",
")",
":",
"if",
"(",
"adj",
".",
"len",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"+",
"adj",
".",
"len",
"(",
"value",
".",
"rstrip",
"(",
")",
")",
">=",
"display_width",
")",
":",
"s",
"+=",
"line",
".",
"rstrip",
"(",
")",
"line",
"=",
"next_line_prefix",
"line",
"+=",
"value",
"return",
"s",
",",
"line",
"def",
"best_len",
"(",
"values",
")",
":",
"if",
"values",
":",
"return",
"max",
"(",
"adj",
".",
"len",
"(",
"x",
")",
"for",
"x",
"in",
"values",
")",
"else",
":",
"return",
"0",
"close",
"=",
"', '",
"if",
"n",
"==",
"0",
":",
"summary",
"=",
"'[]{}'",
".",
"format",
"(",
"close",
")",
"elif",
"n",
"==",
"1",
":",
"first",
"=",
"formatter",
"(",
"obj",
"[",
"0",
"]",
")",
"summary",
"=",
"'[{}]{}'",
".",
"format",
"(",
"first",
",",
"close",
")",
"elif",
"n",
"==",
"2",
":",
"first",
"=",
"formatter",
"(",
"obj",
"[",
"0",
"]",
")",
"last",
"=",
"formatter",
"(",
"obj",
"[",
"-",
"1",
"]",
")",
"summary",
"=",
"'[{}, {}]{}'",
".",
"format",
"(",
"first",
",",
"last",
",",
"close",
")",
"else",
":",
"if",
"n",
">",
"max_seq_items",
":",
"n",
"=",
"min",
"(",
"max_seq_items",
"//",
"2",
",",
"10",
")",
"head",
"=",
"[",
"formatter",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"[",
":",
"n",
"]",
"]",
"tail",
"=",
"[",
"formatter",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"[",
"-",
"n",
":",
"]",
"]",
"else",
":",
"head",
"=",
"[",
"]",
"tail",
"=",
"[",
"formatter",
"(",
"x",
")",
"for",
"x",
"in",
"obj",
"]",
"# adjust all values to max length if needed",
"if",
"is_justify",
":",
"# however, if we are not truncated and we are only a single",
"# line, then don't justify",
"if",
"(",
"is_truncated",
"or",
"not",
"(",
"len",
"(",
"', '",
".",
"join",
"(",
"head",
")",
")",
"<",
"display_width",
"and",
"len",
"(",
"', '",
".",
"join",
"(",
"tail",
")",
")",
"<",
"display_width",
")",
")",
":",
"max_len",
"=",
"max",
"(",
"best_len",
"(",
"head",
")",
",",
"best_len",
"(",
"tail",
")",
")",
"head",
"=",
"[",
"x",
".",
"rjust",
"(",
"max_len",
")",
"for",
"x",
"in",
"head",
"]",
"tail",
"=",
"[",
"x",
".",
"rjust",
"(",
"max_len",
")",
"for",
"x",
"in",
"tail",
"]",
"summary",
"=",
"\"\"",
"line",
"=",
"space2",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"head",
")",
")",
":",
"word",
"=",
"head",
"[",
"i",
"]",
"+",
"sep",
"+",
"' '",
"summary",
",",
"line",
"=",
"_extend_line",
"(",
"summary",
",",
"line",
",",
"word",
",",
"display_width",
",",
"space2",
")",
"if",
"is_truncated",
":",
"# remove trailing space of last line",
"summary",
"+=",
"line",
".",
"rstrip",
"(",
")",
"+",
"space2",
"+",
"'...'",
"line",
"=",
"space2",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"tail",
")",
"-",
"1",
")",
":",
"word",
"=",
"tail",
"[",
"i",
"]",
"+",
"sep",
"+",
"' '",
"summary",
",",
"line",
"=",
"_extend_line",
"(",
"summary",
",",
"line",
",",
"word",
",",
"display_width",
",",
"space2",
")",
"# last value: no sep added + 1 space of width used for trailing ','",
"summary",
",",
"line",
"=",
"_extend_line",
"(",
"summary",
",",
"line",
",",
"tail",
"[",
"-",
"1",
"]",
",",
"display_width",
"-",
"2",
",",
"space2",
")",
"summary",
"+=",
"line",
"# right now close is either '' or ', '",
"# Now we want to include the ']', but not the maybe space.",
"close",
"=",
"']'",
"+",
"close",
".",
"rstrip",
"(",
"' '",
")",
"summary",
"+=",
"close",
"if",
"len",
"(",
"summary",
")",
">",
"(",
"display_width",
")",
":",
"summary",
"+=",
"space1",
"else",
":",
"# one row",
"summary",
"+=",
"' '",
"# remove initial space",
"summary",
"=",
"'['",
"+",
"summary",
"[",
"len",
"(",
"space2",
")",
":",
"]",
"return",
"summary"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | format_object_attrs | Return a list of tuples of the (attr, formatted_value)
for common attrs, including dtype, name, length
Parameters
----------
obj : object
must be iterable
Returns
-------
list | pandas/io/formats/printing.py | def format_object_attrs(obj):
"""
Return a list of tuples of the (attr, formatted_value)
for common attrs, including dtype, name, length
Parameters
----------
obj : object
must be iterable
Returns
-------
list
"""
attrs = []
if hasattr(obj, 'dtype'):
attrs.append(('dtype', "'{}'".format(obj.dtype)))
if getattr(obj, 'name', None) is not None:
attrs.append(('name', default_pprint(obj.name)))
max_seq_items = get_option('display.max_seq_items') or len(obj)
if len(obj) > max_seq_items:
attrs.append(('length', len(obj)))
return attrs | def format_object_attrs(obj):
"""
Return a list of tuples of the (attr, formatted_value)
for common attrs, including dtype, name, length
Parameters
----------
obj : object
must be iterable
Returns
-------
list
"""
attrs = []
if hasattr(obj, 'dtype'):
attrs.append(('dtype', "'{}'".format(obj.dtype)))
if getattr(obj, 'name', None) is not None:
attrs.append(('name', default_pprint(obj.name)))
max_seq_items = get_option('display.max_seq_items') or len(obj)
if len(obj) > max_seq_items:
attrs.append(('length', len(obj)))
return attrs | [
"Return",
"a",
"list",
"of",
"tuples",
"of",
"the",
"(",
"attr",
"formatted_value",
")",
"for",
"common",
"attrs",
"including",
"dtype",
"name",
"length"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/printing.py#L405-L428 | [
"def",
"format_object_attrs",
"(",
"obj",
")",
":",
"attrs",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"obj",
",",
"'dtype'",
")",
":",
"attrs",
".",
"append",
"(",
"(",
"'dtype'",
",",
"\"'{}'\"",
".",
"format",
"(",
"obj",
".",
"dtype",
")",
")",
")",
"if",
"getattr",
"(",
"obj",
",",
"'name'",
",",
"None",
")",
"is",
"not",
"None",
":",
"attrs",
".",
"append",
"(",
"(",
"'name'",
",",
"default_pprint",
"(",
"obj",
".",
"name",
")",
")",
")",
"max_seq_items",
"=",
"get_option",
"(",
"'display.max_seq_items'",
")",
"or",
"len",
"(",
"obj",
")",
"if",
"len",
"(",
"obj",
")",
">",
"max_seq_items",
":",
"attrs",
".",
"append",
"(",
"(",
"'length'",
",",
"len",
"(",
"obj",
")",
")",
")",
"return",
"attrs"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | read_gbq | Load data from Google BigQuery.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
query : str
SQL-like query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
Note: The default value is changing to 'standard' in a future verion.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
.. versionchanged:: 0.24.0
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
*New in version 0.5.0 of pandas-gbq*.
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
This feature requires version 0.10.0 or later of the ``pandas-gbq``
package. It also requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.25.0
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
verbose : None, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module to
adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
Returns
-------
df: DataFrame
DataFrame representing results of query.
See Also
--------
pandas_gbq.read_gbq : This function in the pandas-gbq library.
DataFrame.to_gbq : Write a DataFrame to Google BigQuery. | pandas/io/gbq.py | def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, auth_local_webserver=False, dialect=None,
location=None, configuration=None, credentials=None,
use_bqstorage_api=None, private_key=None, verbose=None):
"""
Load data from Google BigQuery.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
query : str
SQL-like query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
Note: The default value is changing to 'standard' in a future version.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
.. versionchanged:: 0.24.0
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
*New in version 0.5.0 of pandas-gbq*.
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
This feature requires version 0.10.0 or later of the ``pandas-gbq``
package. It also requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.25.0
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
verbose : None, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module to
adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
Returns
-------
df: DataFrame
DataFrame representing results of query.
See Also
--------
pandas_gbq.read_gbq : This function in the pandas-gbq library.
DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
"""
pandas_gbq = _try_import()
kwargs = {}
# START: new kwargs. Don't populate unless explicitly set.
if use_bqstorage_api is not None:
kwargs["use_bqstorage_api"] = use_bqstorage_api
# END: new kwargs
# START: deprecated kwargs. Don't populate unless explicitly set.
if verbose is not None:
kwargs["verbose"] = verbose
if private_key is not None:
kwargs["private_key"] = private_key
# END: deprecated kwargs
return pandas_gbq.read_gbq(
query, project_id=project_id, index_col=index_col,
col_order=col_order, reauth=reauth,
auth_local_webserver=auth_local_webserver, dialect=dialect,
location=location, configuration=configuration,
credentials=credentials, **kwargs) | def read_gbq(query, project_id=None, index_col=None, col_order=None,
reauth=False, auth_local_webserver=False, dialect=None,
location=None, configuration=None, credentials=None,
use_bqstorage_api=None, private_key=None, verbose=None):
"""
Load data from Google BigQuery.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
query : str
SQL-like query to return data values.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
index_col : str, optional
Name of result column to use for index in results DataFrame.
col_order : list(str), optional
List of BigQuery column names in the desired order for results
DataFrame.
reauth : boolean, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
auth_local_webserver : boolean, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
dialect : str, default 'legacy'
Note: The default value is changing to 'standard' in a future version.
SQL syntax dialect to use. Value can be one of:
``'legacy'``
Use BigQuery's legacy SQL dialect. For more information see
`BigQuery Legacy SQL Reference
<https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
``'standard'``
Use BigQuery's standard SQL, which is
compliant with the SQL 2011 standard. For more information
see `BigQuery Standard SQL Reference
<https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
.. versionchanged:: 0.24.0
location : str, optional
Location where the query job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of any
datasets used in the query.
*New in version 0.5.0 of pandas-gbq*.
configuration : dict, optional
Query config parameters for job processing.
For example:
configuration = {'query': {'useQueryCache': False}}
For more information see `BigQuery REST API Reference
<https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service Account
:class:`google.oauth2.service_account.Credentials` directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
use_bqstorage_api : bool, default False
Use the `BigQuery Storage API
<https://cloud.google.com/bigquery/docs/reference/storage/>`__ to
download query results quickly, but at an increased cost. To use this
API, first `enable it in the Cloud Console
<https://console.cloud.google.com/apis/library/bigquerystorage.googleapis.com>`__.
You must also have the `bigquery.readsessions.create
<https://cloud.google.com/bigquery/docs/access-control#roles>`__
permission on the project you are billing queries to.
This feature requires version 0.10.0 or later of the ``pandas-gbq``
package. It also requires the ``google-cloud-bigquery-storage`` and
``fastavro`` packages.
.. versionadded:: 0.25.0
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (eg. Jupyter/IPython notebook on remote host).
verbose : None, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module to
adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
Returns
-------
df: DataFrame
DataFrame representing results of query.
See Also
--------
pandas_gbq.read_gbq : This function in the pandas-gbq library.
DataFrame.to_gbq : Write a DataFrame to Google BigQuery.
"""
pandas_gbq = _try_import()
kwargs = {}
# START: new kwargs. Don't populate unless explicitly set.
if use_bqstorage_api is not None:
kwargs["use_bqstorage_api"] = use_bqstorage_api
# END: new kwargs
# START: deprecated kwargs. Don't populate unless explicitly set.
if verbose is not None:
kwargs["verbose"] = verbose
if private_key is not None:
kwargs["private_key"] = private_key
# END: deprecated kwargs
return pandas_gbq.read_gbq(
query, project_id=project_id, index_col=index_col,
col_order=col_order, reauth=reauth,
auth_local_webserver=auth_local_webserver, dialect=dialect,
location=location, configuration=configuration,
credentials=credentials, **kwargs) | [
"Load",
"data",
"from",
"Google",
"BigQuery",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/gbq.py#L24-L167 | [
"def",
"read_gbq",
"(",
"query",
",",
"project_id",
"=",
"None",
",",
"index_col",
"=",
"None",
",",
"col_order",
"=",
"None",
",",
"reauth",
"=",
"False",
",",
"auth_local_webserver",
"=",
"False",
",",
"dialect",
"=",
"None",
",",
"location",
"=",
"None",
",",
"configuration",
"=",
"None",
",",
"credentials",
"=",
"None",
",",
"use_bqstorage_api",
"=",
"None",
",",
"private_key",
"=",
"None",
",",
"verbose",
"=",
"None",
")",
":",
"pandas_gbq",
"=",
"_try_import",
"(",
")",
"kwargs",
"=",
"{",
"}",
"# START: new kwargs. Don't populate unless explicitly set.",
"if",
"use_bqstorage_api",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"use_bqstorage_api\"",
"]",
"=",
"use_bqstorage_api",
"# END: new kwargs",
"# START: deprecated kwargs. Don't populate unless explicitly set.",
"if",
"verbose",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"verbose\"",
"]",
"=",
"verbose",
"if",
"private_key",
"is",
"not",
"None",
":",
"kwargs",
"[",
"\"private_key\"",
"]",
"=",
"private_key",
"# END: deprecated kwargs",
"return",
"pandas_gbq",
".",
"read_gbq",
"(",
"query",
",",
"project_id",
"=",
"project_id",
",",
"index_col",
"=",
"index_col",
",",
"col_order",
"=",
"col_order",
",",
"reauth",
"=",
"reauth",
",",
"auth_local_webserver",
"=",
"auth_local_webserver",
",",
"dialect",
"=",
"dialect",
",",
"location",
"=",
"location",
",",
"configuration",
"=",
"configuration",
",",
"credentials",
"=",
"credentials",
",",
"*",
"*",
"kwargs",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | scatter_matrix | Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2) | pandas/plotting/_misc.py | def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notna(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# GH 14855
kwds.setdefault('edgecolors', 'none')
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes | def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_adjust(wspace=0, hspace=0)
mask = notna(df)
marker = _get_marker_compat(marker)
hist_kwds = hist_kwds or {}
density_kwds = density_kwds or {}
# GH 14855
kwds.setdefault('edgecolors', 'none')
boundaries_list = []
for a in df.columns:
values = df[a].values[mask[a].values]
rmin_, rmax_ = np.min(values), np.max(values)
rdelta_ext = (rmax_ - rmin_) * range_padding / 2.
boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext))
for i, a in zip(lrange(n), df.columns):
for j, b in zip(lrange(n), df.columns):
ax = axes[i, j]
if i == j:
values = df[a].values[mask[a].values]
# Deal with the diagonal by drawing a histogram there.
if diagonal == 'hist':
ax.hist(values, **hist_kwds)
elif diagonal in ('kde', 'density'):
from scipy.stats import gaussian_kde
y = values
gkde = gaussian_kde(y)
ind = np.linspace(y.min(), y.max(), 1000)
ax.plot(ind, gkde.evaluate(ind), **density_kwds)
ax.set_xlim(boundaries_list[i])
else:
common = (mask[a] & mask[b]).values
ax.scatter(df[b][common], df[a][common],
marker=marker, alpha=alpha, **kwds)
ax.set_xlim(boundaries_list[j])
ax.set_ylim(boundaries_list[i])
ax.set_xlabel(b)
ax.set_ylabel(a)
if j != 0:
ax.yaxis.set_visible(False)
if i != n - 1:
ax.xaxis.set_visible(False)
if len(df.columns) > 1:
lim1 = boundaries_list[0]
locs = axes[0][1].yaxis.get_majorticklocs()
locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])]
adj = (locs - lim1[0]) / (lim1[1] - lim1[0])
lim0 = axes[0][0].get_ylim()
adj = adj * (lim0[1] - lim0[0]) + lim0[0]
axes[0][0].yaxis.set_ticks(adj)
if np.all(locs == locs.astype(int)):
# if all ticks are int
locs = locs.astype(int)
axes[0][0].yaxis.set_ticklabels(locs)
_set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0)
return axes | [
"Draw",
"a",
"matrix",
"of",
"scatter",
"plots",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L14-L133 | [
"def",
"scatter_matrix",
"(",
"frame",
",",
"alpha",
"=",
"0.5",
",",
"figsize",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"grid",
"=",
"False",
",",
"diagonal",
"=",
"'hist'",
",",
"marker",
"=",
"'.'",
",",
"density_kwds",
"=",
"None",
",",
"hist_kwds",
"=",
"None",
",",
"range_padding",
"=",
"0.05",
",",
"*",
"*",
"kwds",
")",
":",
"df",
"=",
"frame",
".",
"_get_numeric_data",
"(",
")",
"n",
"=",
"df",
".",
"columns",
".",
"size",
"naxes",
"=",
"n",
"*",
"n",
"fig",
",",
"axes",
"=",
"_subplots",
"(",
"naxes",
"=",
"naxes",
",",
"figsize",
"=",
"figsize",
",",
"ax",
"=",
"ax",
",",
"squeeze",
"=",
"False",
")",
"# no gaps between subplots",
"fig",
".",
"subplots_adjust",
"(",
"wspace",
"=",
"0",
",",
"hspace",
"=",
"0",
")",
"mask",
"=",
"notna",
"(",
"df",
")",
"marker",
"=",
"_get_marker_compat",
"(",
"marker",
")",
"hist_kwds",
"=",
"hist_kwds",
"or",
"{",
"}",
"density_kwds",
"=",
"density_kwds",
"or",
"{",
"}",
"# GH 14855",
"kwds",
".",
"setdefault",
"(",
"'edgecolors'",
",",
"'none'",
")",
"boundaries_list",
"=",
"[",
"]",
"for",
"a",
"in",
"df",
".",
"columns",
":",
"values",
"=",
"df",
"[",
"a",
"]",
".",
"values",
"[",
"mask",
"[",
"a",
"]",
".",
"values",
"]",
"rmin_",
",",
"rmax_",
"=",
"np",
".",
"min",
"(",
"values",
")",
",",
"np",
".",
"max",
"(",
"values",
")",
"rdelta_ext",
"=",
"(",
"rmax_",
"-",
"rmin_",
")",
"*",
"range_padding",
"/",
"2.",
"boundaries_list",
".",
"append",
"(",
"(",
"rmin_",
"-",
"rdelta_ext",
",",
"rmax_",
"+",
"rdelta_ext",
")",
")",
"for",
"i",
",",
"a",
"in",
"zip",
"(",
"lrange",
"(",
"n",
")",
",",
"df",
".",
"columns",
")",
":",
"for",
"j",
",",
"b",
"in",
"zip",
"(",
"lrange",
"(",
"n",
")",
",",
"df",
".",
"columns",
")",
":",
"ax",
"=",
"axes",
"[",
"i",
",",
"j",
"]",
"if",
"i",
"==",
"j",
":",
"values",
"=",
"df",
"[",
"a",
"]",
".",
"values",
"[",
"mask",
"[",
"a",
"]",
".",
"values",
"]",
"# Deal with the diagonal by drawing a histogram there.",
"if",
"diagonal",
"==",
"'hist'",
":",
"ax",
".",
"hist",
"(",
"values",
",",
"*",
"*",
"hist_kwds",
")",
"elif",
"diagonal",
"in",
"(",
"'kde'",
",",
"'density'",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"gaussian_kde",
"y",
"=",
"values",
"gkde",
"=",
"gaussian_kde",
"(",
"y",
")",
"ind",
"=",
"np",
".",
"linspace",
"(",
"y",
".",
"min",
"(",
")",
",",
"y",
".",
"max",
"(",
")",
",",
"1000",
")",
"ax",
".",
"plot",
"(",
"ind",
",",
"gkde",
".",
"evaluate",
"(",
"ind",
")",
",",
"*",
"*",
"density_kwds",
")",
"ax",
".",
"set_xlim",
"(",
"boundaries_list",
"[",
"i",
"]",
")",
"else",
":",
"common",
"=",
"(",
"mask",
"[",
"a",
"]",
"&",
"mask",
"[",
"b",
"]",
")",
".",
"values",
"ax",
".",
"scatter",
"(",
"df",
"[",
"b",
"]",
"[",
"common",
"]",
",",
"df",
"[",
"a",
"]",
"[",
"common",
"]",
",",
"marker",
"=",
"marker",
",",
"alpha",
"=",
"alpha",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"set_xlim",
"(",
"boundaries_list",
"[",
"j",
"]",
")",
"ax",
".",
"set_ylim",
"(",
"boundaries_list",
"[",
"i",
"]",
")",
"ax",
".",
"set_xlabel",
"(",
"b",
")",
"ax",
".",
"set_ylabel",
"(",
"a",
")",
"if",
"j",
"!=",
"0",
":",
"ax",
".",
"yaxis",
".",
"set_visible",
"(",
"False",
")",
"if",
"i",
"!=",
"n",
"-",
"1",
":",
"ax",
".",
"xaxis",
".",
"set_visible",
"(",
"False",
")",
"if",
"len",
"(",
"df",
".",
"columns",
")",
">",
"1",
":",
"lim1",
"=",
"boundaries_list",
"[",
"0",
"]",
"locs",
"=",
"axes",
"[",
"0",
"]",
"[",
"1",
"]",
".",
"yaxis",
".",
"get_majorticklocs",
"(",
")",
"locs",
"=",
"locs",
"[",
"(",
"lim1",
"[",
"0",
"]",
"<=",
"locs",
")",
"&",
"(",
"locs",
"<=",
"lim1",
"[",
"1",
"]",
")",
"]",
"adj",
"=",
"(",
"locs",
"-",
"lim1",
"[",
"0",
"]",
")",
"/",
"(",
"lim1",
"[",
"1",
"]",
"-",
"lim1",
"[",
"0",
"]",
")",
"lim0",
"=",
"axes",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"get_ylim",
"(",
")",
"adj",
"=",
"adj",
"*",
"(",
"lim0",
"[",
"1",
"]",
"-",
"lim0",
"[",
"0",
"]",
")",
"+",
"lim0",
"[",
"0",
"]",
"axes",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"yaxis",
".",
"set_ticks",
"(",
"adj",
")",
"if",
"np",
".",
"all",
"(",
"locs",
"==",
"locs",
".",
"astype",
"(",
"int",
")",
")",
":",
"# if all ticks are int",
"locs",
"=",
"locs",
".",
"astype",
"(",
"int",
")",
"axes",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"yaxis",
".",
"set_ticklabels",
"(",
"locs",
")",
"_set_ticks_props",
"(",
"axes",
",",
"xlabelsize",
"=",
"8",
",",
"xrot",
"=",
"90",
",",
"ylabelsize",
"=",
"8",
",",
"yrot",
"=",
"0",
")",
"return",
"axes"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | radviz | Plot a multidimensional dataset in 2D.
Each Series in the DataFrame is represented as an evenly distributed
slice on a circle. Each data point is rendered in the circle according to
the value on each Series. Highly correlated `Series` in the `DataFrame`
are placed closer on the unit circle.
RadViz allows projecting an N-dimensional data set into a 2D space where the
influence of each dimension can be interpreted as a balance between the
influence of all dimensions.
More info available at the `original article
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
describing RadViz.
Parameters
----------
frame : `DataFrame`
Pandas object holding the data.
class_column : str
Column name containing the name of the data point category.
ax : :class:`matplotlib.axes.Axes`, optional
A plot instance to which to add the information.
color : list[str] or tuple[str], optional
Assign a color to each category. Example: ['blue', 'green'].
colormap : str or :class:`matplotlib.colors.Colormap`, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
kwds : optional
Options to pass to matplotlib scatter plotting method.
Returns
-------
:class:`matplotlib.axes.Axes`
See Also
--------
plotting.andrews_curves : Plot clustering visualization.
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6,
... 6.7, 4.6],
... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2,
... 3.3, 3.6],
... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4,
... 5.7, 1.0],
... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2,
... 2.1, 0.2],
... 'Category': ['virginica', 'virginica', 'setosa',
... 'virginica', 'virginica', 'versicolor',
... 'versicolor', 'setosa', 'virginica',
... 'setosa']
... })
>>> rad_viz = pd.plotting.radviz(df, 'Category') # doctest: +SKIP | pandas/plotting/_misc.py | def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""
Plot a multidimensional dataset in 2D.
Each Series in the DataFrame is represented as an evenly distributed
slice on a circle. Each data point is rendered in the circle according to
the value on each Series. Highly correlated `Series` in the `DataFrame`
are placed closer on the unit circle.
RadViz allows projecting an N-dimensional data set into a 2D space where the
influence of each dimension can be interpreted as a balance between the
influence of all dimensions.
More info available at the `original article
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
describing RadViz.
Parameters
----------
frame : `DataFrame`
Pandas object holding the data.
class_column : str
Column name containing the name of the data point category.
ax : :class:`matplotlib.axes.Axes`, optional
A plot instance to which to add the information.
color : list[str] or tuple[str], optional
Assign a color to each category. Example: ['blue', 'green'].
colormap : str or :class:`matplotlib.colors.Colormap`, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
kwds : optional
Options to pass to matplotlib scatter plotting method.
Returns
-------
:class:`matplotlib.axes.Axes`
See Also
--------
plotting.andrews_curves : Plot clustering visualization.
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6,
... 6.7, 4.6],
... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2,
... 3.3, 3.6],
... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4,
... 5.7, 1.0],
... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2,
... 2.1, 0.2],
... 'Category': ['virginica', 'virginica', 'setosa',
... 'virginica', 'virginica', 'versicolor',
... 'versicolor', 'setosa', 'virginica',
... 'setosa']
... })
>>> rad_viz = pd.plotting.radviz(df, 'Category') # doctest: +SKIP
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax | def radviz(frame, class_column, ax=None, color=None, colormap=None, **kwds):
"""
Plot a multidimensional dataset in 2D.
Each Series in the DataFrame is represented as an evenly distributed
slice on a circle. Each data point is rendered in the circle according to
the value on each Series. Highly correlated `Series` in the `DataFrame`
are placed closer on the unit circle.
RadViz allows projecting an N-dimensional data set into a 2D space where the
influence of each dimension can be interpreted as a balance between the
influence of all dimensions.
More info available at the `original article
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.889>`_
describing RadViz.
Parameters
----------
frame : `DataFrame`
Pandas object holding the data.
class_column : str
Column name containing the name of the data point category.
ax : :class:`matplotlib.axes.Axes`, optional
A plot instance to which to add the information.
color : list[str] or tuple[str], optional
Assign a color to each category. Example: ['blue', 'green'].
colormap : str or :class:`matplotlib.colors.Colormap`, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
kwds : optional
Options to pass to matplotlib scatter plotting method.
Returns
-------
:class:`matplotlib.axes.Axes`
See Also
--------
plotting.andrews_curves : Plot clustering visualization.
Examples
--------
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6,
... 6.7, 4.6],
... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2,
... 3.3, 3.6],
... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4,
... 5.7, 1.0],
... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2,
... 2.1, 0.2],
... 'Category': ['virginica', 'virginica', 'setosa',
... 'virginica', 'virginica', 'versicolor',
... 'versicolor', 'setosa', 'virginica',
... 'setosa']
... })
>>> rad_viz = pd.plotting.radviz(df, 'Category') # doctest: +SKIP
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def normalize(series):
a = min(series)
b = max(series)
return (series - a) / (b - a)
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
df = frame.drop(class_column, axis=1).apply(normalize)
if ax is None:
ax = plt.gca(xlim=[-1, 1], ylim=[-1, 1])
to_plot = {}
colors = _get_standard_colors(num_colors=len(classes), colormap=colormap,
color_type='random', color=color)
for kls in classes:
to_plot[kls] = [[], []]
m = len(frame.columns) - 1
s = np.array([(np.cos(t), np.sin(t))
for t in [2.0 * np.pi * (i / float(m))
for i in range(m)]])
for i in range(n):
row = df.iloc[i].values
row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1)
y = (s * row_).sum(axis=0) / row.sum()
kls = class_col.iat[i]
to_plot[kls][0].append(y[0])
to_plot[kls][1].append(y[1])
for i, kls in enumerate(classes):
ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i],
label=pprint_thing(kls), **kwds)
ax.legend()
ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor='none'))
for xy, name in zip(s, df.columns):
ax.add_patch(patches.Circle(xy, radius=0.025, facecolor='gray'))
if xy[0] < 0.0 and xy[1] < 0.0:
ax.text(xy[0] - 0.025, xy[1] - 0.025, name,
ha='right', va='top', size='small')
elif xy[0] < 0.0 and xy[1] >= 0.0:
ax.text(xy[0] - 0.025, xy[1] + 0.025, name,
ha='right', va='bottom', size='small')
elif xy[0] >= 0.0 and xy[1] < 0.0:
ax.text(xy[0] + 0.025, xy[1] - 0.025, name,
ha='left', va='top', size='small')
elif xy[0] >= 0.0 and xy[1] >= 0.0:
ax.text(xy[0] + 0.025, xy[1] + 0.025, name,
ha='left', va='bottom', size='small')
ax.axis('equal')
return ax | [
"Plot",
"a",
"multidimensional",
"dataset",
"in",
"2D",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L143-L266 | [
"def",
"radviz",
"(",
"frame",
",",
"class_column",
",",
"ax",
"=",
"None",
",",
"color",
"=",
"None",
",",
"colormap",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"matplotlib",
".",
"patches",
"as",
"patches",
"def",
"normalize",
"(",
"series",
")",
":",
"a",
"=",
"min",
"(",
"series",
")",
"b",
"=",
"max",
"(",
"series",
")",
"return",
"(",
"series",
"-",
"a",
")",
"/",
"(",
"b",
"-",
"a",
")",
"n",
"=",
"len",
"(",
"frame",
")",
"classes",
"=",
"frame",
"[",
"class_column",
"]",
".",
"drop_duplicates",
"(",
")",
"class_col",
"=",
"frame",
"[",
"class_column",
"]",
"df",
"=",
"frame",
".",
"drop",
"(",
"class_column",
",",
"axis",
"=",
"1",
")",
".",
"apply",
"(",
"normalize",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
"xlim",
"=",
"[",
"-",
"1",
",",
"1",
"]",
",",
"ylim",
"=",
"[",
"-",
"1",
",",
"1",
"]",
")",
"to_plot",
"=",
"{",
"}",
"colors",
"=",
"_get_standard_colors",
"(",
"num_colors",
"=",
"len",
"(",
"classes",
")",
",",
"colormap",
"=",
"colormap",
",",
"color_type",
"=",
"'random'",
",",
"color",
"=",
"color",
")",
"for",
"kls",
"in",
"classes",
":",
"to_plot",
"[",
"kls",
"]",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"m",
"=",
"len",
"(",
"frame",
".",
"columns",
")",
"-",
"1",
"s",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"np",
".",
"cos",
"(",
"t",
")",
",",
"np",
".",
"sin",
"(",
"t",
")",
")",
"for",
"t",
"in",
"[",
"2.0",
"*",
"np",
".",
"pi",
"*",
"(",
"i",
"/",
"float",
"(",
"m",
")",
")",
"for",
"i",
"in",
"range",
"(",
"m",
")",
"]",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"row",
"=",
"df",
".",
"iloc",
"[",
"i",
"]",
".",
"values",
"row_",
"=",
"np",
".",
"repeat",
"(",
"np",
".",
"expand_dims",
"(",
"row",
",",
"axis",
"=",
"1",
")",
",",
"2",
",",
"axis",
"=",
"1",
")",
"y",
"=",
"(",
"s",
"*",
"row_",
")",
".",
"sum",
"(",
"axis",
"=",
"0",
")",
"/",
"row",
".",
"sum",
"(",
")",
"kls",
"=",
"class_col",
".",
"iat",
"[",
"i",
"]",
"to_plot",
"[",
"kls",
"]",
"[",
"0",
"]",
".",
"append",
"(",
"y",
"[",
"0",
"]",
")",
"to_plot",
"[",
"kls",
"]",
"[",
"1",
"]",
".",
"append",
"(",
"y",
"[",
"1",
"]",
")",
"for",
"i",
",",
"kls",
"in",
"enumerate",
"(",
"classes",
")",
":",
"ax",
".",
"scatter",
"(",
"to_plot",
"[",
"kls",
"]",
"[",
"0",
"]",
",",
"to_plot",
"[",
"kls",
"]",
"[",
"1",
"]",
",",
"color",
"=",
"colors",
"[",
"i",
"]",
",",
"label",
"=",
"pprint_thing",
"(",
"kls",
")",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"legend",
"(",
")",
"ax",
".",
"add_patch",
"(",
"patches",
".",
"Circle",
"(",
"(",
"0.0",
",",
"0.0",
")",
",",
"radius",
"=",
"1.0",
",",
"facecolor",
"=",
"'none'",
")",
")",
"for",
"xy",
",",
"name",
"in",
"zip",
"(",
"s",
",",
"df",
".",
"columns",
")",
":",
"ax",
".",
"add_patch",
"(",
"patches",
".",
"Circle",
"(",
"xy",
",",
"radius",
"=",
"0.025",
",",
"facecolor",
"=",
"'gray'",
")",
")",
"if",
"xy",
"[",
"0",
"]",
"<",
"0.0",
"and",
"xy",
"[",
"1",
"]",
"<",
"0.0",
":",
"ax",
".",
"text",
"(",
"xy",
"[",
"0",
"]",
"-",
"0.025",
",",
"xy",
"[",
"1",
"]",
"-",
"0.025",
",",
"name",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'top'",
",",
"size",
"=",
"'small'",
")",
"elif",
"xy",
"[",
"0",
"]",
"<",
"0.0",
"and",
"xy",
"[",
"1",
"]",
">=",
"0.0",
":",
"ax",
".",
"text",
"(",
"xy",
"[",
"0",
"]",
"-",
"0.025",
",",
"xy",
"[",
"1",
"]",
"+",
"0.025",
",",
"name",
",",
"ha",
"=",
"'right'",
",",
"va",
"=",
"'bottom'",
",",
"size",
"=",
"'small'",
")",
"elif",
"xy",
"[",
"0",
"]",
">=",
"0.0",
"and",
"xy",
"[",
"1",
"]",
"<",
"0.0",
":",
"ax",
".",
"text",
"(",
"xy",
"[",
"0",
"]",
"+",
"0.025",
",",
"xy",
"[",
"1",
"]",
"-",
"0.025",
",",
"name",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'top'",
",",
"size",
"=",
"'small'",
")",
"elif",
"xy",
"[",
"0",
"]",
">=",
"0.0",
"and",
"xy",
"[",
"1",
"]",
">=",
"0.0",
":",
"ax",
".",
"text",
"(",
"xy",
"[",
"0",
"]",
"+",
"0.025",
",",
"xy",
"[",
"1",
"]",
"+",
"0.025",
",",
"name",
",",
"ha",
"=",
"'left'",
",",
"va",
"=",
"'bottom'",
",",
"size",
"=",
"'small'",
")",
"ax",
".",
"axis",
"(",
"'equal'",
")",
"return",
"ax"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | andrews_curves | Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
colormap=None, **kwds):
"""
Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax | def andrews_curves(frame, class_column, ax=None, samples=200, color=None,
colormap=None, **kwds):
"""
Generate a matplotlib plot of Andrews curves, for visualising clusters of
multivariate data.
Andrews curves have the functional form:
f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) +
x_4 sin(2t) + x_5 cos(2t) + ...
Where x coefficients correspond to the values of each dimension and t is
linearly spaced between -pi and +pi. Each row of frame then corresponds to
a single curve.
Parameters
----------
frame : DataFrame
Data to be plotted, preferably normalized to (0.0, 1.0)
class_column : Name of the column containing class names
ax : matplotlib axes object, default None
samples : Number of points to plot in each curve
color : list or tuple, optional
Colors to use for the different classes
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
"""
from math import sqrt, pi
import matplotlib.pyplot as plt
def function(amplitudes):
def f(t):
x1 = amplitudes[0]
result = x1 / sqrt(2.0)
# Take the rest of the coefficients and resize them
# appropriately. Take a copy of amplitudes as otherwise numpy
# deletes the element from amplitudes itself.
coeffs = np.delete(np.copy(amplitudes), 0)
coeffs.resize(int((coeffs.size + 1) / 2), 2)
# Generate the harmonics and arguments for the sin and cos
# functions.
harmonics = np.arange(0, coeffs.shape[0]) + 1
trig_args = np.outer(harmonics, t)
result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) +
coeffs[:, 1, np.newaxis] * np.cos(trig_args),
axis=0)
return result
return f
n = len(frame)
class_col = frame[class_column]
classes = frame[class_column].drop_duplicates()
df = frame.drop(class_column, axis=1)
t = np.linspace(-pi, pi, samples)
used_legends = set()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
colors = dict(zip(classes, color_values))
if ax is None:
ax = plt.gca(xlim=(-pi, pi))
for i in range(n):
row = df.iloc[i].values
f = function(row)
y = f(t)
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(t, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(t, y, color=colors[kls], **kwds)
ax.legend(loc='upper right')
ax.grid()
return ax | [
"Generate",
"a",
"matplotlib",
"plot",
"of",
"Andrews",
"curves",
"for",
"visualising",
"clusters",
"of",
"multivariate",
"data",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L270-L356 | [
"def",
"andrews_curves",
"(",
"frame",
",",
"class_column",
",",
"ax",
"=",
"None",
",",
"samples",
"=",
"200",
",",
"color",
"=",
"None",
",",
"colormap",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"from",
"math",
"import",
"sqrt",
",",
"pi",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"def",
"function",
"(",
"amplitudes",
")",
":",
"def",
"f",
"(",
"t",
")",
":",
"x1",
"=",
"amplitudes",
"[",
"0",
"]",
"result",
"=",
"x1",
"/",
"sqrt",
"(",
"2.0",
")",
"# Take the rest of the coefficients and resize them",
"# appropriately. Take a copy of amplitudes as otherwise numpy",
"# deletes the element from amplitudes itself.",
"coeffs",
"=",
"np",
".",
"delete",
"(",
"np",
".",
"copy",
"(",
"amplitudes",
")",
",",
"0",
")",
"coeffs",
".",
"resize",
"(",
"int",
"(",
"(",
"coeffs",
".",
"size",
"+",
"1",
")",
"/",
"2",
")",
",",
"2",
")",
"# Generate the harmonics and arguments for the sin and cos",
"# functions.",
"harmonics",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"coeffs",
".",
"shape",
"[",
"0",
"]",
")",
"+",
"1",
"trig_args",
"=",
"np",
".",
"outer",
"(",
"harmonics",
",",
"t",
")",
"result",
"+=",
"np",
".",
"sum",
"(",
"coeffs",
"[",
":",
",",
"0",
",",
"np",
".",
"newaxis",
"]",
"*",
"np",
".",
"sin",
"(",
"trig_args",
")",
"+",
"coeffs",
"[",
":",
",",
"1",
",",
"np",
".",
"newaxis",
"]",
"*",
"np",
".",
"cos",
"(",
"trig_args",
")",
",",
"axis",
"=",
"0",
")",
"return",
"result",
"return",
"f",
"n",
"=",
"len",
"(",
"frame",
")",
"class_col",
"=",
"frame",
"[",
"class_column",
"]",
"classes",
"=",
"frame",
"[",
"class_column",
"]",
".",
"drop_duplicates",
"(",
")",
"df",
"=",
"frame",
".",
"drop",
"(",
"class_column",
",",
"axis",
"=",
"1",
")",
"t",
"=",
"np",
".",
"linspace",
"(",
"-",
"pi",
",",
"pi",
",",
"samples",
")",
"used_legends",
"=",
"set",
"(",
")",
"color_values",
"=",
"_get_standard_colors",
"(",
"num_colors",
"=",
"len",
"(",
"classes",
")",
",",
"colormap",
"=",
"colormap",
",",
"color_type",
"=",
"'random'",
",",
"color",
"=",
"color",
")",
"colors",
"=",
"dict",
"(",
"zip",
"(",
"classes",
",",
"color_values",
")",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
"xlim",
"=",
"(",
"-",
"pi",
",",
"pi",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"row",
"=",
"df",
".",
"iloc",
"[",
"i",
"]",
".",
"values",
"f",
"=",
"function",
"(",
"row",
")",
"y",
"=",
"f",
"(",
"t",
")",
"kls",
"=",
"class_col",
".",
"iat",
"[",
"i",
"]",
"label",
"=",
"pprint_thing",
"(",
"kls",
")",
"if",
"label",
"not",
"in",
"used_legends",
":",
"used_legends",
".",
"add",
"(",
"label",
")",
"ax",
".",
"plot",
"(",
"t",
",",
"y",
",",
"color",
"=",
"colors",
"[",
"kls",
"]",
",",
"label",
"=",
"label",
",",
"*",
"*",
"kwds",
")",
"else",
":",
"ax",
".",
"plot",
"(",
"t",
",",
"y",
",",
"color",
"=",
"colors",
"[",
"kls",
"]",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
")",
"ax",
".",
"grid",
"(",
")",
"return",
"ax"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
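A minimal usage sketch for the andrews_curves record above; it assumes only that pandas, numpy and matplotlib are installed, and the frame, column names and class labels are invented for illustration.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# A small frame normalized to (0.0, 1.0), as the docstring recommends,
# plus a class column used to colour the curves.
rng = np.random.default_rng(0)
df = pd.DataFrame({'a': rng.random(30),
                   'b': rng.random(30),
                   'c': rng.random(30),
                   'Name': ['x'] * 15 + ['y'] * 15})
ax = pd.plotting.andrews_curves(df, 'Name', samples=200)
plt.show()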
train | bootstrap_plot | Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic
by relying on random sampling with replacement [1]_. This function will
generate bootstrapping plots for mean, median and mid-range statistics
for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters
----------
series : pandas.Series
Pandas Series from where to get the samplings for the bootstrapping.
fig : matplotlib.figure.Figure, default None
If given, it will use the `fig` reference for plotting instead of
creating a new one with default parameters.
size : int, default 50
Number of data points to consider during each sampling. It must be
less than or equal to the length of the `series`.
samples : int, default 500
Number of times the bootstrap procedure is performed.
**kwds :
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.figure.Figure
Matplotlib figure.
See Also
--------
DataFrame.plot : Basic plotting for DataFrame objects.
Series.plot : Basic plotting for Series objects.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series(np.random.uniform(size=100))
>>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP | pandas/plotting/_misc.py | def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""
Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic
by relying on random sampling with replacement [1]_. This function will
generate bootstrapping plots for mean, median and mid-range statistics
for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters
----------
series : pandas.Series
Pandas Series from where to get the samplings for the bootstrapping.
fig : matplotlib.figure.Figure, default None
If given, it will use the `fig` reference for plotting instead of
creating a new one with default parameters.
size : int, default 50
Number of data points to consider during each sampling. It must be
less than or equal to the length of the `series`.
samples : int, default 500
Number of times the bootstrap procedure is performed.
**kwds :
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.figure.Figure
Matplotlib figure.
See Also
--------
DataFrame.plot : Basic plotting for DataFrame objects.
Series.plot : Basic plotting for Series objects.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series(np.random.uniform(size=100))
>>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig | def bootstrap_plot(series, fig=None, size=50, samples=500, **kwds):
"""
Bootstrap plot on mean, median and mid-range statistics.
The bootstrap plot is used to estimate the uncertainty of a statistic
by relying on random sampling with replacement [1]_. This function will
generate bootstrapping plots for mean, median and mid-range statistics
for the given number of samples of the given size.
.. [1] "Bootstrapping (statistics)" in \
https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29
Parameters
----------
series : pandas.Series
Pandas Series from where to get the samplings for the bootstrapping.
fig : matplotlib.figure.Figure, default None
If given, it will use the `fig` reference for plotting instead of
creating a new one with default parameters.
size : int, default 50
Number of data points to consider during each sampling. It must be
less than or equal to the length of the `series`.
samples : int, default 500
Number of times the bootstrap procedure is performed.
**kwds :
Options to pass to matplotlib plotting method.
Returns
-------
matplotlib.figure.Figure
Matplotlib figure.
See Also
--------
DataFrame.plot : Basic plotting for DataFrame objects.
Series.plot : Basic plotting for Series objects.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series(np.random.uniform(size=100))
>>> fig = pd.plotting.bootstrap_plot(s) # doctest: +SKIP
"""
import random
import matplotlib.pyplot as plt
# random.sample(ndarray, int) fails on python 3.3, sigh
data = list(series.values)
samplings = [random.sample(data, size) for _ in range(samples)]
means = np.array([np.mean(sampling) for sampling in samplings])
medians = np.array([np.median(sampling) for sampling in samplings])
midranges = np.array([(min(sampling) + max(sampling)) * 0.5
for sampling in samplings])
if fig is None:
fig = plt.figure()
x = lrange(samples)
axes = []
ax1 = fig.add_subplot(2, 3, 1)
ax1.set_xlabel("Sample")
axes.append(ax1)
ax1.plot(x, means, **kwds)
ax2 = fig.add_subplot(2, 3, 2)
ax2.set_xlabel("Sample")
axes.append(ax2)
ax2.plot(x, medians, **kwds)
ax3 = fig.add_subplot(2, 3, 3)
ax3.set_xlabel("Sample")
axes.append(ax3)
ax3.plot(x, midranges, **kwds)
ax4 = fig.add_subplot(2, 3, 4)
ax4.set_xlabel("Mean")
axes.append(ax4)
ax4.hist(means, **kwds)
ax5 = fig.add_subplot(2, 3, 5)
ax5.set_xlabel("Median")
axes.append(ax5)
ax5.hist(medians, **kwds)
ax6 = fig.add_subplot(2, 3, 6)
ax6.set_xlabel("Midrange")
axes.append(ax6)
ax6.hist(midranges, **kwds)
for axis in axes:
plt.setp(axis.get_xticklabels(), fontsize=8)
plt.setp(axis.get_yticklabels(), fontsize=8)
return fig | [
"Bootstrap",
"plot",
"on",
"mean",
"median",
"and",
"mid",
"-",
"range",
"statistics",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L359-L447 | [
"def",
"bootstrap_plot",
"(",
"series",
",",
"fig",
"=",
"None",
",",
"size",
"=",
"50",
",",
"samples",
"=",
"500",
",",
"*",
"*",
"kwds",
")",
":",
"import",
"random",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"# random.sample(ndarray, int) fails on python 3.3, sigh",
"data",
"=",
"list",
"(",
"series",
".",
"values",
")",
"samplings",
"=",
"[",
"random",
".",
"sample",
"(",
"data",
",",
"size",
")",
"for",
"_",
"in",
"range",
"(",
"samples",
")",
"]",
"means",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"mean",
"(",
"sampling",
")",
"for",
"sampling",
"in",
"samplings",
"]",
")",
"medians",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"median",
"(",
"sampling",
")",
"for",
"sampling",
"in",
"samplings",
"]",
")",
"midranges",
"=",
"np",
".",
"array",
"(",
"[",
"(",
"min",
"(",
"sampling",
")",
"+",
"max",
"(",
"sampling",
")",
")",
"*",
"0.5",
"for",
"sampling",
"in",
"samplings",
"]",
")",
"if",
"fig",
"is",
"None",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"x",
"=",
"lrange",
"(",
"samples",
")",
"axes",
"=",
"[",
"]",
"ax1",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"3",
",",
"1",
")",
"ax1",
".",
"set_xlabel",
"(",
"\"Sample\"",
")",
"axes",
".",
"append",
"(",
"ax1",
")",
"ax1",
".",
"plot",
"(",
"x",
",",
"means",
",",
"*",
"*",
"kwds",
")",
"ax2",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"3",
",",
"2",
")",
"ax2",
".",
"set_xlabel",
"(",
"\"Sample\"",
")",
"axes",
".",
"append",
"(",
"ax2",
")",
"ax2",
".",
"plot",
"(",
"x",
",",
"medians",
",",
"*",
"*",
"kwds",
")",
"ax3",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"3",
",",
"3",
")",
"ax3",
".",
"set_xlabel",
"(",
"\"Sample\"",
")",
"axes",
".",
"append",
"(",
"ax3",
")",
"ax3",
".",
"plot",
"(",
"x",
",",
"midranges",
",",
"*",
"*",
"kwds",
")",
"ax4",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"3",
",",
"4",
")",
"ax4",
".",
"set_xlabel",
"(",
"\"Mean\"",
")",
"axes",
".",
"append",
"(",
"ax4",
")",
"ax4",
".",
"hist",
"(",
"means",
",",
"*",
"*",
"kwds",
")",
"ax5",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"3",
",",
"5",
")",
"ax5",
".",
"set_xlabel",
"(",
"\"Median\"",
")",
"axes",
".",
"append",
"(",
"ax5",
")",
"ax5",
".",
"hist",
"(",
"medians",
",",
"*",
"*",
"kwds",
")",
"ax6",
"=",
"fig",
".",
"add_subplot",
"(",
"2",
",",
"3",
",",
"6",
")",
"ax6",
".",
"set_xlabel",
"(",
"\"Midrange\"",
")",
"axes",
".",
"append",
"(",
"ax6",
")",
"ax6",
".",
"hist",
"(",
"midranges",
",",
"*",
"*",
"kwds",
")",
"for",
"axis",
"in",
"axes",
":",
"plt",
".",
"setp",
"(",
"axis",
".",
"get_xticklabels",
"(",
")",
",",
"fontsize",
"=",
"8",
")",
"plt",
".",
"setp",
"(",
"axis",
".",
"get_yticklabels",
"(",
")",
",",
"fontsize",
"=",
"8",
")",
"return",
"fig"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
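A hedged usage sketch for bootstrap_plot (illustrative, not from the row); pandas, numpy and matplotlib are assumed.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

s = pd.Series(np.random.uniform(size=100))
# size must not exceed len(s): each sampling is drawn with random.sample,
# which cannot take more points than the population contains.
fig = pd.plotting.bootstrap_plot(s, size=50, samples=500)
plt.show()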
train | parallel_coordinates | Parallel coordinates plotting.
Parameters
----------
frame : DataFrame
class_column : str
Column name containing class names
cols : list, optional
A list of column names to use
ax : matplotlib.axis, optional
matplotlib axis object
color : list or tuple, optional
Colors to use for the different classes
use_columns : bool, optional
If true, columns will be used as xticks
xticks : list or tuple, optional
A list of values to use for xticks
colormap : str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines : bool, optional
If true, vertical lines will be added at each xtick
axvlines_kwds : keywords, optional
Options to be passed to axvline method for vertical lines
sort_labels : bool, default False
Sort class_column labels, useful when assigning colors
.. versionadded:: 0.20.0
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
Examples
--------
>>> from matplotlib import pyplot as plt
>>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> pd.plotting.parallel_coordinates(
df, 'Name',
color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show() | pandas/plotting/_misc.py | def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, axvlines_kwds=None, sort_labels=False,
**kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame : DataFrame
class_column : str
Column name containing class names
cols : list, optional
A list of column names to use
ax : matplotlib.axis, optional
matplotlib axis object
color : list or tuple, optional
Colors to use for the different classes
use_columns : bool, optional
If true, columns will be used as xticks
xticks : list or tuple, optional
A list of values to use for xticks
colormap : str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines : bool, optional
If true, vertical lines will be added at each xtick
axvlines_kwds : keywords, optional
Options to be passed to axvline method for vertical lines
sort_labels : bool, default False
Sort class_column labels, useful when assigning colors
.. versionadded:: 0.20.0
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
Examples
--------
>>> from matplotlib import pyplot as plt
>>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> pd.plotting.parallel_coordinates(
df, 'Name',
color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
if axvlines_kwds is None:
axvlines_kwds = {'linewidth': 1, 'color': 'black'}
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set()
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
if sort_labels:
classes = sorted(classes)
color_values = sorted(color_values)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, **axvlines_kwds)
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax | def parallel_coordinates(frame, class_column, cols=None, ax=None, color=None,
use_columns=False, xticks=None, colormap=None,
axvlines=True, axvlines_kwds=None, sort_labels=False,
**kwds):
"""Parallel coordinates plotting.
Parameters
----------
frame : DataFrame
class_column : str
Column name containing class names
cols : list, optional
A list of column names to use
ax : matplotlib.axis, optional
matplotlib axis object
color : list or tuple, optional
Colors to use for the different classes
use_columns : bool, optional
If true, columns will be used as xticks
xticks : list or tuple, optional
A list of values to use for xticks
colormap : str or matplotlib colormap, default None
Colormap to use for line colors.
axvlines : bool, optional
If true, vertical lines will be added at each xtick
axvlines_kwds : keywords, optional
Options to be passed to axvline method for vertical lines
sort_labels : bool, default False
Sort class_column labels, useful when assigning colors
.. versionadded:: 0.20.0
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
Examples
--------
>>> from matplotlib import pyplot as plt
>>> df = pd.read_csv('https://raw.github.com/pandas-dev/pandas/master'
'/pandas/tests/data/iris.csv')
>>> pd.plotting.parallel_coordinates(
df, 'Name',
color=('#556270', '#4ECDC4', '#C7F464'))
>>> plt.show()
"""
if axvlines_kwds is None:
axvlines_kwds = {'linewidth': 1, 'color': 'black'}
import matplotlib.pyplot as plt
n = len(frame)
classes = frame[class_column].drop_duplicates()
class_col = frame[class_column]
if cols is None:
df = frame.drop(class_column, axis=1)
else:
df = frame[cols]
used_legends = set()
ncols = len(df.columns)
# determine values to use for xticks
if use_columns is True:
if not np.all(np.isreal(list(df.columns))):
raise ValueError('Columns must be numeric to be used as xticks')
x = df.columns
elif xticks is not None:
if not np.all(np.isreal(xticks)):
raise ValueError('xticks specified must be numeric')
elif len(xticks) != ncols:
raise ValueError('Length of xticks must match number of columns')
x = xticks
else:
x = lrange(ncols)
if ax is None:
ax = plt.gca()
color_values = _get_standard_colors(num_colors=len(classes),
colormap=colormap, color_type='random',
color=color)
if sort_labels:
classes = sorted(classes)
color_values = sorted(color_values)
colors = dict(zip(classes, color_values))
for i in range(n):
y = df.iloc[i].values
kls = class_col.iat[i]
label = pprint_thing(kls)
if label not in used_legends:
used_legends.add(label)
ax.plot(x, y, color=colors[kls], label=label, **kwds)
else:
ax.plot(x, y, color=colors[kls], **kwds)
if axvlines:
for i in x:
ax.axvline(i, **axvlines_kwds)
ax.set_xticks(x)
ax.set_xticklabels(df.columns)
ax.set_xlim(x[0], x[-1])
ax.legend(loc='upper right')
ax.grid()
return ax | [
"Parallel",
"coordinates",
"plotting",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L452-L563 | [
"def",
"parallel_coordinates",
"(",
"frame",
",",
"class_column",
",",
"cols",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"color",
"=",
"None",
",",
"use_columns",
"=",
"False",
",",
"xticks",
"=",
"None",
",",
"colormap",
"=",
"None",
",",
"axvlines",
"=",
"True",
",",
"axvlines_kwds",
"=",
"None",
",",
"sort_labels",
"=",
"False",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"axvlines_kwds",
"is",
"None",
":",
"axvlines_kwds",
"=",
"{",
"'linewidth'",
":",
"1",
",",
"'color'",
":",
"'black'",
"}",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"n",
"=",
"len",
"(",
"frame",
")",
"classes",
"=",
"frame",
"[",
"class_column",
"]",
".",
"drop_duplicates",
"(",
")",
"class_col",
"=",
"frame",
"[",
"class_column",
"]",
"if",
"cols",
"is",
"None",
":",
"df",
"=",
"frame",
".",
"drop",
"(",
"class_column",
",",
"axis",
"=",
"1",
")",
"else",
":",
"df",
"=",
"frame",
"[",
"cols",
"]",
"used_legends",
"=",
"set",
"(",
")",
"ncols",
"=",
"len",
"(",
"df",
".",
"columns",
")",
"# determine values to use for xticks",
"if",
"use_columns",
"is",
"True",
":",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"isreal",
"(",
"list",
"(",
"df",
".",
"columns",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Columns must be numeric to be used as xticks'",
")",
"x",
"=",
"df",
".",
"columns",
"elif",
"xticks",
"is",
"not",
"None",
":",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"isreal",
"(",
"xticks",
")",
")",
":",
"raise",
"ValueError",
"(",
"'xticks specified must be numeric'",
")",
"elif",
"len",
"(",
"xticks",
")",
"!=",
"ncols",
":",
"raise",
"ValueError",
"(",
"'Length of xticks must match number of columns'",
")",
"x",
"=",
"xticks",
"else",
":",
"x",
"=",
"lrange",
"(",
"ncols",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"color_values",
"=",
"_get_standard_colors",
"(",
"num_colors",
"=",
"len",
"(",
"classes",
")",
",",
"colormap",
"=",
"colormap",
",",
"color_type",
"=",
"'random'",
",",
"color",
"=",
"color",
")",
"if",
"sort_labels",
":",
"classes",
"=",
"sorted",
"(",
"classes",
")",
"color_values",
"=",
"sorted",
"(",
"color_values",
")",
"colors",
"=",
"dict",
"(",
"zip",
"(",
"classes",
",",
"color_values",
")",
")",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"y",
"=",
"df",
".",
"iloc",
"[",
"i",
"]",
".",
"values",
"kls",
"=",
"class_col",
".",
"iat",
"[",
"i",
"]",
"label",
"=",
"pprint_thing",
"(",
"kls",
")",
"if",
"label",
"not",
"in",
"used_legends",
":",
"used_legends",
".",
"add",
"(",
"label",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"colors",
"[",
"kls",
"]",
",",
"label",
"=",
"label",
",",
"*",
"*",
"kwds",
")",
"else",
":",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"colors",
"[",
"kls",
"]",
",",
"*",
"*",
"kwds",
")",
"if",
"axvlines",
":",
"for",
"i",
"in",
"x",
":",
"ax",
".",
"axvline",
"(",
"i",
",",
"*",
"*",
"axvlines_kwds",
")",
"ax",
".",
"set_xticks",
"(",
"x",
")",
"ax",
".",
"set_xticklabels",
"(",
"df",
".",
"columns",
")",
"ax",
".",
"set_xlim",
"(",
"x",
"[",
"0",
"]",
",",
"x",
"[",
"-",
"1",
"]",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
")",
"ax",
".",
"grid",
"(",
")",
"return",
"ax"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
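An illustrative call to parallel_coordinates under the same assumptions (invented frame and labels; pandas, numpy and matplotlib installed).

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(2)
df = pd.DataFrame(rng.random((30, 4)), columns=list('abcd'))
df['Name'] = ['x'] * 15 + ['y'] * 15   # this is the class_column argument
ax = pd.plotting.parallel_coordinates(df, 'Name',
                                      color=('#556270', '#4ECDC4'))
plt.show()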
train | lag_plot | Lag plot for time series.
Parameters
----------
series : Time series
lag : lag of the scatter plot, default 1
ax : Matplotlib axis object, optional
kwds : Matplotlib scatter method keyword arguments, optional
Returns
-------
:class:`matplotlib.axes.Axes` | pandas/plotting/_misc.py | def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters
----------
series : Time series
lag : lag of the scatter plot, default 1
ax : Matplotlib axis object, optional
kwds : Matplotlib scatter method keyword arguments, optional
Returns
-------
:class:`matplotlib.axes.Axes`
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + {lag})".format(lag=lag))
ax.scatter(y1, y2, **kwds)
return ax | def lag_plot(series, lag=1, ax=None, **kwds):
"""Lag plot for time series.
Parameters
----------
series : Time series
lag : lag of the scatter plot, default 1
ax : Matplotlib axis object, optional
kwds : Matplotlib scatter method keyword arguments, optional
Returns
-------
:class:`matplotlib.axes.Axes`
"""
import matplotlib.pyplot as plt
# workaround because `c='b'` is hardcoded in matplotlib's scatter method
kwds.setdefault('c', plt.rcParams['patch.facecolor'])
data = series.values
y1 = data[:-lag]
y2 = data[lag:]
if ax is None:
ax = plt.gca()
ax.set_xlabel("y(t)")
ax.set_ylabel("y(t + {lag})".format(lag=lag))
ax.scatter(y1, y2, **kwds)
return ax | [
"Lag",
"plot",
"for",
"time",
"series",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L566-L593 | [
"def",
"lag_plot",
"(",
"series",
",",
"lag",
"=",
"1",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"# workaround because `c='b'` is hardcoded in matplotlibs scatter method",
"kwds",
".",
"setdefault",
"(",
"'c'",
",",
"plt",
".",
"rcParams",
"[",
"'patch.facecolor'",
"]",
")",
"data",
"=",
"series",
".",
"values",
"y1",
"=",
"data",
"[",
":",
"-",
"lag",
"]",
"y2",
"=",
"data",
"[",
"lag",
":",
"]",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"set_xlabel",
"(",
"\"y(t)\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"y(t + {lag})\"",
".",
"format",
"(",
"lag",
"=",
"lag",
")",
")",
"ax",
".",
"scatter",
"(",
"y1",
",",
"y2",
",",
"*",
"*",
"kwds",
")",
"return",
"ax"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
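A quick lag_plot sketch on invented data; the rolling mean is only there so that lag-1 structure is visible, unlike white noise, which fills the plane.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
s = pd.Series(rng.normal(size=500)).rolling(5).mean().dropna()
ax = pd.plotting.lag_plot(s, lag=1)
plt.show()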
train | autocorrelation_plot | Autocorrelation plot for time series.
Parameters
----------
series : Time series
ax : Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes` | pandas/plotting/_misc.py | def autocorrelation_plot(series, ax=None, **kwds):
"""
Autocorrelation plot for time series.
Parameters
----------
series : Time series
ax : Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) *
(data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax | def autocorrelation_plot(series, ax=None, **kwds):
"""
Autocorrelation plot for time series.
Parameters
----------
series : Time series
ax : Matplotlib axis object, optional
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
:class:`matplotlib.axes.Axes`
"""
import matplotlib.pyplot as plt
n = len(series)
data = np.asarray(series)
if ax is None:
ax = plt.gca(xlim=(1, n), ylim=(-1.0, 1.0))
mean = np.mean(data)
c0 = np.sum((data - mean) ** 2) / float(n)
def r(h):
return ((data[:n - h] - mean) *
(data[h:] - mean)).sum() / float(n) / c0
x = np.arange(n) + 1
y = lmap(r, x)
z95 = 1.959963984540054
z99 = 2.5758293035489004
ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')
ax.axhline(y=z95 / np.sqrt(n), color='grey')
ax.axhline(y=0.0, color='black')
ax.axhline(y=-z95 / np.sqrt(n), color='grey')
ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')
ax.set_xlabel("Lag")
ax.set_ylabel("Autocorrelation")
ax.plot(x, y, **kwds)
if 'label' in kwds:
ax.legend()
ax.grid()
return ax | [
"Autocorrelation",
"plot",
"for",
"time",
"series",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_misc.py#L596-L637 | [
"def",
"autocorrelation_plot",
"(",
"series",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"n",
"=",
"len",
"(",
"series",
")",
"data",
"=",
"np",
".",
"asarray",
"(",
"series",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
"xlim",
"=",
"(",
"1",
",",
"n",
")",
",",
"ylim",
"=",
"(",
"-",
"1.0",
",",
"1.0",
")",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"data",
")",
"c0",
"=",
"np",
".",
"sum",
"(",
"(",
"data",
"-",
"mean",
")",
"**",
"2",
")",
"/",
"float",
"(",
"n",
")",
"def",
"r",
"(",
"h",
")",
":",
"return",
"(",
"(",
"data",
"[",
":",
"n",
"-",
"h",
"]",
"-",
"mean",
")",
"*",
"(",
"data",
"[",
"h",
":",
"]",
"-",
"mean",
")",
")",
".",
"sum",
"(",
")",
"/",
"float",
"(",
"n",
")",
"/",
"c0",
"x",
"=",
"np",
".",
"arange",
"(",
"n",
")",
"+",
"1",
"y",
"=",
"lmap",
"(",
"r",
",",
"x",
")",
"z95",
"=",
"1.959963984540054",
"z99",
"=",
"2.5758293035489004",
"ax",
".",
"axhline",
"(",
"y",
"=",
"z99",
"/",
"np",
".",
"sqrt",
"(",
"n",
")",
",",
"linestyle",
"=",
"'--'",
",",
"color",
"=",
"'grey'",
")",
"ax",
".",
"axhline",
"(",
"y",
"=",
"z95",
"/",
"np",
".",
"sqrt",
"(",
"n",
")",
",",
"color",
"=",
"'grey'",
")",
"ax",
".",
"axhline",
"(",
"y",
"=",
"0.0",
",",
"color",
"=",
"'black'",
")",
"ax",
".",
"axhline",
"(",
"y",
"=",
"-",
"z95",
"/",
"np",
".",
"sqrt",
"(",
"n",
")",
",",
"color",
"=",
"'grey'",
")",
"ax",
".",
"axhline",
"(",
"y",
"=",
"-",
"z99",
"/",
"np",
".",
"sqrt",
"(",
"n",
")",
",",
"linestyle",
"=",
"'--'",
",",
"color",
"=",
"'grey'",
")",
"ax",
".",
"set_xlabel",
"(",
"\"Lag\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"Autocorrelation\"",
")",
"ax",
".",
"plot",
"(",
"x",
",",
"y",
",",
"*",
"*",
"kwds",
")",
"if",
"'label'",
"in",
"kwds",
":",
"ax",
".",
"legend",
"(",
")",
"ax",
".",
"grid",
"(",
")",
"return",
"ax"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
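An autocorrelation_plot sketch on an invented noisy sine wave; the grey solid and dashed lines the function draws are the +/- z95/sqrt(n) and +/- z99/sqrt(n) confidence bands hard-coded above.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

t = np.linspace(0, 10 * np.pi, 400)
s = pd.Series(np.sin(t) + np.random.normal(scale=0.2, size=t.size))
ax = pd.plotting.autocorrelation_plot(s)  # periodic signal -> oscillating r(h)
plt.show()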
train | _any_pandas_objects | Check a sequence of terms for instances of PandasObject. | pandas/core/computation/align.py | def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms) | def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms) | [
"Check",
"a",
"sequence",
"of",
"terms",
"for",
"instances",
"of",
"PandasObject",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/align.py#L36-L39 | [
"def",
"_any_pandas_objects",
"(",
"terms",
")",
":",
"return",
"any",
"(",
"isinstance",
"(",
"term",
".",
"value",
",",
"pd",
".",
"core",
".",
"generic",
".",
"PandasObject",
")",
"for",
"term",
"in",
"terms",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
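A stand-alone sketch of the same check; FakeTerm is a hypothetical stand-in for pandas' internal Term class, since only the .value attribute matters to the helper.

import pandas as pd

class FakeTerm:  # hypothetical: mimics the .value attribute of a Term
    def __init__(self, value):
        self.value = value

terms = [FakeTerm(3), FakeTerm(pd.Series([1, 2]))]
print(any(isinstance(t.value, pd.core.generic.PandasObject)
          for t in terms))  # True -- one term wraps a Series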
train | _align | Align a set of terms | pandas/core/computation/align.py | def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes | def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(com.flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes | [
"Align",
"a",
"set",
"of",
"terms"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/align.py#L114-L132 | [
"def",
"_align",
"(",
"terms",
")",
":",
"try",
":",
"# flatten the parse tree (a nested list, really)",
"terms",
"=",
"list",
"(",
"com",
".",
"flatten",
"(",
"terms",
")",
")",
"except",
"TypeError",
":",
"# can't iterate so it must just be a constant or single variable",
"if",
"isinstance",
"(",
"terms",
".",
"value",
",",
"pd",
".",
"core",
".",
"generic",
".",
"NDFrame",
")",
":",
"typ",
"=",
"type",
"(",
"terms",
".",
"value",
")",
"return",
"typ",
",",
"_zip_axes_from_type",
"(",
"typ",
",",
"terms",
".",
"value",
".",
"axes",
")",
"return",
"np",
".",
"result_type",
"(",
"terms",
".",
"type",
")",
",",
"None",
"# if all resolved variables are numeric scalars",
"if",
"all",
"(",
"term",
".",
"is_scalar",
"for",
"term",
"in",
"terms",
")",
":",
"return",
"_result_type_many",
"(",
"*",
"(",
"term",
".",
"value",
"for",
"term",
"in",
"terms",
")",
")",
".",
"type",
",",
"None",
"# perform the main alignment",
"typ",
",",
"axes",
"=",
"_align_core",
"(",
"terms",
")",
"return",
"typ",
",",
"axes"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
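_align is internal to pd.eval, but its effect is observable from the public API: operands with different indexes are aligned before the expression is evaluated. A small sketch:

import pandas as pd

a = pd.Series([1.0, 2.0, 3.0], index=['x', 'y', 'z'])
b = pd.Series([10.0, 20.0], index=['y', 'z'])
# pd.eval resolves a and b from the calling scope and aligns them first:
print(pd.eval('a + b'))  # NaN at 'x'; 12.0 at 'y'; 23.0 at 'z'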
train | _reconstruct_object | Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`. | pandas/core/computation/align.py | def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value | def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value | [
"Reconstruct",
"an",
"object",
"given",
"its",
"type",
"raw",
"value",
"and",
"possibly",
"empty",
"(",
"None",
")",
"axes",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/align.py#L135-L177 | [
"def",
"_reconstruct_object",
"(",
"typ",
",",
"obj",
",",
"axes",
",",
"dtype",
")",
":",
"try",
":",
"typ",
"=",
"typ",
".",
"type",
"except",
"AttributeError",
":",
"pass",
"res_t",
"=",
"np",
".",
"result_type",
"(",
"obj",
".",
"dtype",
",",
"dtype",
")",
"if",
"(",
"not",
"isinstance",
"(",
"typ",
",",
"partial",
")",
"and",
"issubclass",
"(",
"typ",
",",
"pd",
".",
"core",
".",
"generic",
".",
"PandasObject",
")",
")",
":",
"return",
"typ",
"(",
"obj",
",",
"dtype",
"=",
"res_t",
",",
"*",
"*",
"axes",
")",
"# special case for pathological things like ~True/~False",
"if",
"hasattr",
"(",
"res_t",
",",
"'type'",
")",
"and",
"typ",
"==",
"np",
".",
"bool_",
"and",
"res_t",
"!=",
"np",
".",
"bool_",
":",
"ret_value",
"=",
"res_t",
".",
"type",
"(",
"obj",
")",
"else",
":",
"ret_value",
"=",
"typ",
"(",
"obj",
")",
".",
"astype",
"(",
"res_t",
")",
"# The condition is to distinguish 0-dim array (returned in case of",
"# scalar) and 1 element array",
"# e.g. np.array(0) and np.array([0])",
"if",
"len",
"(",
"obj",
".",
"shape",
")",
"==",
"1",
"and",
"len",
"(",
"obj",
")",
"==",
"1",
":",
"if",
"not",
"isinstance",
"(",
"ret_value",
",",
"np",
".",
"ndarray",
")",
":",
"ret_value",
"=",
"np",
".",
"array",
"(",
"[",
"ret_value",
"]",
")",
".",
"astype",
"(",
"res_t",
")",
"return",
"ret_value"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
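The dtype promotion _reconstruct_object leans on is plain np.result_type; a one-line check of the widening behaviour:

import numpy as np

# int64 combined with float32 promotes to float64, so the reconstructed
# object keeps the full precision of the integer operand.
print(np.result_type(np.dtype('int64'), np.dtype('float32')))  # float64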
train | tsplot | Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
axes : Axes
series : Series
Notes
-----
Supports same kwargs as Axes.plot
.. deprecated:: 0.23.0
Use Series.plot() instead | pandas/plotting/_timeseries.py | def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
axes : Axes
series : Series
Notes
-----
Supports same kwargs as Axes.plot
.. deprecated:: 0.23.0
Use Series.plot() instead
"""
import warnings
warnings.warn("'tsplot' is deprecated and will be removed in a "
"future version. Please use Series.plot() instead.",
FutureWarning, stacklevel=2)
# Using inferred freq is possible, need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, series.index)
return lines | def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
axes : Axes
series : Series
Notes
-----
Supports same kwargs as Axes.plot
.. deprecated:: 0.23.0
Use Series.plot() instead
"""
import warnings
warnings.warn("'tsplot' is deprecated and will be removed in a "
"future version. Please use Series.plot() instead.",
FutureWarning, stacklevel=2)
# Using inferred freq is possible, need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, series.index)
return lines | [
"Plots",
"a",
"Series",
"on",
"the",
"given",
"Matplotlib",
"axes",
"or",
"the",
"current",
"axes"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_timeseries.py#L26-L62 | [
"def",
"tsplot",
"(",
"series",
",",
"plotf",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"warnings",
"warnings",
".",
"warn",
"(",
"\"'tsplot' is deprecated and will be removed in a \"",
"\"future version. Please use Series.plot() instead.\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"# Used inferred freq is possible, need a test case for inferred",
"if",
"ax",
"is",
"None",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"freq",
",",
"series",
"=",
"_maybe_resample",
"(",
"series",
",",
"ax",
",",
"kwargs",
")",
"# Set ax with freq info",
"_decorate_axes",
"(",
"ax",
",",
"freq",
",",
"kwargs",
")",
"ax",
".",
"_plot_data",
".",
"append",
"(",
"(",
"series",
",",
"plotf",
",",
"kwargs",
")",
")",
"lines",
"=",
"plotf",
"(",
"ax",
",",
"series",
".",
"index",
".",
"_mpl_repr",
"(",
")",
",",
"series",
".",
"values",
",",
"*",
"*",
"kwargs",
")",
"# set date formatter, locators and rescale limits",
"format_dateaxis",
"(",
"ax",
",",
"ax",
".",
"freq",
",",
"series",
".",
"index",
")",
"return",
"lines"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
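Since tsplot is deprecated in this version, a sketch of the documented replacement (invented data; pandas, numpy and matplotlib assumed):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

idx = pd.period_range('2000-01', periods=24, freq='M')
s = pd.Series(np.random.randn(24).cumsum(), index=idx)
s.plot()  # Series.plot() is the replacement the warning points to
plt.show()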
train | _decorate_axes | Initialize axes for time-series plotting | pandas/plotting/_timeseries.py | def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None | def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None | [
"Initialize",
"axes",
"for",
"time",
"-",
"series",
"plotting"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_timeseries.py#L157-L170 | [
"def",
"_decorate_axes",
"(",
"ax",
",",
"freq",
",",
"kwargs",
")",
":",
"if",
"not",
"hasattr",
"(",
"ax",
",",
"'_plot_data'",
")",
":",
"ax",
".",
"_plot_data",
"=",
"[",
"]",
"ax",
".",
"freq",
"=",
"freq",
"xaxis",
"=",
"ax",
".",
"get_xaxis",
"(",
")",
"xaxis",
".",
"freq",
"=",
"freq",
"if",
"not",
"hasattr",
"(",
"ax",
",",
"'legendlabels'",
")",
":",
"ax",
".",
"legendlabels",
"=",
"[",
"kwargs",
".",
"get",
"(",
"'label'",
",",
"None",
")",
"]",
"else",
":",
"ax",
".",
"legendlabels",
".",
"append",
"(",
"kwargs",
".",
"get",
"(",
"'label'",
",",
"None",
")",
")",
"ax",
".",
"view_interval",
"=",
"None",
"ax",
".",
"date_axis_info",
"=",
"None"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | _get_ax_freq | Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx) | pandas/plotting/_timeseries.py | def _get_ax_freq(ax):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx)
"""
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
# check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
if ax_freq is None:
# check if a shared ax (sharex/twinx) has already freq set
shared_axes = ax.get_shared_x_axes().get_siblings(ax)
if len(shared_axes) > 1:
for shared_ax in shared_axes:
ax_freq = getattr(shared_ax, 'freq', None)
if ax_freq is not None:
break
return ax_freq | def _get_ax_freq(ax):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx)
"""
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
# check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
if ax_freq is None:
# check if a shared ax (sharex/twinx) has already freq set
shared_axes = ax.get_shared_x_axes().get_siblings(ax)
if len(shared_axes) > 1:
for shared_ax in shared_axes:
ax_freq = getattr(shared_ax, 'freq', None)
if ax_freq is not None:
break
return ax_freq | [
"Get",
"the",
"freq",
"attribute",
"of",
"the",
"ax",
"object",
"if",
"set",
".",
"Also",
"checks",
"shared",
"axes",
"(",
"eg",
"when",
"using",
"secondary",
"yaxis",
"sharex",
"=",
"True",
"or",
"twinx",
")"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_timeseries.py#L173-L194 | [
"def",
"_get_ax_freq",
"(",
"ax",
")",
":",
"ax_freq",
"=",
"getattr",
"(",
"ax",
",",
"'freq'",
",",
"None",
")",
"if",
"ax_freq",
"is",
"None",
":",
"# check for left/right ax in case of secondary yaxis",
"if",
"hasattr",
"(",
"ax",
",",
"'left_ax'",
")",
":",
"ax_freq",
"=",
"getattr",
"(",
"ax",
".",
"left_ax",
",",
"'freq'",
",",
"None",
")",
"elif",
"hasattr",
"(",
"ax",
",",
"'right_ax'",
")",
":",
"ax_freq",
"=",
"getattr",
"(",
"ax",
".",
"right_ax",
",",
"'freq'",
",",
"None",
")",
"if",
"ax_freq",
"is",
"None",
":",
"# check if a shared ax (sharex/twinx) has already freq set",
"shared_axes",
"=",
"ax",
".",
"get_shared_x_axes",
"(",
")",
".",
"get_siblings",
"(",
"ax",
")",
"if",
"len",
"(",
"shared_axes",
")",
">",
"1",
":",
"for",
"shared_ax",
"in",
"shared_axes",
":",
"ax_freq",
"=",
"getattr",
"(",
"shared_ax",
",",
"'freq'",
",",
"None",
")",
"if",
"ax_freq",
"is",
"not",
"None",
":",
"break",
"return",
"ax_freq"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | format_timedelta_ticks | Convert nanoseconds to 'D days HH:MM:SS.F' | pandas/plotting/_timeseries.py | def format_timedelta_ticks(x, pos, n_decimals):
"""
Convert nanoseconds to 'D days HH:MM:SS.F'
"""
s, ns = divmod(x, 1e9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10**(n_decimals - 9))
s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
if n_decimals > 0:
s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
if d != 0:
s = '{:d} days '.format(int(d)) + s
return s | def format_timedelta_ticks(x, pos, n_decimals):
"""
Convert nanoseconds to 'D days HH:MM:SS.F'
"""
s, ns = divmod(x, 1e9)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
decimals = int(ns * 10**(n_decimals - 9))
s = r'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s))
if n_decimals > 0:
s += '.{{:0{:0d}d}}'.format(n_decimals).format(decimals)
if d != 0:
s = '{:d} days '.format(int(d)) + s
return s | [
"Convert",
"nanoseconds",
"to",
"D",
"days",
"HH",
":",
"MM",
":",
"SS",
".",
"F"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_timeseries.py#L289-L303 | [
"def",
"format_timedelta_ticks",
"(",
"x",
",",
"pos",
",",
"n_decimals",
")",
":",
"s",
",",
"ns",
"=",
"divmod",
"(",
"x",
",",
"1e9",
")",
"m",
",",
"s",
"=",
"divmod",
"(",
"s",
",",
"60",
")",
"h",
",",
"m",
"=",
"divmod",
"(",
"m",
",",
"60",
")",
"d",
",",
"h",
"=",
"divmod",
"(",
"h",
",",
"24",
")",
"decimals",
"=",
"int",
"(",
"ns",
"*",
"10",
"**",
"(",
"n_decimals",
"-",
"9",
")",
")",
"s",
"=",
"r'{:02d}:{:02d}:{:02d}'",
".",
"format",
"(",
"int",
"(",
"h",
")",
",",
"int",
"(",
"m",
")",
",",
"int",
"(",
"s",
")",
")",
"if",
"n_decimals",
">",
"0",
":",
"s",
"+=",
"'.{{:0{:0d}d}}'",
".",
"format",
"(",
"n_decimals",
")",
".",
"format",
"(",
"decimals",
")",
"if",
"d",
"!=",
"0",
":",
"s",
"=",
"'{:d} days '",
".",
"format",
"(",
"int",
"(",
"d",
")",
")",
"+",
"s",
"return",
"s"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
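A worked check of the conversion above, taking x in nanoseconds as the divmod by 1e9 implies (1 day, 2 hours, 3 minutes, 4.5 seconds):

x = ((((1 * 24 + 2) * 60 + 3) * 60) + 4.5) * 1e9
s, ns = divmod(x, 1e9)   # 93784.0 seconds, 5e8 leftover nanoseconds
m, s = divmod(s, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
print(int(d), int(h), int(m), int(s), int(ns))  # 1 2 3 4 500000000
# With n_decimals=1 the function renders this as '1 days 02:03:04.5'.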
train | format_dateaxis | Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks. | pandas/plotting/_timeseries.py | def format_dateaxis(subplot, freq, index):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
# handle index specific formatting
# Note: DatetimeIndex does not use this
# interface. DatetimeIndex uses matplotlib.date directly
if isinstance(index, ABCPeriodIndex):
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = functools.partial(_format_coord, freq)
elif isinstance(index, ABCTimedeltaIndex):
subplot.xaxis.set_major_formatter(
TimeSeries_TimedeltaFormatter())
else:
raise TypeError('index type not supported')
pylab.draw_if_interactive() | def format_dateaxis(subplot, freq, index):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
# handle index specific formatting
# Note: DatetimeIndex does not use this
# interface. DatetimeIndex uses matplotlib.date directly
if isinstance(index, ABCPeriodIndex):
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = functools.partial(_format_coord, freq)
elif isinstance(index, ABCTimedeltaIndex):
subplot.xaxis.set_major_formatter(
TimeSeries_TimedeltaFormatter())
else:
raise TypeError('index type not supported')
pylab.draw_if_interactive() | [
"Pretty",
"-",
"formats",
"the",
"date",
"axis",
"(",
"x",
"-",
"axis",
")",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_timeseries.py#L310-L352 | [
"def",
"format_dateaxis",
"(",
"subplot",
",",
"freq",
",",
"index",
")",
":",
"# handle index specific formatting",
"# Note: DatetimeIndex does not use this",
"# interface. DatetimeIndex uses matplotlib.date directly",
"if",
"isinstance",
"(",
"index",
",",
"ABCPeriodIndex",
")",
":",
"majlocator",
"=",
"TimeSeries_DateLocator",
"(",
"freq",
",",
"dynamic_mode",
"=",
"True",
",",
"minor_locator",
"=",
"False",
",",
"plot_obj",
"=",
"subplot",
")",
"minlocator",
"=",
"TimeSeries_DateLocator",
"(",
"freq",
",",
"dynamic_mode",
"=",
"True",
",",
"minor_locator",
"=",
"True",
",",
"plot_obj",
"=",
"subplot",
")",
"subplot",
".",
"xaxis",
".",
"set_major_locator",
"(",
"majlocator",
")",
"subplot",
".",
"xaxis",
".",
"set_minor_locator",
"(",
"minlocator",
")",
"majformatter",
"=",
"TimeSeries_DateFormatter",
"(",
"freq",
",",
"dynamic_mode",
"=",
"True",
",",
"minor_locator",
"=",
"False",
",",
"plot_obj",
"=",
"subplot",
")",
"minformatter",
"=",
"TimeSeries_DateFormatter",
"(",
"freq",
",",
"dynamic_mode",
"=",
"True",
",",
"minor_locator",
"=",
"True",
",",
"plot_obj",
"=",
"subplot",
")",
"subplot",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"majformatter",
")",
"subplot",
".",
"xaxis",
".",
"set_minor_formatter",
"(",
"minformatter",
")",
"# x and y coord info",
"subplot",
".",
"format_coord",
"=",
"functools",
".",
"partial",
"(",
"_format_coord",
",",
"freq",
")",
"elif",
"isinstance",
"(",
"index",
",",
"ABCTimedeltaIndex",
")",
":",
"subplot",
".",
"xaxis",
".",
"set_major_formatter",
"(",
"TimeSeries_TimedeltaFormatter",
"(",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'index type not supported'",
")",
"pylab",
".",
"draw_if_interactive",
"(",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
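A sketch that exercises the ABCTimedeltaIndex branch above through the public API (invented data; the timedelta formatter is installed automatically when plotting against a TimedeltaIndex):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

idx = pd.to_timedelta(np.arange(60), unit='s')
pd.Series(np.arange(60), index=idx).plot()  # x ticks via the timedelta formatter
plt.show()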
train | DataFrame._is_homogeneous_type | Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False | pandas/core/frame.py | def _is_homogeneous_type(self):
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type | def _is_homogeneous_type(self):
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type | [
"Whether",
"all",
"the",
"columns",
"in",
"a",
"DataFrame",
"have",
"the",
"same",
"type",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L514-L540 | [
"def",
"_is_homogeneous_type",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data",
".",
"any_extension_types",
":",
"return",
"len",
"(",
"{",
"block",
".",
"dtype",
"for",
"block",
"in",
"self",
".",
"_data",
".",
"blocks",
"}",
")",
"==",
"1",
"else",
":",
"return",
"not",
"self",
".",
"_data",
".",
"is_mixed_type"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | DataFrame._repr_html_ | Return an HTML representation for a particular DataFrame.
Mainly for IPython notebook. | pandas/core/frame.py | def _repr_html_(self):
"""
Return an HTML representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'&lt;', 1)
val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None | def _repr_html_(self):
"""
Return an HTML representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace('<', r'&lt;', 1)
val = val.replace('>', r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions, notebook=True)
else:
return None | [
"Return",
"an",
"HTML",
"representation",
"for",
"a",
"particular",
"DataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L635-L657 | [
"def",
"_repr_html_",
"(",
"self",
")",
":",
"if",
"self",
".",
"_info_repr",
"(",
")",
":",
"buf",
"=",
"StringIO",
"(",
"\"\"",
")",
"self",
".",
"info",
"(",
"buf",
"=",
"buf",
")",
"# need to escape the <class>, should be the first line.",
"val",
"=",
"buf",
".",
"getvalue",
"(",
")",
".",
"replace",
"(",
"'<'",
",",
"r'<'",
",",
"1",
")",
"val",
"=",
"val",
".",
"replace",
"(",
"'>'",
",",
"r'>'",
",",
"1",
")",
"return",
"'<pre>'",
"+",
"val",
"+",
"'</pre>'",
"if",
"get_option",
"(",
"\"display.notebook_repr_html\"",
")",
":",
"max_rows",
"=",
"get_option",
"(",
"\"display.max_rows\"",
")",
"max_cols",
"=",
"get_option",
"(",
"\"display.max_columns\"",
")",
"show_dimensions",
"=",
"get_option",
"(",
"\"display.show_dimensions\"",
")",
"return",
"self",
".",
"to_html",
"(",
"max_rows",
"=",
"max_rows",
",",
"max_cols",
"=",
"max_cols",
",",
"show_dimensions",
"=",
"show_dimensions",
",",
"notebook",
"=",
"True",
")",
"else",
":",
"return",
"None"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
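A brief sketch of the `_repr_html_` hook above; IPython calls it for rich display, but it can also be invoked directly. Illustrative only, assuming the display options shown exist at this revision (they do in 0.24-era pandas):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

# Returns an HTML string, or None when notebook repr is disabled.
html = df._repr_html_()

# Output honors display options such as truncation thresholds.
with pd.option_context("display.max_rows", 2):
    truncated = df._repr_html_()
```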
train | DataFrame.to_string | Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6 | pandas/core/frame.py | def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result | def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result | [
"Render",
"a",
"DataFrame",
"to",
"a",
"console",
"-",
"friendly",
"tabular",
"output",
".",
"%",
"(",
"shared_params",
")",
"s",
"line_width",
":",
"int",
"optional",
"Width",
"to",
"wrap",
"a",
"line",
"in",
"characters",
".",
"%",
"(",
"returns",
")",
"s",
"See",
"Also",
"--------",
"to_html",
":",
"Convert",
"DataFrame",
"to",
"HTML",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L666-L708 | [
"def",
"to_string",
"(",
"self",
",",
"buf",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"col_space",
"=",
"None",
",",
"header",
"=",
"True",
",",
"index",
"=",
"True",
",",
"na_rep",
"=",
"'NaN'",
",",
"formatters",
"=",
"None",
",",
"float_format",
"=",
"None",
",",
"sparsify",
"=",
"None",
",",
"index_names",
"=",
"True",
",",
"justify",
"=",
"None",
",",
"max_rows",
"=",
"None",
",",
"max_cols",
"=",
"None",
",",
"show_dimensions",
"=",
"False",
",",
"decimal",
"=",
"'.'",
",",
"line_width",
"=",
"None",
")",
":",
"formatter",
"=",
"fmt",
".",
"DataFrameFormatter",
"(",
"self",
",",
"buf",
"=",
"buf",
",",
"columns",
"=",
"columns",
",",
"col_space",
"=",
"col_space",
",",
"na_rep",
"=",
"na_rep",
",",
"formatters",
"=",
"formatters",
",",
"float_format",
"=",
"float_format",
",",
"sparsify",
"=",
"sparsify",
",",
"justify",
"=",
"justify",
",",
"index_names",
"=",
"index_names",
",",
"header",
"=",
"header",
",",
"index",
"=",
"index",
",",
"max_rows",
"=",
"max_rows",
",",
"max_cols",
"=",
"max_cols",
",",
"show_dimensions",
"=",
"show_dimensions",
",",
"decimal",
"=",
"decimal",
",",
"line_width",
"=",
"line_width",
")",
"formatter",
".",
"to_string",
"(",
")",
"if",
"buf",
"is",
"None",
":",
"result",
"=",
"formatter",
".",
"buf",
".",
"getvalue",
"(",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
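A short sketch of the two calling modes of `to_string` above: returning a string when `buf` is None, and writing into the buffer otherwise.

```python
from io import StringIO

import pandas as pd

df = pd.DataFrame({"col1": [1, 2, 3], "col2": [4, 5, 6]})

# buf=None (default): the rendered table comes back as a string.
text = df.to_string(index=False, line_width=40)

# With an explicit buffer, output is written there and None is returned.
buf = StringIO()
df.to_string(buf=buf)
print(buf.getvalue())
```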
train | DataFrame.iteritems | r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64 | pandas/core/frame.py | def iteritems(self):
r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1) | def iteritems(self):
r"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1) | [
"r",
"Iterator",
"over",
"(",
"column",
"name",
"Series",
")",
"pairs",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L725-L778 | [
"def",
"iteritems",
"(",
"self",
")",
":",
"if",
"self",
".",
"columns",
".",
"is_unique",
"and",
"hasattr",
"(",
"self",
",",
"'_item_cache'",
")",
":",
"for",
"k",
"in",
"self",
".",
"columns",
":",
"yield",
"k",
",",
"self",
".",
"_get_item_cache",
"(",
"k",
")",
"else",
":",
"for",
"i",
",",
"k",
"in",
"enumerate",
"(",
"self",
".",
"columns",
")",
":",
"yield",
"k",
",",
"self",
".",
"_ixs",
"(",
"i",
",",
"axis",
"=",
"1",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
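A minimal usage sketch for `iteritems` above (later pandas versions rename this to `items`); each iteration yields a column label and the column as a Series:

```python
import pandas as pd

df = pd.DataFrame({"x": [1, 2], "y": [3.0, 4.0]})

for label, column in df.iteritems():
    # `column` is a Series, so Series methods apply directly.
    print(label, column.dtype, column.sum())
```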
train | DataFrame.iterrows | Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect. | pandas/core/frame.py | def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s | def iterrows(self):
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
See Also
--------
itertuples : Iterate over DataFrame rows as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s | [
"Iterate",
"over",
"DataFrame",
"rows",
"as",
"(",
"index",
"Series",
")",
"pairs",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L780-L830 | [
"def",
"iterrows",
"(",
"self",
")",
":",
"columns",
"=",
"self",
".",
"columns",
"klass",
"=",
"self",
".",
"_constructor_sliced",
"for",
"k",
",",
"v",
"in",
"zip",
"(",
"self",
".",
"index",
",",
"self",
".",
"values",
")",
":",
"s",
"=",
"klass",
"(",
"v",
",",
"index",
"=",
"columns",
",",
"name",
"=",
"k",
")",
"yield",
"k",
",",
"s"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
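A short sketch illustrating the dtype caveat called out in the notes above: rows come back as Series, so mixed numeric columns are upcast row-wise.

```python
import pandas as pd

df = pd.DataFrame({"int": [1, 2], "float": [1.5, 2.5]})

for idx, row in df.iterrows():
    # row["int"] is float64 here, not int64 (row-wise upcasting).
    print(idx, row["int"], row["float"])
```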
train | DataFrame.itertuples | Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2) | pandas/core/frame.py | def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor
if name is not None and len(self.columns) + index < 256:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays) | def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Yields
-------
collections.namedtuple
Yields a namedtuple for each row in the DataFrame with the first
field possibly being the index and following fields being the
column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.iteritems : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
With a large number of columns (>255), regular tuples are returned.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python 3 supports at most 255 arguments to constructor
if name is not None and len(self.columns) + index < 256:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays) | [
"Iterate",
"over",
"DataFrame",
"rows",
"as",
"namedtuples",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L832-L910 | [
"def",
"itertuples",
"(",
"self",
",",
"index",
"=",
"True",
",",
"name",
"=",
"\"Pandas\"",
")",
":",
"arrays",
"=",
"[",
"]",
"fields",
"=",
"list",
"(",
"self",
".",
"columns",
")",
"if",
"index",
":",
"arrays",
".",
"append",
"(",
"self",
".",
"index",
")",
"fields",
".",
"insert",
"(",
"0",
",",
"\"Index\"",
")",
"# use integer indexing because of possible duplicate column names",
"arrays",
".",
"extend",
"(",
"self",
".",
"iloc",
"[",
":",
",",
"k",
"]",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"columns",
")",
")",
")",
"# Python 3 supports at most 255 arguments to constructor",
"if",
"name",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"columns",
")",
"+",
"index",
"<",
"256",
":",
"itertuple",
"=",
"collections",
".",
"namedtuple",
"(",
"name",
",",
"fields",
",",
"rename",
"=",
"True",
")",
"return",
"map",
"(",
"itertuple",
".",
"_make",
",",
"zip",
"(",
"*",
"arrays",
")",
")",
"# fallback to regular tuples",
"return",
"zip",
"(",
"*",
"arrays",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
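A sketch of the renaming behavior mentioned in the notes above; the column label below is not a valid Python identifier, so `rename=True` gives it a positional field name (`_1` here, with `Index` occupying field 0):

```python
import pandas as pd

df = pd.DataFrame({"num legs": [4, 2]}, index=["dog", "hawk"])

for row in df.itertuples(name="Animal"):
    # Named access fails for "num legs"; use the positional field.
    print(row.Index, row[1])
```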
train | DataFrame.dot | Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of another Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Serie. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame of a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method also works if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2 | pandas/core/frame.py | def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of another Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame or a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method also works if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, '
'{s} vs {r}'.format(s=lvals.shape,
r=rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: {oth}'.format(oth=type(other))) | def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of another Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame or a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method also works if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, '
'{s} vs {r}'.format(s=lvals.shape,
r=rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals), index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: {oth}'.format(oth=type(other))) | [
"Compute",
"the",
"matrix",
"mutiplication",
"between",
"the",
"DataFrame",
"and",
"other",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L920-L1018 | [
"def",
"dot",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"(",
"Series",
",",
"DataFrame",
")",
")",
":",
"common",
"=",
"self",
".",
"columns",
".",
"union",
"(",
"other",
".",
"index",
")",
"if",
"(",
"len",
"(",
"common",
")",
">",
"len",
"(",
"self",
".",
"columns",
")",
"or",
"len",
"(",
"common",
")",
">",
"len",
"(",
"other",
".",
"index",
")",
")",
":",
"raise",
"ValueError",
"(",
"'matrices are not aligned'",
")",
"left",
"=",
"self",
".",
"reindex",
"(",
"columns",
"=",
"common",
",",
"copy",
"=",
"False",
")",
"right",
"=",
"other",
".",
"reindex",
"(",
"index",
"=",
"common",
",",
"copy",
"=",
"False",
")",
"lvals",
"=",
"left",
".",
"values",
"rvals",
"=",
"right",
".",
"values",
"else",
":",
"left",
"=",
"self",
"lvals",
"=",
"self",
".",
"values",
"rvals",
"=",
"np",
".",
"asarray",
"(",
"other",
")",
"if",
"lvals",
".",
"shape",
"[",
"1",
"]",
"!=",
"rvals",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"'Dot product shape mismatch, '",
"'{s} vs {r}'",
".",
"format",
"(",
"s",
"=",
"lvals",
".",
"shape",
",",
"r",
"=",
"rvals",
".",
"shape",
")",
")",
"if",
"isinstance",
"(",
"other",
",",
"DataFrame",
")",
":",
"return",
"self",
".",
"_constructor",
"(",
"np",
".",
"dot",
"(",
"lvals",
",",
"rvals",
")",
",",
"index",
"=",
"left",
".",
"index",
",",
"columns",
"=",
"other",
".",
"columns",
")",
"elif",
"isinstance",
"(",
"other",
",",
"Series",
")",
":",
"return",
"Series",
"(",
"np",
".",
"dot",
"(",
"lvals",
",",
"rvals",
")",
",",
"index",
"=",
"left",
".",
"index",
")",
"elif",
"isinstance",
"(",
"rvals",
",",
"(",
"np",
".",
"ndarray",
",",
"Index",
")",
")",
":",
"result",
"=",
"np",
".",
"dot",
"(",
"lvals",
",",
"rvals",
")",
"if",
"result",
".",
"ndim",
"==",
"2",
":",
"return",
"self",
".",
"_constructor",
"(",
"result",
",",
"index",
"=",
"left",
".",
"index",
")",
"else",
":",
"return",
"Series",
"(",
"result",
",",
"index",
"=",
"left",
".",
"index",
")",
"else",
":",
"# pragma: no cover",
"raise",
"TypeError",
"(",
"'unsupported type: {oth}'",
".",
"format",
"(",
"oth",
"=",
"type",
"(",
"other",
")",
")",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
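A minimal sketch of the alignment behavior described above: labeled operands are aligned on columns/index before multiplying, while a raw ndarray is only checked for shape.

```python
import numpy as np
import pandas as pd

df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
s = pd.Series([1, 1, 2, 1])

# df's columns are aligned with s's index before the product.
print(df.dot(s))

# An ndarray skips alignment; shapes (2, 4) @ (4, 2) must agree.
arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
print(df.dot(arr))
```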
train | DataFrame.from_dict | Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d | pandas/core/frame.py | def from_dict(cls, data, orient='columns', dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == 'columns':
if columns is not None:
raise ValueError("cannot use columns parameter with "
"orient='columns'")
else: # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype) | def from_dict(cls, data, orient='columns', dtype=None, columns=None):
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == 'columns':
if columns is not None:
raise ValueError("cannot use columns parameter with "
"orient='columns'")
else: # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype) | [
"Construct",
"DataFrame",
"from",
"dict",
"of",
"array",
"-",
"like",
"or",
"dicts",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1036-L1115 | [
"def",
"from_dict",
"(",
"cls",
",",
"data",
",",
"orient",
"=",
"'columns'",
",",
"dtype",
"=",
"None",
",",
"columns",
"=",
"None",
")",
":",
"index",
"=",
"None",
"orient",
"=",
"orient",
".",
"lower",
"(",
")",
"if",
"orient",
"==",
"'index'",
":",
"if",
"len",
"(",
"data",
")",
">",
"0",
":",
"# TODO speed up Series case",
"if",
"isinstance",
"(",
"list",
"(",
"data",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"(",
"Series",
",",
"dict",
")",
")",
":",
"data",
"=",
"_from_nested_dict",
"(",
"data",
")",
"else",
":",
"data",
",",
"index",
"=",
"list",
"(",
"data",
".",
"values",
"(",
")",
")",
",",
"list",
"(",
"data",
".",
"keys",
"(",
")",
")",
"elif",
"orient",
"==",
"'columns'",
":",
"if",
"columns",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"cannot use columns parameter with \"",
"\"orient='columns'\"",
")",
"else",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"'only recognize index or columns for orient'",
")",
"return",
"cls",
"(",
"data",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"columns",
",",
"dtype",
"=",
"dtype",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
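A short sketch of the `orient='index'` path above, where dict keys become row labels and `columns` (valid only with this orientation) names the columns:

```python
import pandas as pd

data = {"row_1": [3, 2, 1, 0], "row_2": ["a", "b", "c", "d"]}

df = pd.DataFrame.from_dict(data, orient="index",
                            columns=["A", "B", "C", "D"])
# Passing columns together with orient='columns' raises ValueError.
```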
train | DataFrame.to_numpy | Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object) | pandas/core/frame.py | def to_numpy(self, dtype=None, copy=False):
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
Whether to ensure that the returned value is a not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogenous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result | def to_numpy(self, dtype=None, copy=False):
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result | [
"Convert",
"the",
"DataFrame",
"to",
"a",
"NumPy",
"array",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1117-L1170 | [
"def",
"to_numpy",
"(",
"self",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"result",
"=",
"np",
".",
"array",
"(",
"self",
".",
"values",
",",
"dtype",
"=",
"dtype",
",",
"copy",
"=",
"copy",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
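A brief sketch of the upcasting and copy semantics described above:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})

# Mixed int/float columns upcast to the common dtype.
arr = df.to_numpy()
print(arr.dtype)  # float64

# copy=True forces a fresh array even when no cast was required.
arr_copy = df.to_numpy(dtype=np.float64, copy=True)
```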
train | DataFrame.to_dict | Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})] | pandas/core/frame.py | def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in self.items())
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in self.items())
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', [
list(map(com.maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
])))
elif orient.lower().startswith('s'):
return into_c((k, com.maybe_box_datetimelike(v))
for k, v in self.items())
elif orient.lower().startswith('r'):
columns = self.columns.tolist()
rows = (dict(zip(columns, row))
for row in self.itertuples(index=False, name=None))
return [
into_c((k, com.maybe_box_datetimelike(v))
for k, v in row.items())
for row in rows]
elif orient.lower().startswith('i'):
if not self.index.is_unique:
raise ValueError(
"DataFrame index must be unique for orient='index'."
)
return into_c((t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None))
else:
raise ValueError("orient '{o}' not understood".format(o=orient)) | def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning,
stacklevel=2)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith('d'):
return into_c(
(k, v.to_dict(into)) for k, v in self.items())
elif orient.lower().startswith('l'):
return into_c((k, v.tolist()) for k, v in self.items())
elif orient.lower().startswith('sp'):
return into_c((('index', self.index.tolist()),
('columns', self.columns.tolist()),
('data', [
list(map(com.maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
])))
elif orient.lower().startswith('s'):
return into_c((k, com.maybe_box_datetimelike(v))
for k, v in self.items())
elif orient.lower().startswith('r'):
columns = self.columns.tolist()
rows = (dict(zip(columns, row))
for row in self.itertuples(index=False, name=None))
return [
into_c((k, com.maybe_box_datetimelike(v))
for k, v in row.items())
for row in rows]
elif orient.lower().startswith('i'):
if not self.index.is_unique:
raise ValueError(
"DataFrame index must be unique for orient='index'."
)
return into_c((t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None))
else:
raise ValueError("orient '{o}' not understood".format(o=orient)) | [
"Convert",
"the",
"DataFrame",
"to",
"a",
"dictionary",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1172-L1298 | [
"def",
"to_dict",
"(",
"self",
",",
"orient",
"=",
"'dict'",
",",
"into",
"=",
"dict",
")",
":",
"if",
"not",
"self",
".",
"columns",
".",
"is_unique",
":",
"warnings",
".",
"warn",
"(",
"\"DataFrame columns are not unique, some \"",
"\"columns will be omitted.\"",
",",
"UserWarning",
",",
"stacklevel",
"=",
"2",
")",
"# GH16122",
"into_c",
"=",
"com",
".",
"standardize_mapping",
"(",
"into",
")",
"if",
"orient",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'d'",
")",
":",
"return",
"into_c",
"(",
"(",
"k",
",",
"v",
".",
"to_dict",
"(",
"into",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
")",
"elif",
"orient",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'l'",
")",
":",
"return",
"into_c",
"(",
"(",
"k",
",",
"v",
".",
"tolist",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
")",
"elif",
"orient",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'sp'",
")",
":",
"return",
"into_c",
"(",
"(",
"(",
"'index'",
",",
"self",
".",
"index",
".",
"tolist",
"(",
")",
")",
",",
"(",
"'columns'",
",",
"self",
".",
"columns",
".",
"tolist",
"(",
")",
")",
",",
"(",
"'data'",
",",
"[",
"list",
"(",
"map",
"(",
"com",
".",
"maybe_box_datetimelike",
",",
"t",
")",
")",
"for",
"t",
"in",
"self",
".",
"itertuples",
"(",
"index",
"=",
"False",
",",
"name",
"=",
"None",
")",
"]",
")",
")",
")",
"elif",
"orient",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'s'",
")",
":",
"return",
"into_c",
"(",
"(",
"k",
",",
"com",
".",
"maybe_box_datetimelike",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
")",
"elif",
"orient",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'r'",
")",
":",
"columns",
"=",
"self",
".",
"columns",
".",
"tolist",
"(",
")",
"rows",
"=",
"(",
"dict",
"(",
"zip",
"(",
"columns",
",",
"row",
")",
")",
"for",
"row",
"in",
"self",
".",
"itertuples",
"(",
"index",
"=",
"False",
",",
"name",
"=",
"None",
")",
")",
"return",
"[",
"into_c",
"(",
"(",
"k",
",",
"com",
".",
"maybe_box_datetimelike",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"row",
".",
"items",
"(",
")",
")",
"for",
"row",
"in",
"rows",
"]",
"elif",
"orient",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'i'",
")",
":",
"if",
"not",
"self",
".",
"index",
".",
"is_unique",
":",
"raise",
"ValueError",
"(",
"\"DataFrame index must be unique for orient='index'.\"",
")",
"return",
"into_c",
"(",
"(",
"t",
"[",
"0",
"]",
",",
"dict",
"(",
"zip",
"(",
"self",
".",
"columns",
",",
"t",
"[",
"1",
":",
"]",
")",
")",
")",
"for",
"t",
"in",
"self",
".",
"itertuples",
"(",
"name",
"=",
"None",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"orient '{o}' not understood\"",
".",
"format",
"(",
"o",
"=",
"orient",
")",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
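A minimal sketch of a few `orient` values above, plus the `into` hook for custom Mapping subclasses:

```python
from collections import OrderedDict

import pandas as pd

df = pd.DataFrame({"col1": [1, 2], "col2": [0.5, 0.75]},
                  index=["row1", "row2"])

records = df.to_dict("records")         # list of per-row dicts
split = df.to_dict("split")             # index/columns/data triple
ordered = df.to_dict(into=OrderedDict)  # nested OrderedDicts
```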
train | DataFrame.to_gbq | Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame
columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (e.g. Jupyter/IPython notebook on a remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery. | pandas/core/frame.py | def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', auth_local_webserver=False,
table_schema=None, location=None, progress_bar=True,
credentials=None, verbose=None, private_key=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if it does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame
columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (e.g. Jupyter/IPython notebook on a remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth, if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, credentials=credentials,
verbose=verbose, private_key=private_key) | def to_gbq(self, destination_table, project_id=None, chunksize=None,
reauth=False, if_exists='fail', auth_local_webserver=False,
table_schema=None, location=None, progress_bar=True,
credentials=None, verbose=None, private_key=None):
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists, do nothing.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if it does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame
columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
verbose : bool, deprecated
Deprecated in pandas-gbq version 0.4.0. Use the `logging module
to adjust verbosity instead
<https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
private_key : str, deprecated
Deprecated in pandas-gbq version 0.8.0. Use the ``credentials``
parameter and
:func:`google.oauth2.service_account.Credentials.from_service_account_info`
or
:func:`google.oauth2.service_account.Credentials.from_service_account_file`
instead.
Service account private key in JSON format. Can be file path
or string contents. This is useful for remote server
authentication (e.g. Jupyter/IPython notebook on a remote host).
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
return gbq.to_gbq(
self, destination_table, project_id=project_id,
chunksize=chunksize, reauth=reauth, if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema, location=location,
progress_bar=progress_bar, credentials=credentials,
verbose=verbose, private_key=private_key) | [
"Write",
"a",
"DataFrame",
"to",
"a",
"Google",
"BigQuery",
"table",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1300-L1405 | [
"def",
"to_gbq",
"(",
"self",
",",
"destination_table",
",",
"project_id",
"=",
"None",
",",
"chunksize",
"=",
"None",
",",
"reauth",
"=",
"False",
",",
"if_exists",
"=",
"'fail'",
",",
"auth_local_webserver",
"=",
"False",
",",
"table_schema",
"=",
"None",
",",
"location",
"=",
"None",
",",
"progress_bar",
"=",
"True",
",",
"credentials",
"=",
"None",
",",
"verbose",
"=",
"None",
",",
"private_key",
"=",
"None",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"gbq",
"return",
"gbq",
".",
"to_gbq",
"(",
"self",
",",
"destination_table",
",",
"project_id",
"=",
"project_id",
",",
"chunksize",
"=",
"chunksize",
",",
"reauth",
"=",
"reauth",
",",
"if_exists",
"=",
"if_exists",
",",
"auth_local_webserver",
"=",
"auth_local_webserver",
",",
"table_schema",
"=",
"table_schema",
",",
"location",
"=",
"location",
",",
"progress_bar",
"=",
"progress_bar",
",",
"credentials",
"=",
"credentials",
",",
"verbose",
"=",
"verbose",
",",
"private_key",
"=",
"private_key",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
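A minimal usage sketch for the ``to_gbq`` record above. The project, dataset, and table names are placeholders, and the call assumes pandas-gbq is installed with credentials discoverable from the environment:
>>> import pandas as pd
>>> df = pd.DataFrame({'num': [1, 2, 3], 'word': ['a', 'b', 'c']})
>>> df.to_gbq('my_dataset.my_table',  # doctest: +SKIP
...           project_id='my-project', if_exists='replace')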
train | DataFrame.from_records | Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternatively a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
DataFrame | pandas/core/frame.py | def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternatively a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in data.items():
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, str) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)]
for field in index]
result_index = ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr) | def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternatively a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in data.items():
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, str) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)]
for field in index]
result_index = ensure_index_from_sequences(index_data,
names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr) | [
"Convert",
"structured",
"or",
"record",
"ndarray",
"to",
"DataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1408-L1533 | [
"def",
"from_records",
"(",
"cls",
",",
"data",
",",
"index",
"=",
"None",
",",
"exclude",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"coerce_float",
"=",
"False",
",",
"nrows",
"=",
"None",
")",
":",
"# Make a copy of the input columns so we can modify it",
"if",
"columns",
"is",
"not",
"None",
":",
"columns",
"=",
"ensure_index",
"(",
"columns",
")",
"if",
"is_iterator",
"(",
"data",
")",
":",
"if",
"nrows",
"==",
"0",
":",
"return",
"cls",
"(",
")",
"try",
":",
"first_row",
"=",
"next",
"(",
"data",
")",
"except",
"StopIteration",
":",
"return",
"cls",
"(",
"index",
"=",
"index",
",",
"columns",
"=",
"columns",
")",
"dtype",
"=",
"None",
"if",
"hasattr",
"(",
"first_row",
",",
"'dtype'",
")",
"and",
"first_row",
".",
"dtype",
".",
"names",
":",
"dtype",
"=",
"first_row",
".",
"dtype",
"values",
"=",
"[",
"first_row",
"]",
"if",
"nrows",
"is",
"None",
":",
"values",
"+=",
"data",
"else",
":",
"values",
".",
"extend",
"(",
"itertools",
".",
"islice",
"(",
"data",
",",
"nrows",
"-",
"1",
")",
")",
"if",
"dtype",
"is",
"not",
"None",
":",
"data",
"=",
"np",
".",
"array",
"(",
"values",
",",
"dtype",
"=",
"dtype",
")",
"else",
":",
"data",
"=",
"values",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"if",
"columns",
"is",
"None",
":",
"columns",
"=",
"arr_columns",
"=",
"ensure_index",
"(",
"sorted",
"(",
"data",
")",
")",
"arrays",
"=",
"[",
"data",
"[",
"k",
"]",
"for",
"k",
"in",
"columns",
"]",
"else",
":",
"arrays",
"=",
"[",
"]",
"arr_columns",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"k",
"in",
"columns",
":",
"arr_columns",
".",
"append",
"(",
"k",
")",
"arrays",
".",
"append",
"(",
"v",
")",
"arrays",
",",
"arr_columns",
"=",
"reorder_arrays",
"(",
"arrays",
",",
"arr_columns",
",",
"columns",
")",
"elif",
"isinstance",
"(",
"data",
",",
"(",
"np",
".",
"ndarray",
",",
"DataFrame",
")",
")",
":",
"arrays",
",",
"columns",
"=",
"to_arrays",
"(",
"data",
",",
"columns",
")",
"if",
"columns",
"is",
"not",
"None",
":",
"columns",
"=",
"ensure_index",
"(",
"columns",
")",
"arr_columns",
"=",
"columns",
"else",
":",
"arrays",
",",
"arr_columns",
"=",
"to_arrays",
"(",
"data",
",",
"columns",
",",
"coerce_float",
"=",
"coerce_float",
")",
"arr_columns",
"=",
"ensure_index",
"(",
"arr_columns",
")",
"if",
"columns",
"is",
"not",
"None",
":",
"columns",
"=",
"ensure_index",
"(",
"columns",
")",
"else",
":",
"columns",
"=",
"arr_columns",
"if",
"exclude",
"is",
"None",
":",
"exclude",
"=",
"set",
"(",
")",
"else",
":",
"exclude",
"=",
"set",
"(",
"exclude",
")",
"result_index",
"=",
"None",
"if",
"index",
"is",
"not",
"None",
":",
"if",
"(",
"isinstance",
"(",
"index",
",",
"str",
")",
"or",
"not",
"hasattr",
"(",
"index",
",",
"\"__iter__\"",
")",
")",
":",
"i",
"=",
"columns",
".",
"get_loc",
"(",
"index",
")",
"exclude",
".",
"add",
"(",
"index",
")",
"if",
"len",
"(",
"arrays",
")",
">",
"0",
":",
"result_index",
"=",
"Index",
"(",
"arrays",
"[",
"i",
"]",
",",
"name",
"=",
"index",
")",
"else",
":",
"result_index",
"=",
"Index",
"(",
"[",
"]",
",",
"name",
"=",
"index",
")",
"else",
":",
"try",
":",
"index_data",
"=",
"[",
"arrays",
"[",
"arr_columns",
".",
"get_loc",
"(",
"field",
")",
"]",
"for",
"field",
"in",
"index",
"]",
"result_index",
"=",
"ensure_index_from_sequences",
"(",
"index_data",
",",
"names",
"=",
"index",
")",
"exclude",
".",
"update",
"(",
"index",
")",
"except",
"Exception",
":",
"result_index",
"=",
"index",
"if",
"any",
"(",
"exclude",
")",
":",
"arr_exclude",
"=",
"[",
"x",
"for",
"x",
"in",
"exclude",
"if",
"x",
"in",
"arr_columns",
"]",
"to_remove",
"=",
"[",
"arr_columns",
".",
"get_loc",
"(",
"col",
")",
"for",
"col",
"in",
"arr_exclude",
"]",
"arrays",
"=",
"[",
"v",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"arrays",
")",
"if",
"i",
"not",
"in",
"to_remove",
"]",
"arr_columns",
"=",
"arr_columns",
".",
"drop",
"(",
"arr_exclude",
")",
"columns",
"=",
"columns",
".",
"drop",
"(",
"exclude",
")",
"mgr",
"=",
"arrays_to_mgr",
"(",
"arrays",
",",
"arr_columns",
",",
"result_index",
",",
"columns",
")",
"return",
"cls",
"(",
"mgr",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
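A short sketch of the ``from_records`` path exercised most often — building a frame from a structured ndarray and promoting one field to the index; the field names here are illustrative:
>>> import numpy as np
>>> import pandas as pd
>>> data = np.array([(1, 2.0, 'x'), (3, 4.0, 'y')],
...                 dtype=[('a', 'i8'), ('b', 'f8'), ('c', 'O')])
>>> pd.DataFrame.from_records(data, index='a')
     b  c
a
1  2.0  x
3  4.0  y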
train | DataFrame.to_records | Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
.. deprecated:: 0.23.0
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = "<S{}".format(df.index.str.len().max())
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')]) | pandas/core/frame.py | def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
.. deprecated:: 0.23.0
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = "<S{}".format(df.index.str.len().max())
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if convert_datetime64 is not None:
warnings.warn("The 'convert_datetime64' parameter is "
"deprecated and will be removed in a future "
"version",
FutureWarning, stacklevel=2)
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = lmap(str, index_names) + lmap(str, self.columns)
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(str, self.columns)
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns, whichever is applicable).
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = ("Invalid dtype {dtype} specified for "
"{element} {name}").format(dtype=dtype_mapping,
element=element, name=name)
raise ValueError(msg)
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
) | def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
.. deprecated:: 0.23.0
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = "<S{}".format(df.index.str.len().max())
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if convert_datetime64 is not None:
warnings.warn("The 'convert_datetime64' parameter is "
"deprecated and will be removed in a future "
"version",
FutureWarning, stacklevel=2)
if index:
if is_datetime64_any_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = lmap(str, index_names) + lmap(str, self.columns)
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(str, self.columns)
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns, whichever is applicable).
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = ("Invalid dtype {dtype} specified for "
"{element} {name}").format(dtype=dtype_mapping,
element=element, name=name)
raise ValueError(msg)
return np.rec.fromarrays(
arrays,
dtype={'names': names, 'formats': formats}
) | [
"Convert",
"DataFrame",
"to",
"a",
"NumPy",
"record",
"array",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1535-L1717 | [
"def",
"to_records",
"(",
"self",
",",
"index",
"=",
"True",
",",
"convert_datetime64",
"=",
"None",
",",
"column_dtypes",
"=",
"None",
",",
"index_dtypes",
"=",
"None",
")",
":",
"if",
"convert_datetime64",
"is",
"not",
"None",
":",
"warnings",
".",
"warn",
"(",
"\"The 'convert_datetime64' parameter is \"",
"\"deprecated and will be removed in a future \"",
"\"version\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"if",
"index",
":",
"if",
"is_datetime64_any_dtype",
"(",
"self",
".",
"index",
")",
"and",
"convert_datetime64",
":",
"ix_vals",
"=",
"[",
"self",
".",
"index",
".",
"to_pydatetime",
"(",
")",
"]",
"else",
":",
"if",
"isinstance",
"(",
"self",
".",
"index",
",",
"MultiIndex",
")",
":",
"# array of tuples to numpy cols. copy copy copy",
"ix_vals",
"=",
"lmap",
"(",
"np",
".",
"array",
",",
"zip",
"(",
"*",
"self",
".",
"index",
".",
"values",
")",
")",
"else",
":",
"ix_vals",
"=",
"[",
"self",
".",
"index",
".",
"values",
"]",
"arrays",
"=",
"ix_vals",
"+",
"[",
"self",
"[",
"c",
"]",
".",
"get_values",
"(",
")",
"for",
"c",
"in",
"self",
".",
"columns",
"]",
"count",
"=",
"0",
"index_names",
"=",
"list",
"(",
"self",
".",
"index",
".",
"names",
")",
"if",
"isinstance",
"(",
"self",
".",
"index",
",",
"MultiIndex",
")",
":",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"index_names",
")",
":",
"if",
"n",
"is",
"None",
":",
"index_names",
"[",
"i",
"]",
"=",
"'level_%d'",
"%",
"count",
"count",
"+=",
"1",
"elif",
"index_names",
"[",
"0",
"]",
"is",
"None",
":",
"index_names",
"=",
"[",
"'index'",
"]",
"names",
"=",
"lmap",
"(",
"str",
",",
"index_names",
")",
"+",
"lmap",
"(",
"str",
",",
"self",
".",
"columns",
")",
"else",
":",
"arrays",
"=",
"[",
"self",
"[",
"c",
"]",
".",
"get_values",
"(",
")",
"for",
"c",
"in",
"self",
".",
"columns",
"]",
"names",
"=",
"lmap",
"(",
"str",
",",
"self",
".",
"columns",
")",
"index_names",
"=",
"[",
"]",
"index_len",
"=",
"len",
"(",
"index_names",
")",
"formats",
"=",
"[",
"]",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"arrays",
")",
":",
"index",
"=",
"i",
"# When the names and arrays are collected, we",
"# first collect those in the DataFrame's index,",
"# followed by those in its columns.",
"#",
"# Thus, the total length of the array is:",
"# len(index_names) + len(DataFrame.columns).",
"#",
"# This check allows us to see whether we are",
"# handling a name / array in the index or column.",
"if",
"index",
"<",
"index_len",
":",
"dtype_mapping",
"=",
"index_dtypes",
"name",
"=",
"index_names",
"[",
"index",
"]",
"else",
":",
"index",
"-=",
"index_len",
"dtype_mapping",
"=",
"column_dtypes",
"name",
"=",
"self",
".",
"columns",
"[",
"index",
"]",
"# We have a dictionary, so we get the data type",
"# associated with the index or column (which can",
"# be denoted by its name in the DataFrame or its",
"# position in DataFrame's array of indices or",
"# columns, whichever is applicable.",
"if",
"is_dict_like",
"(",
"dtype_mapping",
")",
":",
"if",
"name",
"in",
"dtype_mapping",
":",
"dtype_mapping",
"=",
"dtype_mapping",
"[",
"name",
"]",
"elif",
"index",
"in",
"dtype_mapping",
":",
"dtype_mapping",
"=",
"dtype_mapping",
"[",
"index",
"]",
"else",
":",
"dtype_mapping",
"=",
"None",
"# If no mapping can be found, use the array's",
"# dtype attribute for formatting.",
"#",
"# A valid dtype must either be a type or",
"# string naming a type.",
"if",
"dtype_mapping",
"is",
"None",
":",
"formats",
".",
"append",
"(",
"v",
".",
"dtype",
")",
"elif",
"isinstance",
"(",
"dtype_mapping",
",",
"(",
"type",
",",
"np",
".",
"dtype",
",",
"str",
")",
")",
":",
"formats",
".",
"append",
"(",
"dtype_mapping",
")",
"else",
":",
"element",
"=",
"\"row\"",
"if",
"i",
"<",
"index_len",
"else",
"\"column\"",
"msg",
"=",
"(",
"\"Invalid dtype {dtype} specified for \"",
"\"{element} {name}\"",
")",
".",
"format",
"(",
"dtype",
"=",
"dtype_mapping",
",",
"element",
"=",
"element",
",",
"name",
"=",
"name",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"np",
".",
"rec",
".",
"fromarrays",
"(",
"arrays",
",",
"dtype",
"=",
"{",
"'names'",
":",
"names",
",",
"'formats'",
":",
"formats",
"}",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
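The docstring above shows dtype mappings keyed by column name; positional keys (zero-indexed, as the parameter description states) also work. A small sketch:
>>> import pandas as pd
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, index=['a', 'b'])
>>> df.to_records(column_dtypes={0: 'int32'})  # column 0 is 'A'
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
          dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])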
train | DataFrame.from_items | Construct a DataFrame from a list of tuples.
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
DataFrame | pandas/core/frame.py | def from_items(cls, items, columns=None, orient='columns'):
"""
Construct a DataFrame from a list of tuples.
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
DataFrame
"""
warnings.warn("from_items is deprecated. Please use "
"DataFrame.from_dict(dict(items), ...) instead. "
"DataFrame.from_dict(OrderedDict(items)) may be used to "
"preserve the key order.",
FutureWarning, stacklevel=2)
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'") | def from_items(cls, items, columns=None, orient='columns'):
"""
Construct a DataFrame from a list of tuples.
.. deprecated:: 0.23.0
`from_items` is deprecated and will be removed in a future version.
Use :meth:`DataFrame.from_dict(dict(items)) <DataFrame.from_dict>`
instead.
:meth:`DataFrame.from_dict(OrderedDict(items)) <DataFrame.from_dict>`
may be used to preserve the key order.
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
DataFrame
"""
warnings.warn("from_items is deprecated. Please use "
"DataFrame.from_dict(dict(items), ...) instead. "
"DataFrame.from_dict(OrderedDict(items)) may be used to "
"preserve the key order.",
FutureWarning, stacklevel=2)
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = ensure_index(keys)
arrays = values
# GH 17312
# Provide more informative error msg when scalar values passed
try:
return cls._from_arrays(arrays, columns, None)
except ValueError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = ensure_index(keys)
# GH 17312
# Provide more informative error msg when scalar values passed
try:
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
except TypeError:
if not is_nested_list_like(values):
raise ValueError('The value in each (key, value) pair '
'must be an array, Series, or dict')
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'") | [
"Construct",
"a",
"DataFrame",
"from",
"a",
"list",
"of",
"tuples",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1720-L1805 | [
"def",
"from_items",
"(",
"cls",
",",
"items",
",",
"columns",
"=",
"None",
",",
"orient",
"=",
"'columns'",
")",
":",
"warnings",
".",
"warn",
"(",
"\"from_items is deprecated. Please use \"",
"\"DataFrame.from_dict(dict(items), ...) instead. \"",
"\"DataFrame.from_dict(OrderedDict(items)) may be used to \"",
"\"preserve the key order.\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"keys",
",",
"values",
"=",
"lzip",
"(",
"*",
"items",
")",
"if",
"orient",
"==",
"'columns'",
":",
"if",
"columns",
"is",
"not",
"None",
":",
"columns",
"=",
"ensure_index",
"(",
"columns",
")",
"idict",
"=",
"dict",
"(",
"items",
")",
"if",
"len",
"(",
"idict",
")",
"<",
"len",
"(",
"items",
")",
":",
"if",
"not",
"columns",
".",
"equals",
"(",
"ensure_index",
"(",
"keys",
")",
")",
":",
"raise",
"ValueError",
"(",
"'With non-unique item names, passed '",
"'columns must be identical'",
")",
"arrays",
"=",
"values",
"else",
":",
"arrays",
"=",
"[",
"idict",
"[",
"k",
"]",
"for",
"k",
"in",
"columns",
"if",
"k",
"in",
"idict",
"]",
"else",
":",
"columns",
"=",
"ensure_index",
"(",
"keys",
")",
"arrays",
"=",
"values",
"# GH 17312",
"# Provide more informative error msg when scalar values passed",
"try",
":",
"return",
"cls",
".",
"_from_arrays",
"(",
"arrays",
",",
"columns",
",",
"None",
")",
"except",
"ValueError",
":",
"if",
"not",
"is_nested_list_like",
"(",
"values",
")",
":",
"raise",
"ValueError",
"(",
"'The value in each (key, value) pair '",
"'must be an array, Series, or dict'",
")",
"elif",
"orient",
"==",
"'index'",
":",
"if",
"columns",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"Must pass columns with orient='index'\"",
")",
"keys",
"=",
"ensure_index",
"(",
"keys",
")",
"# GH 17312",
"# Provide more informative error msg when scalar values passed",
"try",
":",
"arr",
"=",
"np",
".",
"array",
"(",
"values",
",",
"dtype",
"=",
"object",
")",
".",
"T",
"data",
"=",
"[",
"lib",
".",
"maybe_convert_objects",
"(",
"v",
")",
"for",
"v",
"in",
"arr",
"]",
"return",
"cls",
".",
"_from_arrays",
"(",
"data",
",",
"columns",
",",
"keys",
")",
"except",
"TypeError",
":",
"if",
"not",
"is_nested_list_like",
"(",
"values",
")",
":",
"raise",
"ValueError",
"(",
"'The value in each (key, value) pair '",
"'must be an array, Series, or dict'",
")",
"else",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"\"'orient' must be either 'columns' or 'index'\"",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
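The deprecation warning in the record above names the replacement; a sketch of the recommended order-preserving call:
>>> from collections import OrderedDict
>>> import pandas as pd
>>> items = [('A', [1, 2, 3]), ('B', [4, 5, 6])]
>>> pd.DataFrame.from_dict(OrderedDict(items))
   A  B
0  1  4
1  2  5
2  3  6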
train | DataFrame.from_csv | Read CSV file.
.. deprecated:: 0.21.0
Use :func:`read_csv` instead.
It is preferable to use the more powerful :func:`read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
Write multi_index columns as a list of tuples (if True)
or in the new (expanded) format (if False).
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Returns
-------
DataFrame
See Also
--------
read_csv | pandas/core/frame.py | def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""
Read CSV file.
.. deprecated:: 0.21.0
Use :func:`read_csv` instead.
It is preferable to use the more powerful :func:`read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
Write multi_index columns as a list of tuples (if True)
or in the new (expanded) format (if False).
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Returns
-------
DataFrame
See Also
--------
read_csv
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_csv
return read_csv(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format) | def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True,
encoding=None, tupleize_cols=None,
infer_datetime_format=False):
"""
Read CSV file.
.. deprecated:: 0.21.0
Use :func:`read_csv` instead.
It is preferable to use the more powerful :func:`read_csv`
for most general purposes, but ``from_csv`` makes for an easy
roundtrip to and from a file (the exact counterpart of
``to_csv``), especially with a DataFrame of time series data.
This method only differs from the preferred :func:`read_csv`
in some defaults:
- `index_col` is ``0`` instead of ``None`` (take first column as index
by default)
- `parse_dates` is ``True`` instead of ``False`` (try parsing the index
as datetime by default)
So a ``pd.DataFrame.from_csv(path)`` can be replaced by
``pd.read_csv(path, index_col=0, parse_dates=True)``.
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
Write multi_index columns as a list of tuples (if True)
or in the new (expanded) format (if False).
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Returns
-------
DataFrame
See Also
--------
read_csv
"""
warnings.warn("from_csv is deprecated. Please use read_csv(...) "
"instead. Note that some of the default arguments are "
"different, so please refer to the documentation "
"for from_csv when changing your function calls",
FutureWarning, stacklevel=2)
from pandas.io.parsers import read_csv
return read_csv(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format) | [
"Read",
"CSV",
"file",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1813-L1877 | [
"def",
"from_csv",
"(",
"cls",
",",
"path",
",",
"header",
"=",
"0",
",",
"sep",
"=",
"','",
",",
"index_col",
"=",
"0",
",",
"parse_dates",
"=",
"True",
",",
"encoding",
"=",
"None",
",",
"tupleize_cols",
"=",
"None",
",",
"infer_datetime_format",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"\"from_csv is deprecated. Please use read_csv(...) \"",
"\"instead. Note that some of the default arguments are \"",
"\"different, so please refer to the documentation \"",
"\"for from_csv when changing your function calls\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"from",
"pandas",
".",
"io",
".",
"parsers",
"import",
"read_csv",
"return",
"read_csv",
"(",
"path",
",",
"header",
"=",
"header",
",",
"sep",
"=",
"sep",
",",
"parse_dates",
"=",
"parse_dates",
",",
"index_col",
"=",
"index_col",
",",
"encoding",
"=",
"encoding",
",",
"tupleize_cols",
"=",
"tupleize_cols",
",",
"infer_datetime_format",
"=",
"infer_datetime_format",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
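Per the deprecation note in the record above, the drop-in replacement is ``read_csv`` with the two changed defaults restored explicitly; the file path here is a placeholder:
>>> import pandas as pd
>>> df = pd.read_csv('data.csv', index_col=0,  # doctest: +SKIP
...                  parse_dates=True)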
train | DataFrame.to_sparse | Convert to SparseDataFrame.
Implement the sparse version of the DataFrame meaning that any data
matching a specific value is omitted in the representation.
The sparse DataFrame allows for more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
Converts the DataFrame back to its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'> | pandas/core/frame.py | def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame.
Implement the sparse version of the DataFrame meaning that any data
matching a specific value is omitted in the representation.
The sparse DataFrame allows for more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
Converts the DataFrame back to its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value) | def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame.
Implement the sparse version of the DataFrame, meaning that any data
matching a specific value is omitted in the representation.
The sparse DataFrame allows for more efficient storage.
Parameters
----------
fill_value : float, default None
The specific value that should be omitted in the representation.
kind : {'block', 'integer'}, default 'block'
The kind of the SparseIndex tracking where data is not equal to
the fill value:
- 'block' tracks only the locations and sizes of blocks of data.
- 'integer' keeps an array with all the locations of the data.
In most cases 'block' is recommended, since it's more memory
efficient.
Returns
-------
SparseDataFrame
The sparse representation of the DataFrame.
See Also
--------
DataFrame.to_dense :
Converts the DataFrame back to its dense form.
Examples
--------
>>> df = pd.DataFrame([(np.nan, np.nan),
... (1., np.nan),
... (np.nan, 1.)])
>>> df
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(df)
<class 'pandas.core.frame.DataFrame'>
>>> sdf = df.to_sparse()
>>> sdf
0 1
0 NaN NaN
1 1.0 NaN
2 NaN 1.0
>>> type(sdf)
<class 'pandas.core.sparse.frame.SparseDataFrame'>
"""
from pandas.core.sparse.api import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
columns=self.columns, default_kind=kind,
default_fill_value=fill_value) | [
"Convert",
"to",
"SparseDataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1879-L1936 | [
"def",
"to_sparse",
"(",
"self",
",",
"fill_value",
"=",
"None",
",",
"kind",
"=",
"'block'",
")",
":",
"from",
"pandas",
".",
"core",
".",
"sparse",
".",
"api",
"import",
"SparseDataFrame",
"return",
"SparseDataFrame",
"(",
"self",
".",
"_series",
",",
"index",
"=",
"self",
".",
"index",
",",
"columns",
"=",
"self",
".",
"columns",
",",
"default_kind",
"=",
"kind",
",",
"default_fill_value",
"=",
"fill_value",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
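A minimal round-trip sketch for the to_sparse row above, assuming a pandas version (pre-0.25) in which SparseDataFrame still exists; density and to_dense are standard attributes of that class:

import numpy as np
import pandas as pd

df = pd.DataFrame([(np.nan, np.nan), (1., np.nan), (np.nan, 1.)])
sdf = df.to_sparse()    # NaN acts as the fill value, so only 2 of 6 cells are stored
print(sdf.density)      # fraction of cells actually stored, ~0.33 here
dense = sdf.to_dense()  # converts back to an ordinary DataFrame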
train | DataFrame.to_stata | Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
fname : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}, default 114
Version to use in the output dta file. Version 114 can be read
by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to the Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP | pandas/core/frame.py | def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
fname : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}, default 114
Version to use in the output dta file. Version 114 can be read
by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to the Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
kwargs = {}
if version not in (114, 117):
raise ValueError('Only formats 114 and 117 supported.')
if version == 114:
if convert_strl is not None:
raise ValueError('strl support is only available when using '
'format 117')
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
byteorder=byteorder, time_stamp=time_stamp,
data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file() | def to_stata(self, fname, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None, version=114,
convert_strl=None):
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
fname : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
version : {114, 117}, default 114
Version to use in the output dta file. Version 114 can be read
by Stata 10 and later. Version 117 can be read by Stata 13
or later. Version 114 limits string variables to 244 characters or
fewer while 117 allows strings with lengths up to 2,000,000
characters.
.. versionadded:: 0.23.0
convert_strl : list, optional
List of column names to convert to the Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
.. versionadded:: 0.19.0
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
kwargs = {}
if version not in (114, 117):
raise ValueError('Only formats 114 and 117 supported.')
if version == 114:
if convert_strl is not None:
raise ValueError('strl support is only available when using '
'format 117')
from pandas.io.stata import StataWriter as statawriter
else:
from pandas.io.stata import StataWriter117 as statawriter
kwargs['convert_strl'] = convert_strl
writer = statawriter(fname, self, convert_dates=convert_dates,
byteorder=byteorder, time_stamp=time_stamp,
data_label=data_label, write_index=write_index,
variable_labels=variable_labels, **kwargs)
writer.write_file() | [
"Export",
"DataFrame",
"object",
"to",
"Stata",
"dta",
"format",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L1955-L2055 | [
"def",
"to_stata",
"(",
"self",
",",
"fname",
",",
"convert_dates",
"=",
"None",
",",
"write_index",
"=",
"True",
",",
"encoding",
"=",
"\"latin-1\"",
",",
"byteorder",
"=",
"None",
",",
"time_stamp",
"=",
"None",
",",
"data_label",
"=",
"None",
",",
"variable_labels",
"=",
"None",
",",
"version",
"=",
"114",
",",
"convert_strl",
"=",
"None",
")",
":",
"kwargs",
"=",
"{",
"}",
"if",
"version",
"not",
"in",
"(",
"114",
",",
"117",
")",
":",
"raise",
"ValueError",
"(",
"'Only formats 114 and 117 supported.'",
")",
"if",
"version",
"==",
"114",
":",
"if",
"convert_strl",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'strl support is only available when using '",
"'format 117'",
")",
"from",
"pandas",
".",
"io",
".",
"stata",
"import",
"StataWriter",
"as",
"statawriter",
"else",
":",
"from",
"pandas",
".",
"io",
".",
"stata",
"import",
"StataWriter117",
"as",
"statawriter",
"kwargs",
"[",
"'convert_strl'",
"]",
"=",
"convert_strl",
"writer",
"=",
"statawriter",
"(",
"fname",
",",
"self",
",",
"convert_dates",
"=",
"convert_dates",
",",
"byteorder",
"=",
"byteorder",
",",
"time_stamp",
"=",
"time_stamp",
",",
"data_label",
"=",
"data_label",
",",
"write_index",
"=",
"write_index",
",",
"variable_labels",
"=",
"variable_labels",
",",
"*",
"*",
"kwargs",
")",
"writer",
".",
"write_file",
"(",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
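A minimal sketch of the convert_dates behaviour documented in the to_stata row above; the file name events.dta and the column names are illustrative:

import pandas as pd

df = pd.DataFrame({'event': ['a', 'b'],
                   'when': pd.to_datetime(['2019-01-01', '2019-02-01'])})
# 'td' stores whole dates; datetime columns not listed default to 'tc'.
df.to_stata('events.dta', convert_dates={'when': 'td'}, write_index=False)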
train | DataFrame.to_feather | Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path | pandas/core/frame.py | def to_feather(self, fname):
"""
Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname) | def to_feather(self, fname):
"""
Write out the binary feather-format for DataFrames.
.. versionadded:: 0.20.0
Parameters
----------
fname : str
string file path
"""
from pandas.io.feather_format import to_feather
to_feather(self, fname) | [
"Write",
"out",
"the",
"binary",
"feather",
"-",
"format",
"for",
"DataFrames",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2057-L2069 | [
"def",
"to_feather",
"(",
"self",
",",
"fname",
")",
":",
"from",
"pandas",
".",
"io",
".",
"feather_format",
"import",
"to_feather",
"to_feather",
"(",
"self",
",",
"fname",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
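A minimal sketch for the to_feather row above, assuming pyarrow is installed (feather support at this pandas version also expects a default RangeIndex); the file name is illustrative:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
df.to_feather('df.feather')                # serialize to the feather format
roundtrip = pd.read_feather('df.feather')  # read it back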
train | DataFrame.to_parquet | Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4 | pandas/core/frame.py | def to_parquet(self, fname, engine='auto', compression='snappy',
index=None, partition_cols=None, **kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, index=index,
partition_cols=partition_cols, **kwargs) | def to_parquet(self, fname, engine='auto', compression='snappy',
index=None, partition_cols=None, **kwargs):
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
fname : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
the behavior depends on the chosen engine.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(self, fname, engine,
compression=compression, index=index,
partition_cols=partition_cols, **kwargs) | [
"Write",
"a",
"DataFrame",
"to",
"the",
"binary",
"parquet",
"format",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2071-L2141 | [
"def",
"to_parquet",
"(",
"self",
",",
"fname",
",",
"engine",
"=",
"'auto'",
",",
"compression",
"=",
"'snappy'",
",",
"index",
"=",
"None",
",",
"partition_cols",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"io",
".",
"parquet",
"import",
"to_parquet",
"to_parquet",
"(",
"self",
",",
"fname",
",",
"engine",
",",
"compression",
"=",
"compression",
",",
"index",
"=",
"index",
",",
"partition_cols",
"=",
"partition_cols",
",",
"*",
"*",
"kwargs",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
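A minimal sketch of the partition_cols behaviour documented in the to_parquet row above, assuming pyarrow or fastparquet is installed; the directory name dataset is illustrative:

import pandas as pd

df = pd.DataFrame({'year': [2018, 2018, 2019], 'value': [1.0, 2.0, 3.0]})
# With partition_cols, fname is treated as a root directory and the rows
# are written out as dataset/year=2018/... and dataset/year=2019/...
df.to_parquet('dataset', partition_cols=['year'])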
train | DataFrame.to_html | Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
table_id : str, optional
A CSS id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string. | pandas/core/frame.py | def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False,
border=None, table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
table_id : str, optional
A CSS id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal, table_id=table_id,
render_links=render_links)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue() | def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False,
border=None, table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
table_id : str, optional
A CSS id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if (justify is not None and
justify not in fmt._VALID_JUSTIFY_PARAMETERS):
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify, justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows, escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal, table_id=table_id,
render_links=render_links)
# TODO: a generic formatter would be in DataFrameFormatter
formatter.to_html(classes=classes, notebook=notebook, border=border)
if buf is None:
return formatter.buf.getvalue() | [
"Render",
"a",
"DataFrame",
"as",
"an",
"HTML",
"table",
".",
"%",
"(",
"shared_params",
")",
"s",
"bold_rows",
":",
"bool",
"default",
"True",
"Make",
"the",
"row",
"labels",
"bold",
"in",
"the",
"output",
".",
"classes",
":",
"str",
"or",
"list",
"or",
"tuple",
"default",
"None",
"CSS",
"class",
"(",
"es",
")",
"to",
"apply",
"to",
"the",
"resulting",
"html",
"table",
".",
"escape",
":",
"bool",
"default",
"True",
"Convert",
"the",
"characters",
"<",
">",
"and",
"&",
"to",
"HTML",
"-",
"safe",
"sequences",
".",
"notebook",
":",
"{",
"True",
"False",
"}",
"default",
"False",
"Whether",
"the",
"generated",
"HTML",
"is",
"for",
"IPython",
"Notebook",
".",
"border",
":",
"int",
"A",
"border",
"=",
"border",
"attribute",
"is",
"included",
"in",
"the",
"opening",
"<table",
">",
"tag",
".",
"Default",
"pd",
".",
"options",
".",
"html",
".",
"border",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2151-L2210 | [
"def",
"to_html",
"(",
"self",
",",
"buf",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"col_space",
"=",
"None",
",",
"header",
"=",
"True",
",",
"index",
"=",
"True",
",",
"na_rep",
"=",
"'NaN'",
",",
"formatters",
"=",
"None",
",",
"float_format",
"=",
"None",
",",
"sparsify",
"=",
"None",
",",
"index_names",
"=",
"True",
",",
"justify",
"=",
"None",
",",
"max_rows",
"=",
"None",
",",
"max_cols",
"=",
"None",
",",
"show_dimensions",
"=",
"False",
",",
"decimal",
"=",
"'.'",
",",
"bold_rows",
"=",
"True",
",",
"classes",
"=",
"None",
",",
"escape",
"=",
"True",
",",
"notebook",
"=",
"False",
",",
"border",
"=",
"None",
",",
"table_id",
"=",
"None",
",",
"render_links",
"=",
"False",
")",
":",
"if",
"(",
"justify",
"is",
"not",
"None",
"and",
"justify",
"not",
"in",
"fmt",
".",
"_VALID_JUSTIFY_PARAMETERS",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for justify parameter\"",
")",
"formatter",
"=",
"fmt",
".",
"DataFrameFormatter",
"(",
"self",
",",
"buf",
"=",
"buf",
",",
"columns",
"=",
"columns",
",",
"col_space",
"=",
"col_space",
",",
"na_rep",
"=",
"na_rep",
",",
"formatters",
"=",
"formatters",
",",
"float_format",
"=",
"float_format",
",",
"sparsify",
"=",
"sparsify",
",",
"justify",
"=",
"justify",
",",
"index_names",
"=",
"index_names",
",",
"header",
"=",
"header",
",",
"index",
"=",
"index",
",",
"bold_rows",
"=",
"bold_rows",
",",
"escape",
"=",
"escape",
",",
"max_rows",
"=",
"max_rows",
",",
"max_cols",
"=",
"max_cols",
",",
"show_dimensions",
"=",
"show_dimensions",
",",
"decimal",
"=",
"decimal",
",",
"table_id",
"=",
"table_id",
",",
"render_links",
"=",
"render_links",
")",
"# TODO: a generic formatter wld b in DataFrameFormatter",
"formatter",
".",
"to_html",
"(",
"classes",
"=",
"classes",
",",
"notebook",
"=",
"notebook",
",",
"border",
"=",
"border",
")",
"if",
"buf",
"is",
"None",
":",
"return",
"formatter",
".",
"buf",
".",
"getvalue",
"(",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
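A minimal sketch combining several of the to_html options documented above; the class names and table id are illustrative:

import pandas as pd

df = pd.DataFrame({'site': ['https://pandas.pydata.org']})
html = df.to_html(classes='table table-striped', table_id='demo',
                  render_links=True, border=0)
# buf is None, so the rendered table is returned as a str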
train | DataFrame.info | Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always shows memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information about all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Prints a summary of the column count and dtypes but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get
the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB | pandas/core/frame.py | def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always shows memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information about all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Prints a summary of the column count and dtypes but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get
the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append('Empty {name}'.format(name=type(self).__name__))
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "{count}{dtype}"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
'({cols:d} != {counts:d})'.format(
cols=len(cols), counts=len(counts)))
tmpl = "{count} non-null {dtype}"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl.format(count=count,
dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return ("{num:3.1f}{size_q} "
"{x}".format(num=num, size_q=size_qualifier, x=x))
num /= 1024.0
return "{num:3.1f}{size_q} {pb}".format(num=num,
size_q=size_qualifier,
pb='PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
in sorted(counts.items())]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: {mem}\n".format(
mem=_sizeof_fmt(mem_usage, size_qualifier)))
fmt.buffer_put_lines(buf, lines) | def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always shows memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based on column dtype and number of rows, assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information about all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
int_col 5 non-null int64
text_col 5 non-null object
float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Prints a summary of the column count and dtypes but not per-column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 200.0+ bytes
Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get
the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
column_1 1000000 non-null object
column_2 1000000 non-null object
column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append('Empty {name}'.format(name=type(self).__name__))
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option('display.max_info_columns',
len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None
tmpl = "{count}{dtype}"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
'Columns must equal counts '
'({cols:d} != {counts:d})'.format(
cols=len(cols), counts=len(counts)))
tmpl = "{count} non-null {dtype}"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes.iloc[i]
col = pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) + tmpl.format(count=count,
dtype=dtype))
def _non_verbose_repr():
lines.append(self.columns._summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return ("{num:3.1f}{size_q} "
"{x}".format(num=num, size_q=size_qualifier, x=x))
num /= 1024.0
return "{num:3.1f}{size_q} {pb}".format(num=num,
size_q=size_qualifier,
pb='PB')
if verbose:
_verbose_repr()
elif verbose is False: # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['{k}({kk:d})'.format(k=k[0], kk=k[1]) for k
in sorted(counts.items())]
lines.append('dtypes: {types}'.format(types=', '.join(dtypes)))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage:
# append memory usage of df to display
size_qualifier = ''
if memory_usage == 'deep':
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if ('object' in counts or
self.index._is_memory_usage_qualified()):
size_qualifier = '+'
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append("memory usage: {mem}\n".format(
mem=_sizeof_fmt(mem_usage, size_qualifier)))
fmt.buffer_put_lines(buf, lines) | [
"Print",
"a",
"concise",
"summary",
"of",
"a",
"DataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2214-L2451 | [
"def",
"info",
"(",
"self",
",",
"verbose",
"=",
"None",
",",
"buf",
"=",
"None",
",",
"max_cols",
"=",
"None",
",",
"memory_usage",
"=",
"None",
",",
"null_counts",
"=",
"None",
")",
":",
"if",
"buf",
"is",
"None",
":",
"# pragma: no cover",
"buf",
"=",
"sys",
".",
"stdout",
"lines",
"=",
"[",
"]",
"lines",
".",
"append",
"(",
"str",
"(",
"type",
"(",
"self",
")",
")",
")",
"lines",
".",
"append",
"(",
"self",
".",
"index",
".",
"_summary",
"(",
")",
")",
"if",
"len",
"(",
"self",
".",
"columns",
")",
"==",
"0",
":",
"lines",
".",
"append",
"(",
"'Empty {name}'",
".",
"format",
"(",
"name",
"=",
"type",
"(",
"self",
")",
".",
"__name__",
")",
")",
"fmt",
".",
"buffer_put_lines",
"(",
"buf",
",",
"lines",
")",
"return",
"cols",
"=",
"self",
".",
"columns",
"# hack",
"if",
"max_cols",
"is",
"None",
":",
"max_cols",
"=",
"get_option",
"(",
"'display.max_info_columns'",
",",
"len",
"(",
"self",
".",
"columns",
")",
"+",
"1",
")",
"max_rows",
"=",
"get_option",
"(",
"'display.max_info_rows'",
",",
"len",
"(",
"self",
")",
"+",
"1",
")",
"if",
"null_counts",
"is",
"None",
":",
"show_counts",
"=",
"(",
"(",
"len",
"(",
"self",
".",
"columns",
")",
"<=",
"max_cols",
")",
"and",
"(",
"len",
"(",
"self",
")",
"<",
"max_rows",
")",
")",
"else",
":",
"show_counts",
"=",
"null_counts",
"exceeds_info_cols",
"=",
"len",
"(",
"self",
".",
"columns",
")",
">",
"max_cols",
"def",
"_verbose_repr",
"(",
")",
":",
"lines",
".",
"append",
"(",
"'Data columns (total %d columns):'",
"%",
"len",
"(",
"self",
".",
"columns",
")",
")",
"space",
"=",
"max",
"(",
"len",
"(",
"pprint_thing",
"(",
"k",
")",
")",
"for",
"k",
"in",
"self",
".",
"columns",
")",
"+",
"4",
"counts",
"=",
"None",
"tmpl",
"=",
"\"{count}{dtype}\"",
"if",
"show_counts",
":",
"counts",
"=",
"self",
".",
"count",
"(",
")",
"if",
"len",
"(",
"cols",
")",
"!=",
"len",
"(",
"counts",
")",
":",
"# pragma: no cover",
"raise",
"AssertionError",
"(",
"'Columns must equal counts '",
"'({cols:d} != {counts:d})'",
".",
"format",
"(",
"cols",
"=",
"len",
"(",
"cols",
")",
",",
"counts",
"=",
"len",
"(",
"counts",
")",
")",
")",
"tmpl",
"=",
"\"{count} non-null {dtype}\"",
"dtypes",
"=",
"self",
".",
"dtypes",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"self",
".",
"columns",
")",
":",
"dtype",
"=",
"dtypes",
".",
"iloc",
"[",
"i",
"]",
"col",
"=",
"pprint_thing",
"(",
"col",
")",
"count",
"=",
"\"\"",
"if",
"show_counts",
":",
"count",
"=",
"counts",
".",
"iloc",
"[",
"i",
"]",
"lines",
".",
"append",
"(",
"_put_str",
"(",
"col",
",",
"space",
")",
"+",
"tmpl",
".",
"format",
"(",
"count",
"=",
"count",
",",
"dtype",
"=",
"dtype",
")",
")",
"def",
"_non_verbose_repr",
"(",
")",
":",
"lines",
".",
"append",
"(",
"self",
".",
"columns",
".",
"_summary",
"(",
"name",
"=",
"'Columns'",
")",
")",
"def",
"_sizeof_fmt",
"(",
"num",
",",
"size_qualifier",
")",
":",
"# returns size in human readable format",
"for",
"x",
"in",
"[",
"'bytes'",
",",
"'KB'",
",",
"'MB'",
",",
"'GB'",
",",
"'TB'",
"]",
":",
"if",
"num",
"<",
"1024.0",
":",
"return",
"(",
"\"{num:3.1f}{size_q} \"",
"\"{x}\"",
".",
"format",
"(",
"num",
"=",
"num",
",",
"size_q",
"=",
"size_qualifier",
",",
"x",
"=",
"x",
")",
")",
"num",
"/=",
"1024.0",
"return",
"\"{num:3.1f}{size_q} {pb}\"",
".",
"format",
"(",
"num",
"=",
"num",
",",
"size_q",
"=",
"size_qualifier",
",",
"pb",
"=",
"'PB'",
")",
"if",
"verbose",
":",
"_verbose_repr",
"(",
")",
"elif",
"verbose",
"is",
"False",
":",
"# specifically set to False, not nesc None",
"_non_verbose_repr",
"(",
")",
"else",
":",
"if",
"exceeds_info_cols",
":",
"_non_verbose_repr",
"(",
")",
"else",
":",
"_verbose_repr",
"(",
")",
"counts",
"=",
"self",
".",
"get_dtype_counts",
"(",
")",
"dtypes",
"=",
"[",
"'{k}({kk:d})'",
".",
"format",
"(",
"k",
"=",
"k",
"[",
"0",
"]",
",",
"kk",
"=",
"k",
"[",
"1",
"]",
")",
"for",
"k",
"in",
"sorted",
"(",
"counts",
".",
"items",
"(",
")",
")",
"]",
"lines",
".",
"append",
"(",
"'dtypes: {types}'",
".",
"format",
"(",
"types",
"=",
"', '",
".",
"join",
"(",
"dtypes",
")",
")",
")",
"if",
"memory_usage",
"is",
"None",
":",
"memory_usage",
"=",
"get_option",
"(",
"'display.memory_usage'",
")",
"if",
"memory_usage",
":",
"# append memory usage of df to display",
"size_qualifier",
"=",
"''",
"if",
"memory_usage",
"==",
"'deep'",
":",
"deep",
"=",
"True",
"else",
":",
"# size_qualifier is just a best effort; not guaranteed to catch",
"# all cases (e.g., it misses categorical data even with object",
"# categories)",
"deep",
"=",
"False",
"if",
"(",
"'object'",
"in",
"counts",
"or",
"self",
".",
"index",
".",
"_is_memory_usage_qualified",
"(",
")",
")",
":",
"size_qualifier",
"=",
"'+'",
"mem_usage",
"=",
"self",
".",
"memory_usage",
"(",
"index",
"=",
"True",
",",
"deep",
"=",
"deep",
")",
".",
"sum",
"(",
")",
"lines",
".",
"append",
"(",
"\"memory usage: {mem}\\n\"",
".",
"format",
"(",
"mem",
"=",
"_sizeof_fmt",
"(",
"mem_usage",
",",
"size_qualifier",
")",
")",
")",
"fmt",
".",
"buffer_put_lines",
"(",
"buf",
",",
"lines",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
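A minimal sketch of the buffer redirection documented in the info row above; the column contents are illustrative:

import io
import pandas as pd

df = pd.DataFrame({'a': [1.0, None, 3.0], 'b': list('xyz')})
buffer = io.StringIO()
df.info(buf=buffer, memory_usage='deep', null_counts=True)
text = buffer.getvalue()  # info() returns None; the summary lands in the buffer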
train | DataFrame.memory_usage | Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168 | pandas/core/frame.py | def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result | def memory_usage(self, index=True, deep=False):
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 80
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 80
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5168
"""
result = Series([c.memory_usage(index=False, deep=deep)
for col, c in self.iteritems()], index=self.columns)
if index:
result = Series(self.index.memory_usage(deep=deep),
index=['Index']).append(result)
return result | [
"Return",
"the",
"memory",
"usage",
"of",
"each",
"column",
"in",
"bytes",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2453-L2542 | [
"def",
"memory_usage",
"(",
"self",
",",
"index",
"=",
"True",
",",
"deep",
"=",
"False",
")",
":",
"result",
"=",
"Series",
"(",
"[",
"c",
".",
"memory_usage",
"(",
"index",
"=",
"False",
",",
"deep",
"=",
"deep",
")",
"for",
"col",
",",
"c",
"in",
"self",
".",
"iteritems",
"(",
")",
"]",
",",
"index",
"=",
"self",
".",
"columns",
")",
"if",
"index",
":",
"result",
"=",
"Series",
"(",
"self",
".",
"index",
".",
"memory_usage",
"(",
"deep",
"=",
"deep",
")",
",",
"index",
"=",
"[",
"'Index'",
"]",
")",
".",
"append",
"(",
"result",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
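A compact sketch of the shallow-versus-deep accounting documented above; the column name and byte counts here are illustrative assumptions, not values from the record:
import pandas as pd

df = pd.DataFrame({'s': ['a', 'b'] * 2500})
print(df.memory_usage())                  # shallow: counts only the object pointers
print(df.memory_usage(deep=True))         # deep: adds each string's payload
# Converting to Categorical stores each distinct string only once:
print(df['s'].astype('category').memory_usage(deep=True))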
train | DataFrame.transpose | Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object | pandas/core/frame.py | def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super().transpose(1, 0, **kwargs) | def transpose(self, *args, **kwargs):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
copy : bool, default False
If True, the underlying data is copied. Otherwise (default), no
copy is made if possible.
*args, **kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
return super().transpose(1, 0, **kwargs) | [
"Transpose",
"index",
"and",
"columns",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2544-L2640 | [
"def",
"transpose",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_transpose",
"(",
"args",
",",
"dict",
"(",
")",
")",
"return",
"super",
"(",
")",
".",
"transpose",
"(",
"1",
",",
"0",
",",
"*",
"*",
"kwargs",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
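A quick sketch of the mixed-dtype upcasting described above, using made-up column names:
import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'score': [1.0, 2.0]})
t = df.T
print(df.dtypes)   # name: object, score: float64
print(t.dtypes)    # both transposed columns are object (a copy was made)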
train | DataFrame.get_value | Quickly retrieve single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar | pandas/core/frame.py | def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable) | def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable) | [
"Quickly",
"retrieve",
"single",
"value",
"at",
"passed",
"column",
"and",
"index",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2679-L2701 | [
"def",
"get_value",
"(",
"self",
",",
"index",
",",
"col",
",",
"takeable",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"\"get_value is deprecated and will be removed \"",
"\"in a future release. Please use \"",
"\".at[] or .iat[] accessors instead\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"self",
".",
"_get_value",
"(",
"index",
",",
"col",
",",
"takeable",
"=",
"takeable",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
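Since get_value is deprecated, a hedged sketch of the suggested .at/.iat replacements (the sample frame is illustrative):
import pandas as pd

df = pd.DataFrame({'a': [10, 20]}, index=['x', 'y'])
print(df.at['y', 'a'])   # label-based scalar lookup, replaces df.get_value('y', 'a')
print(df.iat[1, 0])      # positional lookup, the takeable=True analogue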
train | DataFrame.set_value | Put single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
Returns
-------
DataFrame
If the label pair is contained, the result will be a reference to the
calling DataFrame; otherwise a new object. | pandas/core/frame.py | def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
Returns
-------
DataFrame
If the label pair is contained, the result will be a reference to the
calling DataFrame; otherwise a new object.
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable) | def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index.
.. deprecated:: 0.21.0
Use .at[] or .iat[] accessors instead.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
Returns
-------
DataFrame
If the label pair is contained, the result will be a reference to the
calling DataFrame; otherwise a new object.
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable) | [
"Put",
"single",
"value",
"at",
"passed",
"column",
"and",
"index",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2723-L2747 | [
"def",
"set_value",
"(",
"self",
",",
"index",
",",
"col",
",",
"value",
",",
"takeable",
"=",
"False",
")",
":",
"warnings",
".",
"warn",
"(",
"\"set_value is deprecated and will be removed \"",
"\"in a future release. Please use \"",
"\".at[] or .iat[] accessors instead\"",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"self",
".",
"_set_value",
"(",
"index",
",",
"col",
",",
"value",
",",
"takeable",
"=",
"takeable",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
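A matching sketch for the deprecated set_value, again with an illustrative frame:
import pandas as pd

df = pd.DataFrame({'a': [10, 20]}, index=['x', 'y'])
df.at['y', 'a'] = 99     # replaces the deprecated df.set_value('y', 'a', 99)
df.loc['z', 'a'] = 7     # setting with enlargement covers the "new label" case
print(df)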
train | DataFrame._ixs | Parameters
----------
i : int, slice, or sequence of integers
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view. | pandas/core/frame.py | def _ixs(self, i, axis=0):
"""
Parameters
----------
i : int, slice, or sequence of integers
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result | def _ixs(self, i, axis=0):
"""
Parameters
----------
i : int, slice, or sequence of integers
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy = True
else:
new_values = self._data.fast_xs(i)
if is_scalar(new_values):
return new_values
# if we are a copy, mark as such
copy = (isinstance(new_values, np.ndarray) and
new_values.base is None)
result = self._constructor_sliced(new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.loc[:, lab_slice]
else:
if isinstance(label, Index):
return self._take(i, axis=1)
index_len = len(self.index)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if index_len and not len(values):
values = np.array([np.nan] * index_len, dtype=object)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result | [
"Parameters",
"----------",
"i",
":",
"int",
"slice",
"or",
"sequence",
"of",
"integers",
"axis",
":",
"int"
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2771-L2833 | [
"def",
"_ixs",
"(",
"self",
",",
"i",
",",
"axis",
"=",
"0",
")",
":",
"# irow",
"if",
"axis",
"==",
"0",
":",
"if",
"isinstance",
"(",
"i",
",",
"slice",
")",
":",
"return",
"self",
"[",
"i",
"]",
"else",
":",
"label",
"=",
"self",
".",
"index",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"label",
",",
"Index",
")",
":",
"# a location index by definition",
"result",
"=",
"self",
".",
"take",
"(",
"i",
",",
"axis",
"=",
"axis",
")",
"copy",
"=",
"True",
"else",
":",
"new_values",
"=",
"self",
".",
"_data",
".",
"fast_xs",
"(",
"i",
")",
"if",
"is_scalar",
"(",
"new_values",
")",
":",
"return",
"new_values",
"# if we are a copy, mark as such",
"copy",
"=",
"(",
"isinstance",
"(",
"new_values",
",",
"np",
".",
"ndarray",
")",
"and",
"new_values",
".",
"base",
"is",
"None",
")",
"result",
"=",
"self",
".",
"_constructor_sliced",
"(",
"new_values",
",",
"index",
"=",
"self",
".",
"columns",
",",
"name",
"=",
"self",
".",
"index",
"[",
"i",
"]",
",",
"dtype",
"=",
"new_values",
".",
"dtype",
")",
"result",
".",
"_set_is_copy",
"(",
"self",
",",
"copy",
"=",
"copy",
")",
"return",
"result",
"# icol",
"else",
":",
"label",
"=",
"self",
".",
"columns",
"[",
"i",
"]",
"if",
"isinstance",
"(",
"i",
",",
"slice",
")",
":",
"# need to return view",
"lab_slice",
"=",
"slice",
"(",
"label",
"[",
"0",
"]",
",",
"label",
"[",
"-",
"1",
"]",
")",
"return",
"self",
".",
"loc",
"[",
":",
",",
"lab_slice",
"]",
"else",
":",
"if",
"isinstance",
"(",
"label",
",",
"Index",
")",
":",
"return",
"self",
".",
"_take",
"(",
"i",
",",
"axis",
"=",
"1",
")",
"index_len",
"=",
"len",
"(",
"self",
".",
"index",
")",
"# if the values returned are not the same length",
"# as the index (iow a not found value), iget returns",
"# a 0-len ndarray. This is effectively catching",
"# a numpy error (as numpy should really raise)",
"values",
"=",
"self",
".",
"_data",
".",
"iget",
"(",
"i",
")",
"if",
"index_len",
"and",
"not",
"len",
"(",
"values",
")",
":",
"values",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"nan",
"]",
"*",
"index_len",
",",
"dtype",
"=",
"object",
")",
"result",
"=",
"self",
".",
"_box_col_values",
"(",
"values",
",",
"label",
")",
"# this is a cached value, mark it so",
"result",
".",
"_set_as_cached",
"(",
"label",
",",
"self",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
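_ixs is internal; the public iloc calls below exercise the row, slice, and column paths its Notes describe (view-versus-copy details are version-dependent):
import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
print(df.iloc[1])      # single row -> Series named by the row label (axis=0 path)
print(df.iloc[0:2])    # slice -> may be a view onto the original data
print(df.iloc[:, 0])   # single column -> a cached, boxed Series (axis=1 path)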
train | DataFrame.query | Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
.. versionadded:: 0.25.0
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
.. versionadded:: 0.18.0
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10 | pandas/core/frame.py | def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
.. versionadded:: 0.25.0
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
.. versionadded:: 0.18.0
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, str):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data | def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
.. versionadded:: 0.25.0
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
.. versionadded:: 0.18.0
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(expr, str):
msg = "expr must be a string to be evaluated, {0} given"
raise ValueError(msg.format(type(expr)))
kwargs['level'] = kwargs.pop('level', 0) + 1
kwargs['target'] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data | [
"Query",
"the",
"columns",
"of",
"a",
"DataFrame",
"with",
"a",
"boolean",
"expression",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L2955-L3082 | [
"def",
"query",
"(",
"self",
",",
"expr",
",",
"inplace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"if",
"not",
"isinstance",
"(",
"expr",
",",
"str",
")",
":",
"msg",
"=",
"\"expr must be a string to be evaluated, {0} given\"",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"type",
"(",
"expr",
")",
")",
")",
"kwargs",
"[",
"'level'",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"'level'",
",",
"0",
")",
"+",
"1",
"kwargs",
"[",
"'target'",
"]",
"=",
"None",
"res",
"=",
"self",
".",
"eval",
"(",
"expr",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"new_data",
"=",
"self",
".",
"loc",
"[",
"res",
"]",
"except",
"ValueError",
":",
"# when res is multi-dimensional loc raises, but this is sometimes a",
"# valid query",
"new_data",
"=",
"self",
"[",
"res",
"]",
"if",
"inplace",
":",
"self",
".",
"_update_inplace",
"(",
"new_data",
")",
"else",
":",
"return",
"new_data"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
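The docstring describes '@'-prefixed environment variables but gives no example; a short sketch (threshold is a made-up local name):
import pandas as pd

df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
threshold = 3
print(df.query('A > @threshold'))            # '@' reaches local/global variables
print(df.query('A > B and B > 2'))           # 'and'/'or' work with the default parser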
train | DataFrame.eval | Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7 | pandas/core/frame.py | def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = \
self._get_space_character_free_column_resolvers()
resolvers = column_resolvers, index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs) | def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
.. versionadded:: 0.18.0.
kwargs : dict
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, 'inplace')
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = \
self._get_space_character_free_column_resolvers()
resolvers = column_resolvers, index_resolvers
if 'target' not in kwargs:
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs) | [
"Evaluate",
"a",
"string",
"describing",
"operations",
"on",
"DataFrame",
"columns",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3084-L3187 | [
"def",
"eval",
"(",
"self",
",",
"expr",
",",
"inplace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pandas",
".",
"core",
".",
"computation",
".",
"eval",
"import",
"eval",
"as",
"_eval",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"resolvers",
"=",
"kwargs",
".",
"pop",
"(",
"'resolvers'",
",",
"None",
")",
"kwargs",
"[",
"'level'",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"'level'",
",",
"0",
")",
"+",
"1",
"if",
"resolvers",
"is",
"None",
":",
"index_resolvers",
"=",
"self",
".",
"_get_index_resolvers",
"(",
")",
"column_resolvers",
"=",
"self",
".",
"_get_space_character_free_column_resolvers",
"(",
")",
"resolvers",
"=",
"column_resolvers",
",",
"index_resolvers",
"if",
"'target'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'target'",
"]",
"=",
"self",
"kwargs",
"[",
"'resolvers'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'resolvers'",
",",
"(",
")",
")",
"+",
"tuple",
"(",
"resolvers",
")",
"return",
"_eval",
"(",
"expr",
",",
"inplace",
"=",
"inplace",
",",
"*",
"*",
"kwargs",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
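A small sketch combining assignment with an '@'-referenced local, as the docstring's keyword-forwarding to eval allows (scale is an illustrative name):
import pandas as pd

df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
scale = 2
df.eval('C = A * @scale', inplace=True)   # '@' pulls the local variable in
print(df)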
train | DataFrame.select_dtypes | Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0 | pandas/core/frame.py | def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(infer_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)] | def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
msg = 'object of type {typ!r} has no info axis'
raise TypeError(msg.format(typ=type(obj).__name__))
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(
lambda x: frozenset(map(infer_dtype_from_object, x)), selection)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=(include & exclude)))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(idx, dtype):
return idx, functools.partial(issubclass, dtype.type)
for idx, f in itertools.starmap(is_dtype_instance_mapper,
enumerate(self.dtypes)):
if include: # checks for the case of empty include or exclude
include_these.iloc[idx] = any(map(f, include))
if exclude:
exclude_these.iloc[idx] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[_get_info_slice(self, dtype_indexer)] | [
"Return",
"a",
"subset",
"of",
"the",
"DataFrame",
"s",
"columns",
"based",
"on",
"the",
"column",
"dtypes",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3189-L3324 | [
"def",
"select_dtypes",
"(",
"self",
",",
"include",
"=",
"None",
",",
"exclude",
"=",
"None",
")",
":",
"def",
"_get_info_slice",
"(",
"obj",
",",
"indexer",
")",
":",
"\"\"\"Slice the info axis of `obj` with `indexer`.\"\"\"",
"if",
"not",
"hasattr",
"(",
"obj",
",",
"'_info_axis_number'",
")",
":",
"msg",
"=",
"'object of type {typ!r} has no info axis'",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"typ",
"=",
"type",
"(",
"obj",
")",
".",
"__name__",
")",
")",
"slices",
"=",
"[",
"slice",
"(",
"None",
")",
"]",
"*",
"obj",
".",
"ndim",
"slices",
"[",
"obj",
".",
"_info_axis_number",
"]",
"=",
"indexer",
"return",
"tuple",
"(",
"slices",
")",
"if",
"not",
"is_list_like",
"(",
"include",
")",
":",
"include",
"=",
"(",
"include",
",",
")",
"if",
"include",
"is",
"not",
"None",
"else",
"(",
")",
"if",
"not",
"is_list_like",
"(",
"exclude",
")",
":",
"exclude",
"=",
"(",
"exclude",
",",
")",
"if",
"exclude",
"is",
"not",
"None",
"else",
"(",
")",
"selection",
"=",
"tuple",
"(",
"map",
"(",
"frozenset",
",",
"(",
"include",
",",
"exclude",
")",
")",
")",
"if",
"not",
"any",
"(",
"selection",
")",
":",
"raise",
"ValueError",
"(",
"'at least one of include or exclude must be '",
"'nonempty'",
")",
"# convert the myriad valid dtypes object to a single representation",
"include",
",",
"exclude",
"=",
"map",
"(",
"lambda",
"x",
":",
"frozenset",
"(",
"map",
"(",
"infer_dtype_from_object",
",",
"x",
")",
")",
",",
"selection",
")",
"for",
"dtypes",
"in",
"(",
"include",
",",
"exclude",
")",
":",
"invalidate_string_dtypes",
"(",
"dtypes",
")",
"# can't both include AND exclude!",
"if",
"not",
"include",
".",
"isdisjoint",
"(",
"exclude",
")",
":",
"raise",
"ValueError",
"(",
"'include and exclude overlap on {inc_ex}'",
".",
"format",
"(",
"inc_ex",
"=",
"(",
"include",
"&",
"exclude",
")",
")",
")",
"# empty include/exclude -> defaults to True",
"# three cases (we've already raised if both are empty)",
"# case 1: empty include, nonempty exclude",
"# we have True, True, ... True for include, same for exclude",
"# in the loop below we get the excluded",
"# and when we call '&' below we get only the excluded",
"# case 2: nonempty include, empty exclude",
"# same as case 1, but with include",
"# case 3: both nonempty",
"# the \"union\" of the logic of case 1 and case 2:",
"# we get the included and excluded, and return their logical and",
"include_these",
"=",
"Series",
"(",
"not",
"bool",
"(",
"include",
")",
",",
"index",
"=",
"self",
".",
"columns",
")",
"exclude_these",
"=",
"Series",
"(",
"not",
"bool",
"(",
"exclude",
")",
",",
"index",
"=",
"self",
".",
"columns",
")",
"def",
"is_dtype_instance_mapper",
"(",
"idx",
",",
"dtype",
")",
":",
"return",
"idx",
",",
"functools",
".",
"partial",
"(",
"issubclass",
",",
"dtype",
".",
"type",
")",
"for",
"idx",
",",
"f",
"in",
"itertools",
".",
"starmap",
"(",
"is_dtype_instance_mapper",
",",
"enumerate",
"(",
"self",
".",
"dtypes",
")",
")",
":",
"if",
"include",
":",
"# checks for the case of empty include or exclude",
"include_these",
".",
"iloc",
"[",
"idx",
"]",
"=",
"any",
"(",
"map",
"(",
"f",
",",
"include",
")",
")",
"if",
"exclude",
":",
"exclude_these",
".",
"iloc",
"[",
"idx",
"]",
"=",
"not",
"any",
"(",
"map",
"(",
"f",
",",
"exclude",
")",
")",
"dtype_indexer",
"=",
"include_these",
"&",
"exclude_these",
"return",
"self",
".",
"loc",
"[",
"_get_info_slice",
"(",
"self",
",",
"dtype_indexer",
")",
"]"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
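A brief sketch of the 'number' and 'object' selectors listed in the Notes, on a made-up frame:
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5], 'c': ['x', 'y']})
print(df.select_dtypes(include='number'))    # 'number' covers int and float here
print(df.select_dtypes(include='object'))    # strings live under the object dtype
print(df.select_dtypes(exclude='number'))    # complement of the numeric subset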
train | DataFrame._box_col_values | Provide boxed values for a column. | pandas/core/frame.py | def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True) | def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True) | [
"Provide",
"boxed",
"values",
"for",
"a",
"column",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3333-L3338 | [
"def",
"_box_col_values",
"(",
"self",
",",
"values",
",",
"items",
")",
":",
"klass",
"=",
"self",
".",
"_constructor_sliced",
"return",
"klass",
"(",
"values",
",",
"index",
"=",
"self",
".",
"index",
",",
"name",
"=",
"items",
",",
"fastpath",
"=",
"True",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
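"Boxing" here means wrapping a column's raw values in the sliced constructor; the effect is visible from ordinary column access (the frame is illustrative):
import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
s = df['a']                                  # column access goes through boxing
print(type(s).__name__, s.name, s.index.equals(df.index))  # Series a True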
train | DataFrame._ensure_valid_index | Ensure that if we don't have an index, we can create one from the
passed value. | pandas/core/frame.py | def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan) | def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan) | [
"Ensure",
"that",
"if",
"we",
"don",
"t",
"have",
"an",
"index",
"that",
"we",
"can",
"create",
"one",
"from",
"the",
"passed",
"value",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3400-L3415 | [
"def",
"_ensure_valid_index",
"(",
"self",
",",
"value",
")",
":",
"# GH5632, make sure that we are a Series convertible",
"if",
"not",
"len",
"(",
"self",
".",
"index",
")",
"and",
"is_list_like",
"(",
"value",
")",
":",
"try",
":",
"value",
"=",
"Series",
"(",
"value",
")",
"except",
"(",
"ValueError",
",",
"NotImplementedError",
",",
"TypeError",
")",
":",
"raise",
"ValueError",
"(",
"'Cannot set a frame with no defined index '",
"'and a value that cannot be converted to a '",
"'Series'",
")",
"self",
".",
"_data",
"=",
"self",
".",
"_data",
".",
"reindex_axis",
"(",
"value",
".",
"index",
".",
"copy",
"(",
")",
",",
"axis",
"=",
"1",
",",
"fill_value",
"=",
"np",
".",
"nan",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
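A sketch of the behavior this helper enables (GH5632): assigning a Series to an empty frame adopts that Series' index:
import pandas as pd

df = pd.DataFrame()                  # empty frame, no meaningful index yet
df['a'] = pd.Series([1, 2, 3])       # the assigned value's index is adopted
print(df.index)                      # RangeIndex(start=0, stop=3, step=1)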
train | DataFrame._set_item | Add series to DataFrame in specified column.
If the value is a numpy array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity. | pandas/core/frame.py | def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If the value is a numpy array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy() | def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If the value is a numpy array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy() | [
"Add",
"series",
"to",
"DataFrame",
"in",
"specified",
"column",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3417-L3436 | [
"def",
"_set_item",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"_ensure_valid_index",
"(",
"value",
")",
"value",
"=",
"self",
".",
"_sanitize_column",
"(",
"key",
",",
"value",
")",
"NDFrame",
".",
"_set_item",
"(",
"self",
",",
"key",
",",
"value",
")",
"# check if we are modifying a copy",
"# try to set first as we want an invalid",
"# value exception to occur first",
"if",
"len",
"(",
"self",
")",
":",
"self",
".",
"_check_setitem_copy",
"(",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
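An illustrative sketch of the conforming step `_set_item` performs via `_sanitize_column`, observed through the public assignment path; the labels are made up:

```python
import pandas as pd

# A Series assigned as a column is aligned to the frame's index;
# labels missing from the Series come back as NaN.
df = pd.DataFrame({'x': [1, 2, 3]}, index=['a', 'b', 'c'])
s = pd.Series([10, 30], index=['a', 'c'])  # no value for 'b'
df['y'] = s
print(df)
#    x     y
# a  1  10.0
# b  2   NaN
# c  3  30.0
```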
train | DataFrame.insert | Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : string, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional, default False | pandas/core/frame.py | def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : string, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional, default False
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates) | def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : string, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional, default False
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value,
allow_duplicates=allow_duplicates) | [
"Insert",
"column",
"into",
"DataFrame",
"at",
"specified",
"location",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3438-L3457 | [
"def",
"insert",
"(",
"self",
",",
"loc",
",",
"column",
",",
"value",
",",
"allow_duplicates",
"=",
"False",
")",
":",
"self",
".",
"_ensure_valid_index",
"(",
"value",
")",
"value",
"=",
"self",
".",
"_sanitize_column",
"(",
"column",
",",
"value",
",",
"broadcast",
"=",
"False",
")",
"self",
".",
"_data",
".",
"insert",
"(",
"loc",
",",
"column",
",",
"value",
",",
"allow_duplicates",
"=",
"allow_duplicates",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
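A short usage sketch for `insert`, with throwaway data; behavior is as documented above:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})

# loc must satisfy 0 <= loc <= len(df.columns); insert works in place.
df.insert(0, 'total', df['a'] + df['b'])
print(df.columns.tolist())  # ['total', 'a', 'b']

# Re-using an existing name raises ValueError unless explicitly allowed.
df.insert(1, 'a', df['a'], allow_duplicates=True)
print(df.columns.tolist())  # ['total', 'a', 'a', 'b']
```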
train | DataFrame.assign | r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, so you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15 | pandas/core/frame.py | def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, so you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5 and earlier
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data | def assign(self, **kwargs):
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
For Python 3.6 and above, later items in '\*\*kwargs' may refer to
newly created or modified columns in 'df'; items are computed and
assigned into 'df' in order. For Python 3.5 and below, the order of
keyword arguments is not specified, so you cannot refer to newly created
or modified columns. All items are computed first, and then assigned
in alphabetical order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained for Python 3.6 and later.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
In Python 3.6+, you can create multiple columns within the same assign
where one of the columns depends on another one defined within the same
assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
# >= 3.6 preserve order of kwargs
if PY36:
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
else:
# <= 3.5: do all calculations first...
results = OrderedDict()
for k, v in kwargs.items():
results[k] = com.apply_if_callable(v, data)
# <= 3.5 and earlier
results = sorted(results.items())
# ... and then assign
for k, v in results:
data[k] = v
return data | [
"r",
"Assign",
"new",
"columns",
"to",
"a",
"DataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3459-L3547 | [
"def",
"assign",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"self",
".",
"copy",
"(",
")",
"# >= 3.6 preserve order of kwargs",
"if",
"PY36",
":",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"data",
"[",
"k",
"]",
"=",
"com",
".",
"apply_if_callable",
"(",
"v",
",",
"data",
")",
"else",
":",
"# <= 3.5: do all calculations first...",
"results",
"=",
"OrderedDict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"results",
"[",
"k",
"]",
"=",
"com",
".",
"apply_if_callable",
"(",
"v",
",",
"data",
")",
"# <= 3.5 and earlier",
"results",
"=",
"sorted",
"(",
"results",
".",
"items",
"(",
")",
")",
"# ... and then assign",
"for",
"k",
",",
"v",
"in",
"results",
":",
"data",
"[",
"k",
"]",
"=",
"v",
"return",
"data"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
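An illustrative sketch of `assign`, assuming the Python 3.6+ kwarg ordering described in the Notes above:

```python
import pandas as pd

df = pd.DataFrame({'temp_c': [17.0, 25.0]}, index=['Portland', 'Berkeley'])

# assign returns a new frame; later kwargs may use columns created by
# earlier ones in the same call (Python 3.6+).
out = df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32,
                temp_k=lambda x: (x.temp_f + 459.67) * 5 / 9)
print(df.columns.tolist())   # ['temp_c'] -- the original is untouched
print(out.columns.tolist())  # ['temp_c', 'temp_f', 'temp_k']
```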
train | DataFrame._sanitize_column | Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray | pandas/core/frame.py | def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(
value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_type(value) or is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value)) | def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(
value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_type(value) or is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if (not self.columns.is_unique or
isinstance(self.columns, MultiIndex)):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value)) | [
"Ensures",
"new",
"columns",
"(",
"which",
"go",
"into",
"the",
"BlockManager",
"as",
"new",
"blocks",
")",
"are",
"always",
"copied",
"and",
"converted",
"into",
"an",
"array",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3549-L3652 | [
"def",
"_sanitize_column",
"(",
"self",
",",
"key",
",",
"value",
",",
"broadcast",
"=",
"True",
")",
":",
"def",
"reindexer",
"(",
"value",
")",
":",
"# reindex if necessary",
"if",
"value",
".",
"index",
".",
"equals",
"(",
"self",
".",
"index",
")",
"or",
"not",
"len",
"(",
"self",
".",
"index",
")",
":",
"value",
"=",
"value",
".",
"_values",
".",
"copy",
"(",
")",
"else",
":",
"# GH 4107",
"try",
":",
"value",
"=",
"value",
".",
"reindex",
"(",
"self",
".",
"index",
")",
".",
"_values",
"except",
"Exception",
"as",
"e",
":",
"# duplicate axis",
"if",
"not",
"value",
".",
"index",
".",
"is_unique",
":",
"raise",
"e",
"# other",
"raise",
"TypeError",
"(",
"'incompatible index of inserted column '",
"'with frame index'",
")",
"return",
"value",
"if",
"isinstance",
"(",
"value",
",",
"Series",
")",
":",
"value",
"=",
"reindexer",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"DataFrame",
")",
":",
"# align right-hand-side columns if self.columns",
"# is multi-index and self[key] is a sub-frame",
"if",
"isinstance",
"(",
"self",
".",
"columns",
",",
"MultiIndex",
")",
"and",
"key",
"in",
"self",
".",
"columns",
":",
"loc",
"=",
"self",
".",
"columns",
".",
"get_loc",
"(",
"key",
")",
"if",
"isinstance",
"(",
"loc",
",",
"(",
"slice",
",",
"Series",
",",
"np",
".",
"ndarray",
",",
"Index",
")",
")",
":",
"cols",
"=",
"maybe_droplevels",
"(",
"self",
".",
"columns",
"[",
"loc",
"]",
",",
"key",
")",
"if",
"len",
"(",
"cols",
")",
"and",
"not",
"cols",
".",
"equals",
"(",
"value",
".",
"columns",
")",
":",
"value",
"=",
"value",
".",
"reindex",
"(",
"cols",
",",
"axis",
"=",
"1",
")",
"# now align rows",
"value",
"=",
"reindexer",
"(",
"value",
")",
".",
"T",
"elif",
"isinstance",
"(",
"value",
",",
"ExtensionArray",
")",
":",
"# Explicitly copy here, instead of in sanitize_index,",
"# as sanitize_index won't copy an EA, even with copy=True",
"value",
"=",
"value",
".",
"copy",
"(",
")",
"value",
"=",
"sanitize_index",
"(",
"value",
",",
"self",
".",
"index",
",",
"copy",
"=",
"False",
")",
"elif",
"isinstance",
"(",
"value",
",",
"Index",
")",
"or",
"is_sequence",
"(",
"value",
")",
":",
"# turn me into an ndarray",
"value",
"=",
"sanitize_index",
"(",
"value",
",",
"self",
".",
"index",
",",
"copy",
"=",
"False",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"(",
"np",
".",
"ndarray",
",",
"Index",
")",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
"and",
"len",
"(",
"value",
")",
">",
"0",
":",
"value",
"=",
"maybe_convert_platform",
"(",
"value",
")",
"else",
":",
"value",
"=",
"com",
".",
"asarray_tuplesafe",
"(",
"value",
")",
"elif",
"value",
".",
"ndim",
"==",
"2",
":",
"value",
"=",
"value",
".",
"copy",
"(",
")",
".",
"T",
"elif",
"isinstance",
"(",
"value",
",",
"Index",
")",
":",
"value",
"=",
"value",
".",
"copy",
"(",
"deep",
"=",
"True",
")",
"else",
":",
"value",
"=",
"value",
".",
"copy",
"(",
")",
"# possibly infer to datetimelike",
"if",
"is_object_dtype",
"(",
"value",
".",
"dtype",
")",
":",
"value",
"=",
"maybe_infer_to_datetimelike",
"(",
"value",
")",
"else",
":",
"# cast ignores pandas dtypes. so save the dtype first",
"infer_dtype",
",",
"_",
"=",
"infer_dtype_from_scalar",
"(",
"value",
",",
"pandas_dtype",
"=",
"True",
")",
"# upcast",
"value",
"=",
"cast_scalar_to_array",
"(",
"len",
"(",
"self",
".",
"index",
")",
",",
"value",
")",
"value",
"=",
"maybe_cast_to_datetime",
"(",
"value",
",",
"infer_dtype",
")",
"# return internal types directly",
"if",
"is_extension_type",
"(",
"value",
")",
"or",
"is_extension_array_dtype",
"(",
"value",
")",
":",
"return",
"value",
"# broadcast across multiple columns if necessary",
"if",
"broadcast",
"and",
"key",
"in",
"self",
".",
"columns",
"and",
"value",
".",
"ndim",
"==",
"1",
":",
"if",
"(",
"not",
"self",
".",
"columns",
".",
"is_unique",
"or",
"isinstance",
"(",
"self",
".",
"columns",
",",
"MultiIndex",
")",
")",
":",
"existing_piece",
"=",
"self",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"existing_piece",
",",
"DataFrame",
")",
":",
"value",
"=",
"np",
".",
"tile",
"(",
"value",
",",
"(",
"len",
"(",
"existing_piece",
".",
"columns",
")",
",",
"1",
")",
")",
"return",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"asarray",
"(",
"value",
")",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
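A sketch of the broadcast branch at the end of `_sanitize_column`, observed through public assignment; the duplicate column labels are deliberately contrived:

```python
import numpy as np
import pandas as pd

# With duplicate column labels, a 1-D value is tiled so each occurrence
# of the key receives the same column (the np.tile branch above).
df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=['a', 'a'])
df['a'] = [10, 20, 30]
print(df)
#     a   a
# 0  10  10
# 1  20  20
# 2  30  30
```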
train | DataFrame.lookup | Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = [df.get_value(row, col)
for row, col in zip(row_labels, col_labels)]
Returns
-------
values : ndarray
The found values | pandas/core/frame.py | def lookup(self, row_labels, col_labels):
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = [df.get_value(row, col)
for row, col in zip(row_labels, col_labels)]
Returns
-------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result | def lookup(self, row_labels, col_labels):
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = [df.get_value(row, col)
for row, col in zip(row_labels, col_labels)]
Returns
-------
values : ndarray
The found values
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result | [
"Label",
"-",
"based",
"fancy",
"indexing",
"function",
"for",
"DataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3659-L3708 | [
"def",
"lookup",
"(",
"self",
",",
"row_labels",
",",
"col_labels",
")",
":",
"n",
"=",
"len",
"(",
"row_labels",
")",
"if",
"n",
"!=",
"len",
"(",
"col_labels",
")",
":",
"raise",
"ValueError",
"(",
"'Row labels must have same size as column labels'",
")",
"thresh",
"=",
"1000",
"if",
"not",
"self",
".",
"_is_mixed_type",
"or",
"n",
">",
"thresh",
":",
"values",
"=",
"self",
".",
"values",
"ridx",
"=",
"self",
".",
"index",
".",
"get_indexer",
"(",
"row_labels",
")",
"cidx",
"=",
"self",
".",
"columns",
".",
"get_indexer",
"(",
"col_labels",
")",
"if",
"(",
"ridx",
"==",
"-",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'One or more row labels was not found'",
")",
"if",
"(",
"cidx",
"==",
"-",
"1",
")",
".",
"any",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'One or more column labels was not found'",
")",
"flat_index",
"=",
"ridx",
"*",
"len",
"(",
"self",
".",
"columns",
")",
"+",
"cidx",
"result",
"=",
"values",
".",
"flat",
"[",
"flat_index",
"]",
"else",
":",
"result",
"=",
"np",
".",
"empty",
"(",
"n",
",",
"dtype",
"=",
"'O'",
")",
"for",
"i",
",",
"(",
"r",
",",
"c",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"row_labels",
",",
"col_labels",
")",
")",
":",
"result",
"[",
"i",
"]",
"=",
"self",
".",
"_get_value",
"(",
"r",
",",
"c",
")",
"if",
"is_object_dtype",
"(",
"result",
")",
":",
"result",
"=",
"lib",
".",
"maybe_convert_objects",
"(",
"result",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
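A usage sketch for `lookup` with made-up labels, assuming the pandas API at this snapshot's commit; note the element-wise pairing of row and column labels:

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
                  index=['x', 'y', 'z'])

# Pairs are (x, B) and (z, A); the result is a plain ndarray.
vals = df.lookup(['x', 'z'], ['B', 'A'])
print(vals)  # [4 3]
```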
train | DataFrame._reindex_multi | We are guaranteed non-Nones in the axes. | pandas/core/frame.py | def _reindex_multi(self, axes, copy, fill_value):
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value) | def _reindex_multi(self, axes, copy, fill_value):
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value) | [
"We",
"are",
"guaranteed",
"non",
"-",
"Nones",
"in",
"the",
"axes",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3747-L3765 | [
"def",
"_reindex_multi",
"(",
"self",
",",
"axes",
",",
"copy",
",",
"fill_value",
")",
":",
"new_index",
",",
"row_indexer",
"=",
"self",
".",
"index",
".",
"reindex",
"(",
"axes",
"[",
"'index'",
"]",
")",
"new_columns",
",",
"col_indexer",
"=",
"self",
".",
"columns",
".",
"reindex",
"(",
"axes",
"[",
"'columns'",
"]",
")",
"if",
"row_indexer",
"is",
"not",
"None",
"and",
"col_indexer",
"is",
"not",
"None",
":",
"indexer",
"=",
"row_indexer",
",",
"col_indexer",
"new_values",
"=",
"algorithms",
".",
"take_2d_multi",
"(",
"self",
".",
"values",
",",
"indexer",
",",
"fill_value",
"=",
"fill_value",
")",
"return",
"self",
".",
"_constructor",
"(",
"new_values",
",",
"index",
"=",
"new_index",
",",
"columns",
"=",
"new_columns",
")",
"else",
":",
"return",
"self",
".",
"_reindex_with_indexers",
"(",
"{",
"0",
":",
"[",
"new_index",
",",
"row_indexer",
"]",
",",
"1",
":",
"[",
"new_columns",
",",
"col_indexer",
"]",
"}",
",",
"copy",
"=",
"copy",
",",
"fill_value",
"=",
"fill_value",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
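An illustrative view of the two-axis case `_reindex_multi` handles, exercised via the public `reindex`; labels and fill value are made up:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(4).reshape(2, 2),
                  index=['r1', 'r2'], columns=['c1', 'c2'])

# Reindexing rows and columns together; unknown labels get fill_value.
out = df.reindex(index=['r1', 'r3'], columns=['c2', 'c9'], fill_value=0)
print(out)
#     c2  c9
# r1   1   0
# r3   0   0
```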
train | DataFrame.drop | Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
.. versionadded:: 0.21.0
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8 | pandas/core/frame.py | def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
.. versionadded:: 0.21.0
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(labels=labels, axis=axis, index=index,
columns=columns, level=level, inplace=inplace,
errors=errors) | def drop(self, labels=None, axis=0, index=None, columns=None,
level=None, inplace=False, errors='raise'):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
.. versionadded:: 0.21.0
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(labels=labels, axis=axis, index=index,
columns=columns, level=level, inplace=inplace,
errors=errors) | [
"Drop",
"specified",
"labels",
"from",
"rows",
"or",
"columns",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3800-L3926 | [
"def",
"drop",
"(",
"self",
",",
"labels",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"index",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"level",
"=",
"None",
",",
"inplace",
"=",
"False",
",",
"errors",
"=",
"'raise'",
")",
":",
"return",
"super",
"(",
")",
".",
"drop",
"(",
"labels",
"=",
"labels",
",",
"axis",
"=",
"axis",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"columns",
",",
"level",
"=",
"level",
",",
"inplace",
"=",
"inplace",
",",
"errors",
"=",
"errors",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
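A short sketch of the `errors` parameter of `drop`, using throwaway data:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(12).reshape(3, 4), columns=list('ABCD'))

# errors='raise' (the default) would fail on the unknown label 'Z';
# errors='ignore' drops what exists and skips the rest.
out = df.drop(columns=['B', 'Z'], errors='ignore')
print(out.columns.tolist())  # ['A', 'C', 'D']
```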
train | DataFrame.rename | Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : int or str
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame
DataFrame with the renamed axis labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6 | pandas/core/frame.py | def rename(self, *args, **kwargs):
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : int or str
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame
DataFrame with the renamed axis labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super().rename(**kwargs) | def rename(self, *args, **kwargs):
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : int or str
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame
DataFrame with the renamed axis labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
axes = validate_axis_style_args(self, args, kwargs, 'mapper', 'rename')
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop('axis', None)
kwargs.pop('mapper', None)
return super().rename(**kwargs) | [
"Alter",
"axes",
"labels",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L3932-L4035 | [
"def",
"rename",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"axes",
"=",
"validate_axis_style_args",
"(",
"self",
",",
"args",
",",
"kwargs",
",",
"'mapper'",
",",
"'rename'",
")",
"kwargs",
".",
"update",
"(",
"axes",
")",
"# Pop these, since the values are in `kwargs` under different names",
"kwargs",
".",
"pop",
"(",
"'axis'",
",",
"None",
")",
"kwargs",
".",
"pop",
"(",
"'mapper'",
",",
"None",
")",
"return",
"super",
"(",
")",
".",
"rename",
"(",
"*",
"*",
"kwargs",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
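A usage sketch for the keyword form of `rename`; the mappings are illustrative, and unknown keys are skipped under the default `errors='ignore'`:

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})

# Rename both axes in one call; 'Z' is absent and silently ignored.
out = df.rename(index={0: 'first'}, columns={'A': 'a', 'Z': 'z'})
print(out.index.tolist())    # ['first', 1]
print(out.columns.tolist())  # ['a', 'B']
```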
train | DataFrame.set_index | Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31 | pandas/core/frame.py | def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
err_msg = ('The parameter "keys" may be a column key, one-dimensional '
'array, or a list containing only valid column keys and '
'one-dimensional arrays.')
missing = []
for col in keys:
if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray,
list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, 'ndim', 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError:
raise TypeError(err_msg + ' Received column of '
'type {}'.format(type(col)))
else:
if not found:
missing.append(col)
if missing:
raise KeyError('None of {} are in the columns'.format(missing))
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError('Length mismatch: Expected {len_self} rows, '
'received array of length {len_col}'.format(
len_self=len(self),
len_col=len(arrays[-1])
))
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError('Index has duplicate keys: {dup}'.format(
dup=duplicates))
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame | def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
err_msg = ('The parameter "keys" may be a column key, one-dimensional '
'array, or a list containing only valid column keys and '
'one-dimensional arrays.')
missing = []
for col in keys:
if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray,
list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, 'ndim', 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError:
raise TypeError(err_msg + ' Received column of '
'type {}'.format(type(col)))
else:
if not found:
missing.append(col)
if missing:
raise KeyError('None of {} are in the columns'.format(missing))
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError('Length mismatch: Expected {len_self} rows, '
'received array of length {len_col}'.format(
len_self=len(self),
len_col=len(arrays[-1])
))
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError('Index has duplicate keys: {dup}'.format(
dup=duplicates))
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame | [
"Set",
"the",
"DataFrame",
"index",
"using",
"existing",
"columns",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4057-L4242 | [
"def",
"set_index",
"(",
"self",
",",
"keys",
",",
"drop",
"=",
"True",
",",
"append",
"=",
"False",
",",
"inplace",
"=",
"False",
",",
"verify_integrity",
"=",
"False",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"if",
"not",
"isinstance",
"(",
"keys",
",",
"list",
")",
":",
"keys",
"=",
"[",
"keys",
"]",
"err_msg",
"=",
"(",
"'The parameter \"keys\" may be a column key, one-dimensional '",
"'array, or a list containing only valid column keys and '",
"'one-dimensional arrays.'",
")",
"missing",
"=",
"[",
"]",
"for",
"col",
"in",
"keys",
":",
"if",
"isinstance",
"(",
"col",
",",
"(",
"ABCIndexClass",
",",
"ABCSeries",
",",
"np",
".",
"ndarray",
",",
"list",
",",
"abc",
".",
"Iterator",
")",
")",
":",
"# arrays are fine as long as they are one-dimensional",
"# iterators get converted to list below",
"if",
"getattr",
"(",
"col",
",",
"'ndim'",
",",
"1",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"err_msg",
")",
"else",
":",
"# everything else gets tried as a key; see GH 24969",
"try",
":",
"found",
"=",
"col",
"in",
"self",
".",
"columns",
"except",
"TypeError",
":",
"raise",
"TypeError",
"(",
"err_msg",
"+",
"' Received column of '",
"'type {}'",
".",
"format",
"(",
"type",
"(",
"col",
")",
")",
")",
"else",
":",
"if",
"not",
"found",
":",
"missing",
".",
"append",
"(",
"col",
")",
"if",
"missing",
":",
"raise",
"KeyError",
"(",
"'None of {} are in the columns'",
".",
"format",
"(",
"missing",
")",
")",
"if",
"inplace",
":",
"frame",
"=",
"self",
"else",
":",
"frame",
"=",
"self",
".",
"copy",
"(",
")",
"arrays",
"=",
"[",
"]",
"names",
"=",
"[",
"]",
"if",
"append",
":",
"names",
"=",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"index",
".",
"names",
"]",
"if",
"isinstance",
"(",
"self",
".",
"index",
",",
"ABCMultiIndex",
")",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"index",
".",
"nlevels",
")",
":",
"arrays",
".",
"append",
"(",
"self",
".",
"index",
".",
"_get_level_values",
"(",
"i",
")",
")",
"else",
":",
"arrays",
".",
"append",
"(",
"self",
".",
"index",
")",
"to_remove",
"=",
"[",
"]",
"for",
"col",
"in",
"keys",
":",
"if",
"isinstance",
"(",
"col",
",",
"ABCMultiIndex",
")",
":",
"for",
"n",
"in",
"range",
"(",
"col",
".",
"nlevels",
")",
":",
"arrays",
".",
"append",
"(",
"col",
".",
"_get_level_values",
"(",
"n",
")",
")",
"names",
".",
"extend",
"(",
"col",
".",
"names",
")",
"elif",
"isinstance",
"(",
"col",
",",
"(",
"ABCIndexClass",
",",
"ABCSeries",
")",
")",
":",
"# if Index then not MultiIndex (treated above)",
"arrays",
".",
"append",
"(",
"col",
")",
"names",
".",
"append",
"(",
"col",
".",
"name",
")",
"elif",
"isinstance",
"(",
"col",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
":",
"arrays",
".",
"append",
"(",
"col",
")",
"names",
".",
"append",
"(",
"None",
")",
"elif",
"isinstance",
"(",
"col",
",",
"abc",
".",
"Iterator",
")",
":",
"arrays",
".",
"append",
"(",
"list",
"(",
"col",
")",
")",
"names",
".",
"append",
"(",
"None",
")",
"# from here, col can only be a column label",
"else",
":",
"arrays",
".",
"append",
"(",
"frame",
"[",
"col",
"]",
".",
"_values",
")",
"names",
".",
"append",
"(",
"col",
")",
"if",
"drop",
":",
"to_remove",
".",
"append",
"(",
"col",
")",
"if",
"len",
"(",
"arrays",
"[",
"-",
"1",
"]",
")",
"!=",
"len",
"(",
"self",
")",
":",
"# check newest element against length of calling frame, since",
"# ensure_index_from_sequences would not raise for append=False.",
"raise",
"ValueError",
"(",
"'Length mismatch: Expected {len_self} rows, '",
"'received array of length {len_col}'",
".",
"format",
"(",
"len_self",
"=",
"len",
"(",
"self",
")",
",",
"len_col",
"=",
"len",
"(",
"arrays",
"[",
"-",
"1",
"]",
")",
")",
")",
"index",
"=",
"ensure_index_from_sequences",
"(",
"arrays",
",",
"names",
")",
"if",
"verify_integrity",
"and",
"not",
"index",
".",
"is_unique",
":",
"duplicates",
"=",
"index",
"[",
"index",
".",
"duplicated",
"(",
")",
"]",
".",
"unique",
"(",
")",
"raise",
"ValueError",
"(",
"'Index has duplicate keys: {dup}'",
".",
"format",
"(",
"dup",
"=",
"duplicates",
")",
")",
"# use set to handle duplicate column names gracefully in case of drop",
"for",
"c",
"in",
"set",
"(",
"to_remove",
")",
":",
"del",
"frame",
"[",
"c",
"]",
"# clear up memory usage",
"index",
".",
"_cleanup",
"(",
")",
"frame",
".",
"index",
"=",
"index",
"if",
"not",
"inplace",
":",
"return",
"frame"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
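As an aside to the `set_index` record above, here is a minimal usage sketch (not part of the dataset row; the frame contents are assumed for illustration) showing the label/array mixing and the `verify_integrity` check that the code implements:

```python
import pandas as pd

df = pd.DataFrame({'month': [1, 4, 7, 10],
                   'year': [2012, 2014, 2013, 2014],
                   'sale': [55, 40, 84, 31]})

# A column label and a same-length Index can be mixed; with drop=True
# (the default) 'year' leaves the columns once it becomes an index level.
indexed = df.set_index(['year', pd.Index(['a', 'b', 'c', 'd'])])
print(indexed.index.names)   # FrozenList(['year', None])

# verify_integrity=True raises ValueError when the new index has duplicates.
try:
    df.set_index('year', verify_integrity=True)
except ValueError as exc:
    print(exc)   # 2014 occurs twice, so the new index is not unique
```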
train | DataFrame.reset_index | Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump | pandas/core/frame.py | def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.codes)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj | def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.codes)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj | [
"Reset",
"the",
"index",
"or",
"a",
"level",
"of",
"it",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4244-L4476 | [
"def",
"reset_index",
"(",
"self",
",",
"level",
"=",
"None",
",",
"drop",
"=",
"False",
",",
"inplace",
"=",
"False",
",",
"col_level",
"=",
"0",
",",
"col_fill",
"=",
"''",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"if",
"inplace",
":",
"new_obj",
"=",
"self",
"else",
":",
"new_obj",
"=",
"self",
".",
"copy",
"(",
")",
"def",
"_maybe_casted_values",
"(",
"index",
",",
"labels",
"=",
"None",
")",
":",
"values",
"=",
"index",
".",
"_values",
"if",
"not",
"isinstance",
"(",
"index",
",",
"(",
"PeriodIndex",
",",
"DatetimeIndex",
")",
")",
":",
"if",
"values",
".",
"dtype",
"==",
"np",
".",
"object_",
":",
"values",
"=",
"lib",
".",
"maybe_convert_objects",
"(",
"values",
")",
"# if we have the labels, extract the values with a mask",
"if",
"labels",
"is",
"not",
"None",
":",
"mask",
"=",
"labels",
"==",
"-",
"1",
"# we can have situations where the whole mask is -1,",
"# meaning there is nothing found in labels, so make all nan's",
"if",
"mask",
".",
"all",
"(",
")",
":",
"values",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"mask",
")",
")",
"values",
".",
"fill",
"(",
"np",
".",
"nan",
")",
"else",
":",
"values",
"=",
"values",
".",
"take",
"(",
"labels",
")",
"# TODO(https://github.com/pandas-dev/pandas/issues/24206)",
"# Push this into maybe_upcast_putmask?",
"# We can't pass EAs there right now. Looks a bit",
"# complicated.",
"# So we unbox the ndarray_values, op, re-box.",
"values_type",
"=",
"type",
"(",
"values",
")",
"values_dtype",
"=",
"values",
".",
"dtype",
"if",
"issubclass",
"(",
"values_type",
",",
"DatetimeLikeArray",
")",
":",
"values",
"=",
"values",
".",
"_data",
"if",
"mask",
".",
"any",
"(",
")",
":",
"values",
",",
"changed",
"=",
"maybe_upcast_putmask",
"(",
"values",
",",
"mask",
",",
"np",
".",
"nan",
")",
"if",
"issubclass",
"(",
"values_type",
",",
"DatetimeLikeArray",
")",
":",
"values",
"=",
"values_type",
"(",
"values",
",",
"dtype",
"=",
"values_dtype",
")",
"return",
"values",
"new_index",
"=",
"ibase",
".",
"default_index",
"(",
"len",
"(",
"new_obj",
")",
")",
"if",
"level",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"level",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"level",
"=",
"[",
"level",
"]",
"level",
"=",
"[",
"self",
".",
"index",
".",
"_get_level_number",
"(",
"lev",
")",
"for",
"lev",
"in",
"level",
"]",
"if",
"len",
"(",
"level",
")",
"<",
"self",
".",
"index",
".",
"nlevels",
":",
"new_index",
"=",
"self",
".",
"index",
".",
"droplevel",
"(",
"level",
")",
"if",
"not",
"drop",
":",
"if",
"isinstance",
"(",
"self",
".",
"index",
",",
"MultiIndex",
")",
":",
"names",
"=",
"[",
"n",
"if",
"n",
"is",
"not",
"None",
"else",
"(",
"'level_%d'",
"%",
"i",
")",
"for",
"(",
"i",
",",
"n",
")",
"in",
"enumerate",
"(",
"self",
".",
"index",
".",
"names",
")",
"]",
"to_insert",
"=",
"lzip",
"(",
"self",
".",
"index",
".",
"levels",
",",
"self",
".",
"index",
".",
"codes",
")",
"else",
":",
"default",
"=",
"'index'",
"if",
"'index'",
"not",
"in",
"self",
"else",
"'level_0'",
"names",
"=",
"(",
"[",
"default",
"]",
"if",
"self",
".",
"index",
".",
"name",
"is",
"None",
"else",
"[",
"self",
".",
"index",
".",
"name",
"]",
")",
"to_insert",
"=",
"(",
"(",
"self",
".",
"index",
",",
"None",
")",
",",
")",
"multi_col",
"=",
"isinstance",
"(",
"self",
".",
"columns",
",",
"MultiIndex",
")",
"for",
"i",
",",
"(",
"lev",
",",
"lab",
")",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"to_insert",
")",
")",
")",
":",
"if",
"not",
"(",
"level",
"is",
"None",
"or",
"i",
"in",
"level",
")",
":",
"continue",
"name",
"=",
"names",
"[",
"i",
"]",
"if",
"multi_col",
":",
"col_name",
"=",
"(",
"list",
"(",
"name",
")",
"if",
"isinstance",
"(",
"name",
",",
"tuple",
")",
"else",
"[",
"name",
"]",
")",
"if",
"col_fill",
"is",
"None",
":",
"if",
"len",
"(",
"col_name",
")",
"not",
"in",
"(",
"1",
",",
"self",
".",
"columns",
".",
"nlevels",
")",
":",
"raise",
"ValueError",
"(",
"\"col_fill=None is incompatible \"",
"\"with incomplete column name \"",
"\"{}\"",
".",
"format",
"(",
"name",
")",
")",
"col_fill",
"=",
"col_name",
"[",
"0",
"]",
"lev_num",
"=",
"self",
".",
"columns",
".",
"_get_level_number",
"(",
"col_level",
")",
"name_lst",
"=",
"[",
"col_fill",
"]",
"*",
"lev_num",
"+",
"col_name",
"missing",
"=",
"self",
".",
"columns",
".",
"nlevels",
"-",
"len",
"(",
"name_lst",
")",
"name_lst",
"+=",
"[",
"col_fill",
"]",
"*",
"missing",
"name",
"=",
"tuple",
"(",
"name_lst",
")",
"# to ndarray and maybe infer different dtype",
"level_values",
"=",
"_maybe_casted_values",
"(",
"lev",
",",
"lab",
")",
"new_obj",
".",
"insert",
"(",
"0",
",",
"name",
",",
"level_values",
")",
"new_obj",
".",
"index",
"=",
"new_index",
"if",
"not",
"inplace",
":",
"return",
"new_obj"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
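A small illustrative sketch of the `level` and `drop` parameters documented in the `reset_index` record above (the MultiIndex data is an assumption, not taken from the record):

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples([('bird', 'falcon'), ('mammal', 'lion')],
                                names=['class', 'name'])
df = pd.DataFrame({'max_speed': [389.0, 80.5]}, index=idx)

# Move only the 'class' level back into the columns; 'name' stays as index.
print(df.reset_index(level='class'))

# drop=True discards the removed level instead of inserting it as a column.
print(df.reset_index(level='class', drop=True))
```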
train | DataFrame.dropna | Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25 | pandas/core/frame.py | def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
# GH20987
msg = ("supplying multiple axes to axis is deprecated and "
"will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result | def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. deprecated:: 0.23.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(axis, (tuple, list)):
# GH20987
msg = ("supplying multiple axes to axis is deprecated and "
"will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh, subset=subset,
axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result | [
"Remove",
"missing",
"values",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4497-L4644 | [
"def",
"dropna",
"(",
"self",
",",
"axis",
"=",
"0",
",",
"how",
"=",
"'any'",
",",
"thresh",
"=",
"None",
",",
"subset",
"=",
"None",
",",
"inplace",
"=",
"False",
")",
":",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"if",
"isinstance",
"(",
"axis",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"# GH20987",
"msg",
"=",
"(",
"\"supplying multiple axes to axis is deprecated and \"",
"\"will be removed in a future version.\"",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"2",
")",
"result",
"=",
"self",
"for",
"ax",
"in",
"axis",
":",
"result",
"=",
"result",
".",
"dropna",
"(",
"how",
"=",
"how",
",",
"thresh",
"=",
"thresh",
",",
"subset",
"=",
"subset",
",",
"axis",
"=",
"ax",
")",
"else",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"agg_axis",
"=",
"1",
"-",
"axis",
"agg_obj",
"=",
"self",
"if",
"subset",
"is",
"not",
"None",
":",
"ax",
"=",
"self",
".",
"_get_axis",
"(",
"agg_axis",
")",
"indices",
"=",
"ax",
".",
"get_indexer_for",
"(",
"subset",
")",
"check",
"=",
"indices",
"==",
"-",
"1",
"if",
"check",
".",
"any",
"(",
")",
":",
"raise",
"KeyError",
"(",
"list",
"(",
"np",
".",
"compress",
"(",
"check",
",",
"subset",
")",
")",
")",
"agg_obj",
"=",
"self",
".",
"take",
"(",
"indices",
",",
"axis",
"=",
"agg_axis",
")",
"count",
"=",
"agg_obj",
".",
"count",
"(",
"axis",
"=",
"agg_axis",
")",
"if",
"thresh",
"is",
"not",
"None",
":",
"mask",
"=",
"count",
">=",
"thresh",
"elif",
"how",
"==",
"'any'",
":",
"mask",
"=",
"count",
"==",
"len",
"(",
"agg_obj",
".",
"_get_axis",
"(",
"agg_axis",
")",
")",
"elif",
"how",
"==",
"'all'",
":",
"mask",
"=",
"count",
">",
"0",
"else",
":",
"if",
"how",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"'invalid how option: {h}'",
".",
"format",
"(",
"h",
"=",
"how",
")",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'must specify how or thresh'",
")",
"result",
"=",
"self",
".",
"loc",
"(",
"axis",
"=",
"axis",
")",
"[",
"mask",
"]",
"if",
"inplace",
":",
"self",
".",
"_update_inplace",
"(",
"result",
")",
"else",
":",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
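Note that the `dropna` implementation above checks `thresh` before `how`, so a non-None `thresh` takes precedence. A minimal sketch with assumed data:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan, np.nan],
                   'b': [2.0, 3.0, np.nan],
                   'c': [np.nan, np.nan, np.nan]})

print(df.dropna(thresh=2))      # keeps rows with >= 2 non-NA values (row 0)
print(df.dropna(how='all'))     # drops only row 2, which is entirely NA
print(df.dropna(subset=['b']))  # NA check limited to column 'b' (drops row 2)
```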
train | DataFrame.drop_duplicates | Return DataFrame with duplicate rows removed, optionally only
considering certain columns. Indexes, including time indexes,
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame | pandas/core/frame.py | def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns. Indexes, including time indexes,
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated] | def drop_duplicates(self, subset=None, keep='first', inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns. Indexes, including time indexes,
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, 'inplace')
duplicated = self.duplicated(subset, keep=keep)
if inplace:
inds, = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated] | [
"Return",
"DataFrame",
"with",
"duplicate",
"rows",
"removed",
"optionally",
"only",
"considering",
"certain",
"columns",
".",
"Indexes",
"including",
"time",
"indexes",
"are",
"ignored",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4646-L4679 | [
"def",
"drop_duplicates",
"(",
"self",
",",
"subset",
"=",
"None",
",",
"keep",
"=",
"'first'",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"self",
".",
"empty",
":",
"return",
"self",
".",
"copy",
"(",
")",
"inplace",
"=",
"validate_bool_kwarg",
"(",
"inplace",
",",
"'inplace'",
")",
"duplicated",
"=",
"self",
".",
"duplicated",
"(",
"subset",
",",
"keep",
"=",
"keep",
")",
"if",
"inplace",
":",
"inds",
",",
"=",
"(",
"-",
"duplicated",
")",
".",
"_ndarray_values",
".",
"nonzero",
"(",
")",
"new_data",
"=",
"self",
".",
"_data",
".",
"take",
"(",
"inds",
")",
"self",
".",
"_update_inplace",
"(",
"new_data",
")",
"else",
":",
"return",
"self",
"[",
"-",
"duplicated",
"]"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
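A small illustrative example for the `drop_duplicates` record above (the frame contents are assumed):

```python
import pandas as pd

df = pd.DataFrame({'brand': ['Yum', 'Yum', 'Indomie'],
                   'style': ['cup', 'cup', 'pack']})

print(df.drop_duplicates())                             # keeps first 'Yum'/'cup' row
print(df.drop_duplicates(subset='brand', keep='last'))  # dedupe on 'brand' only
```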
train | DataFrame.duplicated | Return boolean Series denoting duplicate rows, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series | pandas/core/frame.py | def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, str) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index) | def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the
first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the
last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8', copy=False), len(shape)
if subset is None:
subset = self.columns
elif (not np.iterable(subset) or
isinstance(subset, str) or
isinstance(subset, tuple) and subset in self.columns):
subset = subset,
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.iteritems()
if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index) | [
"Return",
"boolean",
"Series",
"denoting",
"duplicate",
"rows",
"optionally",
"only",
"considering",
"certain",
"columns",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4681-L4732 | [
"def",
"duplicated",
"(",
"self",
",",
"subset",
"=",
"None",
",",
"keep",
"=",
"'first'",
")",
":",
"from",
"pandas",
".",
"core",
".",
"sorting",
"import",
"get_group_index",
"from",
"pandas",
".",
"_libs",
".",
"hashtable",
"import",
"duplicated_int64",
",",
"_SIZE_HINT_LIMIT",
"if",
"self",
".",
"empty",
":",
"return",
"Series",
"(",
"dtype",
"=",
"bool",
")",
"def",
"f",
"(",
"vals",
")",
":",
"labels",
",",
"shape",
"=",
"algorithms",
".",
"factorize",
"(",
"vals",
",",
"size_hint",
"=",
"min",
"(",
"len",
"(",
"self",
")",
",",
"_SIZE_HINT_LIMIT",
")",
")",
"return",
"labels",
".",
"astype",
"(",
"'i8'",
",",
"copy",
"=",
"False",
")",
",",
"len",
"(",
"shape",
")",
"if",
"subset",
"is",
"None",
":",
"subset",
"=",
"self",
".",
"columns",
"elif",
"(",
"not",
"np",
".",
"iterable",
"(",
"subset",
")",
"or",
"isinstance",
"(",
"subset",
",",
"str",
")",
"or",
"isinstance",
"(",
"subset",
",",
"tuple",
")",
"and",
"subset",
"in",
"self",
".",
"columns",
")",
":",
"subset",
"=",
"subset",
",",
"# Verify all columns in subset exist in the queried dataframe",
"# Otherwise, raise a KeyError, same as if you try to __getitem__ with a",
"# key that doesn't exist.",
"diff",
"=",
"Index",
"(",
"subset",
")",
".",
"difference",
"(",
"self",
".",
"columns",
")",
"if",
"not",
"diff",
".",
"empty",
":",
"raise",
"KeyError",
"(",
"diff",
")",
"vals",
"=",
"(",
"col",
".",
"values",
"for",
"name",
",",
"col",
"in",
"self",
".",
"iteritems",
"(",
")",
"if",
"name",
"in",
"subset",
")",
"labels",
",",
"shape",
"=",
"map",
"(",
"list",
",",
"zip",
"(",
"*",
"map",
"(",
"f",
",",
"vals",
")",
")",
")",
"ids",
"=",
"get_group_index",
"(",
"labels",
",",
"shape",
",",
"sort",
"=",
"False",
",",
"xnull",
"=",
"False",
")",
"return",
"Series",
"(",
"duplicated_int64",
"(",
"ids",
",",
"keep",
")",
",",
"index",
"=",
"self",
".",
"index",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
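The three `keep` modes in the `duplicated` record above map directly onto the `duplicated_int64` call in its body; a tiny sketch with assumed data:

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 1, 2]})
print(df.duplicated())              # [False, True, False]: first kept
print(df.duplicated(keep='last'))   # [True, False, False]: last kept
print(df.duplicated(keep=False))    # [True, True, False]: all dupes flagged
```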
train | DataFrame.nlargest | Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN | pandas/core/frame.py | def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest() | def nlargest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nlargest() | [
"Return",
"the",
"first",
"n",
"rows",
"ordered",
"by",
"columns",
"in",
"descending",
"order",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4843-L4953 | [
"def",
"nlargest",
"(",
"self",
",",
"n",
",",
"columns",
",",
"keep",
"=",
"'first'",
")",
":",
"return",
"algorithms",
".",
"SelectNFrame",
"(",
"self",
",",
"n",
"=",
"n",
",",
"keep",
"=",
"keep",
",",
"columns",
"=",
"columns",
")",
".",
"nlargest",
"(",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | DataFrame.nsmallest | Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560, 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 11300 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Nauru 11300 182 NR
Anguilla 11300 311 AI | pandas/core/frame.py | def nsmallest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560, 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 11300 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Nauru 11300 182 NR
Anguilla 11300 311 AI
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest() | def nsmallest(self, n, columns, keep='first'):
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560, 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 11300 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Nauru 11300 182 NR
Anguilla 11300 311 AI
"""
return algorithms.SelectNFrame(self,
n=n,
keep=keep,
columns=columns).nsmallest() | [
"Return",
"the",
"first",
"n",
"rows",
"ordered",
"by",
"columns",
"in",
"ascending",
"order",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L4955-L5055 | [
"def",
"nsmallest",
"(",
"self",
",",
"n",
",",
"columns",
",",
"keep",
"=",
"'first'",
")",
":",
"return",
"algorithms",
".",
"SelectNFrame",
"(",
"self",
",",
"n",
"=",
"n",
",",
"keep",
"=",
"keep",
",",
"columns",
"=",
"columns",
")",
".",
"nsmallest",
"(",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
train | DataFrame.swaplevel | Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
DataFrame
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index. | pandas/core/frame.py | def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
DataFrame
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result | def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
DataFrame
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result | [
"Swap",
"levels",
"i",
"and",
"j",
"in",
"a",
"MultiIndex",
"on",
"a",
"particular",
"axis",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5057-L5082 | [
"def",
"swaplevel",
"(",
"self",
",",
"i",
"=",
"-",
"2",
",",
"j",
"=",
"-",
"1",
",",
"axis",
"=",
"0",
")",
":",
"result",
"=",
"self",
".",
"copy",
"(",
")",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"axis",
"==",
"0",
":",
"result",
".",
"index",
"=",
"result",
".",
"index",
".",
"swaplevel",
"(",
"i",
",",
"j",
")",
"else",
":",
"result",
".",
"columns",
"=",
"result",
".",
"columns",
".",
"swaplevel",
"(",
"i",
",",
"j",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
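The swaplevel record above carries no doctest, so a minimal usage sketch may help; the two-level index and values below are illustrative assumptions, not part of the record:
>>> import pandas as pd
>>> mi = pd.MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'x')])
>>> df = pd.DataFrame({'val': [1, 2, 3]}, index=mi)
>>> df
     val
a x    1
  y    2
b x    3
>>> df.swaplevel()  # defaults i=-2, j=-1: swap the two innermost levels
     val
x a    1
y a    2
x b    3
Passing ``axis=1`` would apply the same swap to a MultiIndex on the columns instead.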
train | DataFrame.reorder_levels | Rearrange index levels using input order. May not drop or
duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object) | pandas/core/frame.py | def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order. May not drop or
duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result | def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order. May not drop or
duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result | [
"Rearrange",
"index",
"levels",
"using",
"input",
"order",
".",
"May",
"not",
"drop",
"or",
"duplicate",
"levels",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5084-L5112 | [
"def",
"reorder_levels",
"(",
"self",
",",
"order",
",",
"axis",
"=",
"0",
")",
":",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"not",
"isinstance",
"(",
"self",
".",
"_get_axis",
"(",
"axis",
")",
",",
"MultiIndex",
")",
":",
"# pragma: no cover",
"raise",
"TypeError",
"(",
"'Can only reorder levels on a hierarchical axis.'",
")",
"result",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"axis",
"==",
"0",
":",
"result",
".",
"index",
"=",
"result",
".",
"index",
".",
"reorder_levels",
"(",
"order",
")",
"else",
":",
"result",
".",
"columns",
"=",
"result",
".",
"columns",
".",
"reorder_levels",
"(",
"order",
")",
"return",
"result"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
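reorder_levels generalizes swaplevel to an arbitrary permutation of levels; since the record ships without an example, here is an illustrative sketch reusing the hypothetical frame from the swaplevel note above:
>>> import pandas as pd
>>> mi = pd.MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'x')])
>>> df = pd.DataFrame({'val': [1, 2, 3]}, index=mi)
>>> df.reorder_levels([1, 0])  # for two levels this equals df.swaplevel()
     val
x a    1
y a    2
x b    3
Deeper indexes accept any permutation (e.g. ``[2, 0, 1]``), but every level must appear exactly once, since levels may not be dropped or duplicated.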
train | DataFrame.combine | Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and returns a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axes differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed-in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0 | pandas/core/frame.py | def combine(self, other, func, fill_value=None, overwrite=True):
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and returns a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axes differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed-in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
# try to promote series, which is all NaN, as other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns) | def combine(self, other, func, fill_value=None, overwrite=True):
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and returns a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axes differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed-in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
# try to promote series, which is all NaN, as other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index,
columns=new_columns) | [
"Perform",
"column",
"-",
"wise",
"combine",
"with",
"another",
"DataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5164-L5330 | [
"def",
"combine",
"(",
"self",
",",
"other",
",",
"func",
",",
"fill_value",
"=",
"None",
",",
"overwrite",
"=",
"True",
")",
":",
"other_idxlen",
"=",
"len",
"(",
"other",
".",
"index",
")",
"# save for compare",
"this",
",",
"other",
"=",
"self",
".",
"align",
"(",
"other",
",",
"copy",
"=",
"False",
")",
"new_index",
"=",
"this",
".",
"index",
"if",
"other",
".",
"empty",
"and",
"len",
"(",
"new_index",
")",
"==",
"len",
"(",
"self",
".",
"index",
")",
":",
"return",
"self",
".",
"copy",
"(",
")",
"if",
"self",
".",
"empty",
"and",
"len",
"(",
"other",
")",
"==",
"other_idxlen",
":",
"return",
"other",
".",
"copy",
"(",
")",
"# sorts if possible",
"new_columns",
"=",
"this",
".",
"columns",
".",
"union",
"(",
"other",
".",
"columns",
")",
"do_fill",
"=",
"fill_value",
"is",
"not",
"None",
"result",
"=",
"{",
"}",
"for",
"col",
"in",
"new_columns",
":",
"series",
"=",
"this",
"[",
"col",
"]",
"otherSeries",
"=",
"other",
"[",
"col",
"]",
"this_dtype",
"=",
"series",
".",
"dtype",
"other_dtype",
"=",
"otherSeries",
".",
"dtype",
"this_mask",
"=",
"isna",
"(",
"series",
")",
"other_mask",
"=",
"isna",
"(",
"otherSeries",
")",
"# don't overwrite columns unecessarily",
"# DO propagate if this column is not in the intersection",
"if",
"not",
"overwrite",
"and",
"other_mask",
".",
"all",
"(",
")",
":",
"result",
"[",
"col",
"]",
"=",
"this",
"[",
"col",
"]",
".",
"copy",
"(",
")",
"continue",
"if",
"do_fill",
":",
"series",
"=",
"series",
".",
"copy",
"(",
")",
"otherSeries",
"=",
"otherSeries",
".",
"copy",
"(",
")",
"series",
"[",
"this_mask",
"]",
"=",
"fill_value",
"otherSeries",
"[",
"other_mask",
"]",
"=",
"fill_value",
"if",
"col",
"not",
"in",
"self",
".",
"columns",
":",
"# If self DataFrame does not have col in other DataFrame,",
"# try to promote series, which is all NaN, as other_dtype.",
"new_dtype",
"=",
"other_dtype",
"try",
":",
"series",
"=",
"series",
".",
"astype",
"(",
"new_dtype",
",",
"copy",
"=",
"False",
")",
"except",
"ValueError",
":",
"# e.g. new_dtype is integer types",
"pass",
"else",
":",
"# if we have different dtypes, possibly promote",
"new_dtype",
"=",
"find_common_type",
"(",
"[",
"this_dtype",
",",
"other_dtype",
"]",
")",
"if",
"not",
"is_dtype_equal",
"(",
"this_dtype",
",",
"new_dtype",
")",
":",
"series",
"=",
"series",
".",
"astype",
"(",
"new_dtype",
")",
"if",
"not",
"is_dtype_equal",
"(",
"other_dtype",
",",
"new_dtype",
")",
":",
"otherSeries",
"=",
"otherSeries",
".",
"astype",
"(",
"new_dtype",
")",
"arr",
"=",
"func",
"(",
"series",
",",
"otherSeries",
")",
"arr",
"=",
"maybe_downcast_to_dtype",
"(",
"arr",
",",
"this_dtype",
")",
"result",
"[",
"col",
"]",
"=",
"arr",
"# convert_objects just in case",
"return",
"self",
".",
"_constructor",
"(",
"result",
",",
"index",
"=",
"new_index",
",",
"columns",
"=",
"new_columns",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
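One detail the combine docstring never demonstrates is the dtype handling visible in the code above: mismatched column dtypes are promoted with find_common_type before func runs, and maybe_downcast_to_dtype then restores the original dtype only when that is lossless. A small sketch under assumed data (the frames are not from the record):
>>> import numpy as np
>>> import pandas as pd
>>> df1 = pd.DataFrame({'A': [1, 2]})       # int64
>>> df2 = pd.DataFrame({'A': [0.5, 2.5]})   # float64
>>> df1.combine(df2, np.minimum)            # stays float64: 0.5 cannot round-trip to int64
     A
0  0.5
1  2.0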
train | DataFrame.combine_first | Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0 | pandas/core/frame.py | def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view('i8')
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False) | def combine_first(self, other):
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view('i8')
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False) | [
"Update",
"null",
"elements",
"with",
"value",
"in",
"the",
"same",
"location",
"in",
"other",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5332-L5406 | [
"def",
"combine_first",
"(",
"self",
",",
"other",
")",
":",
"import",
"pandas",
".",
"core",
".",
"computation",
".",
"expressions",
"as",
"expressions",
"def",
"extract_values",
"(",
"arr",
")",
":",
"# Does two things:",
"# 1. maybe gets the values from the Series / Index",
"# 2. convert datelike to i8",
"if",
"isinstance",
"(",
"arr",
",",
"(",
"ABCIndexClass",
",",
"ABCSeries",
")",
")",
":",
"arr",
"=",
"arr",
".",
"_values",
"if",
"needs_i8_conversion",
"(",
"arr",
")",
":",
"if",
"is_extension_array_dtype",
"(",
"arr",
".",
"dtype",
")",
":",
"arr",
"=",
"arr",
".",
"asi8",
"else",
":",
"arr",
"=",
"arr",
".",
"view",
"(",
"'i8'",
")",
"return",
"arr",
"def",
"combiner",
"(",
"x",
",",
"y",
")",
":",
"mask",
"=",
"isna",
"(",
"x",
")",
"if",
"isinstance",
"(",
"mask",
",",
"(",
"ABCIndexClass",
",",
"ABCSeries",
")",
")",
":",
"mask",
"=",
"mask",
".",
"_values",
"x_values",
"=",
"extract_values",
"(",
"x",
")",
"y_values",
"=",
"extract_values",
"(",
"y",
")",
"# If the column y in other DataFrame is not in first DataFrame,",
"# just return y_values.",
"if",
"y",
".",
"name",
"not",
"in",
"self",
".",
"columns",
":",
"return",
"y_values",
"return",
"expressions",
".",
"where",
"(",
"mask",
",",
"y_values",
",",
"x_values",
")",
"return",
"self",
".",
"combine",
"(",
"other",
",",
"combiner",
",",
"overwrite",
"=",
"False",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
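The extract_values helper's i8 conversion exists so that datetime-like columns can pass through the raw expressions.where call as plain integers, with maybe_downcast_to_dtype in combine restoring the datetime dtype afterwards. A sketch of the intended effect, with assumed data (not from the record):
>>> import pandas as pd
>>> df1 = pd.DataFrame({'ts': pd.to_datetime(['2018-01-01', None])})
>>> df2 = pd.DataFrame({'ts': pd.to_datetime(['2000-01-01', '2018-02-01'])})
>>> df1.combine_first(df2)['ts'].tolist()   # dtype stays datetime64[ns]
[Timestamp('2018-01-01 00:00:00'), Timestamp('2018-02-01 00:00:00')]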
train | DataFrame.update | Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0 | pandas/core/frame.py | def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ['ignore', 'raise']:
raise ValueError("The parameter errors must be either "
"'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if errors == 'raise':
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that) | def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ['ignore', 'raise']:
raise ValueError("The parameter errors must be either "
"'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if errors == 'raise':
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that) | [
"Modify",
"in",
"place",
"using",
"non",
"-",
"NA",
"values",
"from",
"another",
"DataFrame",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5410-L5558 | [
"def",
"update",
"(",
"self",
",",
"other",
",",
"join",
"=",
"'left'",
",",
"overwrite",
"=",
"True",
",",
"filter_func",
"=",
"None",
",",
"errors",
"=",
"'ignore'",
")",
":",
"import",
"pandas",
".",
"core",
".",
"computation",
".",
"expressions",
"as",
"expressions",
"# TODO: Support other joins",
"if",
"join",
"!=",
"'left'",
":",
"# pragma: no cover",
"raise",
"NotImplementedError",
"(",
"\"Only left join is supported\"",
")",
"if",
"errors",
"not",
"in",
"[",
"'ignore'",
",",
"'raise'",
"]",
":",
"raise",
"ValueError",
"(",
"\"The parameter errors must be either \"",
"\"'ignore' or 'raise'\"",
")",
"if",
"not",
"isinstance",
"(",
"other",
",",
"DataFrame",
")",
":",
"other",
"=",
"DataFrame",
"(",
"other",
")",
"other",
"=",
"other",
".",
"reindex_like",
"(",
"self",
")",
"for",
"col",
"in",
"self",
".",
"columns",
":",
"this",
"=",
"self",
"[",
"col",
"]",
".",
"_values",
"that",
"=",
"other",
"[",
"col",
"]",
".",
"_values",
"if",
"filter_func",
"is",
"not",
"None",
":",
"with",
"np",
".",
"errstate",
"(",
"all",
"=",
"'ignore'",
")",
":",
"mask",
"=",
"~",
"filter_func",
"(",
"this",
")",
"|",
"isna",
"(",
"that",
")",
"else",
":",
"if",
"errors",
"==",
"'raise'",
":",
"mask_this",
"=",
"notna",
"(",
"that",
")",
"mask_that",
"=",
"notna",
"(",
"this",
")",
"if",
"any",
"(",
"mask_this",
"&",
"mask_that",
")",
":",
"raise",
"ValueError",
"(",
"\"Data overlaps.\"",
")",
"if",
"overwrite",
":",
"mask",
"=",
"isna",
"(",
"that",
")",
"else",
":",
"mask",
"=",
"notna",
"(",
"this",
")",
"# don't overwrite columns unecessarily",
"if",
"mask",
".",
"all",
"(",
")",
":",
"continue",
"self",
"[",
"col",
"]",
"=",
"expressions",
".",
"where",
"(",
"mask",
",",
"this",
",",
"that",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
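Two update parameters that the docstring documents but never demonstrates are filter_func and errors='raise'; an illustrative sketch with assumed frames:
>>> import pandas as pd
>>> df = pd.DataFrame({'A': [1, 2, 3]})
>>> df.update(pd.DataFrame({'A': [10, 20, 30]}),
...           filter_func=lambda arr: arr < 3)  # update only where True and other is non-NA
>>> df
    A
0  10
1  20
2   3
>>> df.update(pd.DataFrame({'A': [9, 9, 9]}), errors='raise')
Traceback (most recent call last):
...
ValueError: Data overlaps.
The overlap check mirrors the code path above: with errors='raise', any position that is non-NA in both frames triggers the ValueError, even when the two values happen to be equal.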
train | DataFrame.stack | Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) are taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN | pandas/core/frame.py | def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) are taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna) | def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) are taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna) | [
"Stack",
"the",
"prescribed",
"level",
"(",
"s",
")",
"from",
"columns",
"to",
"index",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5808-L5976 | [
"def",
"stack",
"(",
"self",
",",
"level",
"=",
"-",
"1",
",",
"dropna",
"=",
"True",
")",
":",
"from",
"pandas",
".",
"core",
".",
"reshape",
".",
"reshape",
"import",
"stack",
",",
"stack_multiple",
"if",
"isinstance",
"(",
"level",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"stack_multiple",
"(",
"self",
",",
"level",
",",
"dropna",
"=",
"dropna",
")",
"else",
":",
"return",
"stack",
"(",
"self",
",",
"level",
",",
"dropna",
"=",
"dropna",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
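A minimal usage sketch for the `stack` row above (not part of the dataset; the level names `'attr'` and `'unit'` are invented for illustration). It exercises the two `level` forms the docstring permits but never shows — a `str` level name, and a list of levels, which the function body above dispatches to `stack_multiple`:

```python
import pandas as pd

# Columns with named levels so the level to stack can be passed by name,
# which the docstring allows (level may be a str) but does not demonstrate.
cols = pd.MultiIndex.from_tuples(
    [('weight', 'kg'), ('height', 'm')], names=['attr', 'unit'])
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
                  index=['cat', 'dog'], columns=cols)

# Equivalent to df.stack(1): the inner 'unit' level pivots onto the index.
by_name = df.stack('unit')

# A list of levels routes through stack_multiple (see the isinstance check
# in the function body) and collapses the frame all the way to a Series.
as_series = df.stack(['attr', 'unit'])
```

Note that the new inner index levels come back sorted, as the docstring warns, so a stack/unstack round trip may reorder columns relative to the original frame.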
train | DataFrame.unstack | Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64 | pandas/core/frame.py | def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value) | def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value) | [
"Pivot",
"a",
"level",
"of",
"the",
"(",
"necessarily",
"hierarchical",
")",
"index",
"labels",
"returning",
"a",
"DataFrame",
"having",
"a",
"new",
"level",
"of",
"column",
"labels",
"whose",
"inner",
"-",
"most",
"level",
"consists",
"of",
"the",
"pivoted",
"index",
"labels",
"."
] | pandas-dev/pandas | python | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L5978-L6039 | [
"def",
"unstack",
"(",
"self",
",",
"level",
"=",
"-",
"1",
",",
"fill_value",
"=",
"None",
")",
":",
"from",
"pandas",
".",
"core",
".",
"reshape",
".",
"reshape",
"import",
"unstack",
"return",
"unstack",
"(",
"self",
",",
"level",
",",
"fill_value",
")"
] | 9feb3ad92cc0397a04b665803a49299ee7aa1037 |
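A minimal sketch for the `unstack` row above (not part of the dataset), assuming pandas >= 0.18.0 for the `fill_value` keyword, which the docstring documents but does not demonstrate:

```python
import pandas as pd

# A MultiIndex Series missing one label combination: ('two', 'b').
index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a')])
s = pd.Series([1.0, 2.0, 3.0], index=index)

# Plain unstack leaves a NaN hole for the absent ('two', 'b') cell ...
wide = s.unstack()

# ... while fill_value (added in 0.18.0) patches the hole instead of
# propagating NaN into the pivoted result.
patched = s.unstack(fill_value=0.0)
```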