| column | type | range |
|---|---|---|
| id | int32 | 0–252k |
| repo | string | lengths 7–55 |
| path | string | lengths 4–127 |
| func_name | string | lengths 1–88 |
| original_string | string | lengths 75–19.8k |
| language | string | 1 unique value (python) |
| code | string | lengths 75–19.8k |
| code_tokens | list | tokenized code |
| docstring | string | lengths 3–17.3k |
| docstring_tokens | list | tokenized docstring |
| sha | string | length 40 |
| url | string | lengths 87–242 |

---
id: 19,700 | repo: pandas-dev/pandas | path: pandas/core/frame.py | func_name: DataFrame.round | language: python
original_string:
def round(self, decimals=0, *args, **kwargs):
    """
    Round a DataFrame to a variable number of decimal places.

    Parameters
    ----------
    decimals : int, dict, Series
        Number of decimal places to round each column to. If an int is
        given, round each column to the same number of places.
        Otherwise dict and Series round to variable numbers of places.
        Column names should be in the keys if `decimals` is a
        dict-like, or in the index if `decimals` is a Series. Any
        columns not included in `decimals` will be left as is. Elements
        of `decimals` which are not columns of the input will be
        ignored.
    *args
        Additional keywords have no effect but might be accepted for
        compatibility with numpy.
    **kwargs
        Additional keywords have no effect but might be accepted for
        compatibility with numpy.

    Returns
    -------
    DataFrame
        A DataFrame with the affected columns rounded to the specified
        number of decimal places.

    See Also
    --------
    numpy.around : Round a numpy array to the given number of decimals.
    Series.round : Round a Series to the given number of decimals.

    Examples
    --------
    >>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
    ...                   columns=['dogs', 'cats'])
    >>> df
       dogs  cats
    0  0.21  0.32
    1  0.01  0.67
    2  0.66  0.03
    3  0.21  0.18

    By providing an integer each column is rounded to the same number
    of decimal places

    >>> df.round(1)
       dogs  cats
    0   0.2   0.3
    1   0.0   0.7
    2   0.7   0.0
    3   0.2   0.2

    With a dict, the number of places for specific columns can be
    specified with the column names as key and the number of decimal
    places as value

    >>> df.round({'dogs': 1, 'cats': 0})
       dogs  cats
    0   0.2   0.0
    1   0.0   1.0
    2   0.7   0.0
    3   0.2   0.0

    Using a Series, the number of places for specific columns can be
    specified with the column names as index and the number of
    decimal places as value

    >>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
    >>> df.round(decimals)
       dogs  cats
    0   0.2   0.0
    1   0.0   1.0
    2   0.7   0.0
    3   0.2   0.0
    """
    from pandas.core.reshape.concat import concat

    def _dict_round(df, decimals):
        for col, vals in df.iteritems():
            try:
                yield _series_round(vals, decimals[col])
            except KeyError:
                yield vals

    def _series_round(s, decimals):
        if is_integer_dtype(s) or is_float_dtype(s):
            return s.round(decimals)
        return s

    nv.validate_round(args, kwargs)

    if isinstance(decimals, (dict, Series)):
        if isinstance(decimals, Series):
            if not decimals.index.is_unique:
                raise ValueError("Index of decimals must be unique")
        new_cols = [col for col in _dict_round(self, decimals)]
    elif is_integer(decimals):
        # Dispatch to Series.round
        new_cols = [_series_round(v, decimals)
                    for _, v in self.iteritems()]
    else:
        raise TypeError("decimals must be an integer, a dict-like or a "
                        "Series")

    if len(new_cols) > 0:
        return self._constructor(concat(new_cols, axis=1),
                                 index=self.index,
                                 columns=self.columns)
    else:
        return self

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L6917-L7028

---
id: 19,701 | repo: pandas-dev/pandas | path: pandas/core/frame.py | func_name: DataFrame.corrwith | language: python
original_string:
def corrwith(self, other, axis=0, drop=False, method='pearson'):
    """
    Compute pairwise correlation between rows or columns of DataFrame
    with rows or columns of Series or DataFrame. DataFrames are first
    aligned along both axes before computing the correlations.

    Parameters
    ----------
    other : DataFrame, Series
        Object with which to compute correlations.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        0 or 'index' to compute column-wise, 1 or 'columns' for row-wise.
    drop : bool, default False
        Drop missing indices from result.
    method : {'pearson', 'kendall', 'spearman'} or callable
        * pearson : standard correlation coefficient
        * kendall : Kendall Tau correlation coefficient
        * spearman : Spearman rank correlation
        * callable: callable with input two 1d ndarrays
          and returning a float

        .. versionadded:: 0.24.0

    Returns
    -------
    Series
        Pairwise correlations.

    See Also
    --------
    DataFrame.corr
    """
    axis = self._get_axis_number(axis)
    this = self._get_numeric_data()

    if isinstance(other, Series):
        return this.apply(lambda x: other.corr(x, method=method),
                          axis=axis)

    other = other._get_numeric_data()
    left, right = this.align(other, join='inner', copy=False)

    if axis == 1:
        left = left.T
        right = right.T

    if method == 'pearson':
        # mask missing values
        left = left + right * 0
        right = right + left * 0

        # demeaned data
        ldem = left - left.mean()
        rdem = right - right.mean()

        num = (ldem * rdem).sum()
        dom = (left.count() - 1) * left.std() * right.std()

        correl = num / dom

    elif method in ['kendall', 'spearman'] or callable(method):
        def c(x):
            return nanops.nancorr(x[0], x[1], method=method)

        correl = Series(map(c,
                            zip(left.values.T, right.values.T)),
                        index=left.columns)

    else:
        raise ValueError("Invalid method {method} was passed, "
                         "valid methods are: 'pearson', 'kendall', "
                         "'spearman', or callable".
                         format(method=method))

    if not drop:
        # Find non-matching labels along the given axis
        # and append missing correlations (GH 22375)
        raxis = 1 if axis == 0 else 0
        result_index = (this._get_axis(raxis).
                        union(other._get_axis(raxis)))
        idx_diff = result_index.difference(correl.index)

        if len(idx_diff) > 0:
            correl = correl.append(Series([np.nan] * len(idx_diff),
                                          index=idx_diff))

    return correl

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7228-L7314
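
The corrwith docstring above has no Examples section; the sketch below (frame contents invented for illustration, not from the source) shows the alignment and default `drop=False` behavior it describes:

```python
import pandas as pd

# Two frames share only the column 'a'; 'b' and 'c' do not align.
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]})
df2 = pd.DataFrame({'a': [1, 2, 3, 4], 'c': [1, 1, 2, 2]})

# Column-wise Pearson correlation over the aligned labels; with the
# default drop=False, non-matching labels come back as NaN.
print(df1.corrwith(df2))
# a    1.0
# b    NaN
# c    NaN
# dtype: float64
```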

---
id: 19,702 | repo: pandas-dev/pandas | path: pandas/core/frame.py | func_name: DataFrame.count | language: python
original_string:
def count(self, axis=0, level=None, numeric_only=False):
    """
    Count non-NA cells for each column or row.

    The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
    on `pandas.options.mode.use_inf_as_na`) are considered NA.

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        If 0 or 'index' counts are generated for each column.
        If 1 or 'columns' counts are generated for each **row**.
    level : int or str, optional
        If the axis is a `MultiIndex` (hierarchical), count along a
        particular `level`, collapsing into a `DataFrame`.
        A `str` specifies the level name.
    numeric_only : bool, default False
        Include only `float`, `int` or `boolean` data.

    Returns
    -------
    Series or DataFrame
        For each column/row the number of non-NA/null entries.
        If `level` is specified returns a `DataFrame`.

    See Also
    --------
    Series.count: Number of non-NA elements in a Series.
    DataFrame.shape: Number of DataFrame rows and columns (including NA
        elements).
    DataFrame.isna: Boolean same-sized DataFrame showing places of NA
        elements.

    Examples
    --------
    Constructing DataFrame from a dictionary:

    >>> df = pd.DataFrame({"Person":
    ...                    ["John", "Myla", "Lewis", "John", "Myla"],
    ...                    "Age": [24., np.nan, 21., 33, 26],
    ...                    "Single": [False, True, True, True, False]})
    >>> df
      Person   Age  Single
    0   John  24.0   False
    1   Myla   NaN    True
    2  Lewis  21.0    True
    3   John  33.0    True
    4   Myla  26.0   False

    Notice the uncounted NA values:

    >>> df.count()
    Person    5
    Age       4
    Single    5
    dtype: int64

    Counts for each **row**:

    >>> df.count(axis='columns')
    0    3
    1    2
    2    3
    3    3
    4    3
    dtype: int64

    Counts for one level of a `MultiIndex`:

    >>> df.set_index(["Person", "Single"]).count(level="Person")
            Age
    Person
    John      2
    Lewis     1
    Myla      1
    """
    axis = self._get_axis_number(axis)
    if level is not None:
        return self._count_level(level, axis=axis,
                                 numeric_only=numeric_only)

    if numeric_only:
        frame = self._get_numeric_data()
    else:
        frame = self

    # GH #423
    if len(frame._get_axis(axis)) == 0:
        result = Series(0, index=frame._get_agg_axis(axis))
    else:
        if frame._is_mixed_type or frame._data.any_extension_types:
            # the or any_extension_types is really only hit for single-
            # column frames with an extension array
            result = notna(frame).sum(axis=axis)
        else:
            # GH13407
            series_counts = notna(frame).sum(axis=axis)
            counts = series_counts.values
            result = Series(counts, index=frame._get_agg_axis(axis))

    return result.astype('int64')

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7319-L7419

---
id: 19,703 | repo: pandas-dev/pandas | path: pandas/core/frame.py | func_name: DataFrame.nunique | language: python
original_string:
def nunique(self, axis=0, dropna=True):
    """
    Count distinct observations over requested axis.

    Return Series with number of distinct observations. Can ignore NaN
    values.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    axis : {0 or 'index', 1 or 'columns'}, default 0
        The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
        column-wise.
    dropna : bool, default True
        Don't include NaN in the counts.

    Returns
    -------
    Series

    See Also
    --------
    Series.nunique: Method nunique for Series.
    DataFrame.count: Count non-NA cells for each column or row.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
    >>> df.nunique()
    A    3
    B    1
    dtype: int64

    >>> df.nunique(axis=1)
    0    1
    1    2
    2    2
    dtype: int64
    """
    return self.apply(Series.nunique, axis=axis, dropna=dropna)

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7565-L7605

---
id: 19,704 | repo: pandas-dev/pandas | path: pandas/core/frame.py | func_name: DataFrame._get_agg_axis | language: python
original_string:
def _get_agg_axis(self, axis_num):
    """
    Let's be explicit about this.
    """
    if axis_num == 0:
        return self.columns
    elif axis_num == 1:
        return self.index
    else:
        raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7681-L7690
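
_get_agg_axis is internal and has only a one-line docstring; the sketch below uses public reductions to show the convention it encodes (tying the result labels to _get_agg_axis is an inference from the code above, not stated in the source):

```python
import pandas as pd

df = pd.DataFrame({'x': [1, 2], 'y': [3, 4]})

# Reducing along axis 0 labels the result by df.columns; along axis 1,
# by df.index. These are the two values _get_agg_axis returns.
print(df.sum(axis=0).index.tolist())  # ['x', 'y']
print(df.sum(axis=1).index.tolist())  # [0, 1]
```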

---
id: 19,705 | repo: pandas-dev/pandas | path: pandas/core/frame.py | func_name: DataFrame.quantile | language: python
original_string:
def quantile(self, q=0.5, axis=0, numeric_only=True,
             interpolation='linear'):
    """
    Return values at the given quantile over requested axis.

    Parameters
    ----------
    q : float or array-like, default 0.5 (50% quantile)
        Value between 0 <= q <= 1, the quantile(s) to compute.
    axis : {0, 1, 'index', 'columns'} (default 0)
        Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
    numeric_only : bool, default True
        If False, the quantile of datetime and timedelta data will be
        computed as well.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:

        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.

        .. versionadded:: 0.18.0

    Returns
    -------
    Series or DataFrame
        If ``q`` is an array, a DataFrame will be returned where the
        index is ``q``, the columns are the columns of self, and the
        values are the quantiles.
        If ``q`` is a float, a Series will be returned where the
        index is the columns of self and the values are the quantiles.

    See Also
    --------
    core.window.Rolling.quantile: Rolling quantile.
    numpy.percentile: Numpy function to compute the percentile.

    Examples
    --------
    >>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
    ...                   columns=['a', 'b'])
    >>> df.quantile(.1)
    a    1.3
    b    3.7
    Name: 0.1, dtype: float64
    >>> df.quantile([.1, .5])
           a     b
    0.1  1.3   3.7
    0.5  2.5  55.0

    Specifying `numeric_only=False` will also compute the quantile of
    datetime and timedelta data.

    >>> df = pd.DataFrame({'A': [1, 2],
    ...                    'B': [pd.Timestamp('2010'),
    ...                          pd.Timestamp('2011')],
    ...                    'C': [pd.Timedelta('1 days'),
    ...                          pd.Timedelta('2 days')]})
    >>> df.quantile(0.5, numeric_only=False)
    A                    1.5
    B    2010-07-02 12:00:00
    C        1 days 12:00:00
    Name: 0.5, dtype: object
    """
    self._check_percentile(q)

    data = self._get_numeric_data() if numeric_only else self
    axis = self._get_axis_number(axis)
    is_transposed = axis == 1

    if is_transposed:
        data = data.T

    result = data._data.quantile(qs=q,
                                 axis=1,
                                 interpolation=interpolation,
                                 transposed=is_transposed)

    if result.ndim == 2:
        result = self._constructor(result)
    else:
        result = self._constructor_sliced(result, name=q)

    if is_transposed:
        result = result.T

    return result

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7778-L7869

---
id: 19,706 | repo: pandas-dev/pandas | path: pandas/core/frame.py | func_name: DataFrame.isin | language: python
original_string:
def isin(self, values):
    """
    Whether each element in the DataFrame is contained in values.

    Parameters
    ----------
    values : iterable, Series, DataFrame or dict
        The result will only be true at a location if all the
        labels match. If `values` is a Series, that's the index. If
        `values` is a dict, the keys must be the column names,
        which must match. If `values` is a DataFrame,
        then both the index and column labels must match.

    Returns
    -------
    DataFrame
        DataFrame of booleans showing whether each element in the DataFrame
        is contained in values.

    See Also
    --------
    DataFrame.eq: Equality test for DataFrame.
    Series.isin: Equivalent method on Series.
    Series.str.contains: Test if pattern or regex is contained within a
        string of a Series or Index.

    Examples
    --------
    >>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
    ...                   index=['falcon', 'dog'])
    >>> df
            num_legs  num_wings
    falcon         2          2
    dog            4          0

    When ``values`` is a list, check whether every value in the DataFrame
    is present in the list (which animals have 0 or 2 legs or wings):

    >>> df.isin([0, 2])
            num_legs  num_wings
    falcon      True       True
    dog        False       True

    When ``values`` is a dict, we can pass values to check for each
    column separately:

    >>> df.isin({'num_wings': [0, 3]})
            num_legs  num_wings
    falcon     False      False
    dog        False       True

    When ``values`` is a Series or DataFrame the index and column must
    match. Note that 'falcon' does not match based on the number of legs
    in ``other``.

    >>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
    ...                      index=['spider', 'falcon'])
    >>> df.isin(other)
            num_legs  num_wings
    falcon      True       True
    dog        False      False
    """
    if isinstance(values, dict):
        from pandas.core.reshape.concat import concat

        values = collections.defaultdict(list, values)
        return concat((self.iloc[:, [i]].isin(values[col])
                       for i, col in enumerate(self.columns)), axis=1)
    elif isinstance(values, Series):
        if not values.index.is_unique:
            raise ValueError("cannot compute isin with "
                             "a duplicate axis.")
        return self.eq(values.reindex_like(self), axis='index')
    elif isinstance(values, DataFrame):
        if not (values.columns.is_unique and values.index.is_unique):
            raise ValueError("cannot compute isin with "
                             "a duplicate axis.")
        return self.eq(values.reindex_like(self))
    else:
        if not is_list_like(values):
            raise TypeError("only list-like or dict-like objects are "
                            "allowed to be passed to DataFrame.isin(), "
                            "you passed a "
                            "{0!r}".format(type(values).__name__))
        return DataFrame(
            algorithms.isin(self.values.ravel(),
                            values).reshape(self.shape), self.index,
            self.columns)

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/frame.py#L7939-L8026

---
id: 19,707 | repo: pandas-dev/pandas | path: pandas/core/arrays/integer.py | func_name: integer_array | language: python
original_string:
def integer_array(values, dtype=None, copy=False):
    """
    Infer and return an integer array of the values.

    Parameters
    ----------
    values : 1D list-like
    dtype : dtype, optional
        dtype to coerce
    copy : boolean, default False

    Returns
    -------
    IntegerArray

    Raises
    ------
    TypeError if incompatible types
    """
    values, mask = coerce_to_array(values, dtype=dtype, copy=copy)
    return IntegerArray(values, mask)

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L92-L112
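
integer_array has no Examples section; a minimal usage sketch, assuming the internal import path given in this entry's `path` field:

```python
from pandas.core.arrays.integer import integer_array

# Integral floats coerce safely; None becomes a masked NA slot.
arr = integer_array([1.0, 2.0, None], dtype='Int64')
print(arr.dtype)  # Int64
print(list(arr))  # [1, 2, nan] -- the masked slot reads back as NA
```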

---
id: 19,708 | repo: pandas-dev/pandas | path: pandas/core/arrays/integer.py | func_name: safe_cast | language: python
original_string:
def safe_cast(values, dtype, copy):
    """
    Safely cast the values to the dtype if they
    are equivalent, meaning floats must be equivalent to the
    ints.
    """
    try:
        return values.astype(dtype, casting='safe', copy=copy)
    except TypeError:
        casted = values.astype(dtype, copy=copy)
        if (casted == values).all():
            return casted

        raise TypeError("cannot safely cast non-equivalent {} to {}".format(
            values.dtype, np.dtype(dtype)))

sha: 9feb3ad92cc0397a04b665803a49299ee7aa1037
url: https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L115-L132
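
A brief sketch of both paths through safe_cast (same import-path assumption as above):

```python
import numpy as np
from pandas.core.arrays.integer import safe_cast

# Exactly-integral floats survive the equivalence check.
print(safe_cast(np.array([1.0, 2.0]), np.int64, copy=False))  # [1 2]

# A lossy cast fails the check and raises.
try:
    safe_cast(np.array([1.5]), np.int64, copy=False)
except TypeError as exc:
    print(exc)  # cannot safely cast non-equivalent float64 to int64
```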

---
id: 19,709 | repo: pandas-dev/pandas | path: pandas/core/arrays/integer.py | func_name: coerce_to_array | language: python
original_string:
def coerce_to_array(values, dtype, mask=None, copy=False):
    """
    Coerce the input values array to numpy arrays with a mask.

    Parameters
    ----------
    values : 1D list-like
    dtype : integer dtype
    mask : boolean 1D array, optional
    copy : boolean, default False
        if True, copy the input

    Returns
    -------
    tuple of (values, mask)
    """
    # if values is an integer numpy array, preserve its dtype
    if dtype is None and hasattr(values, 'dtype'):
        if is_integer_dtype(values.dtype):
            dtype = values.dtype

    if dtype is not None:
        if (isinstance(dtype, str) and
                (dtype.startswith("Int") or dtype.startswith("UInt"))):
            # Avoid DeprecationWarning from NumPy about np.dtype("Int64")
            # https://github.com/numpy/numpy/pull/7476
            dtype = dtype.lower()

        if not issubclass(type(dtype), _IntegerDtype):
            try:
                dtype = _dtypes[str(np.dtype(dtype))]
            except KeyError:
                raise ValueError("invalid dtype specified {}".format(dtype))

    if isinstance(values, IntegerArray):
        values, mask = values._data, values._mask
        if dtype is not None:
            values = values.astype(dtype.numpy_dtype, copy=False)

        if copy:
            values = values.copy()
            mask = mask.copy()
        return values, mask

    values = np.array(values, copy=copy)
    if is_object_dtype(values):
        inferred_type = lib.infer_dtype(values, skipna=True)
        if inferred_type == 'empty':
            values = np.empty(len(values))
            values.fill(np.nan)
        elif inferred_type not in ['floating', 'integer',
                                   'mixed-integer', 'mixed-integer-float']:
            raise TypeError("{} cannot be converted to an IntegerDtype".format(
                values.dtype))

    elif not (is_integer_dtype(values) or is_float_dtype(values)):
        raise TypeError("{} cannot be converted to an IntegerDtype".format(
            values.dtype))

    if mask is None:
        mask = isna(values)
    else:
        assert len(mask) == len(values)

    if not values.ndim == 1:
        raise TypeError("values must be a 1D list-like")
    if not mask.ndim == 1:
        raise TypeError("mask must be a 1D list-like")

    # infer dtype if needed
    if dtype is None:
        dtype = np.dtype('int64')
    else:
        dtype = dtype.type

    # if we are float, let's make sure that we can
    # safely cast

    # we copy as need to coerce here
    if mask.any():
        values = values.copy()
        values[mask] = 1
        values = safe_cast(values, dtype, copy=False)
    else:
        values = safe_cast(values, dtype, copy=False)

    return values, mask
|
python
|
def coerce_to_array(values, dtype, mask=None, copy=False):
"""
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : boolean 1D array, optional
copy : boolean, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
"""
# if values is integer numpy array, preserve it's dtype
if dtype is None and hasattr(values, 'dtype'):
if is_integer_dtype(values.dtype):
dtype = values.dtype
if dtype is not None:
if (isinstance(dtype, str) and
(dtype.startswith("Int") or dtype.startswith("UInt"))):
# Avoid DeprecationWarning from NumPy about np.dtype("Int64")
# https://github.com/numpy/numpy/pull/7476
dtype = dtype.lower()
if not issubclass(type(dtype), _IntegerDtype):
try:
dtype = _dtypes[str(np.dtype(dtype))]
except KeyError:
raise ValueError("invalid dtype specified {}".format(dtype))
if isinstance(values, IntegerArray):
values, mask = values._data, values._mask
if dtype is not None:
values = values.astype(dtype.numpy_dtype, copy=False)
if copy:
values = values.copy()
mask = mask.copy()
return values, mask
values = np.array(values, copy=copy)
if is_object_dtype(values):
inferred_type = lib.infer_dtype(values, skipna=True)
if inferred_type == 'empty':
values = np.empty(len(values))
values.fill(np.nan)
elif inferred_type not in ['floating', 'integer',
'mixed-integer', 'mixed-integer-float']:
raise TypeError("{} cannot be converted to an IntegerDtype".format(
values.dtype))
elif not (is_integer_dtype(values) or is_float_dtype(values)):
raise TypeError("{} cannot be converted to an IntegerDtype".format(
values.dtype))
if mask is None:
mask = isna(values)
else:
assert len(mask) == len(values)
if not values.ndim == 1:
raise TypeError("values must be a 1D list-like")
if not mask.ndim == 1:
raise TypeError("mask must be a 1D list-like")
# infer dtype if needed
if dtype is None:
dtype = np.dtype('int64')
else:
dtype = dtype.type
# if we are float, let's make sure that we can
# safely cast
# we copy as need to coerce here
if mask.any():
values = values.copy()
values[mask] = 1
values = safe_cast(values, dtype, copy=False)
else:
values = safe_cast(values, dtype, copy=False)
return values, mask
|
Coerce the input values array to numpy arrays with a mask
Parameters
----------
values : 1D list-like
dtype : integer dtype
mask : boolean 1D array, optional
copy : boolean, default False
if True, copy the input
Returns
-------
tuple of (values, mask)
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L135-L221
|
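A minimal usage sketch for coerce_to_array above. The function is private pandas API at this commit, so the import path and the placeholder value written under the mask are assumptions tied to this version:

from pandas.core.arrays.integer import coerce_to_array  # private API

# Missing entries are reported through the mask; the data slots they
# occupied are filled with a placeholder before the safe cast to int64.
values, mask = coerce_to_array([1, 2, None], dtype="Int64")
print(values)  # [1 2 1]
print(mask)    # [False False  True]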
19,710
|
pandas-dev/pandas
|
pandas/core/arrays/integer.py
|
_IntegerDtype.construct_from_string
|
def construct_from_string(cls, string):
"""
Construction from a string, raise a TypeError if not
possible
"""
if string == cls.name:
return cls()
raise TypeError("Cannot construct a '{}' from "
"'{}'".format(cls, string))
|
python
|
Construction from a string, raise a TypeError if not
possible
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L81-L89
|
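A quick sketch of construct_from_string via the public Int64Dtype subclass; since the comparison is against cls.name, the lookup is case-sensitive:

import pandas as pd

print(pd.Int64Dtype.construct_from_string("Int64"))  # Int64
try:
    pd.Int64Dtype.construct_from_string("int64")  # wrong case -> TypeError
except TypeError as err:
    print(err)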
19,711
|
pandas-dev/pandas
|
pandas/core/arrays/integer.py
|
IntegerArray._coerce_to_ndarray
|
def _coerce_to_ndarray(self):
"""
        coerce to an ndarray of object dtype
"""
# TODO(jreback) make this better
data = self._data.astype(object)
data[self._mask] = self._na_value
return data
|
python
|
coerce to an ndarray of object dtype
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L328-L336
|
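A short sketch of what _coerce_to_ndarray produces; calling the private method directly is an assumption about this commit's layout:

import pandas as pd

arr = pd.array([1, None, 3], dtype="Int64")
# Object-dtype ndarray with the NA value substituted at masked positions.
print(arr._coerce_to_ndarray())  # [1 nan 3], dtype=object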
19,712
|
pandas-dev/pandas
|
pandas/core/arrays/integer.py
|
IntegerArray.astype
|
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array or IntegerArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray or IntegerArray
        NumPy ndarray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
"""
# if we are astyping to an existing IntegerDtype we can fastpath
if isinstance(dtype, _IntegerDtype):
result = self._data.astype(dtype.numpy_dtype, copy=False)
return type(self)(result, mask=self._mask, copy=False)
# coerce
data = self._coerce_to_ndarray()
return astype_nansafe(data, dtype, copy=None)
|
python
|
Cast to a NumPy array or IntegerArray with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray or IntegerArray
NumPy ndarray or IntegerArray with 'dtype' for its dtype.
Raises
------
TypeError
if incompatible type with an IntegerDtype, equivalent of same_kind
casting
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L420-L452
|
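A hedged sketch of both astype paths above: the fastpath for a concrete integer dtype instance, and the object-coercion route for a NumPy target, where NA becomes NaN (exact reprs are assumptions for this version):

import pandas as pd

arr = pd.array([1, 2, None], dtype="Int64")
print(arr.astype(pd.Int32Dtype()))  # fastpath: recast data, keep the mask
print(arr.astype("float64"))        # via object coercion: [ 1.  2. nan]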
19,713
|
pandas-dev/pandas
|
pandas/core/arrays/integer.py
|
IntegerArray.value_counts
|
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import Index, Series
# compute counts on the data with no nans
data = self._data[~self._mask]
value_counts = Index(data).value_counts()
array = value_counts.values
# TODO(extension)
# if we have allow Index to hold an ExtensionArray
# this is easier
index = value_counts.index.astype(object)
# if we want nans, count the mask
if not dropna:
# TODO(extension)
# appending to an Index *always* infers
# w/o passing the dtype
array = np.append(array, [self._mask.sum()])
index = Index(np.concatenate(
[index.values,
np.array([np.nan], dtype=object)]), dtype=object)
return Series(array, index=index)
|
python
|
Returns a Series containing counts of each category.
Every category will have an entry, even those with a count of 0.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L465-L509
|
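A small usage sketch for value_counts on a nullable integer array; the ordering shown is an assumption (counts sorted descending, with the NA count appended last when dropna=False):

import pandas as pd

arr = pd.array([1, 1, 2, None], dtype="Int64")
print(arr.value_counts(dropna=False))
# 1      2
# 2      1
# NaN    1
# dtype: int64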
19,714
|
pandas-dev/pandas
|
pandas/core/arrays/integer.py
|
IntegerArray._values_for_argsort
|
def _values_for_argsort(self) -> np.ndarray:
"""Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
"""
data = self._data.copy()
data[self._mask] = data.min() - 1
return data
|
python
|
Return values for sorting.
Returns
-------
ndarray
The transformed values should maintain the ordering between values
within the array.
See Also
--------
ExtensionArray.argsort
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/integer.py#L511-L526
|
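A sketch of the argsort transform above: masked slots are rewritten to min() - 1, so missing values sort to the front (direct use of the private method is an assumption):

import numpy as np
import pandas as pd

arr = pd.array([3, None, 1], dtype="Int64")
print(np.argsort(arr._values_for_argsort()))  # [1 2 0]: NA first, then 1, 3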
19,715
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
length_of_indexer
|
def length_of_indexer(indexer, target=None):
"""
return the length of a single non-tuple indexer which could be a slice
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, Index, np.ndarray, list)):
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
|
python
|
return the length of a single non-tuple indexer which could be a slice
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2431-L2457
|
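A minimal sketch for length_of_indexer (a private helper, so the import path is an assumption). A step-2 slice over five elements covers positions 0, 2 and 4:

from pandas.core.indexing import length_of_indexer  # private helper

target = list(range(5))
print(length_of_indexer(slice(None, None, 2), target))  # 3
print(length_of_indexer([10, 20, 30]))                  # 3, plain len()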
19,716
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
convert_to_index_sliceable
|
def convert_to_index_sliceable(obj, key):
"""
if we are index sliceable, then return my slicer, otherwise return None
"""
idx = obj.index
if isinstance(key, slice):
return idx._convert_slice_indexer(key, kind='getitem')
elif isinstance(key, str):
# we are an actual column
if obj._data.items.contains(key):
return None
# We might have a datetimelike string that we can translate to a
# slice here via partial string indexing
if idx.is_all_dates:
try:
return idx._get_string_slice(key)
except (KeyError, ValueError, NotImplementedError):
return None
return None
|
python
|
if we are index sliceable, then return my slicer, otherwise return None
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2460-L2482
|
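A hedged sketch of convert_to_index_sliceable: a partial date string against an all-dates index comes back as a slice, while a plain column name yields None:

import pandas as pd
from pandas.core.indexing import convert_to_index_sliceable  # private helper

df = pd.DataFrame({'x': range(3)},
                  index=pd.date_range('2019-01-01', periods=3))
print(convert_to_index_sliceable(df, '2019-01'))  # slice(0, 3, None)
print(convert_to_index_sliceable(df, 'x'))        # None: 'x' is a column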
19,717
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
check_setitem_lengths
|
def check_setitem_lengths(indexer, value, values):
"""
Validate that value and indexer are the same length.
    A special case is allowed when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
The key for the setitem
value : array-like
The value for the setitem
values : array-like
The values being set into
Returns
-------
None
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't
match.
"""
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and len(values):
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
|
python
|
Validate that value and indexer are the same length.
A special case is allowed when the indexer is a boolean array
and the number of true values equals the length of ``value``. In
this case, no exception is raised.
Parameters
----------
indexer : sequence
The key for the setitem
value : array-like
The value for the setitem
values : array-like
The values being set into
Returns
-------
None
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't
match.
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2511-L2552
|
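A short sketch of the boolean special case in check_setitem_lengths: a mask whose True count matches the value length passes, while a mismatched list indexer raises:

import numpy as np
from pandas.core.indexing import check_setitem_lengths  # private helper

values = np.arange(4)
mask = np.array([True, False, True, False])
check_setitem_lengths(mask, [10, 20], values)  # two True slots, two values: OK
try:
    check_setitem_lengths([0, 1, 2], [10, 20], values)
except ValueError as err:
    print(err)  # cannot set using a list-like indexer ...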
19,718
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
convert_missing_indexer
|
def convert_missing_indexer(indexer):
"""
reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
"""
if isinstance(indexer, dict):
# a missing key (but not a tuple indexer)
indexer = indexer['key']
if isinstance(indexer, bool):
raise KeyError("cannot use a single bool to index into setitem")
return indexer, True
return indexer, False
|
python
|
reverse convert a missing indexer, which is a dict
return the scalar indexer and a boolean indicating if we converted
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2555-L2570
|
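A minimal sketch of convert_missing_indexer (private helper): the dict wrapper that marks a missing key is unwrapped, and the boolean reports whether that happened:

from pandas.core.indexing import convert_missing_indexer  # private helper

print(convert_missing_indexer({'key': 'new_label'}))  # ('new_label', True)
print(convert_missing_indexer(42))                    # (42, False)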
19,719
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
convert_from_missing_indexer_tuple
|
def convert_from_missing_indexer_tuple(indexer, axes):
"""
create a filtered indexer that doesn't have any missing indexers
"""
def get_indexer(_i, _idx):
return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else
_idx)
return tuple(get_indexer(_i, _idx) for _i, _idx in enumerate(indexer))
|
python
|
create a filtered indexer that doesn't have any missing indexers
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2573-L2582
|
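A companion sketch for convert_from_missing_indexer_tuple: dict-wrapped entries are resolved to positions through the matching axis, everything else passes through:

import pandas as pd
from pandas.core.indexing import convert_from_missing_indexer_tuple  # private

axes = [pd.Index(['a', 'b']), pd.Index(['x', 'y'])]
print(convert_from_missing_indexer_tuple(({'key': 'b'}, 0), axes))  # (1, 0)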
19,720
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
maybe_convert_indices
|
def maybe_convert_indices(indices, n):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
# If list is empty, np.array will return float and cause indexing
# errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
|
python
|
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
The array of indices that we are to convert.
n : int
The number of elements in the array that we are indexing.
Returns
-------
valid_indices : array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError : one of the converted indices either exceeded the number
of elements (specified by `n`) OR was still negative.
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2585-L2626
|
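A quick sketch of maybe_convert_indices: negative positions are shifted by the axis length, and anything still out of range raises:

import numpy as np
from pandas.core.indexing import maybe_convert_indices  # private helper

print(maybe_convert_indices(np.array([-1, 0]), 3))  # [2 0]
try:
    maybe_convert_indices(np.array([3]), 3)
except IndexError as err:
    print(err)  # indices are out-of-bounds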
19,721
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
validate_indices
|
def validate_indices(indices, n):
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
length of the array being indexed
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = ("'indices' contains values less than allowed ({} < {})"
.format(min_idx, -1))
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
|
python
|
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
length of the array being indexed
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2629-L2667
|
19,722
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
maybe_convert_ix
|
def maybe_convert_ix(*args):
"""
We likely want to take the cross-product
"""
ixify = True
for arg in args:
if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)):
ixify = False
if ixify:
return np.ix_(*args)
else:
return args
|
python
|
We likely want to take the cross-product
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2670-L2683
|
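A small sketch of maybe_convert_ix: all-list-like arguments become an open mesh via np.ix_, while a scalar anywhere makes the arguments pass through unchanged:

from pandas.core.indexing import maybe_convert_ix  # private helper

rows, cols = maybe_convert_ix([0, 2], [1, 3])
print(rows.shape, cols.shape)       # (2, 1) (1, 2): broadcastable mesh
print(maybe_convert_ix([0, 2], 1))  # ([0, 2], 1): untouched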
19,723
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_non_reducing_slice
|
def _non_reducing_slice(slice_):
"""
    Ensure that a slice doesn't reduce to a Series or Scalar.
    Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
"""
# default to column slice, like DataFrame
# ['A', 'B'] -> IndexSlices[:, ['A', 'B']]
kinds = (ABCSeries, np.ndarray, Index, list, str)
if isinstance(slice_, kinds):
slice_ = IndexSlice[:, slice_]
def pred(part):
# true when slice does *not* reduce, False when part is a tuple,
# i.e. MultiIndex slice
return ((isinstance(part, slice) or is_list_like(part))
and not isinstance(part, tuple))
if not is_list_like(slice_):
if not isinstance(slice_, slice):
# a 1-d slice, like df.loc[1]
slice_ = [[slice_]]
else:
# slice(a, b, c)
slice_ = [slice_] # to tuplize later
else:
slice_ = [part if pred(part) else [part] for part in slice_]
return tuple(slice_)
|
python
|
Ensure that a slice doesn't reduce to a Series or Scalar.
Any user-passed `subset` should have this called on it
to make sure we're always working with DataFrames.
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2734-L2762
|
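A sketch of _non_reducing_slice on a bare column label: it is widened to a 2-D key so downstream styling code always sees a DataFrame, never a Series:

from pandas.core.indexing import _non_reducing_slice  # private helper

print(_non_reducing_slice('A'))  # (slice(None, None, None), ['A'])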
19,724
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_maybe_numeric_slice
|
def _maybe_numeric_slice(df, slice_, include_bool=False):
"""
want nice defaults for background_gradient that don't break
with non-numeric data. But if slice_ is passed go with that.
"""
if slice_ is None:
dtypes = [np.number]
if include_bool:
dtypes.append(bool)
slice_ = IndexSlice[:, df.select_dtypes(include=dtypes).columns]
return slice_
|
python
|
want nice defaults for background_gradient that don't break
with non-numeric data. But if slice_ is passed go with that.
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2765-L2775
|
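A minimal sketch of the default path in _maybe_numeric_slice: with no explicit subset, only numeric columns are kept:

import pandas as pd
from pandas.core.indexing import _maybe_numeric_slice  # private helper

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
row_part, col_part = _maybe_numeric_slice(df, None)
print(list(col_part))  # ['a']: the string column 'b' is excluded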
19,725
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_NDFrameIndexer._has_valid_tuple
|
def _has_valid_tuple(self, key):
""" check the key for valid keys across my indexer """
for i, k in enumerate(key):
if i >= self.obj.ndim:
raise IndexingError('Too many indexers')
try:
self._validate_key(k, i)
except ValueError:
raise ValueError("Location based indexing can only have "
"[{types}] types"
.format(types=self._valid_types))
|
python
|
check the key for valid keys across my indexer
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L215-L225
|
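The 'Too many indexers' branch of _has_valid_tuple is easy to hit from public code; a three-part key on a two-axis frame fails validation:

import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
try:
    df.iloc[0, 0, 0]  # three indexers, two axes
except Exception as err:
    print(err)  # Too many indexers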
19,726
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_NDFrameIndexer._has_valid_positional_setitem_indexer
|
def _has_valid_positional_setitem_indexer(self, indexer):
""" validate that an positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
"""
if isinstance(indexer, dict):
raise IndexError("{0} cannot enlarge its target object"
.format(self.name))
else:
if not isinstance(indexer, tuple):
indexer = self._tuplify(indexer)
for ax, i in zip(self.obj.axes, indexer):
if isinstance(i, slice):
# should check the stop slice?
pass
elif is_list_like_indexer(i):
# should check the elements?
pass
elif is_integer(i):
if i >= len(ax):
raise IndexError("{name} cannot enlarge its target "
"object".format(name=self.name))
elif isinstance(i, dict):
raise IndexError("{name} cannot enlarge its target object"
.format(name=self.name))
return True
|
python
|
validate that a positional indexer cannot enlarge its target
will raise if needed, does not modify the indexer externally
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L270-L295
|
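A public-API sketch of the no-enlargement rule that _has_valid_positional_setitem_indexer enforces for .iloc; the exact IndexError message depends on which check fires first in this version:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
try:
    df.iloc[5] = 99  # position 5 is past the last row
except IndexError as err:
    print(err)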
19,727
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_NDFrameIndexer._multi_take_opportunity
|
def _multi_take_opportunity(self, tup):
"""
Check whether there is the possibility to use ``_multi_take``.
Currently the limit is that all axes being indexed must be indexed with
list-likes.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
boolean: Whether the current indexing can be passed through _multi_take
"""
if not all(is_list_like_indexer(x) for x in tup):
return False
# just too complicated
if any(com.is_bool_indexer(x) for x in tup):
return False
return True
|
python
|
Check whether there is the possibility to use ``_multi_take``.
Currently the limit is that all axes being indexed must be indexed with
list-likes.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
boolean: Whether the current indexing can be passed through _multi_take
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L890-L912
|
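A sketch of the predicate _multi_take_opportunity applies: every axis keyed by a list-like, and no boolean masks anywhere in the tuple:

import pandas.core.common as com
from pandas.core.indexing import is_list_like_indexer

tup = (['r1', 'r2'], ['c1'])
ok = (all(is_list_like_indexer(x) for x in tup) and
      not any(com.is_bool_indexer(x) for x in tup))
print(ok)  # True: this key qualifies for the multi-take fastpath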
19,728
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_NDFrameIndexer._multi_take
|
def _multi_take(self, tup):
"""
Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
rather than once for each dimension - improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
values: same type as the object being indexed
"""
# GH 836
o = self.obj
d = {axis: self._get_listlike_indexer(key, axis)
for (key, axis) in zip(tup, o._AXIS_ORDERS)}
return o._reindex_with_indexers(d, copy=True, allow_dups=True)
|
python
|
Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
rather than once for each dimension - improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
values: same type as the object being indexed
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L914-L933
|
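A public-API sketch of indexing that qualifies for _multi_take: label lists on both axes are resolved in a single reindex pass:

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]],
                  index=['r1', 'r2'], columns=['c1', 'c2'])
print(df.loc[['r2', 'r1'], ['c2']])  # both axes taken at once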
19,729
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_NDFrameIndexer._get_listlike_indexer
|
def _get_listlike_indexer(self, key, axis, raise_missing=False):
"""
Transform a list-like of keys into a new index and an indexer.
Parameters
----------
key : list-like
Target labels
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
Returns
-------
keyarr: Index
New index (coinciding with 'key' if the axis is unique)
values : array-like
An indexer for the return object; -1 denotes keys not found
"""
o = self.obj
ax = o._get_axis(axis)
# Have the index compute an indexer or return None
# if it cannot handle:
indexer, keyarr = ax._convert_listlike_indexer(key,
kind=self.name)
# We only act on all found values:
if indexer is not None and (indexer != -1).all():
self._validate_read_indexer(key, indexer, axis,
raise_missing=raise_missing)
return ax[indexer], indexer
if ax.is_unique:
# If we are trying to get actual keys from empty Series, we
# patiently wait for a KeyError later on - otherwise, convert
if len(ax) or not len(key):
key = self._convert_for_reindex(key, axis)
indexer = ax.get_indexer_for(key)
keyarr = ax.reindex(keyarr)[0]
else:
keyarr, indexer, new_indexer = ax._reindex_non_unique(keyarr)
self._validate_read_indexer(keyarr, indexer,
o._get_axis_number(axis),
raise_missing=raise_missing)
return keyarr, indexer
|
python
|
Transform a list-like of keys into a new index and an indexer.
Parameters
----------
key : list-like
Target labels
axis: int
Dimension on which the indexing is being made
raise_missing: bool
Whether to raise a KeyError if some labels are not found. Will be
removed in the future, and then this method will always behave as
if raise_missing=True.
Raises
------
KeyError
If at least one key was requested but none was found, and
raise_missing=True.
Returns
-------
keyarr: Index
New index (coinciding with 'key' if the axis is unique)
values : array-like
An indexer for the return object; -1 denotes keys not found
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L1112-L1166
|
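A sketch of the raise_missing behavior that _get_listlike_indexer feeds into: at this version, a list key whose labels are all absent raises KeyError (partially missing labels only warned):

import pandas as pd

s = pd.Series([1, 2], index=['a', 'b'])
try:
    s.loc[['z']]
except KeyError as err:
    print(err)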
19,730
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_NDFrameIndexer._convert_to_indexer
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
raise_missing=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
kwargs = {'raise_missing': True if is_setter else
raise_missing}
return self._get_listlike_indexer(obj, axis, **kwargs)[1]
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
|
python
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False,
raise_missing=False):
"""
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
"""
if axis is None:
axis = self.axis or 0
labels = self.obj._get_axis(axis)
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
# try to find out correct indexer, if not type correct raise
try:
obj = self._convert_scalar_indexer(obj, axis)
except TypeError:
# but we will allow setting
if is_setter:
pass
# see if we are positional in nature
is_int_index = labels.is_integer()
is_int_positional = is_integer(obj) and not is_int_index
# if we are a label return me
try:
return labels.get_loc(obj)
except LookupError:
if isinstance(obj, tuple) and isinstance(labels, MultiIndex):
if is_setter and len(obj) == labels.nlevels:
return {'key': obj}
raise
except TypeError:
pass
except (ValueError):
if not is_int_positional:
raise
# a positional
if is_int_positional:
# if we are setting and its not a valid location
# its an insert which fails by definition
if is_setter:
# always valid
if self.name == 'loc':
return {'key': obj}
# a positional
if (obj >= self.obj.shape[axis] and
not isinstance(labels, MultiIndex)):
raise ValueError("cannot set by positional indexing with "
"enlargement")
return obj
if is_nested_tuple(obj, labels):
return labels.get_locs(obj)
elif is_list_like_indexer(obj):
if com.is_bool_indexer(obj):
obj = check_bool_indexer(labels, obj)
inds, = obj.nonzero()
return inds
else:
# When setting, missing keys are not allowed, even with .loc:
kwargs = {'raise_missing': True if is_setter else
raise_missing}
return self._get_listlike_indexer(obj, axis, **kwargs)[1]
else:
try:
return labels.get_loc(obj)
except LookupError:
# allow a not found key only if we are a setter
if not is_list_like_indexer(obj) and is_setter:
return {'key': obj}
raise
|
[
"def",
"_convert_to_indexer",
"(",
"self",
",",
"obj",
",",
"axis",
"=",
"None",
",",
"is_setter",
"=",
"False",
",",
"raise_missing",
"=",
"False",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"self",
".",
"axis",
"or",
"0",
"labels",
"=",
"self",
".",
"obj",
".",
"_get_axis",
"(",
"axis",
")",
"if",
"isinstance",
"(",
"obj",
",",
"slice",
")",
":",
"return",
"self",
".",
"_convert_slice_indexer",
"(",
"obj",
",",
"axis",
")",
"# try to find out correct indexer, if not type correct raise",
"try",
":",
"obj",
"=",
"self",
".",
"_convert_scalar_indexer",
"(",
"obj",
",",
"axis",
")",
"except",
"TypeError",
":",
"# but we will allow setting",
"if",
"is_setter",
":",
"pass",
"# see if we are positional in nature",
"is_int_index",
"=",
"labels",
".",
"is_integer",
"(",
")",
"is_int_positional",
"=",
"is_integer",
"(",
"obj",
")",
"and",
"not",
"is_int_index",
"# if we are a label return me",
"try",
":",
"return",
"labels",
".",
"get_loc",
"(",
"obj",
")",
"except",
"LookupError",
":",
"if",
"isinstance",
"(",
"obj",
",",
"tuple",
")",
"and",
"isinstance",
"(",
"labels",
",",
"MultiIndex",
")",
":",
"if",
"is_setter",
"and",
"len",
"(",
"obj",
")",
"==",
"labels",
".",
"nlevels",
":",
"return",
"{",
"'key'",
":",
"obj",
"}",
"raise",
"except",
"TypeError",
":",
"pass",
"except",
"(",
"ValueError",
")",
":",
"if",
"not",
"is_int_positional",
":",
"raise",
"# a positional",
"if",
"is_int_positional",
":",
"# if we are setting and its not a valid location",
"# its an insert which fails by definition",
"if",
"is_setter",
":",
"# always valid",
"if",
"self",
".",
"name",
"==",
"'loc'",
":",
"return",
"{",
"'key'",
":",
"obj",
"}",
"# a positional",
"if",
"(",
"obj",
">=",
"self",
".",
"obj",
".",
"shape",
"[",
"axis",
"]",
"and",
"not",
"isinstance",
"(",
"labels",
",",
"MultiIndex",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"cannot set by positional indexing with \"",
"\"enlargement\"",
")",
"return",
"obj",
"if",
"is_nested_tuple",
"(",
"obj",
",",
"labels",
")",
":",
"return",
"labels",
".",
"get_locs",
"(",
"obj",
")",
"elif",
"is_list_like_indexer",
"(",
"obj",
")",
":",
"if",
"com",
".",
"is_bool_indexer",
"(",
"obj",
")",
":",
"obj",
"=",
"check_bool_indexer",
"(",
"labels",
",",
"obj",
")",
"inds",
",",
"=",
"obj",
".",
"nonzero",
"(",
")",
"return",
"inds",
"else",
":",
"# When setting, missing keys are not allowed, even with .loc:",
"kwargs",
"=",
"{",
"'raise_missing'",
":",
"True",
"if",
"is_setter",
"else",
"raise_missing",
"}",
"return",
"self",
".",
"_get_listlike_indexer",
"(",
"obj",
",",
"axis",
",",
"*",
"*",
"kwargs",
")",
"[",
"1",
"]",
"else",
":",
"try",
":",
"return",
"labels",
".",
"get_loc",
"(",
"obj",
")",
"except",
"LookupError",
":",
"# allow a not found key only if we are a setter",
"if",
"not",
"is_list_like_indexer",
"(",
"obj",
")",
"and",
"is_setter",
":",
"return",
"{",
"'key'",
":",
"obj",
"}",
"raise"
] |
Convert indexing key into something we can use to do actual fancy
indexing on an ndarray
Examples
ix[:5] -> slice(0, 5)
ix[[1,2,3]] -> [1,2,3]
ix[['foo', 'bar', 'baz']] -> [i, j, k] (indices of foo, bar, baz)
Going by Zen of Python?
'In the face of ambiguity, refuse the temptation to guess.'
raise AmbiguousIndexError with integer labels?
- No, prefer label-based indexing
|
[
"Convert",
"indexing",
"key",
"into",
"something",
"we",
"can",
"use",
"to",
"do",
"actual",
"fancy",
"indexing",
"on",
"an",
"ndarray"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L1275-L1366
|
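A minimal sketch of the key kinds _convert_to_indexer resolves, exercised through the public .loc API; the Series and labels below are illustrative only, not part of the record:

import pandas as pd

s = pd.Series([10, 20, 30], index=['a', 'b', 'c'])
print(s.loc['b'])           # scalar label -> resolved via labels.get_loc
print(s.loc[['a', 'c']])    # list of labels -> the list-like indexer path
s.loc['d'] = 40             # missing key with a setter -> {'key': obj}, i.e. enlargement
print(s)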
19,731
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_LocationIndexer._get_slice_axis
|
def _get_slice_axis(self, slice_obj, axis=None):
""" this is pretty simple as we just have to deal with labels """
if axis is None:
axis = self.axis or 0
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj._take(indexer, axis=axis)
|
python
|
def _get_slice_axis(self, slice_obj, axis=None):
""" this is pretty simple as we just have to deal with labels """
if axis is None:
axis = self.axis or 0
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj._take(indexer, axis=axis)
|
[
"def",
"_get_slice_axis",
"(",
"self",
",",
"slice_obj",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"self",
".",
"axis",
"or",
"0",
"obj",
"=",
"self",
".",
"obj",
"if",
"not",
"need_slice",
"(",
"slice_obj",
")",
":",
"return",
"obj",
".",
"copy",
"(",
"deep",
"=",
"False",
")",
"labels",
"=",
"obj",
".",
"_get_axis",
"(",
"axis",
")",
"indexer",
"=",
"labels",
".",
"slice_indexer",
"(",
"slice_obj",
".",
"start",
",",
"slice_obj",
".",
"stop",
",",
"slice_obj",
".",
"step",
",",
"kind",
"=",
"self",
".",
"name",
")",
"if",
"isinstance",
"(",
"indexer",
",",
"slice",
")",
":",
"return",
"self",
".",
"_slice",
"(",
"indexer",
",",
"axis",
"=",
"axis",
",",
"kind",
"=",
"'iloc'",
")",
"else",
":",
"return",
"self",
".",
"obj",
".",
"_take",
"(",
"indexer",
",",
"axis",
"=",
"axis",
")"
] |
this is pretty simple as we just have to deal with labels
|
[
"this",
"is",
"pretty",
"simple",
"as",
"we",
"just",
"have",
"to",
"deal",
"with",
"labels"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L1526-L1542
|
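A short illustration of what _get_slice_axis does for .loc, on an illustrative frame: label slices resolve through slice_indexer and, unlike positional slices, include both endpoints.

import pandas as pd

df = pd.DataFrame({'x': range(5)}, index=list('abcde'))
print(df.loc['b':'d'])   # rows b, c, d -- both endpoints included
print(df.iloc[1:4])      # the equivalent positional slice after resolution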
19,732
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_iLocIndexer._validate_integer
|
def _validate_integer(self, key, axis):
"""
Check that 'key' is a valid position in the desired axis.
Parameters
----------
key : int
Requested position
axis : int
Desired axis
Returns
-------
None
Raises
------
IndexError
If 'key' is not a valid position in axis 'axis'
"""
len_axis = len(self.obj._get_axis(axis))
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
|
python
|
def _validate_integer(self, key, axis):
"""
Check that 'key' is a valid position in the desired axis.
Parameters
----------
key : int
Requested position
axis : int
Desired axis
Returns
-------
None
Raises
------
IndexError
If 'key' is not a valid position in axis 'axis'
"""
len_axis = len(self.obj._get_axis(axis))
if key >= len_axis or key < -len_axis:
raise IndexError("single positional indexer is out-of-bounds")
|
[
"def",
"_validate_integer",
"(",
"self",
",",
"key",
",",
"axis",
")",
":",
"len_axis",
"=",
"len",
"(",
"self",
".",
"obj",
".",
"_get_axis",
"(",
"axis",
")",
")",
"if",
"key",
">=",
"len_axis",
"or",
"key",
"<",
"-",
"len_axis",
":",
"raise",
"IndexError",
"(",
"\"single positional indexer is out-of-bounds\"",
")"
] |
Check that 'key' is a valid position in the desired axis.
Parameters
----------
key : int
Requested position
axis : int
Desired axis
Returns
-------
None
Raises
------
IndexError
If 'key' is not a valid position in axis 'axis'
|
[
"Check",
"that",
"key",
"is",
"a",
"valid",
"position",
"in",
"the",
"desired",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2125-L2148
|
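The bounds rule above, restated as a standalone sketch (a hypothetical helper, mirroring normal Python sequence semantics):

def validate_position(key, length):
    # a position is valid iff -length <= key < length
    if key >= length or key < -length:
        raise IndexError("single positional indexer is out-of-bounds")

validate_position(2, 3)     # ok: last element
validate_position(-3, 3)    # ok: first element counted from the end
try:
    validate_position(3, 3)
except IndexError as exc:
    print(exc)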
19,733
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_iLocIndexer._get_list_axis
|
def _get_list_axis(self, key, axis=None):
"""
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
"""
if axis is None:
axis = self.axis or 0
try:
return self.obj._take(key, axis=axis)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
|
python
|
def _get_list_axis(self, key, axis=None):
"""
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
"""
if axis is None:
axis = self.axis or 0
try:
return self.obj._take(key, axis=axis)
except IndexError:
# re-raise with different error message
raise IndexError("positional indexers are out-of-bounds")
|
[
"def",
"_get_list_axis",
"(",
"self",
",",
"key",
",",
"axis",
"=",
"None",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"self",
".",
"axis",
"or",
"0",
"try",
":",
"return",
"self",
".",
"obj",
".",
"_take",
"(",
"key",
",",
"axis",
"=",
"axis",
")",
"except",
"IndexError",
":",
"# re-raise with different error message",
"raise",
"IndexError",
"(",
"\"positional indexers are out-of-bounds\"",
")"
] |
Return Series values by list or array of integers
Parameters
----------
key : list-like positional indexer
axis : int (can only be zero)
Returns
-------
Series object
|
[
"Return",
"Series",
"values",
"by",
"list",
"or",
"array",
"of",
"integers"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2193-L2212
|
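Observable behaviour of this list path through the public .iloc accessor (illustrative Series; the message printed is the one re-raised above):

import pandas as pd

s = pd.Series([1, 2, 3])
print(s.iloc[[0, 2]])   # valid positions -> _take on axis 0
try:
    s.iloc[[0, 5]]      # any out-of-bounds position in the list
except IndexError as exc:
    print(exc)          # positional indexers are out-of-bounds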
19,734
|
pandas-dev/pandas
|
pandas/core/indexing.py
|
_iLocIndexer._convert_to_indexer
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
""" much simpler as we only have to deal with our valid types """
if axis is None:
axis = self.axis or 0
# may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
try:
self._validate_key(obj, axis)
return obj
except ValueError:
raise ValueError("Can only index by location with "
"a [{types}]".format(types=self._valid_types))
|
python
|
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
""" much simpler as we only have to deal with our valid types """
if axis is None:
axis = self.axis or 0
# may need to convert a float key
if isinstance(obj, slice):
return self._convert_slice_indexer(obj, axis)
elif is_float(obj):
return self._convert_scalar_indexer(obj, axis)
try:
self._validate_key(obj, axis)
return obj
except ValueError:
raise ValueError("Can only index by location with "
"a [{types}]".format(types=self._valid_types))
|
[
"def",
"_convert_to_indexer",
"(",
"self",
",",
"obj",
",",
"axis",
"=",
"None",
",",
"is_setter",
"=",
"False",
")",
":",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"self",
".",
"axis",
"or",
"0",
"# make need to convert a float key",
"if",
"isinstance",
"(",
"obj",
",",
"slice",
")",
":",
"return",
"self",
".",
"_convert_slice_indexer",
"(",
"obj",
",",
"axis",
")",
"elif",
"is_float",
"(",
"obj",
")",
":",
"return",
"self",
".",
"_convert_scalar_indexer",
"(",
"obj",
",",
"axis",
")",
"try",
":",
"self",
".",
"_validate_key",
"(",
"obj",
",",
"axis",
")",
"return",
"obj",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Can only index by location with \"",
"\"a [{types}]\"",
".",
"format",
"(",
"types",
"=",
"self",
".",
"_valid_types",
")",
")"
] |
much simpler as we only have to deal with our valid types
|
[
"much",
"simpler",
"as",
"we",
"only",
"have",
"to",
"deal",
"with",
"our",
"valid",
"types"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L2244-L2261
|
19,735
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
to_manager
|
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [ensure_index(columns), ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
|
python
|
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [ensure_index(columns), ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
|
[
"def",
"to_manager",
"(",
"sdf",
",",
"columns",
",",
"index",
")",
":",
"# from BlockManager perspective",
"axes",
"=",
"[",
"ensure_index",
"(",
"columns",
")",
",",
"ensure_index",
"(",
"index",
")",
"]",
"return",
"create_block_manager_from_arrays",
"(",
"[",
"sdf",
"[",
"c",
"]",
"for",
"c",
"in",
"columns",
"]",
",",
"columns",
",",
"axes",
")"
] |
create and return the block manager from a dataframe of series,
columns, index
|
[
"create",
"and",
"return",
"the",
"block",
"manager",
"from",
"a",
"dataframe",
"of",
"series",
"columns",
"index"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L951-L960
|
19,736
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
stack_sparse_frame
|
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in frame.items()]
nobs = sum(lengths)
# this is pretty fast
minor_codes = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in frame.items():
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_codes = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
codes=[major_codes, minor_codes],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sort_index(level=0)
|
python
|
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in frame.items()]
nobs = sum(lengths)
# this is pretty fast
minor_codes = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in frame.items():
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_codes = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
codes=[major_codes, minor_codes],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sort_index(level=0)
|
[
"def",
"stack_sparse_frame",
"(",
"frame",
")",
":",
"lengths",
"=",
"[",
"s",
".",
"sp_index",
".",
"npoints",
"for",
"_",
",",
"s",
"in",
"frame",
".",
"items",
"(",
")",
"]",
"nobs",
"=",
"sum",
"(",
"lengths",
")",
"# this is pretty fast",
"minor_codes",
"=",
"np",
".",
"repeat",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"frame",
".",
"columns",
")",
")",
",",
"lengths",
")",
"inds_to_concat",
"=",
"[",
"]",
"vals_to_concat",
"=",
"[",
"]",
"# TODO: Figure out whether this can be reached.",
"# I think this currently can't be reached because you can't build a",
"# SparseDataFrame with a non-np.NaN fill value (fails earlier).",
"for",
"_",
",",
"series",
"in",
"frame",
".",
"items",
"(",
")",
":",
"if",
"not",
"np",
".",
"isnan",
"(",
"series",
".",
"fill_value",
")",
":",
"raise",
"TypeError",
"(",
"'This routine assumes NaN fill value'",
")",
"int_index",
"=",
"series",
".",
"sp_index",
".",
"to_int_index",
"(",
")",
"inds_to_concat",
".",
"append",
"(",
"int_index",
".",
"indices",
")",
"vals_to_concat",
".",
"append",
"(",
"series",
".",
"sp_values",
")",
"major_codes",
"=",
"np",
".",
"concatenate",
"(",
"inds_to_concat",
")",
"stacked_values",
"=",
"np",
".",
"concatenate",
"(",
"vals_to_concat",
")",
"index",
"=",
"MultiIndex",
"(",
"levels",
"=",
"[",
"frame",
".",
"index",
",",
"frame",
".",
"columns",
"]",
",",
"codes",
"=",
"[",
"major_codes",
",",
"minor_codes",
"]",
",",
"verify_integrity",
"=",
"False",
")",
"lp",
"=",
"DataFrame",
"(",
"stacked_values",
".",
"reshape",
"(",
"(",
"nobs",
",",
"1",
")",
")",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"[",
"'foo'",
"]",
")",
"return",
"lp",
".",
"sort_index",
"(",
"level",
"=",
"0",
")"
] |
Only makes sense when fill_value is NaN
|
[
"Only",
"makes",
"sense",
"when",
"fill_value",
"is",
"NaN"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L963-L994
|
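A dense analogue of the stacking, using public API only (the real routine requires a SparseDataFrame; this sketch only shows the shape of the result): the non-fill, non-NaN values end up under a (row, column) MultiIndex in a single 'foo' column.

import numpy as np
import pandas as pd

frame = pd.DataFrame({'a': [1.0, np.nan, 3.0], 'b': [np.nan, 2.0, np.nan]})
stacked = frame.stack().to_frame('foo')   # stack() drops the NaN fill values
print(stacked.sort_index(level=0))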
19,737
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame._init_matrix
|
def _init_matrix(self, data, index, columns, dtype=None):
"""
Init self from ndarray or list of lists.
"""
data = prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = {idx: data[:, i] for i, idx in enumerate(columns)}
return self._init_dict(data, index, columns, dtype)
|
python
|
def _init_matrix(self, data, index, columns, dtype=None):
"""
Init self from ndarray or list of lists.
"""
data = prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = {idx: data[:, i] for i, idx in enumerate(columns)}
return self._init_dict(data, index, columns, dtype)
|
[
"def",
"_init_matrix",
"(",
"self",
",",
"data",
",",
"index",
",",
"columns",
",",
"dtype",
"=",
"None",
")",
":",
"data",
"=",
"prep_ndarray",
"(",
"data",
",",
"copy",
"=",
"False",
")",
"index",
",",
"columns",
"=",
"self",
".",
"_prep_index",
"(",
"data",
",",
"index",
",",
"columns",
")",
"data",
"=",
"{",
"idx",
":",
"data",
"[",
":",
",",
"i",
"]",
"for",
"i",
",",
"idx",
"in",
"enumerate",
"(",
"columns",
")",
"}",
"return",
"self",
".",
"_init_dict",
"(",
"data",
",",
"index",
",",
"columns",
",",
"dtype",
")"
] |
Init self from ndarray or list of lists.
|
[
"Init",
"self",
"from",
"ndarray",
"or",
"list",
"of",
"lists",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L190-L197
|
19,738
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame._init_spmatrix
|
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
"""
Init self from scipy.sparse matrix.
"""
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
|
python
|
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
"""
Init self from scipy.sparse matrix.
"""
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
|
[
"def",
"_init_spmatrix",
"(",
"self",
",",
"data",
",",
"index",
",",
"columns",
",",
"dtype",
"=",
"None",
",",
"fill_value",
"=",
"None",
")",
":",
"index",
",",
"columns",
"=",
"self",
".",
"_prep_index",
"(",
"data",
",",
"index",
",",
"columns",
")",
"data",
"=",
"data",
".",
"tocoo",
"(",
")",
"N",
"=",
"len",
"(",
"index",
")",
"# Construct a dict of SparseSeries",
"sdict",
"=",
"{",
"}",
"values",
"=",
"Series",
"(",
"data",
".",
"data",
",",
"index",
"=",
"data",
".",
"row",
",",
"copy",
"=",
"False",
")",
"for",
"col",
",",
"rowvals",
"in",
"values",
".",
"groupby",
"(",
"data",
".",
"col",
")",
":",
"# get_blocks expects int32 row indices in sorted order",
"rowvals",
"=",
"rowvals",
".",
"sort_index",
"(",
")",
"rows",
"=",
"rowvals",
".",
"index",
".",
"values",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"blocs",
",",
"blens",
"=",
"get_blocks",
"(",
"rows",
")",
"sdict",
"[",
"columns",
"[",
"col",
"]",
"]",
"=",
"SparseSeries",
"(",
"rowvals",
".",
"values",
",",
"index",
"=",
"index",
",",
"fill_value",
"=",
"fill_value",
",",
"sparse_index",
"=",
"BlockIndex",
"(",
"N",
",",
"blocs",
",",
"blens",
")",
")",
"# Add any columns that were empty and thus not grouped on above",
"sdict",
".",
"update",
"(",
"{",
"column",
":",
"SparseSeries",
"(",
"index",
"=",
"index",
",",
"fill_value",
"=",
"fill_value",
",",
"sparse_index",
"=",
"BlockIndex",
"(",
"N",
",",
"[",
"]",
",",
"[",
"]",
")",
")",
"for",
"column",
"in",
"columns",
"if",
"column",
"not",
"in",
"sdict",
"}",
")",
"return",
"self",
".",
"_init_dict",
"(",
"sdict",
",",
"index",
",",
"columns",
",",
"dtype",
")"
] |
Init self from scipy.sparse matrix.
|
[
"Init",
"self",
"from",
"scipy",
".",
"sparse",
"matrix",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L199-L229
|
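The grouping step in isolation, assuming scipy is available (the triplets here are illustrative): COO entries are split by column with row positions sorted, which is the form get_blocks expects.

import numpy as np
from scipy.sparse import coo_matrix

mat = coo_matrix(([1.0, 2.0, 3.0], ([0, 2, 1], [0, 0, 1])), shape=(3, 2))
for col in np.unique(mat.col):
    sel = mat.col == col
    order = np.argsort(mat.row[sel])
    rows = mat.row[sel][order].astype(np.int32)   # sorted int32 row indices
    print(col, rows, mat.data[sel][order])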
19,739
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame.to_coo
|
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
|
python
|
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
|
[
"def",
"to_coo",
"(",
"self",
")",
":",
"try",
":",
"from",
"scipy",
".",
"sparse",
"import",
"coo_matrix",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'Scipy is not installed'",
")",
"dtype",
"=",
"find_common_type",
"(",
"self",
".",
"dtypes",
")",
"if",
"isinstance",
"(",
"dtype",
",",
"SparseDtype",
")",
":",
"dtype",
"=",
"dtype",
".",
"subtype",
"cols",
",",
"rows",
",",
"datas",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"col",
",",
"name",
"in",
"enumerate",
"(",
"self",
")",
":",
"s",
"=",
"self",
"[",
"name",
"]",
"row",
"=",
"s",
".",
"sp_index",
".",
"to_int_index",
"(",
")",
".",
"indices",
"cols",
".",
"append",
"(",
"np",
".",
"repeat",
"(",
"col",
",",
"len",
"(",
"row",
")",
")",
")",
"rows",
".",
"append",
"(",
"row",
")",
"datas",
".",
"append",
"(",
"s",
".",
"sp_values",
".",
"astype",
"(",
"dtype",
",",
"copy",
"=",
"False",
")",
")",
"cols",
"=",
"np",
".",
"concatenate",
"(",
"cols",
")",
"rows",
"=",
"np",
".",
"concatenate",
"(",
"rows",
")",
"datas",
"=",
"np",
".",
"concatenate",
"(",
"datas",
")",
"return",
"coo_matrix",
"(",
"(",
"datas",
",",
"(",
"rows",
",",
"cols",
")",
")",
",",
"shape",
"=",
"self",
".",
"shape",
")"
] |
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. By numpy.find_common_type convention, mixing int64 and
uint64 will result in a float64 dtype.
|
[
"Return",
"the",
"contents",
"of",
"the",
"frame",
"as",
"a",
"sparse",
"SciPy",
"COO",
"matrix",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L246-L288
|
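A usage sketch against the API at this commit (SparseDataFrame was later deprecated in favour of sparse dtypes; scipy must be installed; positions holding the NaN fill value are simply absent from the COO data):

import numpy as np
import pandas as pd

sdf = pd.SparseDataFrame({'a': [0.0, 1.0, np.nan], 'b': [2.0, np.nan, np.nan]})
coo = sdf.to_coo()
print(type(coo))       # scipy.sparse coo_matrix
print(coo.toarray())   # fill-value positions come back as zeros here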
19,740
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame._unpickle_sparse_frame_compat
|
def _unpickle_sparse_frame_compat(self, state):
"""
Original pickle format
"""
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in series.items():
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
|
python
|
def _unpickle_sparse_frame_compat(self, state):
"""
Original pickle format
"""
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in series.items():
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
|
[
"def",
"_unpickle_sparse_frame_compat",
"(",
"self",
",",
"state",
")",
":",
"series",
",",
"cols",
",",
"idx",
",",
"fv",
",",
"kind",
"=",
"state",
"if",
"not",
"isinstance",
"(",
"cols",
",",
"Index",
")",
":",
"# pragma: no cover",
"from",
"pandas",
".",
"io",
".",
"pickle",
"import",
"_unpickle_array",
"columns",
"=",
"_unpickle_array",
"(",
"cols",
")",
"else",
":",
"columns",
"=",
"cols",
"if",
"not",
"isinstance",
"(",
"idx",
",",
"Index",
")",
":",
"# pragma: no cover",
"from",
"pandas",
".",
"io",
".",
"pickle",
"import",
"_unpickle_array",
"index",
"=",
"_unpickle_array",
"(",
"idx",
")",
"else",
":",
"index",
"=",
"idx",
"series_dict",
"=",
"DataFrame",
"(",
")",
"for",
"col",
",",
"(",
"sp_index",
",",
"sp_values",
")",
"in",
"series",
".",
"items",
"(",
")",
":",
"series_dict",
"[",
"col",
"]",
"=",
"SparseSeries",
"(",
"sp_values",
",",
"sparse_index",
"=",
"sp_index",
",",
"fill_value",
"=",
"fv",
")",
"self",
".",
"_data",
"=",
"to_manager",
"(",
"series_dict",
",",
"columns",
",",
"index",
")",
"self",
".",
"_default_fill_value",
"=",
"fv",
"self",
".",
"_default_kind",
"=",
"kind"
] |
Original pickle format
|
[
"Original",
"pickle",
"format"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L302-L327
|
19,741
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame.to_dense
|
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in self.items()}
return DataFrame(data, index=self.index, columns=self.columns)
|
python
|
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in self.items()}
return DataFrame(data, index=self.index, columns=self.columns)
|
[
"def",
"to_dense",
"(",
"self",
")",
":",
"data",
"=",
"{",
"k",
":",
"v",
".",
"to_dense",
"(",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
"}",
"return",
"DataFrame",
"(",
"data",
",",
"index",
"=",
"self",
".",
"index",
",",
"columns",
"=",
"self",
".",
"columns",
")"
] |
Convert to dense DataFrame
Returns
-------
df : DataFrame
|
[
"Convert",
"to",
"dense",
"DataFrame"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L329-L338
|
19,742
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame._apply_columns
|
def _apply_columns(self, func):
"""
Get new SparseDataFrame applying func to each column
"""
new_data = {col: func(series)
for col, series in self.items()}
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
|
python
|
def _apply_columns(self, func):
"""
Get new SparseDataFrame applying func to each column
"""
new_data = {col: func(series)
for col, series in self.items()}
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
|
[
"def",
"_apply_columns",
"(",
"self",
",",
"func",
")",
":",
"new_data",
"=",
"{",
"col",
":",
"func",
"(",
"series",
")",
"for",
"col",
",",
"series",
"in",
"self",
".",
"items",
"(",
")",
"}",
"return",
"self",
".",
"_constructor",
"(",
"data",
"=",
"new_data",
",",
"index",
"=",
"self",
".",
"index",
",",
"columns",
"=",
"self",
".",
"columns",
",",
"default_fill_value",
"=",
"self",
".",
"default_fill_value",
")",
".",
"__finalize__",
"(",
"self",
")"
] |
Get new SparseDataFrame applying func to each column
|
[
"Get",
"new",
"SparseDataFrame",
"applying",
"func",
"to",
"each",
"columns"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L340-L350
|
19,743
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame.copy
|
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super().copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
|
python
|
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super().copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
|
[
"def",
"copy",
"(",
"self",
",",
"deep",
"=",
"True",
")",
":",
"result",
"=",
"super",
"(",
")",
".",
"copy",
"(",
"deep",
"=",
"deep",
")",
"result",
".",
"_default_fill_value",
"=",
"self",
".",
"_default_fill_value",
"result",
".",
"_default_kind",
"=",
"self",
".",
"_default_kind",
"return",
"result"
] |
Make a copy of this SparseDataFrame
|
[
"Make",
"a",
"copy",
"of",
"this",
"SparseDataFrame"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L355-L362
|
19,744
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame._sanitize_column
|
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise ValueError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise ValueError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
|
python
|
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise ValueError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise ValueError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
|
[
"def",
"_sanitize_column",
"(",
"self",
",",
"key",
",",
"value",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"sp_maker",
"(",
"x",
",",
"index",
"=",
"None",
")",
":",
"return",
"SparseArray",
"(",
"x",
",",
"index",
"=",
"index",
",",
"fill_value",
"=",
"self",
".",
"_default_fill_value",
",",
"kind",
"=",
"self",
".",
"_default_kind",
")",
"if",
"isinstance",
"(",
"value",
",",
"SparseSeries",
")",
":",
"clean",
"=",
"value",
".",
"reindex",
"(",
"self",
".",
"index",
")",
".",
"as_sparse_array",
"(",
"fill_value",
"=",
"self",
".",
"_default_fill_value",
",",
"kind",
"=",
"self",
".",
"_default_kind",
")",
"elif",
"isinstance",
"(",
"value",
",",
"SparseArray",
")",
":",
"if",
"len",
"(",
"value",
")",
"!=",
"len",
"(",
"self",
".",
"index",
")",
":",
"raise",
"ValueError",
"(",
"'Length of values does not match '",
"'length of index'",
")",
"clean",
"=",
"value",
"elif",
"hasattr",
"(",
"value",
",",
"'__iter__'",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Series",
")",
":",
"clean",
"=",
"value",
".",
"reindex",
"(",
"self",
".",
"index",
")",
"if",
"not",
"isinstance",
"(",
"value",
",",
"SparseSeries",
")",
":",
"clean",
"=",
"sp_maker",
"(",
"clean",
")",
"else",
":",
"if",
"len",
"(",
"value",
")",
"!=",
"len",
"(",
"self",
".",
"index",
")",
":",
"raise",
"ValueError",
"(",
"'Length of values does not match '",
"'length of index'",
")",
"clean",
"=",
"sp_maker",
"(",
"value",
")",
"# Scalar",
"else",
":",
"clean",
"=",
"sp_maker",
"(",
"value",
",",
"self",
".",
"index",
")",
"# always return a SparseArray!",
"return",
"clean"
] |
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
|
[
"Creates",
"a",
"new",
"SparseArray",
"from",
"the",
"input",
"value",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L403-L448
|
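The length check applied to array-likes above, as a standalone sketch (a hypothetical helper, not the method itself):

def check_length(value, index_len):
    # array-like values must match the length of the frame's index
    if len(value) != index_len:
        raise ValueError('Length of values does not match length of index')
    return value

check_length([1, 2, 3], 3)          # accepted
try:
    check_length([1, 2], 3)
except ValueError as exc:
    print(exc)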
19,745
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame.cumsum
|
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
|
python
|
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
|
[
"def",
"cumsum",
"(",
"self",
",",
"axis",
"=",
"0",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"nv",
".",
"validate_cumsum",
"(",
"args",
",",
"kwargs",
")",
"if",
"axis",
"is",
"None",
":",
"axis",
"=",
"self",
".",
"_stat_axis_number",
"return",
"self",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"cumsum",
"(",
")",
",",
"axis",
"=",
"axis",
")"
] |
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
|
[
"Return",
"SparseDataFrame",
"of",
"cumulative",
"sums",
"over",
"requested",
"axis",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L828-L846
|
19,746
|
pandas-dev/pandas
|
pandas/core/sparse/frame.py
|
SparseDataFrame.apply
|
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in self.items():
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result()
|
python
|
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in self.items():
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result()
|
[
"def",
"apply",
"(",
"self",
",",
"func",
",",
"axis",
"=",
"0",
",",
"broadcast",
"=",
"None",
",",
"reduce",
"=",
"None",
",",
"result_type",
"=",
"None",
")",
":",
"if",
"not",
"len",
"(",
"self",
".",
"columns",
")",
":",
"return",
"self",
"axis",
"=",
"self",
".",
"_get_axis_number",
"(",
"axis",
")",
"if",
"isinstance",
"(",
"func",
",",
"np",
".",
"ufunc",
")",
":",
"new_series",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"items",
"(",
")",
":",
"applied",
"=",
"func",
"(",
"v",
")",
"applied",
".",
"fill_value",
"=",
"func",
"(",
"v",
".",
"fill_value",
")",
"new_series",
"[",
"k",
"]",
"=",
"applied",
"return",
"self",
".",
"_constructor",
"(",
"new_series",
",",
"index",
"=",
"self",
".",
"index",
",",
"columns",
"=",
"self",
".",
"columns",
",",
"default_fill_value",
"=",
"self",
".",
"_default_fill_value",
",",
"default_kind",
"=",
"self",
".",
"_default_kind",
")",
".",
"__finalize__",
"(",
"self",
")",
"from",
"pandas",
".",
"core",
".",
"apply",
"import",
"frame_apply",
"op",
"=",
"frame_apply",
"(",
"self",
",",
"func",
"=",
"func",
",",
"axis",
"=",
"axis",
",",
"reduce",
"=",
"reduce",
",",
"broadcast",
"=",
"broadcast",
",",
"result_type",
"=",
"result_type",
")",
"return",
"op",
".",
"get_result",
"(",
")"
] |
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
|
[
"Analogous",
"to",
"DataFrame",
".",
"apply",
"for",
"SparseDataFrame"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/sparse/frame.py#L858-L931
|
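The result_type semantics documented above, demonstrated on a dense DataFrame (the sparse version delegates non-ufunc callables to the same frame_apply machinery):

import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
print(df.apply(lambda r: [r.sum(), r.prod()], axis=1))                         # default: a Series of lists
print(df.apply(lambda r: [r.sum(), r.prod()], axis=1, result_type='expand'))   # lists expanded into columns
print(df.apply(lambda r: r.sum(), axis=1, result_type='broadcast'))            # broadcast back to the original shape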
19,747
|
pandas-dev/pandas
|
scripts/generate_pip_deps_from_conda.py
|
conda_package_to_pip
|
def conda_package_to_pip(package):
"""
Convert a conda package to its pip equivalent.
In most cases they are the same; these are the exceptions:
- Packages that should be excluded (in `EXCLUDE`)
- Packages that should be renamed (in `RENAME`)
- A package requiring a specific version, which in conda is pinned with a single
equals sign (e.g. ``pandas=1.0``) and in pip with two (e.g. ``pandas==1.0``)
"""
if package in EXCLUDE:
return
package = re.sub('(?<=[^<>])=', '==', package).strip()
for compare in ('<=', '>=', '=='):
if compare not in package:
continue
pkg, version = package.split(compare)
if pkg in RENAME:
return ''.join((RENAME[pkg], compare, version))
break
return package
|
python
|
def conda_package_to_pip(package):
"""
Convert a conda package to its pip equivalent.
In most cases they are the same; these are the exceptions:
- Packages that should be excluded (in `EXCLUDE`)
- Packages that should be renamed (in `RENAME`)
- A package requiring a specific version, which in conda is pinned with a single
equals sign (e.g. ``pandas=1.0``) and in pip with two (e.g. ``pandas==1.0``)
"""
if package in EXCLUDE:
return
package = re.sub('(?<=[^<>])=', '==', package).strip()
for compare in ('<=', '>=', '=='):
if compare not in package:
continue
pkg, version = package.split(compare)
if pkg in RENAME:
return ''.join((RENAME[pkg], compare, version))
break
return package
|
[
"def",
"conda_package_to_pip",
"(",
"package",
")",
":",
"if",
"package",
"in",
"EXCLUDE",
":",
"return",
"package",
"=",
"re",
".",
"sub",
"(",
"'(?<=[^<>])='",
",",
"'=='",
",",
"package",
")",
".",
"strip",
"(",
")",
"for",
"compare",
"in",
"(",
"'<='",
",",
"'>='",
",",
"'=='",
")",
":",
"if",
"compare",
"not",
"in",
"package",
":",
"continue",
"pkg",
",",
"version",
"=",
"package",
".",
"split",
"(",
"compare",
")",
"if",
"pkg",
"in",
"RENAME",
":",
"return",
"''",
".",
"join",
"(",
"(",
"RENAME",
"[",
"pkg",
"]",
",",
"compare",
",",
"version",
")",
")",
"break",
"return",
"package"
] |
Convert a conda package to its pip equivalent.
In most cases they are the same; these are the exceptions:
- Packages that should be excluded (in `EXCLUDE`)
- Packages that should be renamed (in `RENAME`)
- A package requiring a specific version, which in conda is pinned with a single
equals sign (e.g. ``pandas=1.0``) and in pip with two (e.g. ``pandas==1.0``)
|
[
"Convert",
"a",
"conda",
"package",
"to",
"its",
"pip",
"equivalent",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/scripts/generate_pip_deps_from_conda.py#L26-L51
|
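Expected conversions, as a sketch run against the function above (EXCLUDE and RENAME come from the same script; the 'pytables' -> 'tables' rename is an assumed example entry used for illustration):

print(conda_package_to_pip('pandas=0.24'))    # 'pandas==0.24'  -- single '=' doubled for pip
print(conda_package_to_pip('numpy>=1.15'))    # 'numpy>=1.15'   -- '>=' left untouched by the lookbehind
print(conda_package_to_pip('pytables=3.4'))   # 'tables==3.4'   -- renamed, version pin preserved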
19,748
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
maybe_convert_platform
|
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = construct_1d_object_array_from_listlike(list(values))
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(values)
return values
|
python
|
def maybe_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = construct_1d_object_array_from_listlike(list(values))
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, '_values'):
values = values._values
values = lib.maybe_convert_objects(values)
return values
|
[
"def",
"maybe_convert_platform",
"(",
"values",
")",
":",
"if",
"isinstance",
"(",
"values",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"values",
"=",
"construct_1d_object_array_from_listlike",
"(",
"list",
"(",
"values",
")",
")",
"if",
"getattr",
"(",
"values",
",",
"'dtype'",
",",
"None",
")",
"==",
"np",
".",
"object_",
":",
"if",
"hasattr",
"(",
"values",
",",
"'_values'",
")",
":",
"values",
"=",
"values",
".",
"_values",
"values",
"=",
"lib",
".",
"maybe_convert_objects",
"(",
"values",
")",
"return",
"values"
] |
try to do platform conversion, allow ndarray or list here
|
[
"try",
"to",
"do",
"platform",
"conversion",
"allow",
"ndarray",
"or",
"list",
"here"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L35-L45
|
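An equivalent sketch built from public pieces: a list input becomes a 1-D object array, and object arrays are soft-converted back to a concrete dtype where possible.

import numpy as np
import pandas as pd

values = [1, 2, 3]
arr = np.empty(len(values), dtype=object)
arr[:] = values                           # 1-D object array from the list-like
inferred = pd.Series(arr).infer_objects()
print(inferred.dtype)                     # int64: objects re-inferred to a concrete dtype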
19,749
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
maybe_upcast_putmask
|
def maybe_upcast_putmask(result, mask, other):
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : ndarray or scalar
The source array or value
Returns
-------
result : ndarray
changed : boolean
Set to true if the result array was upcasted
Examples
--------
>>> result, _ = maybe_upcast_putmask(np.arange(1,6),
np.array([False, True, False, True, True]), np.arange(21,23))
>>> result
array([1, 21, 3, 22, 21])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if is_datetimelike(result.dtype):
if is_scalar(other):
if isna(other):
other = result.dtype.type('nat')
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
elif is_integer_dtype(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_result = result.values.copy()
new_result[mask] = om_at
result[:] = new_result
return result, False
except Exception:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON'T want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and it's nan and we are changing some values
if (is_scalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isna(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isna(other).any():
return changeit()
try:
np.place(result, mask, other)
except Exception:
return changeit()
return result, False
|
python
|
def maybe_upcast_putmask(result, mask, other):
"""
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : ndarray or scalar
The source array or value
Returns
-------
result : ndarray
changed : boolean
Set to true if the result array was upcasted
Examples
--------
>>> result, _ = maybe_upcast_putmask(np.arange(1,6),
np.array([False, True, False, True, True]), np.arange(21,23))
>>> result
array([1, 21, 3, 22, 21])
"""
if not isinstance(result, np.ndarray):
raise ValueError("The result input must be a ndarray.")
if mask.any():
# Two conversions for date-like dtypes that can't be done automatically
# in np.place:
# NaN -> NaT
# integer or integer array -> date-like array
if is_datetimelike(result.dtype):
if is_scalar(other):
if isna(other):
other = result.dtype.type('nat')
elif is_integer(other):
other = np.array(other, dtype=result.dtype)
elif is_integer_dtype(other):
other = np.array(other, dtype=result.dtype)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_result = result.values.copy()
new_result[mask] = om_at
result[:] = new_result
return result, False
except Exception:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, _ = maybe_upcast(result, fill_value=other, copy=True)
np.place(r, mask, other)
return r, True
# we want to decide whether place will work
# if we have nans in the False portion of our mask then we need to
# upcast (possibly), otherwise we DON'T want to upcast (e.g. if we
# have values, say integers, in the success portion then it's ok to not
# upcast)
new_dtype, _ = maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and it's nan and we are changing some values
if (is_scalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isna(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isna(other).any():
return changeit()
try:
np.place(result, mask, other)
except Exception:
return changeit()
return result, False
|
[
"def",
"maybe_upcast_putmask",
"(",
"result",
",",
"mask",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"result",
",",
"np",
".",
"ndarray",
")",
":",
"raise",
"ValueError",
"(",
"\"The result input must be a ndarray.\"",
")",
"if",
"mask",
".",
"any",
"(",
")",
":",
"# Two conversions for date-like dtypes that can't be done automatically",
"# in np.place:",
"# NaN -> NaT",
"# integer or integer array -> date-like array",
"if",
"is_datetimelike",
"(",
"result",
".",
"dtype",
")",
":",
"if",
"is_scalar",
"(",
"other",
")",
":",
"if",
"isna",
"(",
"other",
")",
":",
"other",
"=",
"result",
".",
"dtype",
".",
"type",
"(",
"'nat'",
")",
"elif",
"is_integer",
"(",
"other",
")",
":",
"other",
"=",
"np",
".",
"array",
"(",
"other",
",",
"dtype",
"=",
"result",
".",
"dtype",
")",
"elif",
"is_integer_dtype",
"(",
"other",
")",
":",
"other",
"=",
"np",
".",
"array",
"(",
"other",
",",
"dtype",
"=",
"result",
".",
"dtype",
")",
"def",
"changeit",
"(",
")",
":",
"# try to directly set by expanding our array to full",
"# length of the boolean",
"try",
":",
"om",
"=",
"other",
"[",
"mask",
"]",
"om_at",
"=",
"om",
".",
"astype",
"(",
"result",
".",
"dtype",
")",
"if",
"(",
"om",
"==",
"om_at",
")",
".",
"all",
"(",
")",
":",
"new_result",
"=",
"result",
".",
"values",
".",
"copy",
"(",
")",
"new_result",
"[",
"mask",
"]",
"=",
"om_at",
"result",
"[",
":",
"]",
"=",
"new_result",
"return",
"result",
",",
"False",
"except",
"Exception",
":",
"pass",
"# we are forced to change the dtype of the result as the input",
"# isn't compatible",
"r",
",",
"_",
"=",
"maybe_upcast",
"(",
"result",
",",
"fill_value",
"=",
"other",
",",
"copy",
"=",
"True",
")",
"np",
".",
"place",
"(",
"r",
",",
"mask",
",",
"other",
")",
"return",
"r",
",",
"True",
"# we want to decide whether place will work",
"# if we have nans in the False portion of our mask then we need to",
"# upcast (possibly), otherwise we DON't want to upcast (e.g. if we",
"# have values, say integers, in the success portion then it's ok to not",
"# upcast)",
"new_dtype",
",",
"_",
"=",
"maybe_promote",
"(",
"result",
".",
"dtype",
",",
"other",
")",
"if",
"new_dtype",
"!=",
"result",
".",
"dtype",
":",
"# we have a scalar or len 0 ndarray",
"# and its nan and we are changing some values",
"if",
"(",
"is_scalar",
"(",
"other",
")",
"or",
"(",
"isinstance",
"(",
"other",
",",
"np",
".",
"ndarray",
")",
"and",
"other",
".",
"ndim",
"<",
"1",
")",
")",
":",
"if",
"isna",
"(",
"other",
")",
":",
"return",
"changeit",
"(",
")",
"# we have an ndarray and the masking has nans in it",
"else",
":",
"if",
"isna",
"(",
"other",
")",
".",
"any",
"(",
")",
":",
"return",
"changeit",
"(",
")",
"try",
":",
"np",
".",
"place",
"(",
"result",
",",
"mask",
",",
"other",
")",
"except",
"Exception",
":",
"return",
"changeit",
"(",
")",
"return",
"result",
",",
"False"
] |
A safe version of putmask that potentially upcasts the result.
The result is replaced with the first N elements of other,
where N is the number of True values in mask.
If the length of other is shorter than N, other will be repeated.
Parameters
----------
result : ndarray
The destination array. This will be mutated in-place if no upcasting is
necessary.
mask : boolean ndarray
other : ndarray or scalar
The source array or value
Returns
-------
result : ndarray
changed : boolean
Set to True if the result array was upcast
Examples
--------
>>> result, _ = maybe_upcast_putmask(np.arange(1,6),
...     np.array([False, True, False, True, True]), np.arange(21,23))
>>> result
array([1, 21, 3, 22, 21])
|
[
"A",
"safe",
"version",
"of",
"putmask",
"that",
"potentially",
"upcasts",
"the",
"result",
".",
"The",
"result",
"is",
"replaced",
"with",
"the",
"first",
"N",
"elements",
"of",
"other",
"where",
"N",
"is",
"the",
"number",
"of",
"True",
"values",
"in",
"mask",
".",
"If",
"the",
"length",
"of",
"other",
"is",
"shorter",
"than",
"N",
"other",
"will",
"be",
"repeated",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L170-L265
|
19,750
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
infer_dtype_from
|
def infer_dtype_from(val, pandas_dtype=False):
"""
interpret the dtype from a scalar or array. This is a convenience
routine to infer dtype from a scalar or an array
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to pandas extension types is
inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
|
python
|
def infer_dtype_from(val, pandas_dtype=False):
"""
interpret the dtype from a scalar or array. This is a convenience
routine to infer dtype from a scalar or an array
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to pandas extension types is
inferred as object
"""
if is_scalar(val):
return infer_dtype_from_scalar(val, pandas_dtype=pandas_dtype)
return infer_dtype_from_array(val, pandas_dtype=pandas_dtype)
|
[
"def",
"infer_dtype_from",
"(",
"val",
",",
"pandas_dtype",
"=",
"False",
")",
":",
"if",
"is_scalar",
"(",
"val",
")",
":",
"return",
"infer_dtype_from_scalar",
"(",
"val",
",",
"pandas_dtype",
"=",
"pandas_dtype",
")",
"return",
"infer_dtype_from_array",
"(",
"val",
",",
"pandas_dtype",
"=",
"pandas_dtype",
")"
] |
interpret the dtype from a scalar or array. This is a convenience
routine to infer dtype from a scalar or an array
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar/array belonging to pandas extension types is
inferred as object
|
[
"interpret",
"the",
"dtype",
"from",
"a",
"scalar",
"or",
"array",
".",
"This",
"is",
"a",
"convenience",
"routines",
"to",
"infer",
"dtype",
"from",
"a",
"scalar",
"or",
"an",
"array"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L337-L351
|
19,751
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
infer_dtype_from_scalar
|
def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to pandas extension types is inferred
as object
"""
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)):
val = tslibs.Timestamp(val)
if val is tslibs.NaT or val.tz is None:
dtype = np.dtype('M8[ns]')
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
else:
# return datetimetz as object
return np.object_, val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslibs.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
elif is_integer(val):
if isinstance(val, np.integer):
dtype = type(val)
else:
dtype = np.int64
elif is_float(val):
if isinstance(val, np.floating):
dtype = type(val)
else:
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
val = val.ordinal
return dtype, val
|
python
|
def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to pandas extension types is inferred
as object
"""
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)):
val = tslibs.Timestamp(val)
if val is tslibs.NaT or val.tz is None:
dtype = np.dtype('M8[ns]')
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
else:
# return datetimetz as object
return np.object_, val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslibs.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
elif is_integer(val):
if isinstance(val, np.integer):
dtype = type(val)
else:
dtype = np.int64
elif is_float(val):
if isinstance(val, np.floating):
dtype = type(val)
else:
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
val = val.ordinal
return dtype, val
|
[
"def",
"infer_dtype_from_scalar",
"(",
"val",
",",
"pandas_dtype",
"=",
"False",
")",
":",
"dtype",
"=",
"np",
".",
"object_",
"# a 1-element ndarray",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"ndarray",
")",
":",
"msg",
"=",
"\"invalid ndarray passed to infer_dtype_from_scalar\"",
"if",
"val",
".",
"ndim",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"dtype",
"=",
"val",
".",
"dtype",
"val",
"=",
"val",
".",
"item",
"(",
")",
"elif",
"isinstance",
"(",
"val",
",",
"str",
")",
":",
"# If we create an empty array using a string to infer",
"# the dtype, NumPy will only allocate one character per entry",
"# so this is kind of bad. Alternately we could use np.repeat",
"# instead of np.empty (but then you still don't want things",
"# coming out as np.str_!",
"dtype",
"=",
"np",
".",
"object_",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"np",
".",
"datetime64",
",",
"datetime",
")",
")",
":",
"val",
"=",
"tslibs",
".",
"Timestamp",
"(",
"val",
")",
"if",
"val",
"is",
"tslibs",
".",
"NaT",
"or",
"val",
".",
"tz",
"is",
"None",
":",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"'M8[ns]'",
")",
"else",
":",
"if",
"pandas_dtype",
":",
"dtype",
"=",
"DatetimeTZDtype",
"(",
"unit",
"=",
"'ns'",
",",
"tz",
"=",
"val",
".",
"tz",
")",
"else",
":",
"# return datetimetz as object",
"return",
"np",
".",
"object_",
",",
"val",
"val",
"=",
"val",
".",
"value",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"np",
".",
"timedelta64",
",",
"timedelta",
")",
")",
":",
"val",
"=",
"tslibs",
".",
"Timedelta",
"(",
"val",
")",
".",
"value",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"'m8[ns]'",
")",
"elif",
"is_bool",
"(",
"val",
")",
":",
"dtype",
"=",
"np",
".",
"bool_",
"elif",
"is_integer",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"integer",
")",
":",
"dtype",
"=",
"type",
"(",
"val",
")",
"else",
":",
"dtype",
"=",
"np",
".",
"int64",
"elif",
"is_float",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"floating",
")",
":",
"dtype",
"=",
"type",
"(",
"val",
")",
"else",
":",
"dtype",
"=",
"np",
".",
"float64",
"elif",
"is_complex",
"(",
"val",
")",
":",
"dtype",
"=",
"np",
".",
"complex_",
"elif",
"pandas_dtype",
":",
"if",
"lib",
".",
"is_period",
"(",
"val",
")",
":",
"dtype",
"=",
"PeriodDtype",
"(",
"freq",
"=",
"val",
".",
"freq",
")",
"val",
"=",
"val",
".",
"ordinal",
"return",
"dtype",
",",
"val"
] |
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to pandas extension types is inferred
as object
|
[
"interpret",
"the",
"dtype",
"from",
"a",
"scalar"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L354-L426
|
19,752
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
infer_dtype_from_array
|
def infer_dtype_from_array(arr, pandas_dtype=False):
"""
infer the dtype from a scalar or array
Parameters
----------
arr : scalar or array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to pandas extension types
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
if pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
if pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(numpy.object_, [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_type(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ['string', 'bytes', 'unicode',
'mixed', 'mixed-integer']:
return (np.object_, arr)
arr = np.asarray(arr)
return arr.dtype, arr
|
python
|
def infer_dtype_from_array(arr, pandas_dtype=False):
"""
infer the dtype from a scalar or array
Parameters
----------
arr : scalar or array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to pandas extension types
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
if pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
if pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(numpy.object_, [1, '1'])
"""
if isinstance(arr, np.ndarray):
return arr.dtype, arr
if not is_list_like(arr):
arr = [arr]
if pandas_dtype and is_extension_type(arr):
return arr.dtype, arr
elif isinstance(arr, ABCSeries):
return arr.dtype, np.asarray(arr)
# don't force numpy coerce with nan's
inferred = lib.infer_dtype(arr, skipna=False)
if inferred in ['string', 'bytes', 'unicode',
'mixed', 'mixed-integer']:
return (np.object_, arr)
arr = np.asarray(arr)
return arr.dtype, arr
|
[
"def",
"infer_dtype_from_array",
"(",
"arr",
",",
"pandas_dtype",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"arr",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"arr",
".",
"dtype",
",",
"arr",
"if",
"not",
"is_list_like",
"(",
"arr",
")",
":",
"arr",
"=",
"[",
"arr",
"]",
"if",
"pandas_dtype",
"and",
"is_extension_type",
"(",
"arr",
")",
":",
"return",
"arr",
".",
"dtype",
",",
"arr",
"elif",
"isinstance",
"(",
"arr",
",",
"ABCSeries",
")",
":",
"return",
"arr",
".",
"dtype",
",",
"np",
".",
"asarray",
"(",
"arr",
")",
"# don't force numpy coerce with nan's",
"inferred",
"=",
"lib",
".",
"infer_dtype",
"(",
"arr",
",",
"skipna",
"=",
"False",
")",
"if",
"inferred",
"in",
"[",
"'string'",
",",
"'bytes'",
",",
"'unicode'",
",",
"'mixed'",
",",
"'mixed-integer'",
"]",
":",
"return",
"(",
"np",
".",
"object_",
",",
"arr",
")",
"arr",
"=",
"np",
".",
"asarray",
"(",
"arr",
")",
"return",
"arr",
".",
"dtype",
",",
"arr"
] |
infer the dtype from a scalar or array
Parameters
----------
arr : scalar or array
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, an array belonging to pandas extension types
is inferred as object
Returns
-------
tuple (numpy-compat/pandas-compat dtype, array)
Notes
-----
if pandas_dtype=False, these infer to numpy dtypes
exactly, with the exception that mixed / object dtypes
are not coerced by stringifying or conversion.
if pandas_dtype=True, datetime64tz-aware/categorical
types will retain their character.
Examples
--------
>>> np.asarray([1, '1'])
array(['1', '1'], dtype='<U21')
>>> infer_dtype_from_array([1, '1'])
(numpy.object_, [1, '1'])
|
[
"infer",
"the",
"dtype",
"from",
"a",
"scalar",
"or",
"array"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L429-L483
|
19,753
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
maybe_infer_dtype_type
|
def maybe_infer_dtype_type(element):
"""Try to infer an object's dtype, for use in arithmetic ops
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
numpy.int64
"""
tipo = None
if hasattr(element, 'dtype'):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
|
python
|
def maybe_infer_dtype_type(element):
"""Try to infer an object's dtype, for use in arithmetic ops
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
numpy.int64
"""
tipo = None
if hasattr(element, 'dtype'):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo
|
[
"def",
"maybe_infer_dtype_type",
"(",
"element",
")",
":",
"tipo",
"=",
"None",
"if",
"hasattr",
"(",
"element",
",",
"'dtype'",
")",
":",
"tipo",
"=",
"element",
".",
"dtype",
"elif",
"is_list_like",
"(",
"element",
")",
":",
"element",
"=",
"np",
".",
"asarray",
"(",
"element",
")",
"tipo",
"=",
"element",
".",
"dtype",
"return",
"tipo"
] |
Try to infer an object's dtype, for use in arithmetic ops
Uses `element.dtype` if that's available.
Objects implementing the iterator protocol are cast to a NumPy array,
and from there the array's type is used.
Parameters
----------
element : object
Possibly has a `.dtype` attribute, and possibly the iterator
protocol.
Returns
-------
tipo : type
Examples
--------
>>> from collections import namedtuple
>>> Foo = namedtuple("Foo", "dtype")
>>> maybe_infer_dtype_type(Foo(np.dtype("i8")))
numpy.int64
|
[
"Try",
"to",
"infer",
"an",
"object",
"s",
"dtype",
"for",
"use",
"in",
"arithmetic",
"ops"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L486-L516
|
19,754
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
maybe_upcast
|
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
|
python
|
def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
|
[
"def",
"maybe_upcast",
"(",
"values",
",",
"fill_value",
"=",
"np",
".",
"nan",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"if",
"is_extension_type",
"(",
"values",
")",
":",
"if",
"copy",
":",
"values",
"=",
"values",
".",
"copy",
"(",
")",
"else",
":",
"if",
"dtype",
"is",
"None",
":",
"dtype",
"=",
"values",
".",
"dtype",
"new_dtype",
",",
"fill_value",
"=",
"maybe_promote",
"(",
"dtype",
",",
"fill_value",
")",
"if",
"new_dtype",
"!=",
"values",
".",
"dtype",
":",
"values",
"=",
"values",
".",
"astype",
"(",
"new_dtype",
")",
"elif",
"copy",
":",
"values",
"=",
"values",
".",
"copy",
"(",
")",
"return",
"values",
",",
"fill_value"
] |
provide explicit type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
|
[
"provide",
"explicit",
"type",
"promotion",
"and",
"coercion"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L519-L542
|
19,755
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
coerce_indexer_dtype
|
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
|
python
|
def coerce_indexer_dtype(indexer, categories):
""" coerce the indexer input array to the smallest dtype possible """
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer)
|
[
"def",
"coerce_indexer_dtype",
"(",
"indexer",
",",
"categories",
")",
":",
"length",
"=",
"len",
"(",
"categories",
")",
"if",
"length",
"<",
"_int8_max",
":",
"return",
"ensure_int8",
"(",
"indexer",
")",
"elif",
"length",
"<",
"_int16_max",
":",
"return",
"ensure_int16",
"(",
"indexer",
")",
"elif",
"length",
"<",
"_int32_max",
":",
"return",
"ensure_int32",
"(",
"indexer",
")",
"return",
"ensure_int64",
"(",
"indexer",
")"
] |
coerce the indexer input array to the smallest dtype possible
|
[
"coerce",
"the",
"indexer",
"input",
"array",
"to",
"the",
"smallest",
"dtype",
"possible"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L565-L574
|
19,756
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
coerce_to_dtypes
|
def coerce_to_dtypes(result, dtypes):
"""
given dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
def conv(r, dtype):
try:
if isna(r):
pass
elif dtype == _NS_DTYPE:
r = tslibs.Timestamp(r)
elif dtype == _TD_DTYPE:
r = tslibs.Timedelta(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except Exception:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
|
python
|
def coerce_to_dtypes(result, dtypes):
"""
given dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
def conv(r, dtype):
try:
if isna(r):
pass
elif dtype == _NS_DTYPE:
r = tslibs.Timestamp(r)
elif dtype == _TD_DTYPE:
r = tslibs.Timedelta(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except Exception:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
|
[
"def",
"coerce_to_dtypes",
"(",
"result",
",",
"dtypes",
")",
":",
"if",
"len",
"(",
"result",
")",
"!=",
"len",
"(",
"dtypes",
")",
":",
"raise",
"AssertionError",
"(",
"\"_coerce_to_dtypes requires equal len arrays\"",
")",
"def",
"conv",
"(",
"r",
",",
"dtype",
")",
":",
"try",
":",
"if",
"isna",
"(",
"r",
")",
":",
"pass",
"elif",
"dtype",
"==",
"_NS_DTYPE",
":",
"r",
"=",
"tslibs",
".",
"Timestamp",
"(",
"r",
")",
"elif",
"dtype",
"==",
"_TD_DTYPE",
":",
"r",
"=",
"tslibs",
".",
"Timedelta",
"(",
"r",
")",
"elif",
"dtype",
"==",
"np",
".",
"bool_",
":",
"# messy. non 0/1 integers do not get converted.",
"if",
"is_integer",
"(",
"r",
")",
"and",
"r",
"not",
"in",
"[",
"0",
",",
"1",
"]",
":",
"return",
"int",
"(",
"r",
")",
"r",
"=",
"bool",
"(",
"r",
")",
"elif",
"dtype",
".",
"kind",
"==",
"'f'",
":",
"r",
"=",
"float",
"(",
"r",
")",
"elif",
"dtype",
".",
"kind",
"==",
"'i'",
":",
"r",
"=",
"int",
"(",
"r",
")",
"except",
"Exception",
":",
"pass",
"return",
"r",
"return",
"[",
"conv",
"(",
"r",
",",
"dtype",
")",
"for",
"r",
",",
"dtype",
"in",
"zip",
"(",
"result",
",",
"dtypes",
")",
"]"
] |
given dtypes and a result set, coerce the result elements to the
dtypes
|
[
"given",
"a",
"dtypes",
"and",
"a",
"result",
"set",
"coerce",
"the",
"result",
"elements",
"to",
"the",
"dtypes"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L577-L607
|
19,757
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
find_common_type
|
def find_common_type(types):
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
if any(isinstance(t, (PandasExtensionDtype, ExtensionDtype))
for t in types):
return np.object
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype('datetime64[ns]')
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
for t in types:
if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):
return np.object
return np.find_common_type(types, [])
|
python
|
def find_common_type(types):
"""
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
"""
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
if any(isinstance(t, (PandasExtensionDtype, ExtensionDtype))
for t in types):
return np.object
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype('datetime64[ns]')
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
for t in types:
if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):
return np.object
return np.find_common_type(types, [])
|
[
"def",
"find_common_type",
"(",
"types",
")",
":",
"if",
"len",
"(",
"types",
")",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'no types given'",
")",
"first",
"=",
"types",
"[",
"0",
"]",
"# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)",
"# => object",
"if",
"all",
"(",
"is_dtype_equal",
"(",
"first",
",",
"t",
")",
"for",
"t",
"in",
"types",
"[",
"1",
":",
"]",
")",
":",
"return",
"first",
"if",
"any",
"(",
"isinstance",
"(",
"t",
",",
"(",
"PandasExtensionDtype",
",",
"ExtensionDtype",
")",
")",
"for",
"t",
"in",
"types",
")",
":",
"return",
"np",
".",
"object",
"# take lowest unit",
"if",
"all",
"(",
"is_datetime64_dtype",
"(",
"t",
")",
"for",
"t",
"in",
"types",
")",
":",
"return",
"np",
".",
"dtype",
"(",
"'datetime64[ns]'",
")",
"if",
"all",
"(",
"is_timedelta64_dtype",
"(",
"t",
")",
"for",
"t",
"in",
"types",
")",
":",
"return",
"np",
".",
"dtype",
"(",
"'timedelta64[ns]'",
")",
"# don't mix bool / int or float or complex",
"# this is different from numpy, which casts bool with float/int as int",
"has_bools",
"=",
"any",
"(",
"is_bool_dtype",
"(",
"t",
")",
"for",
"t",
"in",
"types",
")",
"if",
"has_bools",
":",
"for",
"t",
"in",
"types",
":",
"if",
"is_integer_dtype",
"(",
"t",
")",
"or",
"is_float_dtype",
"(",
"t",
")",
"or",
"is_complex_dtype",
"(",
"t",
")",
":",
"return",
"np",
".",
"object",
"return",
"np",
".",
"find_common_type",
"(",
"types",
",",
"[",
"]",
")"
] |
Find a common data type among the given dtypes.
Parameters
----------
types : list of dtypes
Returns
-------
pandas extension or numpy dtype
See Also
--------
numpy.find_common_type
|
[
"Find",
"a",
"common",
"data",
"type",
"among",
"the",
"given",
"dtypes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L1083-L1129
|
19,758
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
cast_scalar_to_array
|
def cast_scalar_to_array(shape, value, dtype=None):
"""
create np.ndarray of specified shape and dtype, filled with the value
Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
dtype to coerce
Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
"""
if dtype is None:
dtype, fill_value = infer_dtype_from_scalar(value)
else:
fill_value = value
values = np.empty(shape, dtype=dtype)
values.fill(fill_value)
return values
|
python
|
def cast_scalar_to_array(shape, value, dtype=None):
"""
create np.ndarray of specified shape and dtype, filled with the value
Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
dtype to coerce
Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
"""
if dtype is None:
dtype, fill_value = infer_dtype_from_scalar(value)
else:
fill_value = value
values = np.empty(shape, dtype=dtype)
values.fill(fill_value)
return values
|
[
"def",
"cast_scalar_to_array",
"(",
"shape",
",",
"value",
",",
"dtype",
"=",
"None",
")",
":",
"if",
"dtype",
"is",
"None",
":",
"dtype",
",",
"fill_value",
"=",
"infer_dtype_from_scalar",
"(",
"value",
")",
"else",
":",
"fill_value",
"=",
"value",
"values",
"=",
"np",
".",
"empty",
"(",
"shape",
",",
"dtype",
"=",
"dtype",
")",
"values",
".",
"fill",
"(",
"fill_value",
")",
"return",
"values"
] |
create np.ndarray of specified shape and dtype, filled with the value
Parameters
----------
shape : tuple
value : scalar value
dtype : np.dtype, optional
dtype to coerce
Returns
-------
ndarray of shape, filled with value, of specified / inferred dtype
|
[
"create",
"np",
".",
"ndarray",
"of",
"specified",
"shape",
"and",
"dtype",
"filled",
"with",
"values"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L1132-L1157
|
19,759
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
construct_1d_object_array_from_listlike
|
def construct_1d_object_array_from_listlike(values):
"""
Transform any list-like object into a 1-dimensional numpy array of object
dtype.
Parameters
----------
values : any iterable which has a len()
Raises
------
TypeError
* If `values` does not have a len()
Returns
-------
1-dimensional numpy array of dtype object
"""
# numpy will try to interpret nested lists as further dimensions, hence
# making a 1D array that contains list-likes is a bit tricky:
result = np.empty(len(values), dtype='object')
result[:] = values
return result
|
python
|
def construct_1d_object_array_from_listlike(values):
"""
Transform any list-like object into a 1-dimensional numpy array of object
dtype.
Parameters
----------
values : any iterable which has a len()
Raises
------
TypeError
* If `values` does not have a len()
Returns
-------
1-dimensional numpy array of dtype object
"""
# numpy will try to interpret nested lists as further dimensions, hence
# making a 1D array that contains list-likes is a bit tricky:
result = np.empty(len(values), dtype='object')
result[:] = values
return result
|
[
"def",
"construct_1d_object_array_from_listlike",
"(",
"values",
")",
":",
"# numpy will try to interpret nested lists as further dimensions, hence",
"# making a 1D array that contains list-likes is a bit tricky:",
"result",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"values",
")",
",",
"dtype",
"=",
"'object'",
")",
"result",
"[",
":",
"]",
"=",
"values",
"return",
"result"
] |
Transform any list-like object into a 1-dimensional numpy array of object
dtype.
Parameters
----------
values : any iterable which has a len()
Raises
------
TypeError
* If `values` does not have a len()
Returns
-------
1-dimensional numpy array of dtype object
|
[
"Transform",
"any",
"list",
"-",
"like",
"object",
"in",
"a",
"1",
"-",
"dimensional",
"numpy",
"array",
"of",
"object",
"dtype",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L1202-L1224
|
19,760
|
pandas-dev/pandas
|
pandas/core/dtypes/cast.py
|
construct_1d_ndarray_preserving_na
|
def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
"""
Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
Parameters
----------
values : Sequence
dtype : numpy.dtype, optional
copy : bool, default False
Note that copies may still be made with ``copy=False`` if casting
is required.
Returns
-------
arr : ndarray[dtype]
Examples
--------
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
>>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
"""
subarr = np.array(values, dtype=dtype, copy=copy)
if dtype is not None and dtype.kind in ("U", "S"):
# GH-21083
# We can't just return np.array(subarr, dtype='str') since
# NumPy will convert the non-string objects into strings
# Including NA values. So we have to go
# string -> object -> update NA, which requires an
# additional pass over the data.
na_values = isna(values)
subarr2 = subarr.astype(object)
subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
subarr = subarr2
return subarr
|
python
|
def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
"""
Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
Parameters
----------
values : Sequence
dtype : numpy.dtype, optional
copy : bool, default False
Note that copies may still be made with ``copy=False`` if casting
is required.
Returns
-------
arr : ndarray[dtype]
Examples
--------
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
>>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
"""
subarr = np.array(values, dtype=dtype, copy=copy)
if dtype is not None and dtype.kind in ("U", "S"):
# GH-21083
# We can't just return np.array(subarr, dtype='str') since
# NumPy will convert the non-string objects into strings
# Including NA values. So we have to go
# string -> object -> update NA, which requires an
# additional pass over the data.
na_values = isna(values)
subarr2 = subarr.astype(object)
subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
subarr = subarr2
return subarr
|
[
"def",
"construct_1d_ndarray_preserving_na",
"(",
"values",
",",
"dtype",
"=",
"None",
",",
"copy",
"=",
"False",
")",
":",
"subarr",
"=",
"np",
".",
"array",
"(",
"values",
",",
"dtype",
"=",
"dtype",
",",
"copy",
"=",
"copy",
")",
"if",
"dtype",
"is",
"not",
"None",
"and",
"dtype",
".",
"kind",
"in",
"(",
"\"U\"",
",",
"\"S\"",
")",
":",
"# GH-21083",
"# We can't just return np.array(subarr, dtype='str') since",
"# NumPy will convert the non-string objects into strings",
"# Including NA values. Se we have to go",
"# string -> object -> update NA, which requires an",
"# additional pass over the data.",
"na_values",
"=",
"isna",
"(",
"values",
")",
"subarr2",
"=",
"subarr",
".",
"astype",
"(",
"object",
")",
"subarr2",
"[",
"na_values",
"]",
"=",
"np",
".",
"asarray",
"(",
"values",
",",
"dtype",
"=",
"object",
")",
"[",
"na_values",
"]",
"subarr",
"=",
"subarr2",
"return",
"subarr"
] |
Construct a new ndarray, coercing `values` to `dtype`, preserving NA.
Parameters
----------
values : Sequence
dtype : numpy.dtype, optional
copy : bool, default False
Note that copies may still be made with ``copy=False`` if casting
is required.
Returns
-------
arr : ndarray[dtype]
Examples
--------
>>> np.array([1.0, 2.0, None], dtype='str')
array(['1.0', '2.0', 'None'], dtype='<U4')
>>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')
|
[
"Construct",
"a",
"new",
"ndarray",
"coercing",
"values",
"to",
"dtype",
"preserving",
"NA",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/cast.py#L1227-L1266
|
19,761
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
scatter_plot
|
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
by : column name or sequence, optional
If passed, data is grouped by `by` and one subplot is drawn per group
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
matplotlib.Figure
"""
import matplotlib.pyplot as plt
kwargs.setdefault('edgecolors', 'none')
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig
|
python
|
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
by : column name or sequence, optional
If passed, data is grouped by `by` and one subplot is drawn per group
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
matplotlib.Figure
"""
import matplotlib.pyplot as plt
kwargs.setdefault('edgecolors', 'none')
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig
|
[
"def",
"scatter_plot",
"(",
"data",
",",
"x",
",",
"y",
",",
"by",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"grid",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"kwargs",
".",
"setdefault",
"(",
"'edgecolors'",
",",
"'none'",
")",
"def",
"plot_group",
"(",
"group",
",",
"ax",
")",
":",
"xvals",
"=",
"group",
"[",
"x",
"]",
".",
"values",
"yvals",
"=",
"group",
"[",
"y",
"]",
".",
"values",
"ax",
".",
"scatter",
"(",
"xvals",
",",
"yvals",
",",
"*",
"*",
"kwargs",
")",
"ax",
".",
"grid",
"(",
"grid",
")",
"if",
"by",
"is",
"not",
"None",
":",
"fig",
"=",
"_grouped_plot",
"(",
"plot_group",
",",
"data",
",",
"by",
"=",
"by",
",",
"figsize",
"=",
"figsize",
",",
"ax",
"=",
"ax",
")",
"else",
":",
"if",
"ax",
"is",
"None",
":",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"else",
":",
"fig",
"=",
"ax",
".",
"get_figure",
"(",
")",
"plot_group",
"(",
"data",
",",
"ax",
")",
"ax",
".",
"set_ylabel",
"(",
"pprint_thing",
"(",
"y",
")",
")",
"ax",
".",
"set_xlabel",
"(",
"pprint_thing",
"(",
"x",
")",
")",
"ax",
".",
"grid",
"(",
"grid",
")",
"return",
"fig"
] |
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
matplotlib.Figure
|
[
"Make",
"a",
"scatter",
"plot",
"from",
"two",
"DataFrame",
"columns"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L2284-L2328
|
19,762
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
hist_frame
|
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Make a histogram of the DataFrame's columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist` on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
data : DataFrame
The pandas object holding the data.
column : string or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels. For example, a value of 90 displays the
x labels rotated 90 degrees clockwise.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels. For example, a value of 90 displays the
y labels rotated 90 degrees clockwise.
ax : Matplotlib axes object, default None
The axes to plot the histogram on.
sharex : bool, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in.
Note that passing in both an ax and sharex=True will alter all x axis
labels for all subplots in a figure.
sharey : bool, default False
In case subplots=True, share y axis and set some y axis labels to
invisible.
figsize : tuple
The size in inches of the figure to create. Uses the value in
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
**kwds
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
.. plot::
:context: close-figs
This example draws a histogram based on the length and width of
some animals, displayed in three bins
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
_raise_if_no_mpl()
_converter._WARN = False
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
figsize=figsize, sharex=sharex, sharey=sharey,
layout=layout, bins=bins, xlabelsize=xlabelsize,
xrot=xrot, ylabelsize=ylabelsize,
yrot=yrot, **kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, ABCIndexClass)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(com.try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
|
python
|
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Make a histogram of the DataFrame's columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist` on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
data : DataFrame
The pandas object holding the data.
column : string or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels. For example, a value of 90 displays the
x labels rotated 90 degrees clockwise.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels. For example, a value of 90 displays the
y labels rotated 90 degrees clockwise.
ax : Matplotlib axes object, default None
The axes to plot the histogram on.
sharex : bool, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in.
Note that passing in both an ax and sharex=True will alter all x axis
labels for all subplots in a figure.
sharey : bool, default False
In case subplots=True, share y axis and set some y axis labels to
invisible.
figsize : tuple
The size in inches of the figure to create. Uses the value in
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
**kwds
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
.. plot::
:context: close-figs
This example draws a histogram based on the length and width of
some animals, displayed in three bins
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
_raise_if_no_mpl()
_converter._WARN = False
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
figsize=figsize, sharex=sharex, sharey=sharey,
layout=layout, bins=bins, xlabelsize=xlabelsize,
xrot=xrot, ylabelsize=ylabelsize,
yrot=yrot, **kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, ABCIndexClass)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(com.try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
|
[
"def",
"hist_frame",
"(",
"data",
",",
"column",
"=",
"None",
",",
"by",
"=",
"None",
",",
"grid",
"=",
"True",
",",
"xlabelsize",
"=",
"None",
",",
"xrot",
"=",
"None",
",",
"ylabelsize",
"=",
"None",
",",
"yrot",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"sharex",
"=",
"False",
",",
"sharey",
"=",
"False",
",",
"figsize",
"=",
"None",
",",
"layout",
"=",
"None",
",",
"bins",
"=",
"10",
",",
"*",
"*",
"kwds",
")",
":",
"_raise_if_no_mpl",
"(",
")",
"_converter",
".",
"_WARN",
"=",
"False",
"if",
"by",
"is",
"not",
"None",
":",
"axes",
"=",
"grouped_hist",
"(",
"data",
",",
"column",
"=",
"column",
",",
"by",
"=",
"by",
",",
"ax",
"=",
"ax",
",",
"grid",
"=",
"grid",
",",
"figsize",
"=",
"figsize",
",",
"sharex",
"=",
"sharex",
",",
"sharey",
"=",
"sharey",
",",
"layout",
"=",
"layout",
",",
"bins",
"=",
"bins",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"xrot",
"=",
"xrot",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"yrot",
"=",
"yrot",
",",
"*",
"*",
"kwds",
")",
"return",
"axes",
"if",
"column",
"is",
"not",
"None",
":",
"if",
"not",
"isinstance",
"(",
"column",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
",",
"ABCIndexClass",
")",
")",
":",
"column",
"=",
"[",
"column",
"]",
"data",
"=",
"data",
"[",
"column",
"]",
"data",
"=",
"data",
".",
"_get_numeric_data",
"(",
")",
"naxes",
"=",
"len",
"(",
"data",
".",
"columns",
")",
"fig",
",",
"axes",
"=",
"_subplots",
"(",
"naxes",
"=",
"naxes",
",",
"ax",
"=",
"ax",
",",
"squeeze",
"=",
"False",
",",
"sharex",
"=",
"sharex",
",",
"sharey",
"=",
"sharey",
",",
"figsize",
"=",
"figsize",
",",
"layout",
"=",
"layout",
")",
"_axes",
"=",
"_flatten",
"(",
"axes",
")",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"com",
".",
"try_sort",
"(",
"data",
".",
"columns",
")",
")",
":",
"ax",
"=",
"_axes",
"[",
"i",
"]",
"ax",
".",
"hist",
"(",
"data",
"[",
"col",
"]",
".",
"dropna",
"(",
")",
".",
"values",
",",
"bins",
"=",
"bins",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"set_title",
"(",
"col",
")",
"ax",
".",
"grid",
"(",
"grid",
")",
"_set_ticks_props",
"(",
"axes",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"xrot",
"=",
"xrot",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"yrot",
"=",
"yrot",
")",
"fig",
".",
"subplots_adjust",
"(",
"wspace",
"=",
"0.3",
",",
"hspace",
"=",
"0.3",
")",
"return",
"axes"
] |
Make a histogram of the DataFrame's columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist` on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
data : DataFrame
The pandas object holding the data.
column : string or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels. For example, a value of 90 displays the
x labels rotated 90 degrees clockwise.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels. For example, a value of 90 displays the
y labels rotated 90 degrees clockwise.
ax : Matplotlib axes object, default None
The axes to plot the histogram on.
sharex : bool, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in.
Note that passing in both an ax and sharex=True will alter all x axis
labels for all subplots in a figure.
sharey : bool, default False
In case subplots=True, share y axis and set some y axis labels to
invisible.
figsize : tuple
The size in inches of the figure to create. Uses the value in
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
**kwds
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
.. plot::
:context: close-figs
This example draws a histogram based on the length and width of
some animals, displayed in three bins
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
|
[
"Make",
"a",
"histogram",
"of",
"the",
"DataFrame",
"s",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L2331-L2443
|
19,763
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
hist_series
|
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None,
bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : bool, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
`**kwds` : keywords
To be passed to the actual plotting function
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
bins=bins, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot, **kwds)
if hasattr(axes, 'ndim'):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
|
python
|
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None,
bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : bool, default True
Whether to show axis grid lines
    xlabelsize : int, default None
        If specified, changes the x-axis label size.
    xrot : float, default None
        Rotation of x-axis labels.
    ylabelsize : int, default None
        If specified, changes the y-axis label size.
    yrot : float, default None
        Rotation of y-axis labels.
figsize : tuple, default None
figure size in inches by default
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
`**kwds` : keywords
To be passed to the actual plotting function
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
bins=bins, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot, **kwds)
if hasattr(axes, 'ndim'):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
|
[
"def",
"hist_series",
"(",
"self",
",",
"by",
"=",
"None",
",",
"ax",
"=",
"None",
",",
"grid",
"=",
"True",
",",
"xlabelsize",
"=",
"None",
",",
"xrot",
"=",
"None",
",",
"ylabelsize",
"=",
"None",
",",
"yrot",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"bins",
"=",
"10",
",",
"*",
"*",
"kwds",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"if",
"by",
"is",
"None",
":",
"if",
"kwds",
".",
"get",
"(",
"'layout'",
",",
"None",
")",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"The 'layout' keyword is not supported when \"",
"\"'by' is None\"",
")",
"# hack until the plotting interface is a bit more unified",
"fig",
"=",
"kwds",
".",
"pop",
"(",
"'figure'",
",",
"plt",
".",
"gcf",
"(",
")",
"if",
"plt",
".",
"get_fignums",
"(",
")",
"else",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
")",
"if",
"(",
"figsize",
"is",
"not",
"None",
"and",
"tuple",
"(",
"figsize",
")",
"!=",
"tuple",
"(",
"fig",
".",
"get_size_inches",
"(",
")",
")",
")",
":",
"fig",
".",
"set_size_inches",
"(",
"*",
"figsize",
",",
"forward",
"=",
"True",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"fig",
".",
"gca",
"(",
")",
"elif",
"ax",
".",
"get_figure",
"(",
")",
"!=",
"fig",
":",
"raise",
"AssertionError",
"(",
"'passed axis not bound to passed figure'",
")",
"values",
"=",
"self",
".",
"dropna",
"(",
")",
".",
"values",
"ax",
".",
"hist",
"(",
"values",
",",
"bins",
"=",
"bins",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"grid",
"(",
"grid",
")",
"axes",
"=",
"np",
".",
"array",
"(",
"[",
"ax",
"]",
")",
"_set_ticks_props",
"(",
"axes",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"xrot",
"=",
"xrot",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"yrot",
"=",
"yrot",
")",
"else",
":",
"if",
"'figure'",
"in",
"kwds",
":",
"raise",
"ValueError",
"(",
"\"Cannot pass 'figure' when using the \"",
"\"'by' argument, since a new 'Figure' instance \"",
"\"will be created\"",
")",
"axes",
"=",
"grouped_hist",
"(",
"self",
",",
"by",
"=",
"by",
",",
"ax",
"=",
"ax",
",",
"grid",
"=",
"grid",
",",
"figsize",
"=",
"figsize",
",",
"bins",
"=",
"bins",
",",
"xlabelsize",
"=",
"xlabelsize",
",",
"xrot",
"=",
"xrot",
",",
"ylabelsize",
"=",
"ylabelsize",
",",
"yrot",
"=",
"yrot",
",",
"*",
"*",
"kwds",
")",
"if",
"hasattr",
"(",
"axes",
",",
"'ndim'",
")",
":",
"if",
"axes",
".",
"ndim",
"==",
"1",
"and",
"len",
"(",
"axes",
")",
"==",
"1",
":",
"return",
"axes",
"[",
"0",
"]",
"return",
"axes"
] |
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : bool, default True
Whether to show axis grid lines
    xlabelsize : int, default None
        If specified, changes the x-axis label size.
    xrot : float, default None
        Rotation of x-axis labels.
    ylabelsize : int, default None
        If specified, changes the y-axis label size.
    yrot : float, default None
        Rotation of y-axis labels.
figsize : tuple, default None
figure size in inches by default
bins : integer or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
`**kwds` : keywords
To be passed to the actual plotting function
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
|
[
"Draw",
"histogram",
"of",
"the",
"input",
"series",
"using",
"matplotlib",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L2446-L2521
|
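Unlike most records here, `hist_series` carries no doctest. A minimal usage sketch through the public `Series.hist` entry point, with illustrative random data (the grouping keys are an assumption, used to exercise the `by` branch):

import numpy as np
import pandas as pd

s = pd.Series(np.random.randn(100))

# by=None: a single histogram drawn on one Axes.
ax = s.hist(bins=10, grid=False)

# by=...: one histogram per group via grouped_hist; note that a
# 'figure' keyword is rejected on this path and a new Figure is
# created instead.
keys = np.random.choice(['a', 'b'], size=100)
axes = s.hist(by=keys, bins=10)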
19,764
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
boxplot_frame_groupby
|
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, sharex=False, sharey=True, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots : bool
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
    fontsize : int or str
    rot : float, default 0
        Label rotation angle.
    grid : bool, default True
        Setting this to True will show the grid.
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
sharex : bool, default False
Whether x-axes will be shared among subplots
.. versionadded:: 0.23.1
sharey : bool, default True
Whether y-axes will be shared among subplots
.. versionadded:: 0.23.1
`**kwds` : Keyword Arguments
All other plotting keyword arguments to be passed to
matplotlib's boxplot function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case ``subplots=False``
Examples
--------
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
_raise_if_no_mpl()
_converter._WARN = False
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=sharex, sharey=sharey,
figsize=figsize, layout=layout)
axes = _flatten(axes)
from pandas.core.series import Series
ret = Series()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(pprint_thing(key))
ret.loc[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
right=0.9, wspace=0.2)
else:
from pandas.core.reshape.concat import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize,
layout=layout, **kwds)
return ret
|
python
|
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, sharex=False, sharey=True, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots : bool
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
    fontsize : int or str
    rot : float, default 0
        Label rotation angle.
    grid : bool, default True
        Setting this to True will show the grid.
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
sharex : bool, default False
Whether x-axes will be shared among subplots
.. versionadded:: 0.23.1
sharey : bool, default True
Whether y-axes will be shared among subplots
.. versionadded:: 0.23.1
`**kwds` : Keyword Arguments
All other plotting keyword arguments to be passed to
matplotlib's boxplot function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case ``subplots=False``
Examples
--------
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
_raise_if_no_mpl()
_converter._WARN = False
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=sharex, sharey=sharey,
figsize=figsize, layout=layout)
axes = _flatten(axes)
from pandas.core.series import Series
ret = Series()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(pprint_thing(key))
ret.loc[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
right=0.9, wspace=0.2)
else:
from pandas.core.reshape.concat import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize,
layout=layout, **kwds)
return ret
|
[
"def",
"boxplot_frame_groupby",
"(",
"grouped",
",",
"subplots",
"=",
"True",
",",
"column",
"=",
"None",
",",
"fontsize",
"=",
"None",
",",
"rot",
"=",
"0",
",",
"grid",
"=",
"True",
",",
"ax",
"=",
"None",
",",
"figsize",
"=",
"None",
",",
"layout",
"=",
"None",
",",
"sharex",
"=",
"False",
",",
"sharey",
"=",
"True",
",",
"*",
"*",
"kwds",
")",
":",
"_raise_if_no_mpl",
"(",
")",
"_converter",
".",
"_WARN",
"=",
"False",
"if",
"subplots",
"is",
"True",
":",
"naxes",
"=",
"len",
"(",
"grouped",
")",
"fig",
",",
"axes",
"=",
"_subplots",
"(",
"naxes",
"=",
"naxes",
",",
"squeeze",
"=",
"False",
",",
"ax",
"=",
"ax",
",",
"sharex",
"=",
"sharex",
",",
"sharey",
"=",
"sharey",
",",
"figsize",
"=",
"figsize",
",",
"layout",
"=",
"layout",
")",
"axes",
"=",
"_flatten",
"(",
"axes",
")",
"from",
"pandas",
".",
"core",
".",
"series",
"import",
"Series",
"ret",
"=",
"Series",
"(",
")",
"for",
"(",
"key",
",",
"group",
")",
",",
"ax",
"in",
"zip",
"(",
"grouped",
",",
"axes",
")",
":",
"d",
"=",
"group",
".",
"boxplot",
"(",
"ax",
"=",
"ax",
",",
"column",
"=",
"column",
",",
"fontsize",
"=",
"fontsize",
",",
"rot",
"=",
"rot",
",",
"grid",
"=",
"grid",
",",
"*",
"*",
"kwds",
")",
"ax",
".",
"set_title",
"(",
"pprint_thing",
"(",
"key",
")",
")",
"ret",
".",
"loc",
"[",
"key",
"]",
"=",
"d",
"fig",
".",
"subplots_adjust",
"(",
"bottom",
"=",
"0.15",
",",
"top",
"=",
"0.9",
",",
"left",
"=",
"0.1",
",",
"right",
"=",
"0.9",
",",
"wspace",
"=",
"0.2",
")",
"else",
":",
"from",
"pandas",
".",
"core",
".",
"reshape",
".",
"concat",
"import",
"concat",
"keys",
",",
"frames",
"=",
"zip",
"(",
"*",
"grouped",
")",
"if",
"grouped",
".",
"axis",
"==",
"0",
":",
"df",
"=",
"concat",
"(",
"frames",
",",
"keys",
"=",
"keys",
",",
"axis",
"=",
"1",
")",
"else",
":",
"if",
"len",
"(",
"frames",
")",
">",
"1",
":",
"df",
"=",
"frames",
"[",
"0",
"]",
".",
"join",
"(",
"frames",
"[",
"1",
":",
":",
"]",
")",
"else",
":",
"df",
"=",
"frames",
"[",
"0",
"]",
"ret",
"=",
"df",
".",
"boxplot",
"(",
"column",
"=",
"column",
",",
"fontsize",
"=",
"fontsize",
",",
"rot",
"=",
"rot",
",",
"grid",
"=",
"grid",
",",
"ax",
"=",
"ax",
",",
"figsize",
"=",
"figsize",
",",
"layout",
"=",
"layout",
",",
"*",
"*",
"kwds",
")",
"return",
"ret"
] |
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots : bool
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
    fontsize : int or str
    rot : float, default 0
        Label rotation angle.
    grid : bool, default True
        Setting this to True will show the grid.
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
sharex : bool, default False
Whether x-axes will be shared among subplots
.. versionadded:: 0.23.1
sharey : bool, default True
Whether y-axes will be shared among subplots
.. versionadded:: 0.23.1
`**kwds` : Keyword Arguments
All other plotting keyword arguments to be passed to
matplotlib's boxplot function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case ``subplots=False``
Examples
--------
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
|
[
"Make",
"box",
"plots",
"from",
"DataFrameGroupBy",
"data",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L2570-L2653
|
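The same code path is reachable as `DataFrameGroupBy.boxplot`; a small sketch under that assumption, with illustrative data:

import numpy as np
import pandas as pd

df = pd.DataFrame({'grp': list('aabb'),
                   'x': np.random.randn(4),
                   'y': np.random.randn(4)})

# subplots=True (the default): one boxplot Axes per group, collected
# in a Series keyed by group label.
ret = df.groupby('grp').boxplot()

# subplots=False: the groups are concatenated column-wise and a
# single DataFrame.boxplot result is returned.
ret = df.groupby('grp').boxplot(subplots=False)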
19,765
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
MPLPlot._has_plotted_object
|
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
|
python
|
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
|
[
"def",
"_has_plotted_object",
"(",
"self",
",",
"ax",
")",
":",
"return",
"(",
"len",
"(",
"ax",
".",
"lines",
")",
"!=",
"0",
"or",
"len",
"(",
"ax",
".",
"artists",
")",
"!=",
"0",
"or",
"len",
"(",
"ax",
".",
"containers",
")",
"!=",
"0",
")"
] |
check whether ax has data
|
[
"check",
"whether",
"ax",
"has",
"data"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L260-L264
|
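A standalone sketch of the emptiness check `_has_plotted_object` performs, rewritten against a bare matplotlib Axes rather than the private class:

import matplotlib.pyplot as plt

def has_plotted_object(ax):
    # Mirrors the method above: lines, artists, or containers
    # (e.g. bar containers) mean the Axes already holds data.
    return (len(ax.lines) != 0 or
            len(ax.artists) != 0 or
            len(ax.containers) != 0)

fig, ax = plt.subplots()
assert not has_plotted_object(ax)
ax.plot([1, 2, 3])          # adds a Line2D to ax.lines
assert has_plotted_object(ax)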
19,766
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
MPLPlot.result
|
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
|
python
|
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
|
[
"def",
"result",
"(",
"self",
")",
":",
"if",
"self",
".",
"subplots",
":",
"if",
"self",
".",
"layout",
"is",
"not",
"None",
"and",
"not",
"is_list_like",
"(",
"self",
".",
"ax",
")",
":",
"return",
"self",
".",
"axes",
".",
"reshape",
"(",
"*",
"self",
".",
"layout",
")",
"else",
":",
"return",
"self",
".",
"axes",
"else",
":",
"sec_true",
"=",
"isinstance",
"(",
"self",
".",
"secondary_y",
",",
"bool",
")",
"and",
"self",
".",
"secondary_y",
"all_sec",
"=",
"(",
"is_list_like",
"(",
"self",
".",
"secondary_y",
")",
"and",
"len",
"(",
"self",
".",
"secondary_y",
")",
"==",
"self",
".",
"nseries",
")",
"if",
"(",
"sec_true",
"or",
"all_sec",
")",
":",
"# if all data is plotted on secondary, return right axes",
"return",
"self",
".",
"_get_ax_layer",
"(",
"self",
".",
"axes",
"[",
"0",
"]",
",",
"primary",
"=",
"False",
")",
"else",
":",
"return",
"self",
".",
"axes",
"[",
"0",
"]"
] |
Return result axes
|
[
"Return",
"result",
"axes"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L336-L353
|
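The `secondary_y` branch above is observable from user code: when all series are drawn on the secondary axis, the right-hand Axes layer is what comes back. A sketch with illustrative data:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [30, 20, 10]})

# Every series on the secondary y-axis: the right Axes layer is
# returned rather than the primary one.
ax_right = df.plot(secondary_y=True)

# subplots=True with an explicit layout: the axes array is
# reshaped to that layout before being returned.
axes = df.plot(subplots=True, layout=(1, 2))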
19,767
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
MPLPlot._post_plot_logic_common
|
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
def get_label(i):
try:
return pprint_thing(data.index[i])
except Exception:
return ''
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [get_label(x) for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [get_label(y) for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
    else:  # pragma: no cover
raise ValueError
|
python
|
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
def get_label(i):
try:
return pprint_thing(data.index[i])
except Exception:
return ''
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [get_label(x) for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [get_label(y) for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
    else:  # pragma: no cover
raise ValueError
|
[
"def",
"_post_plot_logic_common",
"(",
"self",
",",
"ax",
",",
"data",
")",
":",
"def",
"get_label",
"(",
"i",
")",
":",
"try",
":",
"return",
"pprint_thing",
"(",
"data",
".",
"index",
"[",
"i",
"]",
")",
"except",
"Exception",
":",
"return",
"''",
"if",
"self",
".",
"orientation",
"==",
"'vertical'",
"or",
"self",
".",
"orientation",
"is",
"None",
":",
"if",
"self",
".",
"_need_to_set_index",
":",
"xticklabels",
"=",
"[",
"get_label",
"(",
"x",
")",
"for",
"x",
"in",
"ax",
".",
"get_xticks",
"(",
")",
"]",
"ax",
".",
"set_xticklabels",
"(",
"xticklabels",
")",
"self",
".",
"_apply_axis_properties",
"(",
"ax",
".",
"xaxis",
",",
"rot",
"=",
"self",
".",
"rot",
",",
"fontsize",
"=",
"self",
".",
"fontsize",
")",
"self",
".",
"_apply_axis_properties",
"(",
"ax",
".",
"yaxis",
",",
"fontsize",
"=",
"self",
".",
"fontsize",
")",
"if",
"hasattr",
"(",
"ax",
",",
"'right_ax'",
")",
":",
"self",
".",
"_apply_axis_properties",
"(",
"ax",
".",
"right_ax",
".",
"yaxis",
",",
"fontsize",
"=",
"self",
".",
"fontsize",
")",
"elif",
"self",
".",
"orientation",
"==",
"'horizontal'",
":",
"if",
"self",
".",
"_need_to_set_index",
":",
"yticklabels",
"=",
"[",
"get_label",
"(",
"y",
")",
"for",
"y",
"in",
"ax",
".",
"get_yticks",
"(",
")",
"]",
"ax",
".",
"set_yticklabels",
"(",
"yticklabels",
")",
"self",
".",
"_apply_axis_properties",
"(",
"ax",
".",
"yaxis",
",",
"rot",
"=",
"self",
".",
"rot",
",",
"fontsize",
"=",
"self",
".",
"fontsize",
")",
"self",
".",
"_apply_axis_properties",
"(",
"ax",
".",
"xaxis",
",",
"fontsize",
"=",
"self",
".",
"fontsize",
")",
"if",
"hasattr",
"(",
"ax",
",",
"'right_ax'",
")",
":",
"self",
".",
"_apply_axis_properties",
"(",
"ax",
".",
"right_ax",
".",
"yaxis",
",",
"fontsize",
"=",
"self",
".",
"fontsize",
")",
"else",
":",
"# pragma no cover",
"raise",
"ValueError"
] |
Common post process for each axes
|
[
"Common",
"post",
"process",
"for",
"each",
"axes"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L402-L435
|
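The rot/fontsize plumbing above corresponds to the public `rot` and `fontsize` plot keywords; a brief sketch showing the two orientation branches:

import pandas as pd

df = pd.DataFrame({'v': [3, 1, 2]}, index=['one', 'two', 'three'])

# Vertical orientation: rot is applied to the x-axis tick labels.
ax = df.plot(kind='bar', rot=45, fontsize=8)

# Horizontal orientation: rot is applied to the y-axis tick labels
# instead, matching the elif branch above.
ax = df.plot(kind='barh', rot=0, fontsize=8)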
19,768
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
MPLPlot._adorn_subplots
|
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
msg = ('The length of `title` must equal the number '
'of columns if using `title` of type `list` '
'and `subplots=True`.\n'
'length of title = {}\n'
'number of columns = {}').format(
len(self.title), self.nseries)
raise ValueError(msg)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = ('Using `title` of type `list` is not supported '
'unless `subplots=True` is passed')
raise ValueError(msg)
self.axes[0].set_title(self.title)
|
python
|
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
msg = ('The length of `title` must equal the number '
'of columns if using `title` of type `list` '
'and `subplots=True`.\n'
'length of title = {}\n'
'number of columns = {}').format(
len(self.title), self.nseries)
raise ValueError(msg)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = ('Using `title` of type `list` is not supported '
'unless `subplots=True` is passed')
raise ValueError(msg)
self.axes[0].set_title(self.title)
|
[
"def",
"_adorn_subplots",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"axes",
")",
">",
"0",
":",
"all_axes",
"=",
"self",
".",
"_get_subplots",
"(",
")",
"nrows",
",",
"ncols",
"=",
"self",
".",
"_get_axes_layout",
"(",
")",
"_handle_shared_axes",
"(",
"axarr",
"=",
"all_axes",
",",
"nplots",
"=",
"len",
"(",
"all_axes",
")",
",",
"naxes",
"=",
"nrows",
"*",
"ncols",
",",
"nrows",
"=",
"nrows",
",",
"ncols",
"=",
"ncols",
",",
"sharex",
"=",
"self",
".",
"sharex",
",",
"sharey",
"=",
"self",
".",
"sharey",
")",
"for",
"ax",
"in",
"self",
".",
"axes",
":",
"if",
"self",
".",
"yticks",
"is",
"not",
"None",
":",
"ax",
".",
"set_yticks",
"(",
"self",
".",
"yticks",
")",
"if",
"self",
".",
"xticks",
"is",
"not",
"None",
":",
"ax",
".",
"set_xticks",
"(",
"self",
".",
"xticks",
")",
"if",
"self",
".",
"ylim",
"is",
"not",
"None",
":",
"ax",
".",
"set_ylim",
"(",
"self",
".",
"ylim",
")",
"if",
"self",
".",
"xlim",
"is",
"not",
"None",
":",
"ax",
".",
"set_xlim",
"(",
"self",
".",
"xlim",
")",
"ax",
".",
"grid",
"(",
"self",
".",
"grid",
")",
"if",
"self",
".",
"title",
":",
"if",
"self",
".",
"subplots",
":",
"if",
"is_list_like",
"(",
"self",
".",
"title",
")",
":",
"if",
"len",
"(",
"self",
".",
"title",
")",
"!=",
"self",
".",
"nseries",
":",
"msg",
"=",
"(",
"'The length of `title` must equal the number '",
"'of columns if using `title` of type `list` '",
"'and `subplots=True`.\\n'",
"'length of title = {}\\n'",
"'number of columns = {}'",
")",
".",
"format",
"(",
"len",
"(",
"self",
".",
"title",
")",
",",
"self",
".",
"nseries",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"for",
"(",
"ax",
",",
"title",
")",
"in",
"zip",
"(",
"self",
".",
"axes",
",",
"self",
".",
"title",
")",
":",
"ax",
".",
"set_title",
"(",
"title",
")",
"else",
":",
"self",
".",
"fig",
".",
"suptitle",
"(",
"self",
".",
"title",
")",
"else",
":",
"if",
"is_list_like",
"(",
"self",
".",
"title",
")",
":",
"msg",
"=",
"(",
"'Using `title` of type `list` is not supported '",
"'unless `subplots=True` is passed'",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"self",
".",
"axes",
"[",
"0",
"]",
".",
"set_title",
"(",
"self",
".",
"title",
")"
] |
Common post process unrelated to data
|
[
"Common",
"post",
"process",
"unrelated",
"to",
"data"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L441-L487
|
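The list-`title` validation above surfaces through `DataFrame.plot`; a sketch of the accepted and rejected forms:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})

# One title per column is accepted when subplots=True.
axes = df.plot(subplots=True, title=['first', 'second'])

# A list title without subplots=True raises the ValueError built
# in the else branch above.
try:
    df.plot(title=['first', 'second'])
except ValueError as err:
    print(err)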
19,769
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
MPLPlot._apply_style_colors
|
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = 'color' in kwds or self.colormap is not None
nocolor_style = style is None or re.match('[a-z]+', style) is None
if (has_color or self.subplots) and nocolor_style:
kwds['color'] = colors[col_num % len(colors)]
return style, kwds
|
python
|
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = 'color' in kwds or self.colormap is not None
nocolor_style = style is None or re.match('[a-z]+', style) is None
if (has_color or self.subplots) and nocolor_style:
kwds['color'] = colors[col_num % len(colors)]
return style, kwds
|
[
"def",
"_apply_style_colors",
"(",
"self",
",",
"colors",
",",
"kwds",
",",
"col_num",
",",
"label",
")",
":",
"style",
"=",
"None",
"if",
"self",
".",
"style",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"self",
".",
"style",
",",
"list",
")",
":",
"try",
":",
"style",
"=",
"self",
".",
"style",
"[",
"col_num",
"]",
"except",
"IndexError",
":",
"pass",
"elif",
"isinstance",
"(",
"self",
".",
"style",
",",
"dict",
")",
":",
"style",
"=",
"self",
".",
"style",
".",
"get",
"(",
"label",
",",
"style",
")",
"else",
":",
"style",
"=",
"self",
".",
"style",
"has_color",
"=",
"'color'",
"in",
"kwds",
"or",
"self",
".",
"colormap",
"is",
"not",
"None",
"nocolor_style",
"=",
"style",
"is",
"None",
"or",
"re",
".",
"match",
"(",
"'[a-z]+'",
",",
"style",
")",
"is",
"None",
"if",
"(",
"has_color",
"or",
"self",
".",
"subplots",
")",
"and",
"nocolor_style",
":",
"kwds",
"[",
"'color'",
"]",
"=",
"colors",
"[",
"col_num",
"%",
"len",
"(",
"colors",
")",
"]",
"return",
"style",
",",
"kwds"
] |
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds which "color" may be added.
|
[
"Manage",
"style",
"and",
"color",
"based",
"on",
"column",
"number",
"and",
"its",
"label",
".",
"Returns",
"tuple",
"of",
"appropriate",
"style",
"and",
"kwds",
"which",
"color",
"may",
"be",
"added",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L672-L693
|
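A sketch of the interaction above from the user side: a style string with no lowercase color letter (the `[a-z]+` match) still receives an automatic color when a colormap is given, while a color-bearing style suppresses it. Data and colormap are illustrative:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})

# colormap sets has_color; '--' and ':' carry no color letter, so
# each column is assigned a color drawn from the colormap.
ax = df.plot(style=['--', ':'], colormap='viridis')

# 'r--' and 'g:' start with color letters, so the automatic color
# is suppressed and red/green are used.
ax = df.plot(style=['r--', 'g:'])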
19,770
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
FramePlotMethods.line
|
def line(self, x=None, y=None, **kwds):
"""
Plot DataFrame columns as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
return self(kind='line', x=x, y=y, **kwds)
|
python
|
def line(self, x=None, y=None, **kwds):
"""
Plot DataFrame columns as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
return self(kind='line', x=x, y=y, **kwds)
|
[
"def",
"line",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"self",
"(",
"kind",
"=",
"'line'",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"*",
"*",
"kwds",
")"
] |
Plot DataFrame columns as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
Parameters
----------
x : int or str, optional
Columns to use for the horizontal axis.
Either the location or the label of the columns to be used.
By default, it will use the DataFrame indices.
y : int, str, or list of them, optional
The values to be plotted.
Either the location or the label of the columns to be used.
By default, it will use the remaining DataFrame numeric columns.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`
Return an ndarray when ``subplots=True``.
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
|
[
"Plot",
"DataFrame",
"columns",
"as",
"lines",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L2970-L3031
|
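As the record above notes, `x` and `y` also accept integer positions; a brief sketch with illustrative data:

import pandas as pd

df = pd.DataFrame({'pig': [20, 18, 489],
                   'horse': [4, 25, 281]})

# Columns referenced by position (0 and 1) instead of by label.
lines = df.plot.line(x=0, y=1)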
19,771
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
FramePlotMethods.bar
|
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind='bar', x=x, y=y, **kwds)
|
python
|
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
return self(kind='bar', x=x, y=y, **kwds)
|
[
"def",
"bar",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"self",
"(",
"kind",
"=",
"'bar'",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"*",
"*",
"kwds",
")"
] |
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
|
[
"Vertical",
"bar",
"plot",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L3033-L3116
|
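A stacked variant, which the doctests above do not show; `stacked` travels through `**kwds` to `DataFrame.plot`:

import pandas as pd

df = pd.DataFrame({'speed': [0.1, 17.5, 40],
                   'lifespan': [2, 8, 70]},
                  index=['snail', 'pig', 'elephant'])

# One bar per row, with the two columns stacked on top of each
# other instead of grouped side by side.
ax = df.plot.bar(stacked=True, rot=0)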
19,772
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
FramePlotMethods.barh
|
def barh(self, x=None, y=None, **kwds):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
return self(kind='barh', x=x, y=y, **kwds)
|
python
|
def barh(self, x=None, y=None, **kwds):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
return self(kind='barh', x=x, y=y, **kwds)
|
[
"def",
"barh",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"self",
"(",
"kind",
"=",
"'barh'",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"*",
"*",
"kwds",
")"
] |
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
Parameters
----------
x : label or position, default DataFrame.index
Column to be used for categories.
y : label or position, default All numeric columns in dataframe
Columns to be plotted from the DataFrame.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
|
[
"Make",
"a",
"horizontal",
"bar",
"plot",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L3118-L3196
|
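The horizontal counterpart takes the same keywords; a stacked sketch reusing the docstring's data:

import pandas as pd

df = pd.DataFrame({'speed': [0.1, 17.5, 40],
                   'lifespan': [2, 8, 70]},
                  index=['snail', 'pig', 'elephant'])

# Horizontal stacked bars: one bar per row of the DataFrame.
ax = df.plot.barh(stacked=True)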
19,773
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
FramePlotMethods.hist
|
def hist(self, by=None, bins=10, **kwds):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
    :class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
    When we roll a die 6000 times, we expect each value to occur around 1000
    times. But when we roll two dice and sum the results, the distribution
    is going to be quite different. A histogram illustrates those
    distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind='hist', by=by, bins=bins, **kwds)
|
python
|
def hist(self, by=None, bins=10, **kwds):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
    :class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
    When we roll a die 6000 times, we expect each value to occur around 1000
    times. But when we roll two dice and sum the results, the distribution
    is going to be quite different. A histogram illustrates those
    distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind='hist', by=by, bins=bins, **kwds)
|
[
"def",
"hist",
"(",
"self",
",",
"by",
"=",
"None",
",",
"bins",
"=",
"10",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"self",
"(",
"kind",
"=",
"'hist'",
",",
"by",
"=",
"by",
",",
"bins",
"=",
"bins",
",",
"*",
"*",
"kwds",
")"
] |
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
    :class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
    When we roll a die 6000 times, we expect each value to occur around 1000
    times. But when we roll two dice and sum the results, the distribution
    is going to be quite different. A histogram illustrates those
    distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
|
[
"Draw",
"one",
"histogram",
"of",
"the",
"DataFrame",
"s",
"columns",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L3248-L3293
|
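Keywords not consumed by pandas are forwarded to matplotlib's hist through `**kwds`; a sketch assuming `cumulative` is among them:

import numpy as np
import pandas as pd

df = pd.DataFrame({'one': np.random.randn(1000)})

# cumulative=True is passed through to matplotlib.axes.Axes.hist
# and turns the histogram into an empirical cumulative count.
ax = df.plot.hist(bins=30, cumulative=True)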
19,774
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
FramePlotMethods.area
|
def area(self, x=None, y=None, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
        Area plots are stacked by default. Set to False to create an
        unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
"""
return self(kind='area', x=x, y=y, **kwds)
|
python
|
def area(self, x=None, y=None, **kwds):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
        Area plots are stacked by default. Set to False to create an
        unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
"""
return self(kind='area', x=x, y=y, **kwds)
|
[
"def",
"area",
"(",
"self",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"self",
"(",
"kind",
"=",
"'area'",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"*",
"*",
"kwds",
")"
] |
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwds : optional
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
|
[
"Draw",
"a",
"stacked",
"area",
"plot",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L3341-L3412
|
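[Editor's note] The method body above simply dispatches to ``self(kind='area', ...)``, so the accessor spelling and the ``kind=`` spelling are interchangeable. A minimal sketch, not part of the dataset record, assuming matplotlib is installed:
>>> import pandas as pd
>>> df = pd.DataFrame({'sales': [3, 2, 3], 'signups': [5, 5, 6]})
>>> ax1 = df.plot.area(y='sales')          # accessor form
>>> ax2 = df.plot(kind='area', y='sales')  # equivalent keyword form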
19,775
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
FramePlotMethods.scatter
|
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
The size of each point. Possible values are:
- A single scalar so all points have the same size.
- A sequence of scalars, which will be cycled through for each
point's size. For instance, when passing [2, 14], point sizes
will alternate between 2 and 14.
c : str, int or array_like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be cycled through for each point's color. For
instance, with ['green', 'yellow'] the points will be filled in
green and yellow alternately.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
|
python
|
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
The size of each point. Possible values are:
- A single scalar so all points have the same size.
- A sequence of scalars, which will be cycled through for each
point's size. For instance, when passing [2, 14], point sizes
will alternate between 2 and 14.
c : str, int or array_like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be cycled through for each point's color. For
instance, with ['green', 'yellow'] the points will be filled in
green and yellow alternately.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
|
[
"def",
"scatter",
"(",
"self",
",",
"x",
",",
"y",
",",
"s",
"=",
"None",
",",
"c",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"return",
"self",
"(",
"kind",
"=",
"'scatter'",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"c",
"=",
"c",
",",
"s",
"=",
"s",
",",
"*",
"*",
"kwds",
")"
] |
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : scalar or array_like, optional
The size of each point. Possible values are:
- A single scalar so all points have the same size.
- A sequence of scalars, which will be cycled through for each
point's size. For instance, when passing [2, 14], point sizes
will alternate between 2 and 14.
c : str, int or array_like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be cycled through for each point's color. For
instance, with ['green', 'yellow'] the points will be filled in
green and yellow alternately.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwds
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
|
[
"Create",
"a",
"scatter",
"plot",
"with",
"varying",
"marker",
"point",
"size",
"and",
"color",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L3463-L3542
|
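[Editor's note] As with ``area`` above, ``scatter`` only forwards its arguments to ``self(kind='scatter', ...)``. A minimal sketch, not part of the dataset record, assuming matplotlib is installed:
>>> import pandas as pd
>>> df = pd.DataFrame({'length': [5.1, 4.9, 7.0, 6.4],
...                    'width': [3.5, 3.0, 3.2, 3.2]})
>>> ax1 = df.plot.scatter(x='length', y='width', s=50, c='DarkBlue')
>>> ax2 = df.plot(kind='scatter', x='length', y='width', s=50, c='DarkBlue')  # same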
19,776
|
pandas-dev/pandas
|
pandas/plotting/_core.py
|
FramePlotMethods.hexbin
|
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
**kwds):
"""
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
If `C` is specified, it gives the values at the given coordinates
``(x[i], y[i])``. These values are accumulated for each hexagonal
bin and then reduced according to `reduce_C_function`,
having as default the NumPy's mean function (:meth:`numpy.mean`).
(If `C` is specified, it must also be a 1-D sequence
of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.AxesSubplot
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({'x': np.random.randn(n),
... 'y': np.random.randn(n)})
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
Note that `'observations'` values range from 1 to 5 but the resulting
plot shows values above 25. This is because of the
`reduce_C_function`.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame({
... 'coord_x': np.random.uniform(-3, 3, size=n),
... 'coord_y': np.random.uniform(30, 50, size=n),
... 'observations': np.random.randint(1,5, size=n)
... })
>>> ax = df.plot.hexbin(x='coord_x',
... y='coord_y',
... C='observations',
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis")
"""
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds)
|
python
|
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
**kwds):
"""
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
If `C` is specified, it gives the values at the given coordinates
``(x[i], y[i])``. These values are accumulated for each hexagonal
bin and then reduced according to `reduce_C_function`,
having as default the NumPy's mean function (:meth:`numpy.mean`).
(If `C` is specified, it must also be a 1-D sequence
of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.AxesSubplot
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({'x': np.random.randn(n),
... 'y': np.random.randn(n)})
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
Note that `'observations'` values range from 1 to 5 but the resulting
plot shows values above 25. This is because of the
`reduce_C_function`.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame({
... 'coord_x': np.random.uniform(-3, 3, size=n),
... 'coord_y': np.random.uniform(30, 50, size=n),
... 'observations': np.random.randint(1,5, size=n)
... })
>>> ax = df.plot.hexbin(x='coord_x',
... y='coord_y',
... C='observations',
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis")
"""
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds)
|
[
"def",
"hexbin",
"(",
"self",
",",
"x",
",",
"y",
",",
"C",
"=",
"None",
",",
"reduce_C_function",
"=",
"None",
",",
"gridsize",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"reduce_C_function",
"is",
"not",
"None",
":",
"kwds",
"[",
"'reduce_C_function'",
"]",
"=",
"reduce_C_function",
"if",
"gridsize",
"is",
"not",
"None",
":",
"kwds",
"[",
"'gridsize'",
"]",
"=",
"gridsize",
"return",
"self",
"(",
"kind",
"=",
"'hexbin'",
",",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"C",
"=",
"C",
",",
"*",
"*",
"kwds",
")"
] |
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
If `C` is specified, it gives the values at the given coordinates
``(x[i], y[i])``. These values are accumulated for each hexagonal
bin and then reduced according to `reduce_C_function`,
having as default the NumPy's mean function (:meth:`numpy.mean`).
(If `C` is specified, it must also be a 1-D sequence
of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.AxesSubplot
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({'x': np.random.randn(n),
... 'y': np.random.randn(n)})
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
Note that `'observations'` values range from 1 to 5 but the resulting
plot shows values above 25. This is because of the
`reduce_C_function`.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame({
... 'coord_x': np.random.uniform(-3, 3, size=n),
... 'coord_y': np.random.uniform(30, 50, size=n),
... 'observations': np.random.randint(1,5, size=n)
... })
>>> ax = df.plot.hexbin(x='coord_x',
... y='coord_y',
... C='observations',
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis")
|
[
"Generate",
"a",
"hexagonal",
"binning",
"plot",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/plotting/_core.py#L3544-L3631
|
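[Editor's note] In the body above, ``reduce_C_function`` and ``gridsize`` are only copied into ``kwds`` when they are not None, so matplotlib's own defaults apply otherwise. A minimal sketch, assuming matplotlib is installed:
>>> import numpy as np
>>> import pandas as pd
>>> df = pd.DataFrame({'x': np.random.randn(1000),
...                    'y': np.random.randn(1000)})
>>> ax = df.plot.hexbin(x='x', y='y')               # gridsize left to matplotlib
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=15)  # explicitly forwarded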
19,777
|
pandas-dev/pandas
|
pandas/core/indexes/api.py
|
_get_combined_index
|
def _get_combined_index(indexes, intersect=False, sort=False):
"""
Return the union or intersection of indexes.
Parameters
----------
indexes : list of Index or list objects
When intersect=True, do not accept list of lists.
intersect : bool, default False
If True, calculate the intersection between indexes. Otherwise,
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
# TODO: handle index names!
indexes = _get_distinct_objs(indexes)
if len(indexes) == 0:
index = Index([])
elif len(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
else:
index = _union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_values()
except TypeError:
pass
return index
|
python
|
def _get_combined_index(indexes, intersect=False, sort=False):
"""
Return the union or intersection of indexes.
Parameters
----------
indexes : list of Index or list objects
When intersect=True, do not accept list of lists.
intersect : bool, default False
If True, calculate the intersection between indexes. Otherwise,
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
# TODO: handle index names!
indexes = _get_distinct_objs(indexes)
if len(indexes) == 0:
index = Index([])
elif len(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
else:
index = _union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_values()
except TypeError:
pass
return index
|
[
"def",
"_get_combined_index",
"(",
"indexes",
",",
"intersect",
"=",
"False",
",",
"sort",
"=",
"False",
")",
":",
"# TODO: handle index names!",
"indexes",
"=",
"_get_distinct_objs",
"(",
"indexes",
")",
"if",
"len",
"(",
"indexes",
")",
"==",
"0",
":",
"index",
"=",
"Index",
"(",
"[",
"]",
")",
"elif",
"len",
"(",
"indexes",
")",
"==",
"1",
":",
"index",
"=",
"indexes",
"[",
"0",
"]",
"elif",
"intersect",
":",
"index",
"=",
"indexes",
"[",
"0",
"]",
"for",
"other",
"in",
"indexes",
"[",
"1",
":",
"]",
":",
"index",
"=",
"index",
".",
"intersection",
"(",
"other",
")",
"else",
":",
"index",
"=",
"_union_indexes",
"(",
"indexes",
",",
"sort",
"=",
"sort",
")",
"index",
"=",
"ensure_index",
"(",
"index",
")",
"if",
"sort",
":",
"try",
":",
"index",
"=",
"index",
".",
"sort_values",
"(",
")",
"except",
"TypeError",
":",
"pass",
"return",
"index"
] |
Return the union or intersection of indexes.
Parameters
----------
indexes : list of Index or list objects
When intersect=True, do not accept list of lists.
intersect : bool, default False
If True, calculate the intersection between indexes. Otherwise,
calculate the union.
sort : bool, default False
Whether the result index should come out sorted or not.
Returns
-------
Index
|
[
"Return",
"the",
"union",
"or",
"intersection",
"of",
"indexes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/api.py#L87-L125
|
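[Editor's note] A sketch of the two modes, using the private helper as it exists at this commit (outputs normalized with ``list`` since the exact Index repr varies by version):
>>> from pandas import Index
>>> from pandas.core.indexes.api import _get_combined_index
>>> left, right = Index([1, 2, 3]), Index([2, 3, 4])
>>> list(_get_combined_index([left, right]))                  # union (default)
[1, 2, 3, 4]
>>> list(_get_combined_index([left, right], intersect=True))  # intersection
[2, 3]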
19,778
|
pandas-dev/pandas
|
pandas/core/indexes/api.py
|
_union_indexes
|
def _union_indexes(indexes, sort=True):
"""
Return the union of indexes.
The behavior of sort and names is not consistent.
Parameters
----------
indexes : list of Index or list objects
sort : bool, default True
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
"""
Convert indexes to lists and concatenate them, removing duplicates.
The final dtype is inferred.
Parameters
----------
inds : list of Index or list objects
Returns
-------
Index
"""
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(
lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
if sort is None:
# TODO: remove once pd.concat sort default changes
warnings.warn(_sort_msg, FutureWarning, stacklevel=8)
sort = True
return _unique_indices(indexes)
name = _get_consensus_names(indexes)[0]
if name != index.name:
index = index._shallow_copy(name=name)
return index
else: # kind='list'
return _unique_indices(indexes)
|
python
|
def _union_indexes(indexes, sort=True):
"""
Return the union of indexes.
The behavior of sort and names is not consistent.
Parameters
----------
indexes : list of Index or list objects
sort : bool, default True
Whether the result index should come out sorted or not.
Returns
-------
Index
"""
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
"""
Convert indexes to lists and concatenate them, removing duplicates.
The final dtype is inferred.
Parameters
----------
inds : list of Index or list objects
Returns
-------
Index
"""
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(
lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
if sort is None:
# TODO: remove once pd.concat sort default changes
warnings.warn(_sort_msg, FutureWarning, stacklevel=8)
sort = True
return _unique_indices(indexes)
name = _get_consensus_names(indexes)[0]
if name != index.name:
index = index._shallow_copy(name=name)
return index
else: # kind='list'
return _unique_indices(indexes)
|
[
"def",
"_union_indexes",
"(",
"indexes",
",",
"sort",
"=",
"True",
")",
":",
"if",
"len",
"(",
"indexes",
")",
"==",
"0",
":",
"raise",
"AssertionError",
"(",
"'Must have at least 1 Index to union'",
")",
"if",
"len",
"(",
"indexes",
")",
"==",
"1",
":",
"result",
"=",
"indexes",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"result",
",",
"list",
")",
":",
"result",
"=",
"Index",
"(",
"sorted",
"(",
"result",
")",
")",
"return",
"result",
"indexes",
",",
"kind",
"=",
"_sanitize_and_check",
"(",
"indexes",
")",
"def",
"_unique_indices",
"(",
"inds",
")",
":",
"\"\"\"\n Convert indexes to lists and concatenate them, removing duplicates.\n\n The final dtype is inferred.\n\n Parameters\n ----------\n inds : list of Index or list objects\n\n Returns\n -------\n Index\n \"\"\"",
"def",
"conv",
"(",
"i",
")",
":",
"if",
"isinstance",
"(",
"i",
",",
"Index",
")",
":",
"i",
"=",
"i",
".",
"tolist",
"(",
")",
"return",
"i",
"return",
"Index",
"(",
"lib",
".",
"fast_unique_multiple_list",
"(",
"[",
"conv",
"(",
"i",
")",
"for",
"i",
"in",
"inds",
"]",
",",
"sort",
"=",
"sort",
")",
")",
"if",
"kind",
"==",
"'special'",
":",
"result",
"=",
"indexes",
"[",
"0",
"]",
"if",
"hasattr",
"(",
"result",
",",
"'union_many'",
")",
":",
"return",
"result",
".",
"union_many",
"(",
"indexes",
"[",
"1",
":",
"]",
")",
"else",
":",
"for",
"other",
"in",
"indexes",
"[",
"1",
":",
"]",
":",
"result",
"=",
"result",
".",
"union",
"(",
"other",
")",
"return",
"result",
"elif",
"kind",
"==",
"'array'",
":",
"index",
"=",
"indexes",
"[",
"0",
"]",
"for",
"other",
"in",
"indexes",
"[",
"1",
":",
"]",
":",
"if",
"not",
"index",
".",
"equals",
"(",
"other",
")",
":",
"if",
"sort",
"is",
"None",
":",
"# TODO: remove once pd.concat sort default changes",
"warnings",
".",
"warn",
"(",
"_sort_msg",
",",
"FutureWarning",
",",
"stacklevel",
"=",
"8",
")",
"sort",
"=",
"True",
"return",
"_unique_indices",
"(",
"indexes",
")",
"name",
"=",
"_get_consensus_names",
"(",
"indexes",
")",
"[",
"0",
"]",
"if",
"name",
"!=",
"index",
".",
"name",
":",
"index",
"=",
"index",
".",
"_shallow_copy",
"(",
"name",
"=",
"name",
")",
"return",
"index",
"else",
":",
"# kind='list'",
"return",
"_unique_indices",
"(",
"indexes",
")"
] |
Return the union of indexes.
The behavior of sort and names is not consistent.
Parameters
----------
indexes : list of Index or list objects
sort : bool, default True
Whether the result index should come out sorted or not.
Returns
-------
Index
|
[
"Return",
"the",
"union",
"of",
"indexes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/api.py#L128-L202
|
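[Editor's note] One observable effect of the name handling above: equal indexes keep a consensus name, while conflicting names are dropped. A sketch against this commit's private helper; object-dtype indexes are used so the 'array' branch (which consults _get_consensus_names) is the one exercised:
>>> from pandas import Index
>>> from pandas.core.indexes.api import _union_indexes
>>> a = Index(['x', 'y'], name='id')
>>> b = Index(['x', 'y'], name='id')
>>> _union_indexes([a, b]).name
'id'
>>> c = Index(['x', 'y'], name='other')
>>> print(_union_indexes([a, c]).name)   # no consensus -> None
None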
19,779
|
pandas-dev/pandas
|
pandas/core/indexes/api.py
|
_sanitize_and_check
|
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com.try_sort(x))
if not isinstance(x, Index) else
x for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array'
|
python
|
def _sanitize_and_check(indexes):
"""
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
"""
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com.try_sort(x))
if not isinstance(x, Index) else
x for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array'
|
[
"def",
"_sanitize_and_check",
"(",
"indexes",
")",
":",
"kinds",
"=",
"list",
"(",
"{",
"type",
"(",
"index",
")",
"for",
"index",
"in",
"indexes",
"}",
")",
"if",
"list",
"in",
"kinds",
":",
"if",
"len",
"(",
"kinds",
")",
">",
"1",
":",
"indexes",
"=",
"[",
"Index",
"(",
"com",
".",
"try_sort",
"(",
"x",
")",
")",
"if",
"not",
"isinstance",
"(",
"x",
",",
"Index",
")",
"else",
"x",
"for",
"x",
"in",
"indexes",
"]",
"kinds",
".",
"remove",
"(",
"list",
")",
"else",
":",
"return",
"indexes",
",",
"'list'",
"if",
"len",
"(",
"kinds",
")",
">",
"1",
"or",
"Index",
"not",
"in",
"kinds",
":",
"return",
"indexes",
",",
"'special'",
"else",
":",
"return",
"indexes",
",",
"'array'"
] |
Verify the type of indexes and convert lists to Index.
Cases:
- [list, list, ...]: Return ([list, list, ...], 'list')
- [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])
Lists are sorted and converted to Index.
- [Index, Index, ...]: Return ([Index, Index, ...], TYPE)
TYPE = 'special' if at least one special type, 'array' otherwise.
Parameters
----------
indexes : list of Index or list objects
Returns
-------
sanitized_indexes : list of Index or list objects
type : {'list', 'array', 'special'}
|
[
"Verify",
"the",
"type",
"of",
"indexes",
"and",
"convert",
"lists",
"to",
"Index",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/api.py#L205-L240
|
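[Editor's note] A sketch of the three classifications. Note that ``type`` is compared exactly, so Index subclasses such as DatetimeIndex come back as 'special':
>>> import pandas as pd
>>> from pandas import Index
>>> from pandas.core.indexes.api import _sanitize_and_check
>>> _sanitize_and_check([['b', 'a'], ['c']])
([['b', 'a'], ['c']], 'list')
>>> _sanitize_and_check([Index(['a', 'b']), Index(['c'])])[1]
'array'
>>> _sanitize_and_check([pd.date_range('2019', periods=2)])[1]
'special'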
19,780
|
pandas-dev/pandas
|
pandas/core/indexes/api.py
|
_get_consensus_names
|
def _get_consensus_names(indexes):
"""
Give a consensus 'names' to indexes.
If there's exactly one non-empty 'names', return this;
otherwise, return a list of Nones.
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the consensus 'names' found.
"""
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = {tuple(i.names) for i in indexes
if com._any_not_none(*i.names)}
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
|
python
|
def _get_consensus_names(indexes):
"""
Give a consensus 'names' to indexes.
If there's exactly one non-empty 'names', return this;
otherwise, return a list of Nones.
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the consensus 'names' found.
"""
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = {tuple(i.names) for i in indexes
if com._any_not_none(*i.names)}
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels
|
[
"def",
"_get_consensus_names",
"(",
"indexes",
")",
":",
"# find the non-none names, need to tupleify to make",
"# the set hashable, then reverse on return",
"consensus_names",
"=",
"{",
"tuple",
"(",
"i",
".",
"names",
")",
"for",
"i",
"in",
"indexes",
"if",
"com",
".",
"_any_not_none",
"(",
"*",
"i",
".",
"names",
")",
"}",
"if",
"len",
"(",
"consensus_names",
")",
"==",
"1",
":",
"return",
"list",
"(",
"list",
"(",
"consensus_names",
")",
"[",
"0",
"]",
")",
"return",
"[",
"None",
"]",
"*",
"indexes",
"[",
"0",
"]",
".",
"nlevels"
] |
Give a consensus 'names' to indexes.
If there's exactly one non-empty 'names', return this;
otherwise, return a list of Nones.
Parameters
----------
indexes : list of Index objects
Returns
-------
list
A list representing the consensus 'names' found.
|
[
"Give",
"a",
"consensus",
"names",
"to",
"indexes",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/api.py#L243-L266
|
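[Editor's note] A sketch of the consensus rule ("exactly one non-empty 'names'"), calling the private helper directly:
>>> from pandas import Index
>>> from pandas.core.indexes.api import _get_consensus_names
>>> _get_consensus_names([Index([1], name='id'), Index([2], name='id')])
['id']
>>> _get_consensus_names([Index([1], name='id'), Index([2], name='key')])
[None]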
19,781
|
pandas-dev/pandas
|
pandas/core/indexes/api.py
|
_all_indexes_same
|
def _all_indexes_same(indexes):
"""
Determine if all indexes contain the same elements.
Parameters
----------
indexes : list of Index objects
Returns
-------
bool
True if all indexes contain the same elements, False otherwise.
"""
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
|
python
|
def _all_indexes_same(indexes):
"""
Determine if all indexes contain the same elements.
Parameters
----------
indexes : list of Index objects
Returns
-------
bool
True if all indexes contain the same elements, False otherwise.
"""
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True
|
[
"def",
"_all_indexes_same",
"(",
"indexes",
")",
":",
"first",
"=",
"indexes",
"[",
"0",
"]",
"for",
"index",
"in",
"indexes",
"[",
"1",
":",
"]",
":",
"if",
"not",
"first",
".",
"equals",
"(",
"index",
")",
":",
"return",
"False",
"return",
"True"
] |
Determine if all indexes contain the same elements.
Parameters
----------
indexes : list of Index objects
Returns
-------
bool
True if all indexes contain the same elements, False otherwise.
|
[
"Determine",
"if",
"all",
"indexes",
"contain",
"the",
"same",
"elements",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/api.py#L269-L286
|
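[Editor's note] ``Index.equals`` compares elements in order, so this helper is order-sensitive. A sketch:
>>> from pandas import Index
>>> from pandas.core.indexes.api import _all_indexes_same
>>> _all_indexes_same([Index([1, 2]), Index([1, 2])])
True
>>> _all_indexes_same([Index([1, 2]), Index([2, 1])])
False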
19,782
|
pandas-dev/pandas
|
pandas/io/sql.py
|
_convert_params
|
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
|
python
|
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
|
[
"def",
"_convert_params",
"(",
"sql",
",",
"params",
")",
":",
"args",
"=",
"[",
"sql",
"]",
"if",
"params",
"is",
"not",
"None",
":",
"if",
"hasattr",
"(",
"params",
",",
"'keys'",
")",
":",
"# test if params is a mapping",
"args",
"+=",
"[",
"params",
"]",
"else",
":",
"args",
"+=",
"[",
"list",
"(",
"params",
")",
"]",
"return",
"args"
] |
Convert SQL and params args to DBAPI2.0 compliant format.
|
[
"Convert",
"SQL",
"and",
"params",
"args",
"to",
"DBAPI2",
".",
"0",
"compliant",
"format",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L57-L65
|
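[Editor's note] A sketch of the parameter shapes accepted above: a mapping is passed through unchanged, any other sequence is coerced to a list, and None adds nothing:
>>> from pandas.io.sql import _convert_params
>>> _convert_params('SELECT * FROM t', None)
['SELECT * FROM t']
>>> _convert_params('SELECT * FROM t WHERE a = ?', (1,))
['SELECT * FROM t WHERE a = ?', [1]]
>>> _convert_params('SELECT * FROM t WHERE a = :a', {'a': 1})
['SELECT * FROM t WHERE a = :a', {'a': 1}]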
19,783
|
pandas-dev/pandas
|
pandas/io/sql.py
|
_process_parse_dates_argument
|
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
return parse_dates
|
python
|
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
return parse_dates
|
[
"def",
"_process_parse_dates_argument",
"(",
"parse_dates",
")",
":",
"# handle non-list entries for parse_dates gracefully",
"if",
"parse_dates",
"is",
"True",
"or",
"parse_dates",
"is",
"None",
"or",
"parse_dates",
"is",
"False",
":",
"parse_dates",
"=",
"[",
"]",
"elif",
"not",
"hasattr",
"(",
"parse_dates",
",",
"'__iter__'",
")",
":",
"parse_dates",
"=",
"[",
"parse_dates",
"]",
"return",
"parse_dates"
] |
Process parse_dates argument for read_sql functions
|
[
"Process",
"parse_dates",
"argument",
"for",
"read_sql",
"functions"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L68-L76
|
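[Editor's note] A sketch of the normalization. One subtlety: in Python 3 a bare string has ``__iter__`` and therefore passes through unwrapped; list-valued and dict-valued arguments are returned as-is:
>>> from pandas.io.sql import _process_parse_dates_argument
>>> _process_parse_dates_argument(None)
[]
>>> _process_parse_dates_argument(True)
[]
>>> _process_parse_dates_argument(['created_at'])
['created_at']
>>> _process_parse_dates_argument({'created_at': '%Y-%m-%d'})
{'created_at': '%Y-%m-%d'}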
19,784
|
pandas-dev/pandas
|
pandas/io/sql.py
|
_parse_date_columns
|
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as datetimes.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
|
python
|
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as datetimes.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
|
[
"def",
"_parse_date_columns",
"(",
"data_frame",
",",
"parse_dates",
")",
":",
"parse_dates",
"=",
"_process_parse_dates_argument",
"(",
"parse_dates",
")",
"# we want to coerce datetime64_tz dtypes for now to UTC",
"# we could in theory do a 'nice' conversion from a FixedOffset tz",
"# GH11216",
"for",
"col_name",
",",
"df_col",
"in",
"data_frame",
".",
"iteritems",
"(",
")",
":",
"if",
"is_datetime64tz_dtype",
"(",
"df_col",
")",
"or",
"col_name",
"in",
"parse_dates",
":",
"try",
":",
"fmt",
"=",
"parse_dates",
"[",
"col_name",
"]",
"except",
"TypeError",
":",
"fmt",
"=",
"None",
"data_frame",
"[",
"col_name",
"]",
"=",
"_handle_date_column",
"(",
"df_col",
",",
"format",
"=",
"fmt",
")",
"return",
"data_frame"
] |
Force non-datetime columns to be read as datetimes.
Supports both string formatted and integer timestamp columns.
|
[
"Force",
"non",
"-",
"datetime",
"columns",
"to",
"be",
"read",
"as",
"such",
".",
"Supports",
"both",
"string",
"formatted",
"and",
"integer",
"timestamp",
"columns",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L98-L116
|
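[Editor's note] A hedged sketch: with a list, the format lookup in the body raises TypeError and falls back to ``fmt=None``; with a dict, the per-column format is used:
>>> import pandas as pd
>>> from pandas.io.sql import _parse_date_columns
>>> df = pd.DataFrame({'d': ['2019-01-01', '2019-01-02']})
>>> _parse_date_columns(df, parse_dates=['d'])['d'].dtype
dtype('<M8[ns]')
>>> df = pd.DataFrame({'d': ['01/02/2019']})
>>> _parse_date_columns(df, parse_dates={'d': '%d/%m/%Y'})['d'].dtype
dtype('<M8[ns]')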
19,785
|
pandas-dev/pandas
|
pandas/io/sql.py
|
_wrap_result
|
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
|
python
|
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
|
[
"def",
"_wrap_result",
"(",
"data",
",",
"columns",
",",
"index_col",
"=",
"None",
",",
"coerce_float",
"=",
"True",
",",
"parse_dates",
"=",
"None",
")",
":",
"frame",
"=",
"DataFrame",
".",
"from_records",
"(",
"data",
",",
"columns",
"=",
"columns",
",",
"coerce_float",
"=",
"coerce_float",
")",
"frame",
"=",
"_parse_date_columns",
"(",
"frame",
",",
"parse_dates",
")",
"if",
"index_col",
"is",
"not",
"None",
":",
"frame",
".",
"set_index",
"(",
"index_col",
",",
"inplace",
"=",
"True",
")",
"return",
"frame"
] |
Wrap result set of query in a DataFrame.
|
[
"Wrap",
"result",
"set",
"of",
"query",
"in",
"a",
"DataFrame",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L119-L131
|
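[Editor's note] A sketch combining the three steps above (build the frame, parse dates, set the index) on an in-memory result set:
>>> from pandas.io.sql import _wrap_result
>>> rows = [(1, '2019-01-01'), (2, '2019-01-02')]
>>> frame = _wrap_result(rows, columns=['id', 'day'],
...                      index_col='id', parse_dates=['day'])
>>> frame.index.name, str(frame['day'].dtype)
('id', 'datetime64[ns]')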
19,786
|
pandas-dev/pandas
|
pandas/io/sql.py
|
execute
|
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
|
python
|
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
|
[
"def",
"execute",
"(",
"sql",
",",
"con",
",",
"cur",
"=",
"None",
",",
"params",
"=",
"None",
")",
":",
"if",
"cur",
"is",
"None",
":",
"pandas_sql",
"=",
"pandasSQL_builder",
"(",
"con",
")",
"else",
":",
"pandas_sql",
"=",
"pandasSQL_builder",
"(",
"cur",
",",
"is_cursor",
"=",
"True",
")",
"args",
"=",
"_convert_params",
"(",
"sql",
",",
"params",
")",
"return",
"pandas_sql",
".",
"execute",
"(",
"*",
"args",
")"
] |
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
|
[
"Execute",
"the",
"given",
"SQL",
"query",
"using",
"the",
"provided",
"connection",
"object",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L134-L159
|
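[Editor's note] A minimal round trip through the sqlite3 fallback; for that fallback, the returned object is the DBAPI cursor of the executed statement:
>>> import sqlite3
>>> from pandas.io import sql
>>> con = sqlite3.connect(':memory:')
>>> _ = con.execute('CREATE TABLE t (a INTEGER)')
>>> _ = con.execute('INSERT INTO t VALUES (1), (2)')
>>> result = sql.execute('SELECT a FROM t WHERE a = ?', con, params=(1,))
>>> result.fetchall()
[(1,)]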
19,787
|
pandas-dev/pandas
|
pandas/io/sql.py
|
has_table
|
def has_table(table_name, con, schema=None):
"""
Check if the database has the named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name)
|
python
|
def has_table(table_name, con, schema=None):
"""
Check if the database has the named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name)
|
[
"def",
"has_table",
"(",
"table_name",
",",
"con",
",",
"schema",
"=",
"None",
")",
":",
"pandas_sql",
"=",
"pandasSQL_builder",
"(",
"con",
",",
"schema",
"=",
"schema",
")",
"return",
"pandas_sql",
".",
"has_table",
"(",
"table_name",
")"
] |
Check if the database has the named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
|
[
"Check",
"if",
"DataBase",
"has",
"named",
"table",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L454-L475
|
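[Editor's note] A minimal sketch with an in-memory sqlite3 database:
>>> import sqlite3
>>> from pandas.io import sql
>>> con = sqlite3.connect(':memory:')
>>> _ = con.execute('CREATE TABLE prices (p REAL)')
>>> sql.has_table('prices', con)
True
>>> sql.has_table('missing', con)
False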
19,788
|
pandas-dev/pandas
|
pandas/io/sql.py
|
pandasSQL_builder
|
def pandasSQL_builder(con, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, str):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
|
python
|
def pandasSQL_builder(con, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, str):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
|
[
"def",
"pandasSQL_builder",
"(",
"con",
",",
"schema",
"=",
"None",
",",
"meta",
"=",
"None",
",",
"is_cursor",
"=",
"False",
")",
":",
"# When support for DBAPI connections is removed,",
"# is_cursor should not be necessary.",
"con",
"=",
"_engine_builder",
"(",
"con",
")",
"if",
"_is_sqlalchemy_connectable",
"(",
"con",
")",
":",
"return",
"SQLDatabase",
"(",
"con",
",",
"schema",
"=",
"schema",
",",
"meta",
"=",
"meta",
")",
"elif",
"isinstance",
"(",
"con",
",",
"str",
")",
":",
"raise",
"ImportError",
"(",
"\"Using URI string without sqlalchemy installed.\"",
")",
"else",
":",
"return",
"SQLiteDatabase",
"(",
"con",
",",
"is_cursor",
"=",
"is_cursor",
")"
] |
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
|
[
"Convenience",
"function",
"to",
"return",
"the",
"correct",
"PandasSQL",
"subclass",
"based",
"on",
"the",
"provided",
"parameters",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L499-L513
|
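[Editor's note] A sketch of the dispatch: a DBAPI2 connection falls through to the sqlite fallback class, while (per the body above) a URI string without sqlalchemy installed raises ImportError:
>>> import sqlite3
>>> from pandas.io.sql import pandasSQL_builder
>>> con = sqlite3.connect(':memory:')
>>> type(pandasSQL_builder(con)).__name__
'SQLiteDatabase'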
19,789
|
pandas-dev/pandas
|
pandas/io/sql.py
|
get_schema
|
def get_schema(frame, name, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optionally specifies the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
|
python
|
def get_schema(frame, name, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optionally specifies the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
|
[
"def",
"get_schema",
"(",
"frame",
",",
"name",
",",
"keys",
"=",
"None",
",",
"con",
"=",
"None",
",",
"dtype",
"=",
"None",
")",
":",
"pandas_sql",
"=",
"pandasSQL_builder",
"(",
"con",
"=",
"con",
")",
"return",
"pandas_sql",
".",
"_create_sql_schema",
"(",
"frame",
",",
"name",
",",
"keys",
"=",
"keys",
",",
"dtype",
"=",
"dtype",
")"
] |
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use as a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optionally specifies the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
|
[
"Get",
"the",
"SQL",
"db",
"table",
"schema",
"for",
"the",
"given",
"frame",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L1564-L1586
|
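[Editor's note] A hedged sketch: with ``con=None`` the sqlite fallback generates the DDL. The exact text depends on the connection's dialect, so only the shape is asserted here:
>>> import pandas as pd
>>> from pandas.io.sql import get_schema
>>> frame = pd.DataFrame({'id': [1], 'name': ['a']})
>>> ddl = get_schema(frame, 'users', keys='id')
>>> ddl.startswith('CREATE TABLE')
True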
19,790
|
pandas-dev/pandas
|
pandas/io/sql.py
|
SQLTable._execute_insert
|
def _execute_insert(self, conn, keys, data_iter):
"""Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(), data)
|
python
|
def _execute_insert(self, conn, keys, data_iter):
"""Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(), data)
|
[
"def",
"_execute_insert",
"(",
"self",
",",
"conn",
",",
"keys",
",",
"data_iter",
")",
":",
"data",
"=",
"[",
"dict",
"(",
"zip",
"(",
"keys",
",",
"row",
")",
")",
"for",
"row",
"in",
"data_iter",
"]",
"conn",
".",
"execute",
"(",
"self",
".",
"table",
".",
"insert",
"(",
")",
",",
"data",
")"
] |
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
|
[
"Execute",
"SQL",
"statement",
"inserting",
"data"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L578-L590
|
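[Editor's note] The core of the method is the dict-per-row reshaping, which feeds a single multi-row ``conn.execute`` (an executemany-style insert). The reshaping in isolation:
>>> keys = ['a', 'b']
>>> data_iter = iter([(1, 'x'), (2, 'y')])
>>> [dict(zip(keys, row)) for row in data_iter]
[{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]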
19,791
|
pandas-dev/pandas
|
pandas/io/sql.py
|
SQLTable._query_iterator
|
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
|
python
|
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
|
[
"def",
"_query_iterator",
"(",
"self",
",",
"result",
",",
"chunksize",
",",
"columns",
",",
"coerce_float",
"=",
"True",
",",
"parse_dates",
"=",
"None",
")",
":",
"while",
"True",
":",
"data",
"=",
"result",
".",
"fetchmany",
"(",
"chunksize",
")",
"if",
"not",
"data",
":",
"break",
"else",
":",
"self",
".",
"frame",
"=",
"DataFrame",
".",
"from_records",
"(",
"data",
",",
"columns",
"=",
"columns",
",",
"coerce_float",
"=",
"coerce_float",
")",
"self",
".",
"_harmonize_columns",
"(",
"parse_dates",
"=",
"parse_dates",
")",
"if",
"self",
".",
"index",
"is",
"not",
"None",
":",
"self",
".",
"frame",
".",
"set_index",
"(",
"self",
".",
"index",
",",
"inplace",
"=",
"True",
")",
"yield",
"self",
".",
"frame"
] |
Return generator through chunked result set.
|
[
"Return",
"generator",
"through",
"chunked",
"result",
"set",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L679-L696
|
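[Editor's note] This generator is what backs the public ``chunksize`` argument; a sketch through the public API with an in-memory sqlite3 database:
>>> import sqlite3
>>> import pandas as pd
>>> con = sqlite3.connect(':memory:')
>>> pd.DataFrame({'a': range(5)}).to_sql('t', con, index=False)
>>> for chunk in pd.read_sql('SELECT a FROM t', con, chunksize=2):
...     print(len(chunk))
2
2
1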
19,792
|
pandas-dev/pandas
|
pandas/io/sql.py
|
SQLTable._harmonize_columns
|
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, but ints must be converted to floats if there are null values.
Booleans are hard because converting a bool column with None replaces
all Nones with False. Therefore, only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
except KeyError:
pass
|
python
|
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, but ints must be converted to floats if there are null values.
Booleans are hard because converting a bool column with None replaces
all Nones with False. Therefore, only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
except KeyError:
pass
|
[
"def",
"_harmonize_columns",
"(",
"self",
",",
"parse_dates",
"=",
"None",
")",
":",
"parse_dates",
"=",
"_process_parse_dates_argument",
"(",
"parse_dates",
")",
"for",
"sql_col",
"in",
"self",
".",
"table",
".",
"columns",
":",
"col_name",
"=",
"sql_col",
".",
"name",
"try",
":",
"df_col",
"=",
"self",
".",
"frame",
"[",
"col_name",
"]",
"# Handle date parsing upfront; don't try to convert columns",
"# twice",
"if",
"col_name",
"in",
"parse_dates",
":",
"try",
":",
"fmt",
"=",
"parse_dates",
"[",
"col_name",
"]",
"except",
"TypeError",
":",
"fmt",
"=",
"None",
"self",
".",
"frame",
"[",
"col_name",
"]",
"=",
"_handle_date_column",
"(",
"df_col",
",",
"format",
"=",
"fmt",
")",
"continue",
"# the type the dataframe column should have",
"col_type",
"=",
"self",
".",
"_get_dtype",
"(",
"sql_col",
".",
"type",
")",
"if",
"(",
"col_type",
"is",
"datetime",
"or",
"col_type",
"is",
"date",
"or",
"col_type",
"is",
"DatetimeTZDtype",
")",
":",
"# Convert tz-aware Datetime SQL columns to UTC",
"utc",
"=",
"col_type",
"is",
"DatetimeTZDtype",
"self",
".",
"frame",
"[",
"col_name",
"]",
"=",
"_handle_date_column",
"(",
"df_col",
",",
"utc",
"=",
"utc",
")",
"elif",
"col_type",
"is",
"float",
":",
"# floats support NA, can always convert!",
"self",
".",
"frame",
"[",
"col_name",
"]",
"=",
"df_col",
".",
"astype",
"(",
"col_type",
",",
"copy",
"=",
"False",
")",
"elif",
"len",
"(",
"df_col",
")",
"==",
"df_col",
".",
"count",
"(",
")",
":",
"# No NA values, can convert ints and bools",
"if",
"col_type",
"is",
"np",
".",
"dtype",
"(",
"'int64'",
")",
"or",
"col_type",
"is",
"bool",
":",
"self",
".",
"frame",
"[",
"col_name",
"]",
"=",
"df_col",
".",
"astype",
"(",
"col_type",
",",
"copy",
"=",
"False",
")",
"except",
"KeyError",
":",
"pass"
] |
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine; ints must always be floats if there are Null values.
Booleans are hard because converting a bool column with None replaces
all Nones with False. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
|
[
"Make",
"the",
"DataFrame",
"s",
"column",
"types",
"align",
"with",
"the",
"SQL",
"table",
"column",
"types",
".",
"Need",
"to",
"work",
"around",
"limited",
"NA",
"value",
"support",
".",
"Floats",
"are",
"always",
"fine",
"ints",
"must",
"always",
"be",
"floats",
"if",
"there",
"are",
"Null",
"values",
".",
"Booleans",
"are",
"hard",
"because",
"converting",
"bool",
"column",
"with",
"None",
"replaces",
"all",
"Nones",
"with",
"false",
".",
"Therefore",
"only",
"convert",
"bool",
"if",
"there",
"are",
"no",
"NA",
"values",
".",
"Datetimes",
"should",
"already",
"be",
"converted",
"to",
"np",
".",
"datetime64",
"if",
"supported",
"but",
"here",
"we",
"also",
"force",
"conversion",
"if",
"required",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L803-L851
|
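The NA constraints described in the docstring above are visible through the public pandas API; a small illustrative check (nothing below is taken from pandas/io/sql.py itself):

import numpy as np
import pandas as pd

ints = pd.Series([1, 2, None])       # a missing value forces float64
print(ints.dtype)                    # float64

clean = pd.Series([1.0, 2.0, 3.0])
if len(clean) == clean.count():      # the same no-NA test used above
    print(clean.astype(np.int64).dtype)  # int64: safe, no NA present

bools = pd.Series([True, None])      # stored as object, not bool
print(bools.astype(bool).tolist())   # [True, False] -- the None is lost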
19,793
|
pandas-dev/pandas
|
pandas/io/sql.py
|
SQLiteTable._create_table_setup
|
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(
self._sql_type_name
)
pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
|
python
|
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(
self._sql_type_name
)
pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
|
[
"def",
"_create_table_setup",
"(",
"self",
")",
":",
"column_names_and_types",
"=",
"self",
".",
"_get_column_names_and_types",
"(",
"self",
".",
"_sql_type_name",
")",
"pat",
"=",
"re",
".",
"compile",
"(",
"r'\\s+'",
")",
"column_names",
"=",
"[",
"col_name",
"for",
"col_name",
",",
"_",
",",
"_",
"in",
"column_names_and_types",
"]",
"if",
"any",
"(",
"map",
"(",
"pat",
".",
"search",
",",
"column_names",
")",
")",
":",
"warnings",
".",
"warn",
"(",
"_SAFE_NAMES_WARNING",
",",
"stacklevel",
"=",
"6",
")",
"escape",
"=",
"_get_valid_sqlite_name",
"create_tbl_stmts",
"=",
"[",
"escape",
"(",
"cname",
")",
"+",
"' '",
"+",
"ctype",
"for",
"cname",
",",
"ctype",
",",
"_",
"in",
"column_names_and_types",
"]",
"if",
"self",
".",
"keys",
"is",
"not",
"None",
"and",
"len",
"(",
"self",
".",
"keys",
")",
":",
"if",
"not",
"is_list_like",
"(",
"self",
".",
"keys",
")",
":",
"keys",
"=",
"[",
"self",
".",
"keys",
"]",
"else",
":",
"keys",
"=",
"self",
".",
"keys",
"cnames_br",
"=",
"\", \"",
".",
"join",
"(",
"escape",
"(",
"c",
")",
"for",
"c",
"in",
"keys",
")",
"create_tbl_stmts",
".",
"append",
"(",
"\"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})\"",
".",
"format",
"(",
"tbl",
"=",
"self",
".",
"name",
",",
"cnames_br",
"=",
"cnames_br",
")",
")",
"create_stmts",
"=",
"[",
"\"CREATE TABLE \"",
"+",
"escape",
"(",
"self",
".",
"name",
")",
"+",
"\" (\\n\"",
"+",
"',\\n '",
".",
"join",
"(",
"create_tbl_stmts",
")",
"+",
"\"\\n)\"",
"]",
"ix_cols",
"=",
"[",
"cname",
"for",
"cname",
",",
"_",
",",
"is_index",
"in",
"column_names_and_types",
"if",
"is_index",
"]",
"if",
"len",
"(",
"ix_cols",
")",
":",
"cnames",
"=",
"\"_\"",
".",
"join",
"(",
"ix_cols",
")",
"cnames_br",
"=",
"\",\"",
".",
"join",
"(",
"escape",
"(",
"c",
")",
"for",
"c",
"in",
"ix_cols",
")",
"create_stmts",
".",
"append",
"(",
"\"CREATE INDEX \"",
"+",
"escape",
"(",
"\"ix_\"",
"+",
"self",
".",
"name",
"+",
"\"_\"",
"+",
"cnames",
")",
"+",
"\"ON \"",
"+",
"escape",
"(",
"self",
".",
"name",
")",
"+",
"\" (\"",
"+",
"cnames_br",
"+",
"\")\"",
")",
"return",
"create_stmts"
] |
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
|
[
"Return",
"a",
"list",
"of",
"SQL",
"statements",
"that",
"creates",
"a",
"table",
"reflecting",
"the",
"structure",
"of",
"a",
"DataFrame",
".",
"The",
"first",
"entry",
"will",
"be",
"a",
"CREATE",
"TABLE",
"statement",
"while",
"the",
"rest",
"will",
"be",
"CREATE",
"INDEX",
"statements",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/sql.py#L1311-L1353
|
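The escape-and-join pattern in the row above is easy to reproduce; a minimal sketch, with an escaping rule assumed to mirror _get_valid_sqlite_name (the table and columns are made up):

def escape(name):
    # SQLite-style quoted identifier: double any embedded quotes.
    return '"' + name.replace('"', '""') + '"'

cols = [("index", "INTEGER"), ("dogs", "REAL"), ("cats", "REAL")]
body = ",\n  ".join(escape(cname) + " " + ctype for cname, ctype in cols)
print("CREATE TABLE " + escape("pets") + " (\n  " + body + "\n)")
# CREATE TABLE "pets" (
#   "index" INTEGER,
#   "dogs" REAL,
#   "cats" REAL
# )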
19,794
|
pandas-dev/pandas
|
pandas/core/arrays/categorical.py
|
_maybe_to_categorical
|
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
|
python
|
def _maybe_to_categorical(array):
"""
Coerce to a categorical if a series is given.
Internal use ONLY.
"""
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array
|
[
"def",
"_maybe_to_categorical",
"(",
"array",
")",
":",
"if",
"isinstance",
"(",
"array",
",",
"(",
"ABCSeries",
",",
"ABCCategoricalIndex",
")",
")",
":",
"return",
"array",
".",
"_values",
"elif",
"isinstance",
"(",
"array",
",",
"np",
".",
"ndarray",
")",
":",
"return",
"Categorical",
"(",
"array",
")",
"return",
"array"
] |
Coerce to a categorical if a series is given.
Internal use ONLY.
|
[
"Coerce",
"to",
"a",
"categorical",
"if",
"a",
"series",
"is",
"given",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L131-L141
|
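A rough public-API analogue of what this private helper does (illustrative only):

import numpy as np
import pandas as pd

s = pd.Series(pd.Categorical(["a", "b", "a"]))
print(type(s.values).__name__)   # Categorical: the Series' backing values

arr = np.array(["a", "b", "a"])
print(pd.Categorical(arr))       # a plain ndarray gets wrapped anew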
19,795
|
pandas-dev/pandas
|
pandas/core/arrays/categorical.py
|
contains
|
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
|
python
|
def contains(cat, key, container):
"""
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
"""
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc)
|
[
"def",
"contains",
"(",
"cat",
",",
"key",
",",
"container",
")",
":",
"hash",
"(",
"key",
")",
"# get location of key in categories.",
"# If a KeyError, the key isn't in categories, so logically",
"# can't be in container either.",
"try",
":",
"loc",
"=",
"cat",
".",
"categories",
".",
"get_loc",
"(",
"key",
")",
"except",
"KeyError",
":",
"return",
"False",
"# loc is the location of key in categories, but also the *value*",
"# for key in container. So, `key` may be in categories,",
"# but still not in `container`. Example ('b' in categories,",
"# but not in values):",
"# 'b' in Categorical(['a'], categories=['a', 'b']) # False",
"if",
"is_scalar",
"(",
"loc",
")",
":",
"return",
"loc",
"in",
"container",
"else",
":",
"# if categories is an IntervalIndex, loc is an array.",
"return",
"any",
"(",
"loc_",
"in",
"container",
"for",
"loc_",
"in",
"loc",
")"
] |
Helper for membership check for ``key`` in ``cat``.
This is a helper method for :meth:`__contains__`
and :meth:`CategoricalIndex.__contains__`.
Returns True if ``key`` is in ``cat.categories`` and the
location of ``key`` in ``categories`` is in ``container``.
Parameters
----------
cat : :class:`Categorical` or :class:`CategoricalIndex`
key : a hashable object
The key to check membership for.
container : Container (e.g. list-like or mapping)
The container to check for membership in.
Returns
-------
is_in : bool
True if ``key`` is in ``self.categories`` and location of
``key`` in ``categories`` is in ``container``, else False.
Notes
-----
This method does not check for NaN values. Do that separately
before calling this method.
|
[
"Helper",
"for",
"membership",
"check",
"for",
"key",
"in",
"cat",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L144-L192
|
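The membership semantics spelled out in the docstring can be checked directly through the public `in` operator, for which this function is the documented helper (illustrative example):

import pandas as pd

cat = pd.Categorical(["a"], categories=["a", "b"])
print("a" in cat)              # True: a category, and present in the values
print("b" in cat)              # False: a category, but never used as a value
print("b" in cat.categories)   # True: membership in the category set itself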
19,796
|
pandas-dev/pandas
|
pandas/core/arrays/categorical.py
|
_get_codes_for_values
|
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
dtype_equal = is_dtype_equal(values.dtype, categories.dtype)
if dtype_equal:
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, '_ndarray_values', values)
categories = getattr(categories, '_ndarray_values', categories)
elif (is_extension_array_dtype(categories.dtype) and
is_object_dtype(values)):
# Support inferring the correct extension dtype from an array of
# scalar objects. e.g.
# Categorical(array[Period, Period], categories=PeriodIndex(...))
try:
values = (
categories.dtype.construct_array_type()._from_sequence(values)
)
except Exception:
# but that may fail for any reason, so fall back to object
values = ensure_object(values)
categories = ensure_object(categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
|
python
|
def _get_codes_for_values(values, categories):
"""
utility routine to turn values into codes given the specified categories
"""
from pandas.core.algorithms import _get_data_algo, _hashtables
dtype_equal = is_dtype_equal(values.dtype, categories.dtype)
if dtype_equal:
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, '_ndarray_values', values)
categories = getattr(categories, '_ndarray_values', categories)
elif (is_extension_array_dtype(categories.dtype) and
is_object_dtype(values)):
# Support inferring the correct extension dtype from an array of
# scalar objects. e.g.
# Categorical(array[Period, Period], categories=PeriodIndex(...))
try:
values = (
categories.dtype.construct_array_type()._from_sequence(values)
)
except Exception:
# but that may fail for any reason, so fall back to object
values = ensure_object(values)
categories = ensure_object(categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats)
|
[
"def",
"_get_codes_for_values",
"(",
"values",
",",
"categories",
")",
":",
"from",
"pandas",
".",
"core",
".",
"algorithms",
"import",
"_get_data_algo",
",",
"_hashtables",
"dtype_equal",
"=",
"is_dtype_equal",
"(",
"values",
".",
"dtype",
",",
"categories",
".",
"dtype",
")",
"if",
"dtype_equal",
":",
"# To prevent erroneous dtype coercion in _get_data_algo, retrieve",
"# the underlying numpy array. gh-22702",
"values",
"=",
"getattr",
"(",
"values",
",",
"'_ndarray_values'",
",",
"values",
")",
"categories",
"=",
"getattr",
"(",
"categories",
",",
"'_ndarray_values'",
",",
"categories",
")",
"elif",
"(",
"is_extension_array_dtype",
"(",
"categories",
".",
"dtype",
")",
"and",
"is_object_dtype",
"(",
"values",
")",
")",
":",
"# Support inferring the correct extension dtype from an array of",
"# scalar objects. e.g.",
"# Categorical(array[Period, Period], categories=PeriodIndex(...))",
"try",
":",
"values",
"=",
"(",
"categories",
".",
"dtype",
".",
"construct_array_type",
"(",
")",
".",
"_from_sequence",
"(",
"values",
")",
")",
"except",
"Exception",
":",
"# but that may fail for any reason, so fall back to object",
"values",
"=",
"ensure_object",
"(",
"values",
")",
"categories",
"=",
"ensure_object",
"(",
"categories",
")",
"else",
":",
"values",
"=",
"ensure_object",
"(",
"values",
")",
"categories",
"=",
"ensure_object",
"(",
"categories",
")",
"(",
"hash_klass",
",",
"vec_klass",
")",
",",
"vals",
"=",
"_get_data_algo",
"(",
"values",
",",
"_hashtables",
")",
"(",
"_",
",",
"_",
")",
",",
"cats",
"=",
"_get_data_algo",
"(",
"categories",
",",
"_hashtables",
")",
"t",
"=",
"hash_klass",
"(",
"len",
"(",
"cats",
")",
")",
"t",
".",
"map_locations",
"(",
"cats",
")",
"return",
"coerce_indexer_dtype",
"(",
"t",
".",
"lookup",
"(",
"vals",
")",
",",
"cats",
")"
] |
utility routine to turn values into codes given the specified categories
|
[
"utility",
"routine",
"to",
"turn",
"values",
"into",
"codes",
"given",
"the",
"specified",
"categories"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2549-L2582
|
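The codes-from-values mapping this routine computes is observable through the Categorical constructor; a small illustrative example (not taken from the row above):

import pandas as pd

cat = pd.Categorical(["a", "b", "a", "d"], categories=["b", "a", "c"])
print(cat.codes.tolist())      # [1, 0, 1, -1]: positions in `categories`
print(list(cat.categories))    # ['b', 'a', 'c']; 'd' is unseen, so code -1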
19,797
|
pandas-dev/pandas
|
pandas/core/arrays/categorical.py
|
_recode_for_categories
|
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
elif new_categories.equals(old_categories):
# Same categories, so no need to actually recode
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
|
python
|
def _recode_for_categories(codes, old_categories, new_categories):
"""
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
"""
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
elif new_categories.equals(old_categories):
# Same categories, so no need to actually recode
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes
|
[
"def",
"_recode_for_categories",
"(",
"codes",
",",
"old_categories",
",",
"new_categories",
")",
":",
"from",
"pandas",
".",
"core",
".",
"algorithms",
"import",
"take_1d",
"if",
"len",
"(",
"old_categories",
")",
"==",
"0",
":",
"# All null anyway, so just retain the nulls",
"return",
"codes",
".",
"copy",
"(",
")",
"elif",
"new_categories",
".",
"equals",
"(",
"old_categories",
")",
":",
"# Same categories, so no need to actually recode",
"return",
"codes",
".",
"copy",
"(",
")",
"indexer",
"=",
"coerce_indexer_dtype",
"(",
"new_categories",
".",
"get_indexer",
"(",
"old_categories",
")",
",",
"new_categories",
")",
"new_codes",
"=",
"take_1d",
"(",
"indexer",
",",
"codes",
".",
"copy",
"(",
")",
",",
"fill_value",
"=",
"-",
"1",
")",
"return",
"new_codes"
] |
Convert a set of codes to a new set of categories
Parameters
----------
codes : array
old_categories, new_categories : Index
Returns
-------
new_codes : array
Examples
--------
>>> old_cat = pd.Index(['b', 'a', 'c'])
>>> new_cat = pd.Index(['a', 'b'])
>>> codes = np.array([0, 1, 1, 2])
>>> _recode_for_categories(codes, old_cat, new_cat)
array([ 1, 0, 0, -1])
|
[
"Convert",
"a",
"set",
"of",
"codes",
"for",
"to",
"a",
"new",
"set",
"of",
"categories"
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2585-L2617
|
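A plain-numpy re-derivation of the recode step, matching the doctest in the row above (an illustrative sketch, not the pandas implementation):

import numpy as np

old = np.array(["b", "a", "c"])
new = np.array(["a", "b"])
codes = np.array([0, 1, 1, 2])

# Position of each old category in the new ordering; -1 if it was dropped.
indexer = np.array([np.where(new == c)[0][0] if c in new else -1
                    for c in old])
print(indexer[codes].tolist())   # [1, 0, 0, -1], as in the docstring example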
19,798
|
pandas-dev/pandas
|
pandas/core/arrays/categorical.py
|
_factorize_from_iterable
|
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories, dtype=values.dtype)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
|
python
|
def _factorize_from_iterable(values):
"""
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
"""
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories, dtype=values.dtype)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories
|
[
"def",
"_factorize_from_iterable",
"(",
"values",
")",
":",
"from",
"pandas",
".",
"core",
".",
"indexes",
".",
"category",
"import",
"CategoricalIndex",
"if",
"not",
"is_list_like",
"(",
"values",
")",
":",
"raise",
"TypeError",
"(",
"\"Input must be list-like\"",
")",
"if",
"is_categorical",
"(",
"values",
")",
":",
"if",
"isinstance",
"(",
"values",
",",
"(",
"ABCCategoricalIndex",
",",
"ABCSeries",
")",
")",
":",
"values",
"=",
"values",
".",
"_values",
"categories",
"=",
"CategoricalIndex",
"(",
"values",
".",
"categories",
",",
"dtype",
"=",
"values",
".",
"dtype",
")",
"codes",
"=",
"values",
".",
"codes",
"else",
":",
"# The value of ordered is irrelevant since we don't use cat as such,",
"# but only the resulting categories, the order of which is independent",
"# from ordered. Set ordered to False as default. See GH #15457",
"cat",
"=",
"Categorical",
"(",
"values",
",",
"ordered",
"=",
"False",
")",
"categories",
"=",
"cat",
".",
"categories",
"codes",
"=",
"cat",
".",
"codes",
"return",
"codes",
",",
"categories"
] |
Factorize an input `values` into `categories` and `codes`. Preserves
categorical dtype in `categories`.
*This is an internal function*
Parameters
----------
values : list-like
Returns
-------
codes : ndarray
categories : Index
If `values` has a categorical dtype, then `categories` is
a CategoricalIndex keeping the categories and order of `values`.
|
[
"Factorize",
"an",
"input",
"values",
"into",
"categories",
"and",
"codes",
".",
"Preserves",
"categorical",
"dtype",
"in",
"categories",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2635-L2670
|
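The public counterpart of this internal routine is pd.factorize; a short illustrative comparison:

import pandas as pd

codes, uniques = pd.factorize(["b", "a", "b", "c"])
print(codes.tolist())    # [0, 1, 0, 2]: first-appearance order
print(list(uniques))     # ['b', 'a', 'c']

# With categorical input the existing dtype and order are kept:
cat = pd.Categorical(["b", "a"], categories=["a", "b", "c"])
print(cat.codes.tolist())   # [1, 0]: codes against the fixed categories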
19,799
|
pandas-dev/pandas
|
pandas/core/arrays/categorical.py
|
_factorize_from_iterables
|
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
python
|
def _factorize_from_iterables(iterables):
"""
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
"""
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables]))
|
[
"def",
"_factorize_from_iterables",
"(",
"iterables",
")",
":",
"if",
"len",
"(",
"iterables",
")",
"==",
"0",
":",
"# For consistency, it should return a list of 2 lists.",
"return",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"return",
"map",
"(",
"list",
",",
"lzip",
"(",
"*",
"[",
"_factorize_from_iterable",
"(",
"it",
")",
"for",
"it",
"in",
"iterables",
"]",
")",
")"
] |
A higher-level wrapper over `_factorize_from_iterable`.
*This is an internal function*
Parameters
----------
iterables : list-like of list-likes
Returns
-------
codes_list : list of ndarrays
categories_list : list of Indexes
Notes
-----
See `_factorize_from_iterable` for more info.
|
[
"A",
"higher",
"-",
"level",
"wrapper",
"over",
"_factorize_from_iterable",
"."
] |
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/categorical.py#L2673-L2695
|
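A sketch of the zip-and-transpose pattern this wrapper applies, written against the public pd.factorize (illustrative; the real helper preserves categorical dtypes as described above):

import pandas as pd

iterables = [["b", "a", "b"], [10, 20, 10]]
pairs = [pd.factorize(it) for it in iterables]   # one (codes, cats) per input
codes_list = [codes for codes, _ in pairs]
categories_list = [cats for _, cats in pairs]
print([c.tolist() for c in codes_list])      # [[0, 1, 0], [0, 1, 0]]
print([list(c) for c in categories_list])    # [['b', 'a'], [10, 20]]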