Dataset schema (column: type, observed size range):

partition          string, 3 classes
func_name          string, length 1 to 134
docstring          string, length 1 to 46.9k
path               string, length 4 to 223
original_string    string, length 75 to 104k
code               string, length 75 to 104k
docstring_tokens   list, length 1 to 1.97k
repo               string, length 7 to 55
language           string, 1 class
url                string, length 87 to 315
code_tokens        list, length 19 to 28.4k
sha                string, length 40 to 40

Each record below lists partition, func_name, docstring, path, code, repo, language, url, and sha in that order; the original_string column is identical to code, and the token columns are tokenizations of the docstring and code, so each function appears once per record.
train
BusinessHourMixin.rollback
Roll provided date backward to next offset only if not on offset.
pandas/tseries/offsets.py
def rollback(self, dt):
    """
    Roll provided date backward to next offset only if not on offset.
    """
    if not self.onOffset(dt):
        businesshours = self._get_business_hours_by_sec
        if self.n >= 0:
            dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
        else:
            dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
    return dt
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L654-L666
[ "def", "rollback", "(", "self", ",", "dt", ")", ":", "if", "not", "self", ".", "onOffset", "(", "dt", ")", ":", "businesshours", "=", "self", ".", "_get_business_hours_by_sec", "if", "self", ".", "n", ">=", "0", ":", "dt", "=", "self", ".", "_prev_opening_time", "(", "dt", ")", "+", "timedelta", "(", "seconds", "=", "businesshours", ")", "else", ":", "dt", "=", "self", ".", "_next_opening_time", "(", "dt", ")", "+", "timedelta", "(", "seconds", "=", "businesshours", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
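A usage sketch for rollback (expected values assume the 0.24-era API at this sha, with BusinessHour's default 09:00 to 17:00 weekday schedule):

>>> import pandas as pd
>>> bh = pd.offsets.BusinessHour()
>>> bh.rollback(pd.Timestamp('2019-01-04 20:00'))  # Friday evening, not on offset
Timestamp('2019-01-04 17:00:00')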
train
BusinessHourMixin.rollforward
Roll provided date forward to next offset only if not on offset.
pandas/tseries/offsets.py
def rollforward(self, dt):
    """
    Roll provided date forward to next offset only if not on offset.
    """
    if not self.onOffset(dt):
        if self.n >= 0:
            return self._next_opening_time(dt)
        else:
            return self._prev_opening_time(dt)
    return dt
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L669-L678
[ "def", "rollforward", "(", "self", ",", "dt", ")", ":", "if", "not", "self", ".", "onOffset", "(", "dt", ")", ":", "if", "self", ".", "n", ">=", "0", ":", "return", "self", ".", "_next_opening_time", "(", "dt", ")", "else", ":", "return", "self", ".", "_prev_opening_time", "(", "dt", ")", "return", "dt" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
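The forward counterpart, under the same assumptions: a Friday evening rolls to Monday's opening time.

>>> import pandas as pd
>>> pd.offsets.BusinessHour().rollforward(pd.Timestamp('2019-01-04 20:00'))
Timestamp('2019-01-07 09:00:00')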
train
BusinessHourMixin._onOffset
Slight speedups using calculated values.
pandas/tseries/offsets.py
def _onOffset(self, dt, businesshours):
    """
    Slight speedups using calculated values.
    """
    # if self.normalize and not _is_normalized(dt):
    #     return False
    # Valid BH can be on the different BusinessDay during midnight
    # Distinguish by the time spent from previous opening time
    if self.n >= 0:
        op = self._prev_opening_time(dt)
    else:
        op = self._next_opening_time(dt)
    span = (dt - op).total_seconds()
    if span <= businesshours:
        return True
    else:
        return False
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L767-L783
[ "def", "_onOffset", "(", "self", ",", "dt", ",", "businesshours", ")", ":", "# if self.normalize and not _is_normalized(dt):", "# return False", "# Valid BH can be on the different BusinessDay during midnight", "# Distinguish by the time spent from previous opening time", "if", "self", ".", "n", ">=", "0", ":", "op", "=", "self", ".", "_prev_opening_time", "(", "dt", ")", "else", ":", "op", "=", "self", ".", "_next_opening_time", "(", "dt", ")", "span", "=", "(", "dt", "-", "op", ")", ".", "total_seconds", "(", ")", "if", "span", "<=", "businesshours", ":", "return", "True", "else", ":", "return", "False" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
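_onOffset is a private helper behind the public onOffset check (renamed is_on_offset in later pandas). A sketch of the behavior it backs, assuming the default schedule:

>>> import pandas as pd
>>> bh = pd.offsets.BusinessHour()
>>> bh.onOffset(pd.Timestamp('2019-01-04 10:00'))
True
>>> bh.onOffset(pd.Timestamp('2019-01-04 20:00'))
False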
train
_CustomBusinessMonth.cbday_roll
Define default roll function to be called in apply method.
pandas/tseries/offsets.py
def cbday_roll(self):
    """
    Define default roll function to be called in apply method.
    """
    cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds)

    if self._prefix.endswith('S'):
        # MonthBegin
        roll_func = cbday.rollforward
    else:
        # MonthEnd
        roll_func = cbday.rollback
    return roll_func
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1007-L1019
[ "def", "cbday_roll", "(", "self", ")", ":", "cbday", "=", "CustomBusinessDay", "(", "n", "=", "self", ".", "n", ",", "normalize", "=", "False", ",", "*", "*", "self", ".", "kwds", ")", "if", "self", ".", "_prefix", ".", "endswith", "(", "'S'", ")", ":", "# MonthBegin", "roll_func", "=", "cbday", ".", "rollforward", "else", ":", "# MonthEnd", "roll_func", "=", "cbday", ".", "rollback", "return", "roll_func" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
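cbday_roll merely picks between CustomBusinessDay.rollforward and .rollback. A sketch of those two rolls with a hypothetical single-holiday calendar:

>>> import pandas as pd
>>> cbd = pd.offsets.CustomBusinessDay(holidays=['2019-01-01'])
>>> cbd.rollforward(pd.Timestamp('2019-01-01'))  # skips the holiday
Timestamp('2019-01-02 00:00:00')
>>> cbd.rollback(pd.Timestamp('2019-01-01'))     # previous business day
Timestamp('2018-12-31 00:00:00')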
train
_CustomBusinessMonth.month_roll
Define default roll function to be called in apply method.
pandas/tseries/offsets.py
def month_roll(self):
    """
    Define default roll function to be called in apply method.
    """
    if self._prefix.endswith('S'):
        # MonthBegin
        roll_func = self.m_offset.rollback
    else:
        # MonthEnd
        roll_func = self.m_offset.rollforward
    return roll_func
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1032-L1042
[ "def", "month_roll", "(", "self", ")", ":", "if", "self", ".", "_prefix", ".", "endswith", "(", "'S'", ")", ":", "# MonthBegin", "roll_func", "=", "self", ".", "m_offset", ".", "rollback", "else", ":", "# MonthEnd", "roll_func", "=", "self", ".", "m_offset", ".", "rollforward", "return", "roll_func" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
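month_roll selects MonthBegin.rollback or MonthEnd.rollforward from m_offset; the underlying rolls, sketched against the same API, look like:

>>> import pandas as pd
>>> pd.offsets.MonthBegin().rollback(pd.Timestamp('2019-01-15'))
Timestamp('2019-01-01 00:00:00')
>>> pd.offsets.MonthEnd().rollforward(pd.Timestamp('2019-01-15'))
Timestamp('2019-01-31 00:00:00')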
train
SemiMonthBegin._apply_index_days
Add days portion of offset to DatetimeIndex i.

Parameters
----------
i : DatetimeIndex
roll : ndarray[int64_t]

Returns
-------
result : DatetimeIndex
pandas/tseries/offsets.py
def _apply_index_days(self, i, roll):
    """
    Add days portion of offset to DatetimeIndex i.

    Parameters
    ----------
    i : DatetimeIndex
    roll : ndarray[int64_t]

    Returns
    -------
    result : DatetimeIndex
    """
    nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value
    return i + nanos.astype('timedelta64[ns]')
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1279-L1293
[ "def", "_apply_index_days", "(", "self", ",", "i", ",", "roll", ")", ":", "nanos", "=", "(", "roll", "%", "2", ")", "*", "Timedelta", "(", "days", "=", "self", ".", "day_of_month", "-", "1", ")", ".", "value", "return", "i", "+", "nanos", ".", "astype", "(", "'timedelta64[ns]'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
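A small sketch of the days-portion arithmetic, with a hypothetical roll array standing in for the output of the months step:

import numpy as np
from pandas import Timedelta

roll = np.array([0, 1, 2, 3])   # hypothetical roll counts from the months step
day_of_month = 15               # SemiMonthBegin default anchor

nanos = (roll % 2) * Timedelta(days=day_of_month - 1).value
nanos.astype('timedelta64[ns]')
# even roll counts add nothing (those dates stay on the 1st); odd counts
# add 14 days, moving those dates from the 1st to the 15th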
train
Week._end_apply_index
Add self to the given DatetimeIndex, specialized for case where
self.weekday is non-null.

Parameters
----------
dtindex : DatetimeIndex

Returns
-------
result : DatetimeIndex
pandas/tseries/offsets.py
def _end_apply_index(self, dtindex):
    """
    Add self to the given DatetimeIndex, specialized for case where
    self.weekday is non-null.

    Parameters
    ----------
    dtindex : DatetimeIndex

    Returns
    -------
    result : DatetimeIndex
    """
    off = dtindex.to_perioddelta('D')

    base, mult = libfrequencies.get_freq_code(self.freqstr)
    base_period = dtindex.to_period(base)
    if not isinstance(base_period._data, np.ndarray):
        # unwrap PeriodIndex --> PeriodArray
        base_period = base_period._data

    if self.n > 0:
        # when adding, dates on end roll to next
        normed = dtindex - off + Timedelta(1, 'D') - Timedelta(1, 'ns')
        roll = np.where(base_period.to_timestamp(how='end') == normed,
                        self.n, self.n - 1)
        # integer-array addition on PeriodIndex is deprecated,
        # so we use _addsub_int_array directly
        shifted = base_period._addsub_int_array(roll, operator.add)
        base = shifted.to_timestamp(how='end')
    else:
        # integer addition on PeriodIndex is deprecated,
        # so we use _time_shift directly
        roll = self.n
        base = base_period._time_shift(roll).to_timestamp(how='end')

    return base + off + Timedelta(1, 'ns') - Timedelta(1, 'D')
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1354-L1390
[ "def", "_end_apply_index", "(", "self", ",", "dtindex", ")", ":", "off", "=", "dtindex", ".", "to_perioddelta", "(", "'D'", ")", "base", ",", "mult", "=", "libfrequencies", ".", "get_freq_code", "(", "self", ".", "freqstr", ")", "base_period", "=", "dtindex", ".", "to_period", "(", "base", ")", "if", "not", "isinstance", "(", "base_period", ".", "_data", ",", "np", ".", "ndarray", ")", ":", "# unwrap PeriodIndex --> PeriodArray", "base_period", "=", "base_period", ".", "_data", "if", "self", ".", "n", ">", "0", ":", "# when adding, dates on end roll to next", "normed", "=", "dtindex", "-", "off", "+", "Timedelta", "(", "1", ",", "'D'", ")", "-", "Timedelta", "(", "1", ",", "'ns'", ")", "roll", "=", "np", ".", "where", "(", "base_period", ".", "to_timestamp", "(", "how", "=", "'end'", ")", "==", "normed", ",", "self", ".", "n", ",", "self", ".", "n", "-", "1", ")", "# integer-array addition on PeriodIndex is deprecated,", "# so we use _addsub_int_array directly", "shifted", "=", "base_period", ".", "_addsub_int_array", "(", "roll", ",", "operator", ".", "add", ")", "base", "=", "shifted", ".", "to_timestamp", "(", "how", "=", "'end'", ")", "else", ":", "# integer addition on PeriodIndex is deprecated,", "# so we use _time_shift directly", "roll", "=", "self", ".", "n", "base", "=", "base_period", ".", "_time_shift", "(", "roll", ")", ".", "to_timestamp", "(", "how", "=", "'end'", ")", "return", "base", "+", "off", "+", "Timedelta", "(", "1", ",", "'ns'", ")", "-", "Timedelta", "(", "1", ",", "'D'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
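The user-visible behavior this vectorized path implements, sketched against the 0.24-era API (exact repr details may vary):

>>> import pandas as pd
>>> idx = pd.date_range('2019-01-01', periods=3)   # Tue, Wed, Thu
>>> idx + pd.offsets.Week(weekday=4)               # snap each date to the next Friday
DatetimeIndex(['2019-01-04', '2019-01-04', '2019-01-04'], dtype='datetime64[ns]', freq=None)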
train
WeekOfMonth._get_offset_day
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.

Parameters
----------
other : datetime

Returns
-------
day : int
pandas/tseries/offsets.py
def _get_offset_day(self, other):
    """
    Find the day in the same month as other that has the same
    weekday as self.weekday and is the self.week'th such day in the month.

    Parameters
    ----------
    other : datetime

    Returns
    -------
    day : int
    """
    mstart = datetime(other.year, other.month, 1)
    wday = mstart.weekday()
    shift_days = (self.weekday - wday) % 7
    return 1 + shift_days + self.week * 7
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1474-L1490
[ "def", "_get_offset_day", "(", "self", ",", "other", ")", ":", "mstart", "=", "datetime", "(", "other", ".", "year", ",", "other", ".", "month", ",", "1", ")", "wday", "=", "mstart", ".", "weekday", "(", ")", "shift_days", "=", "(", "self", ".", "weekday", "-", "wday", ")", "%", "7", "return", "1", "+", "shift_days", "+", "self", ".", "week", "*", "7" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
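The arithmetic is self-contained, so it can be checked with a standalone replica (names here are hypothetical, for illustration only):

from datetime import datetime

def week_of_month_day(other, week, weekday):
    # same arithmetic as WeekOfMonth._get_offset_day above
    mstart = datetime(other.year, other.month, 1)
    shift_days = (weekday - mstart.weekday()) % 7
    return 1 + shift_days + week * 7

# third Friday of January 2019: week=2 (zero-based), weekday=4 (Friday)
week_of_month_day(datetime(2019, 1, 10), week=2, weekday=4)   # 18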
train
LastWeekOfMonth._get_offset_day
Find the day in the same month as other that has the same
weekday as self.weekday and is the last such day in the month.

Parameters
----------
other: datetime

Returns
-------
day: int
pandas/tseries/offsets.py
def _get_offset_day(self, other):
    """
    Find the day in the same month as other that has the same
    weekday as self.weekday and is the last such day in the month.

    Parameters
    ----------
    other: datetime

    Returns
    -------
    day: int
    """
    dim = ccalendar.get_days_in_month(other.year, other.month)
    mend = datetime(other.year, other.month, dim)
    wday = mend.weekday()
    shift_days = (wday - self.weekday) % 7
    return dim - shift_days
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L1543-L1560
[ "def", "_get_offset_day", "(", "self", ",", "other", ")", ":", "dim", "=", "ccalendar", ".", "get_days_in_month", "(", "other", ".", "year", ",", "other", ".", "month", ")", "mend", "=", "datetime", "(", "other", ".", "year", ",", "other", ".", "month", ",", "dim", ")", "wday", "=", "mend", ".", "weekday", "(", ")", "shift_days", "=", "(", "wday", "-", "self", ".", "weekday", ")", "%", "7", "return", "dim", "-", "shift_days" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
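Again a standalone replica, using the stdlib calendar module in place of pandas' ccalendar helper (a sketch, not the library's implementation):

import calendar
from datetime import datetime

def last_week_of_month_day(year, month, weekday):
    # same arithmetic as LastWeekOfMonth._get_offset_day above
    dim = calendar.monthrange(year, month)[1]   # days in the month
    mend = datetime(year, month, dim)
    shift_days = (mend.weekday() - weekday) % 7
    return dim - shift_days

last_week_of_month_day(2019, 1, weekday=0)   # 28, the last Monday of January 2019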
train
FY5253Quarter._rollback_to_year
Roll `other` back to the most recent date that was on a fiscal year
end.

Return the date of that year-end, the number of full quarters
elapsed between that year-end and other, and the remaining Timedelta
since the most recent quarter-end.

Parameters
----------
other : datetime or Timestamp

Returns
-------
tuple of
prev_year_end : Timestamp giving most recent fiscal year end
num_qtrs : int
tdelta : Timedelta
pandas/tseries/offsets.py
def _rollback_to_year(self, other):
    """
    Roll `other` back to the most recent date that was on a fiscal year
    end.

    Return the date of that year-end, the number of full quarters
    elapsed between that year-end and other, and the remaining Timedelta
    since the most recent quarter-end.

    Parameters
    ----------
    other : datetime or Timestamp

    Returns
    -------
    tuple of
    prev_year_end : Timestamp giving most recent fiscal year end
    num_qtrs : int
    tdelta : Timedelta
    """
    num_qtrs = 0

    norm = Timestamp(other).tz_localize(None)
    start = self._offset.rollback(norm)
    # Note: start <= norm and self._offset.onOffset(start)

    if start < norm:
        # roll adjustment
        qtr_lens = self.get_weeks(norm)

        # check that qtr_lens is consistent with self._offset addition
        end = liboffsets.shift_day(start, days=7 * sum(qtr_lens))
        assert self._offset.onOffset(end), (start, end, qtr_lens)

        tdelta = norm - start
        for qlen in qtr_lens:
            if qlen * 7 <= tdelta.days:
                num_qtrs += 1
                tdelta -= Timedelta(days=qlen * 7)
            else:
                break
    else:
        tdelta = Timedelta(0)

    # Note: we always have tdelta.value >= 0
    return start, num_qtrs, tdelta
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/tseries/offsets.py#L2054-L2099
[ "def", "_rollback_to_year", "(", "self", ",", "other", ")", ":", "num_qtrs", "=", "0", "norm", "=", "Timestamp", "(", "other", ")", ".", "tz_localize", "(", "None", ")", "start", "=", "self", ".", "_offset", ".", "rollback", "(", "norm", ")", "# Note: start <= norm and self._offset.onOffset(start)", "if", "start", "<", "norm", ":", "# roll adjustment", "qtr_lens", "=", "self", ".", "get_weeks", "(", "norm", ")", "# check thet qtr_lens is consistent with self._offset addition", "end", "=", "liboffsets", ".", "shift_day", "(", "start", ",", "days", "=", "7", "*", "sum", "(", "qtr_lens", ")", ")", "assert", "self", ".", "_offset", ".", "onOffset", "(", "end", ")", ",", "(", "start", ",", "end", ",", "qtr_lens", ")", "tdelta", "=", "norm", "-", "start", "for", "qlen", "in", "qtr_lens", ":", "if", "qlen", "*", "7", "<=", "tdelta", ".", "days", ":", "num_qtrs", "+=", "1", "tdelta", "-=", "Timedelta", "(", "days", "=", "qlen", "*", "7", ")", "else", ":", "break", "else", ":", "tdelta", "=", "Timedelta", "(", "0", ")", "# Note: we always have tdelta.value >= 0", "return", "start", ",", "num_qtrs", ",", "tdelta" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
concat
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.

Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.

Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
    If a dict is passed, the sorted keys will be used as the `keys`
    argument, unless it is passed, in which case the values will be
    selected (see below). Any None objects will be dropped silently unless
    they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
    The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
    How to handle indexes on other axis (or axes).
join_axes : list of Index objects
    Specific indexes to use for the other n - 1 axes instead of performing
    inner/outer set logic.
ignore_index : bool, default False
    If True, do not use the index values along the concatenation axis. The
    resulting axis will be labeled 0, ..., n - 1. This is useful if you are
    concatenating objects where the concatenation axis does not have
    meaningful indexing information. Note the index values on the other
    axes are still respected in the join.
keys : sequence, default None
    If multiple levels passed, should contain tuples. Construct
    hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
    Specific levels (unique values) to use for constructing a
    MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
    Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
    Check whether the new concatenated axis contains duplicates. This can
    be very expensive relative to the actual data concatenation.
sort : bool, default None
    Sort non-concatenation axis if it is not already aligned when `join`
    is 'outer'. The current default of sorting is deprecated and will
    change to not-sorting in a future version of pandas.

    Explicitly pass ``sort=True`` to silence the warning and sort.
    Explicitly pass ``sort=False`` to silence the warning and not sort.

    This has no effect when ``join='inner'``, which already preserves
    the order of the non-concatenation axis.

    .. versionadded:: 0.23.0

copy : bool, default True
    If False, do not copy data unnecessarily.

Returns
-------
object, type of objs
    When concatenating all ``Series`` along the index (axis=0), a
    ``Series`` is returned. When ``objs`` contains at least one
    ``DataFrame``, a ``DataFrame`` is returned. When concatenating along
    the columns (axis=1), a ``DataFrame`` is returned.

See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.

Notes
-----
The keys, levels, and names arguments are all optional.

A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.

Examples
--------
Combine two ``Series``.

>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0    a
1    b
0    c
1    d
dtype: object

Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.

>>> pd.concat([s1, s2], ignore_index=True)
0    a
1    b
2    c
3    d
dtype: object

Add a hierarchical index at the outermost level of
the data with the ``keys`` option.

>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1  0    a
    1    b
s2  0    c
    1    d
dtype: object

Label the index keys you create with the ``names`` option.

>>> pd.concat([s1, s2], keys=['s1', 's2'],
...           names=['Series name', 'Row ID'])
Series name  Row ID
s1           0         a
             1         b
s2           0         c
             1         d
dtype: object

Combine two ``DataFrame`` objects with identical columns.

>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
...                    columns=['letter', 'number'])
>>> df1
  letter  number
0      a       1
1      b       2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
...                    columns=['letter', 'number'])
>>> df2
  letter  number
0      c       3
1      d       4
>>> pd.concat([df1, df2])
  letter  number
0      a       1
1      b       2
0      c       3
1      d       4

Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.

>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
...                    columns=['letter', 'number', 'animal'])
>>> df3
  letter  number animal
0      c       3    cat
1      d       4    dog
>>> pd.concat([df1, df3], sort=False)
  letter  number animal
0      a       1    NaN
1      b       2    NaN
0      c       3    cat
1      d       4    dog

Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.

>>> pd.concat([df1, df3], join="inner")
  letter  number
0      a       1
1      b       2
0      c       3
1      d       4

Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.

>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
...                    columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
  letter  number  animal    name
0      a       1    bird   polly
1      b       2  monkey  george

Prevent the result from including duplicate index values with the
``verify_integrity`` option.

>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
   0
a  1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
   0
a  2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
    ...
ValueError: Indexes have overlapping values: ['a']
pandas/core/reshape/concat.py
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
           keys=None, levels=None, names=None, verify_integrity=False,
           sort=None, copy=True):
    """
    Concatenate pandas objects along a particular axis with optional set logic
    along the other axes.

    Can also add a layer of hierarchical indexing on the concatenation axis,
    which may be useful if the labels are the same (or overlapping) on
    the passed axis number.

    Parameters
    ----------
    objs : a sequence or mapping of Series, DataFrame, or Panel objects
        If a dict is passed, the sorted keys will be used as the `keys`
        argument, unless it is passed, in which case the values will be
        selected (see below). Any None objects will be dropped silently unless
        they are all None in which case a ValueError will be raised.
    axis : {0/'index', 1/'columns'}, default 0
        The axis to concatenate along.
    join : {'inner', 'outer'}, default 'outer'
        How to handle indexes on other axis (or axes).
    join_axes : list of Index objects
        Specific indexes to use for the other n - 1 axes instead of performing
        inner/outer set logic.
    ignore_index : bool, default False
        If True, do not use the index values along the concatenation axis. The
        resulting axis will be labeled 0, ..., n - 1. This is useful if you are
        concatenating objects where the concatenation axis does not have
        meaningful indexing information. Note the index values on the other
        axes are still respected in the join.
    keys : sequence, default None
        If multiple levels passed, should contain tuples. Construct
        hierarchical index using the passed keys as the outermost level.
    levels : list of sequences, default None
        Specific levels (unique values) to use for constructing a
        MultiIndex. Otherwise they will be inferred from the keys.
    names : list, default None
        Names for the levels in the resulting hierarchical index.
    verify_integrity : bool, default False
        Check whether the new concatenated axis contains duplicates. This can
        be very expensive relative to the actual data concatenation.
    sort : bool, default None
        Sort non-concatenation axis if it is not already aligned when `join`
        is 'outer'. The current default of sorting is deprecated and will
        change to not-sorting in a future version of pandas.

        Explicitly pass ``sort=True`` to silence the warning and sort.
        Explicitly pass ``sort=False`` to silence the warning and not sort.

        This has no effect when ``join='inner'``, which already preserves
        the order of the non-concatenation axis.

        .. versionadded:: 0.23.0

    copy : bool, default True
        If False, do not copy data unnecessarily.

    Returns
    -------
    object, type of objs
        When concatenating all ``Series`` along the index (axis=0), a
        ``Series`` is returned. When ``objs`` contains at least one
        ``DataFrame``, a ``DataFrame`` is returned. When concatenating along
        the columns (axis=1), a ``DataFrame`` is returned.

    See Also
    --------
    Series.append : Concatenate Series.
    DataFrame.append : Concatenate DataFrames.
    DataFrame.join : Join DataFrames using indexes.
    DataFrame.merge : Merge DataFrames by indexes or columns.

    Notes
    -----
    The keys, levels, and names arguments are all optional.

    A walkthrough of how this method fits in with other tools for combining
    pandas objects can be found `here
    <http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.

    Examples
    --------
    Combine two ``Series``.

    >>> s1 = pd.Series(['a', 'b'])
    >>> s2 = pd.Series(['c', 'd'])
    >>> pd.concat([s1, s2])
    0    a
    1    b
    0    c
    1    d
    dtype: object

    Clear the existing index and reset it in the result
    by setting the ``ignore_index`` option to ``True``.

    >>> pd.concat([s1, s2], ignore_index=True)
    0    a
    1    b
    2    c
    3    d
    dtype: object

    Add a hierarchical index at the outermost level of
    the data with the ``keys`` option.

    >>> pd.concat([s1, s2], keys=['s1', 's2'])
    s1  0    a
        1    b
    s2  0    c
        1    d
    dtype: object

    Label the index keys you create with the ``names`` option.

    >>> pd.concat([s1, s2], keys=['s1', 's2'],
    ...           names=['Series name', 'Row ID'])
    Series name  Row ID
    s1           0         a
                 1         b
    s2           0         c
                 1         d
    dtype: object

    Combine two ``DataFrame`` objects with identical columns.

    >>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
    ...                    columns=['letter', 'number'])
    >>> df1
      letter  number
    0      a       1
    1      b       2
    >>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
    ...                    columns=['letter', 'number'])
    >>> df2
      letter  number
    0      c       3
    1      d       4
    >>> pd.concat([df1, df2])
      letter  number
    0      a       1
    1      b       2
    0      c       3
    1      d       4

    Combine ``DataFrame`` objects with overlapping columns
    and return everything. Columns outside the intersection will
    be filled with ``NaN`` values.

    >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
    ...                    columns=['letter', 'number', 'animal'])
    >>> df3
      letter  number animal
    0      c       3    cat
    1      d       4    dog
    >>> pd.concat([df1, df3], sort=False)
      letter  number animal
    0      a       1    NaN
    1      b       2    NaN
    0      c       3    cat
    1      d       4    dog

    Combine ``DataFrame`` objects with overlapping columns
    and return only those that are shared by passing ``inner`` to
    the ``join`` keyword argument.

    >>> pd.concat([df1, df3], join="inner")
      letter  number
    0      a       1
    1      b       2
    0      c       3
    1      d       4

    Combine ``DataFrame`` objects horizontally along the x axis by
    passing in ``axis=1``.

    >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
    ...                    columns=['animal', 'name'])
    >>> pd.concat([df1, df4], axis=1)
      letter  number  animal    name
    0      a       1    bird   polly
    1      b       2  monkey  george

    Prevent the result from including duplicate index values with the
    ``verify_integrity`` option.

    >>> df5 = pd.DataFrame([1], index=['a'])
    >>> df5
       0
    a  1
    >>> df6 = pd.DataFrame([2], index=['a'])
    >>> df6
       0
    a  2
    >>> pd.concat([df5, df6], verify_integrity=True)
    Traceback (most recent call last):
        ...
    ValueError: Indexes have overlapping values: ['a']
    """
    op = _Concatenator(objs, axis=axis, join_axes=join_axes,
                       ignore_index=ignore_index, join=join,
                       keys=keys, levels=levels, names=names,
                       verify_integrity=verify_integrity,
                       copy=copy, sort=sort)
    return op.get_result()
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/concat.py#L24-L229
[ "def", "concat", "(", "objs", ",", "axis", "=", "0", ",", "join", "=", "'outer'", ",", "join_axes", "=", "None", ",", "ignore_index", "=", "False", ",", "keys", "=", "None", ",", "levels", "=", "None", ",", "names", "=", "None", ",", "verify_integrity", "=", "False", ",", "sort", "=", "None", ",", "copy", "=", "True", ")", ":", "op", "=", "_Concatenator", "(", "objs", ",", "axis", "=", "axis", ",", "join_axes", "=", "join_axes", ",", "ignore_index", "=", "ignore_index", ",", "join", "=", "join", ",", "keys", "=", "keys", ",", "levels", "=", "levels", ",", "names", "=", "names", ",", "verify_integrity", "=", "verify_integrity", ",", "copy", "=", "copy", ",", "sort", "=", "sort", ")", "return", "op", ".", "get_result", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_Concatenator._get_concat_axis
Return index to be used along concatenation axis.
pandas/core/reshape/concat.py
def _get_concat_axis(self):
    """
    Return index to be used along concatenation axis.
    """
    if self._is_series:
        if self.axis == 0:
            indexes = [x.index for x in self.objs]
        elif self.ignore_index:
            idx = ibase.default_index(len(self.objs))
            return idx
        elif self.keys is None:
            names = [None] * len(self.objs)
            num = 0
            has_names = False
            for i, x in enumerate(self.objs):
                if not isinstance(x, Series):
                    raise TypeError("Cannot concatenate type 'Series' "
                                    "with object of type {type!r}"
                                    .format(type=type(x).__name__))
                if x.name is not None:
                    names[i] = x.name
                    has_names = True
                else:
                    names[i] = num
                    num += 1
            if has_names:
                return Index(names)
            else:
                return ibase.default_index(len(self.objs))
        else:
            return ensure_index(self.keys).set_names(self.names)
    else:
        indexes = [x._data.axes[self.axis] for x in self.objs]

    if self.ignore_index:
        idx = ibase.default_index(sum(len(i) for i in indexes))
        return idx

    if self.keys is None:
        concat_axis = _concat_indexes(indexes)
    else:
        concat_axis = _make_concat_multiindex(indexes, self.keys,
                                              self.levels, self.names)

    self._maybe_check_integrity(concat_axis)

    return concat_axis
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/concat.py#L475-L521
[ "def", "_get_concat_axis", "(", "self", ")", ":", "if", "self", ".", "_is_series", ":", "if", "self", ".", "axis", "==", "0", ":", "indexes", "=", "[", "x", ".", "index", "for", "x", "in", "self", ".", "objs", "]", "elif", "self", ".", "ignore_index", ":", "idx", "=", "ibase", ".", "default_index", "(", "len", "(", "self", ".", "objs", ")", ")", "return", "idx", "elif", "self", ".", "keys", "is", "None", ":", "names", "=", "[", "None", "]", "*", "len", "(", "self", ".", "objs", ")", "num", "=", "0", "has_names", "=", "False", "for", "i", ",", "x", "in", "enumerate", "(", "self", ".", "objs", ")", ":", "if", "not", "isinstance", "(", "x", ",", "Series", ")", ":", "raise", "TypeError", "(", "\"Cannot concatenate type 'Series' \"", "\"with object of type {type!r}\"", ".", "format", "(", "type", "=", "type", "(", "x", ")", ".", "__name__", ")", ")", "if", "x", ".", "name", "is", "not", "None", ":", "names", "[", "i", "]", "=", "x", ".", "name", "has_names", "=", "True", "else", ":", "names", "[", "i", "]", "=", "num", "num", "+=", "1", "if", "has_names", ":", "return", "Index", "(", "names", ")", "else", ":", "return", "ibase", ".", "default_index", "(", "len", "(", "self", ".", "objs", ")", ")", "else", ":", "return", "ensure_index", "(", "self", ".", "keys", ")", ".", "set_names", "(", "self", ".", "names", ")", "else", ":", "indexes", "=", "[", "x", ".", "_data", ".", "axes", "[", "self", ".", "axis", "]", "for", "x", "in", "self", ".", "objs", "]", "if", "self", ".", "ignore_index", ":", "idx", "=", "ibase", ".", "default_index", "(", "sum", "(", "len", "(", "i", ")", "for", "i", "in", "indexes", ")", ")", "return", "idx", "if", "self", ".", "keys", "is", "None", ":", "concat_axis", "=", "_concat_indexes", "(", "indexes", ")", "else", ":", "concat_axis", "=", "_make_concat_multiindex", "(", "indexes", ",", "self", ".", "keys", ",", "self", ".", "levels", ",", "self", ".", "names", ")", "self", ".", "_maybe_check_integrity", "(", "concat_axis", ")", "return", "concat_axis" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
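A sketch of the Series-naming branch above: named Series contribute their names to the new axis, unnamed ones get their positional number, and all-unnamed inputs fall back to a default RangeIndex (outputs assume the 0.24-era API):

>>> import pandas as pd
>>> pd.concat([pd.Series([1], name='a'), pd.Series([2])], axis=1).columns
Index(['a', 0], dtype='object')
>>> pd.concat([pd.Series([1]), pd.Series([2])], axis=1).columns
RangeIndex(start=0, stop=2, step=1)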
train
_in
Compute the vectorized membership of ``x in y`` if possible, otherwise use Python.
pandas/core/computation/ops.py
def _in(x, y):
    """Compute the vectorized membership of ``x in y`` if possible, otherwise
    use Python.
    """
    try:
        return x.isin(y)
    except AttributeError:
        if is_list_like(x):
            try:
                return y.isin(x)
            except AttributeError:
                pass
        return x in y
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L234-L246
[ "def", "_in", "(", "x", ",", "y", ")", ":", "try", ":", "return", "x", ".", "isin", "(", "y", ")", "except", "AttributeError", ":", "if", "is_list_like", "(", "x", ")", ":", "try", ":", "return", "y", ".", "isin", "(", "x", ")", "except", "AttributeError", ":", "pass", "return", "x", "in", "y" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
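_in (and the complementary _not_in in the next record) back the ``in`` / ``not in`` operators in DataFrame.query and pd.eval, dispatching to Series.isin when they can. A sketch of the user-visible behavior:

>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2, 3]})
>>> df.query('a in [1, 3]')       # routed through Series.isin
   a
0  1
2  3
>>> df.query('a not in [1, 3]')
   a
1  2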
train
_not_in
Compute the vectorized membership of ``x not in y`` if possible, otherwise use Python.
pandas/core/computation/ops.py
def _not_in(x, y):
    """Compute the vectorized membership of ``x not in y`` if possible,
    otherwise use Python.
    """
    try:
        return ~x.isin(y)
    except AttributeError:
        if is_list_like(x):
            try:
                return ~y.isin(x)
            except AttributeError:
                pass
        return x not in y
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L249-L261
[ "def", "_not_in", "(", "x", ",", "y", ")", ":", "try", ":", "return", "~", "x", ".", "isin", "(", "y", ")", "except", "AttributeError", ":", "if", "is_list_like", "(", "x", ")", ":", "try", ":", "return", "~", "y", ".", "isin", "(", "x", ")", "except", "AttributeError", ":", "pass", "return", "x", "not", "in", "y" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_cast_inplace
Cast an expression inplace.

Parameters
----------
terms : Op
    The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
    Will not cast if term's dtype in this list.

    .. versionadded:: 0.19.0

dtype : str or numpy.dtype
    The dtype to cast to.
pandas/core/computation/ops.py
def _cast_inplace(terms, acceptable_dtypes, dtype):
    """Cast an expression inplace.

    Parameters
    ----------
    terms : Op
        The expression that should cast.
    acceptable_dtypes : list of acceptable numpy.dtype
        Will not cast if term's dtype in this list.

        .. versionadded:: 0.19.0

    dtype : str or numpy.dtype
        The dtype to cast to.
    """
    dt = np.dtype(dtype)
    for term in terms:
        if term.type in acceptable_dtypes:
            continue

        try:
            new_value = term.value.astype(dt)
        except AttributeError:
            new_value = dt.type(term.value)
        term.update(new_value)
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L288-L312
[ "def", "_cast_inplace", "(", "terms", ",", "acceptable_dtypes", ",", "dtype", ")", ":", "dt", "=", "np", ".", "dtype", "(", "dtype", ")", "for", "term", "in", "terms", ":", "if", "term", ".", "type", "in", "acceptable_dtypes", ":", "continue", "try", ":", "new_value", "=", "term", ".", "value", ".", "astype", "(", "dt", ")", "except", "AttributeError", ":", "new_value", "=", "dt", ".", "type", "(", "term", ".", "value", ")", "term", ".", "update", "(", "new_value", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
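The cast logic itself needs only a value, a dtype, and an update hook, so it can be sketched with a hypothetical stand-in for a term (DummyTerm is invented here for illustration, not part of pandas):

import numpy as np

class DummyTerm:
    # hypothetical stand-in for a computation term
    def __init__(self, value):
        self.value = value

    @property
    def type(self):
        return np.asarray(self.value).dtype

    def update(self, value):
        self.value = value

terms = [DummyTerm(np.array([1, 2])), DummyTerm(3.5)]
dt = np.dtype('float32')
for term in terms:
    if term.type in (dt,):
        continue
    try:
        new_value = term.value.astype(dt)   # array-like values cast directly
    except AttributeError:
        new_value = dt.type(term.value)     # plain scalars wrapped in dt.type
    term.update(new_value)

terms[0].value.dtype   # dtype('float32')
terms[1].value         # numpy.float32(3.5)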
train
Term.update
search order for local (i.e., @variable) variables:

scope, key_variable
[('locals', 'local_name'),
 ('globals', 'local_name'),
 ('locals', 'key'),
 ('globals', 'key')]
pandas/core/computation/ops.py
def update(self, value):
    """
    search order for local (i.e., @variable) variables:

    scope, key_variable
    [('locals', 'local_name'),
     ('globals', 'local_name'),
     ('locals', 'key'),
     ('globals', 'key')]
    """
    key = self.name

    # if it's a variable name (otherwise a constant)
    if isinstance(key, str):
        self.env.swapkey(self.local_name, key, new_value=value)

    self.value = value
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L88-L104
[ "def", "update", "(", "self", ",", "value", ")", ":", "key", "=", "self", ".", "name", "# if it's a variable name (otherwise a constant)", "if", "isinstance", "(", "key", ",", "str", ")", ":", "self", ".", "env", ".", "swapkey", "(", "self", ".", "local_name", ",", "key", ",", "new_value", "=", "value", ")", "self", ".", "value", "=", "value" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
BinOp.evaluate
Evaluate a binary operation *before* being passed to the engine.

Parameters
----------
env : Scope
engine : str
parser : str
term_type : type
eval_in_python : list

Returns
-------
term_type
    The "pre-evaluated" expression as an instance of ``term_type``
pandas/core/computation/ops.py
def evaluate(self, env, engine, parser, term_type, eval_in_python):
    """Evaluate a binary operation *before* being passed to the engine.

    Parameters
    ----------
    env : Scope
    engine : str
    parser : str
    term_type : type
    eval_in_python : list

    Returns
    -------
    term_type
        The "pre-evaluated" expression as an instance of ``term_type``
    """
    if engine == 'python':
        res = self(env)
    else:
        # recurse over the left/right nodes
        left = self.lhs.evaluate(env, engine=engine, parser=parser,
                                 term_type=term_type,
                                 eval_in_python=eval_in_python)
        right = self.rhs.evaluate(env, engine=engine, parser=parser,
                                  term_type=term_type,
                                  eval_in_python=eval_in_python)

        # base cases
        if self.op in eval_in_python:
            res = self.func(left.value, right.value)
        else:
            from pandas.core.computation.eval import eval
            res = eval(self, local_dict=env, engine=engine,
                       parser=parser)

    name = env.add_tmp(res)
    return term_type(name, env=env)
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L369-L405
[ "def", "evaluate", "(", "self", ",", "env", ",", "engine", ",", "parser", ",", "term_type", ",", "eval_in_python", ")", ":", "if", "engine", "==", "'python'", ":", "res", "=", "self", "(", "env", ")", "else", ":", "# recurse over the left/right nodes", "left", "=", "self", ".", "lhs", ".", "evaluate", "(", "env", ",", "engine", "=", "engine", ",", "parser", "=", "parser", ",", "term_type", "=", "term_type", ",", "eval_in_python", "=", "eval_in_python", ")", "right", "=", "self", ".", "rhs", ".", "evaluate", "(", "env", ",", "engine", "=", "engine", ",", "parser", "=", "parser", ",", "term_type", "=", "term_type", ",", "eval_in_python", "=", "eval_in_python", ")", "# base cases", "if", "self", ".", "op", "in", "eval_in_python", ":", "res", "=", "self", ".", "func", "(", "left", ".", "value", ",", "right", ".", "value", ")", "else", ":", "from", "pandas", ".", "core", ".", "computation", ".", "eval", "import", "eval", "res", "=", "eval", "(", "self", ",", "local_dict", "=", "env", ",", "engine", "=", "engine", ",", "parser", "=", "parser", ")", "name", "=", "env", ".", "add_tmp", "(", "res", ")", "return", "term_type", "(", "name", ",", "env", "=", "env", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
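One place this pre-evaluation shows up for users: membership tests are on the eval-in-Python list, so in a mixed query the ``in`` clause is computed with Python/isin while the comparison can still go to the numexpr engine when it is installed. A hedged sketch:

>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [0.5, -1.0, 2.0]})
>>> df.query('a in [1, 3] and b > 0')
   a    b
0  1  0.5
2  3  2.0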
train
BinOp.convert_values
Convert datetimes to a comparable value in an expression.
pandas/core/computation/ops.py
def convert_values(self):
    """Convert datetimes to a comparable value in an expression.
    """
    def stringify(value):
        if self.encoding is not None:
            encoder = partial(pprint_thing_encoded,
                              encoding=self.encoding)
        else:
            encoder = pprint_thing
        return encoder(value)

    lhs, rhs = self.lhs, self.rhs

    if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
        v = rhs.value
        if isinstance(v, (int, float)):
            v = stringify(v)
        v = Timestamp(_ensure_decoded(v))
        if v.tz is not None:
            v = v.tz_convert('UTC')
        self.rhs.update(v)

    if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
        v = lhs.value
        if isinstance(v, (int, float)):
            v = stringify(v)
        v = Timestamp(_ensure_decoded(v))
        if v.tz is not None:
            v = v.tz_convert('UTC')
        self.lhs.update(v)
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/ops.py#L407-L436
[ "def", "convert_values", "(", "self", ")", ":", "def", "stringify", "(", "value", ")", ":", "if", "self", ".", "encoding", "is", "not", "None", ":", "encoder", "=", "partial", "(", "pprint_thing_encoded", ",", "encoding", "=", "self", ".", "encoding", ")", "else", ":", "encoder", "=", "pprint_thing", "return", "encoder", "(", "value", ")", "lhs", ",", "rhs", "=", "self", ".", "lhs", ",", "self", ".", "rhs", "if", "is_term", "(", "lhs", ")", "and", "lhs", ".", "is_datetime", "and", "is_term", "(", "rhs", ")", "and", "rhs", ".", "is_scalar", ":", "v", "=", "rhs", ".", "value", "if", "isinstance", "(", "v", ",", "(", "int", ",", "float", ")", ")", ":", "v", "=", "stringify", "(", "v", ")", "v", "=", "Timestamp", "(", "_ensure_decoded", "(", "v", ")", ")", "if", "v", ".", "tz", "is", "not", "None", ":", "v", "=", "v", ".", "tz_convert", "(", "'UTC'", ")", "self", ".", "rhs", ".", "update", "(", "v", ")", "if", "is_term", "(", "rhs", ")", "and", "rhs", ".", "is_datetime", "and", "is_term", "(", "lhs", ")", "and", "lhs", ".", "is_scalar", ":", "v", "=", "lhs", ".", "value", "if", "isinstance", "(", "v", ",", "(", "int", ",", "float", ")", ")", ":", "v", "=", "stringify", "(", "v", ")", "v", "=", "Timestamp", "(", "_ensure_decoded", "(", "v", ")", ")", "if", "v", ".", "tz", "is", "not", "None", ":", "v", "=", "v", ".", "tz_convert", "(", "'UTC'", ")", "self", ".", "lhs", ".", "update", "(", "v", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
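This conversion is what lets a query compare a datetime column against a plain string: the scalar side is promoted to a Timestamp before the comparison runs. A sketch of the user-visible effect:

>>> import pandas as pd
>>> df = pd.DataFrame({'ts': pd.date_range('2019-01-01', periods=3)})
>>> df.query('ts >= "2019-01-02"')   # string becomes Timestamp('2019-01-02')
          ts
1 2019-01-02
2 2019-01-03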
train
crosstab
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.

Parameters
----------
index : array-like, Series, or list of arrays/Series
    Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
    Values to group by in the columns.
values : array-like, optional
    Array of values to aggregate according to the factors.
    Requires `aggfunc` be specified.
rownames : sequence, default None
    If passed, must match number of row arrays passed.
colnames : sequence, default None
    If passed, must match number of column arrays passed.
aggfunc : function, optional
    If specified, requires `values` be specified as well.
margins : bool, default False
    Add row/column margins (subtotals).
margins_name : str, default 'All'
    Name of the row/column that will contain the totals
    when margins is True.

    .. versionadded:: 0.21.0

dropna : bool, default True
    Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
    Normalize by dividing all values by the sum of values.

    - If passed 'all' or `True`, will normalize over all values.
    - If passed 'index' will normalize over each row.
    - If passed 'columns' will normalize over each column.
    - If margins is `True`, will also normalize margin values.

    .. versionadded:: 0.18.1

Returns
-------
DataFrame
    Cross tabulation of the data.

See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.

Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.

Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.

In the event that there aren't overlapping indexes an empty DataFrame will
be returned.

Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
...               "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
...               "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
...               "shiny", "dull", "shiny", "shiny", "shiny"],
...              dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b   one        two
c   dull shiny dull shiny
a
bar    1     2    1     0
foo    2     2    1     2

Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.

>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0  d  e
row_0
a      1  0
b      0  1
>>> pd.crosstab(foo, bar, dropna=False)
col_0  d  e  f
row_0
a      1  0  0
b      0  1  0
c      0  0  0
pandas/core/reshape/pivot.py
def crosstab(index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins=False, margins_name='All', dropna=True, normalize=False): """ Compute a simple cross tabulation of two (or more) factors. By default computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. .. versionadded:: 0.21.0 dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. .. versionadded:: 0.18.1 Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. 
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0 """ index = com.maybe_make_list(index) columns = com.maybe_make_list(columns) rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') common_idx = _get_objs_combined_axis(index + columns, intersect=True, sort=False) data = {} data.update(zip(rownames, index)) data.update(zip(colnames, columns)) if values is None and aggfunc is not None: raise ValueError("aggfunc cannot be used without values.") if values is not None and aggfunc is None: raise ValueError("values cannot be used without an aggfunc.") from pandas import DataFrame df = DataFrame(data, index=common_idx) if values is None: df['__dummy__'] = 0 kwargs = {'aggfunc': len, 'fill_value': 0} else: df['__dummy__'] = values kwargs = {'aggfunc': aggfunc} table = df.pivot_table('__dummy__', index=rownames, columns=colnames, margins=margins, margins_name=margins_name, dropna=dropna, **kwargs) # Post-process if normalize is not False: table = _normalize(table, normalize=normalize, margins=margins, margins_name=margins_name) return table
def crosstab(index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins=False, margins_name='All', dropna=True, normalize=False): """ Compute a simple cross tabulation of two (or more) factors. By default computes a frequency table of the factors unless an array of values and an aggregation function are passed. Parameters ---------- index : array-like, Series, or list of arrays/Series Values to group by in the rows. columns : array-like, Series, or list of arrays/Series Values to group by in the columns. values : array-like, optional Array of values to aggregate according to the factors. Requires `aggfunc` be specified. rownames : sequence, default None If passed, must match number of row arrays passed. colnames : sequence, default None If passed, must match number of column arrays passed. aggfunc : function, optional If specified, requires `values` be specified as well. margins : bool, default False Add row/column margins (subtotals). margins_name : str, default 'All' Name of the row/column that will contain the totals when margins is True. .. versionadded:: 0.21.0 dropna : bool, default True Do not include columns whose entries are all NaN. normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False Normalize by dividing all values by the sum of values. - If passed 'all' or `True`, will normalize over all values. - If passed 'index' will normalize over each row. - If passed 'columns' will normalize over each column. - If margins is `True`, will also normalize margin values. .. versionadded:: 0.18.1 Returns ------- DataFrame Cross tabulation of the data. See Also -------- DataFrame.pivot : Reshape data based on column values. pivot_table : Create a pivot table as a DataFrame. Notes ----- Any Series passed will have their name attributes used unless row or column names for the cross-tabulation are specified. Any input passed containing Categorical data will have **all** of its categories included in the cross-tabulation, even if the actual data does not contain any instances of a particular category. In the event that there aren't overlapping indexes an empty DataFrame will be returned. Examples -------- >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", ... "bar", "bar", "foo", "foo", "foo"], dtype=object) >>> b = np.array(["one", "one", "one", "two", "one", "one", ... "one", "two", "two", "two", "one"], dtype=object) >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", ... "shiny", "dull", "shiny", "shiny", "shiny"], ... dtype=object) >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) b one two c dull shiny dull shiny a bar 1 2 1 0 foo 2 2 1 2 Here 'c' and 'f' are not represented in the data and will not be shown in the output because dropna is True by default. Set dropna=False to preserve categories with no data. 
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) >>> pd.crosstab(foo, bar) col_0 d e row_0 a 1 0 b 0 1 >>> pd.crosstab(foo, bar, dropna=False) col_0 d e f row_0 a 1 0 0 b 0 1 0 c 0 0 0 """ index = com.maybe_make_list(index) columns = com.maybe_make_list(columns) rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') common_idx = _get_objs_combined_axis(index + columns, intersect=True, sort=False) data = {} data.update(zip(rownames, index)) data.update(zip(colnames, columns)) if values is None and aggfunc is not None: raise ValueError("aggfunc cannot be used without values.") if values is not None and aggfunc is None: raise ValueError("values cannot be used without an aggfunc.") from pandas import DataFrame df = DataFrame(data, index=common_idx) if values is None: df['__dummy__'] = 0 kwargs = {'aggfunc': len, 'fill_value': 0} else: df['__dummy__'] = values kwargs = {'aggfunc': aggfunc} table = df.pivot_table('__dummy__', index=rownames, columns=colnames, margins=margins, margins_name=margins_name, dropna=dropna, **kwargs) # Post-process if normalize is not False: table = _normalize(table, normalize=normalize, margins=margins, margins_name=margins_name) return table
[ "Compute", "a", "simple", "cross", "tabulation", "of", "two", "(", "or", "more", ")", "factors", ".", "By", "default", "computes", "a", "frequency", "table", "of", "the", "factors", "unless", "an", "array", "of", "values", "and", "an", "aggregation", "function", "are", "passed", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/pivot.py#L391-L528
[ "def", "crosstab", "(", "index", ",", "columns", ",", "values", "=", "None", ",", "rownames", "=", "None", ",", "colnames", "=", "None", ",", "aggfunc", "=", "None", ",", "margins", "=", "False", ",", "margins_name", "=", "'All'", ",", "dropna", "=", "True", ",", "normalize", "=", "False", ")", ":", "index", "=", "com", ".", "maybe_make_list", "(", "index", ")", "columns", "=", "com", ".", "maybe_make_list", "(", "columns", ")", "rownames", "=", "_get_names", "(", "index", ",", "rownames", ",", "prefix", "=", "'row'", ")", "colnames", "=", "_get_names", "(", "columns", ",", "colnames", ",", "prefix", "=", "'col'", ")", "common_idx", "=", "_get_objs_combined_axis", "(", "index", "+", "columns", ",", "intersect", "=", "True", ",", "sort", "=", "False", ")", "data", "=", "{", "}", "data", ".", "update", "(", "zip", "(", "rownames", ",", "index", ")", ")", "data", ".", "update", "(", "zip", "(", "colnames", ",", "columns", ")", ")", "if", "values", "is", "None", "and", "aggfunc", "is", "not", "None", ":", "raise", "ValueError", "(", "\"aggfunc cannot be used without values.\"", ")", "if", "values", "is", "not", "None", "and", "aggfunc", "is", "None", ":", "raise", "ValueError", "(", "\"values cannot be used without an aggfunc.\"", ")", "from", "pandas", "import", "DataFrame", "df", "=", "DataFrame", "(", "data", ",", "index", "=", "common_idx", ")", "if", "values", "is", "None", ":", "df", "[", "'__dummy__'", "]", "=", "0", "kwargs", "=", "{", "'aggfunc'", ":", "len", ",", "'fill_value'", ":", "0", "}", "else", ":", "df", "[", "'__dummy__'", "]", "=", "values", "kwargs", "=", "{", "'aggfunc'", ":", "aggfunc", "}", "table", "=", "df", ".", "pivot_table", "(", "'__dummy__'", ",", "index", "=", "rownames", ",", "columns", "=", "colnames", ",", "margins", "=", "margins", ",", "margins_name", "=", "margins_name", ",", "dropna", "=", "dropna", ",", "*", "*", "kwargs", ")", "# Post-process", "if", "normalize", "is", "not", "False", ":", "table", "=", "_normalize", "(", "table", ",", "normalize", "=", "normalize", ",", "margins", "=", "margins", ",", "margins_name", "=", "margins_name", ")", "return", "table" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
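The crosstab docstring above describes `normalize` but gives it no doctest; a short hedged sketch using assumed data and the public pandas API:

import numpy as np
import pandas as pd

a = np.array(["foo", "foo", "bar", "bar"], dtype=object)
b = np.array(["one", "two", "one", "one"], dtype=object)

# normalize='index' divides each row by its row total, so every row sums to 1.0.
print(pd.crosstab(a, b, rownames=["a"], colnames=["b"], normalize="index"))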
TablePlotter._shape
Calculate table shape considering index levels.
pandas/util/_doctools.py
def _shape(self, df): """ Calculate table shape considering index levels. """ row, col = df.shape return row + df.columns.nlevels, col + df.index.nlevels
def _shape(self, df): """ Calculate table shape considering index levels. """ row, col = df.shape return row + df.columns.nlevels, col + df.index.nlevels
[ "Calculate", "table", "chape", "considering", "index", "levels", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L17-L23
[ "def", "_shape", "(", "self", ",", "df", ")", ":", "row", ",", "col", "=", "df", ".", "shape", "return", "row", "+", "df", ".", "columns", ".", "nlevels", ",", "col", "+", "df", ".", "index", ".", "nlevels" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
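A minimal sketch of the arithmetic in `_shape`: the drawn table needs extra rows for the column-header levels and extra columns for the index levels. The example frame is assumed.

import pandas as pd

df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
rows, cols = df.shape  # (2, 2)
# One header row and one index column are added for a flat frame.
print(rows + df.columns.nlevels, cols + df.index.nlevels)  # 3 3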
TablePlotter._get_cells
Calculate appropriate figure size based on left and right data.
pandas/util/_doctools.py
def _get_cells(self, left, right, vertical): """ Calculate appropriate figure size based on left and right data. """ if vertical: # calculate required number of cells vcells = max(sum(self._shape(l)[0] for l in left), self._shape(right)[0]) hcells = (max(self._shape(l)[1] for l in left) + self._shape(right)[1]) else: vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]]) hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]]) return hcells, vcells
def _get_cells(self, left, right, vertical): """ Calculate appropriate figure size based on left and right data. """ if vertical: # calculate required number of cells vcells = max(sum(self._shape(l)[0] for l in left), self._shape(right)[0]) hcells = (max(self._shape(l)[1] for l in left) + self._shape(right)[1]) else: vcells = max([self._shape(l)[0] for l in left] + [self._shape(right)[0]]) hcells = sum([self._shape(l)[1] for l in left] + [self._shape(right)[1]]) return hcells, vcells
[ "Calculate", "appropriate", "figure", "size", "based", "on", "left", "and", "right", "data", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L25-L41
[ "def", "_get_cells", "(", "self", ",", "left", ",", "right", ",", "vertical", ")", ":", "if", "vertical", ":", "# calculate required number of cells", "vcells", "=", "max", "(", "sum", "(", "self", ".", "_shape", "(", "l", ")", "[", "0", "]", "for", "l", "in", "left", ")", ",", "self", ".", "_shape", "(", "right", ")", "[", "0", "]", ")", "hcells", "=", "(", "max", "(", "self", ".", "_shape", "(", "l", ")", "[", "1", "]", "for", "l", "in", "left", ")", "+", "self", ".", "_shape", "(", "right", ")", "[", "1", "]", ")", "else", ":", "vcells", "=", "max", "(", "[", "self", ".", "_shape", "(", "l", ")", "[", "0", "]", "for", "l", "in", "left", "]", "+", "[", "self", ".", "_shape", "(", "right", ")", "[", "0", "]", "]", ")", "hcells", "=", "sum", "(", "[", "self", ".", "_shape", "(", "l", ")", "[", "1", "]", "for", "l", "in", "left", "]", "+", "[", "self", ".", "_shape", "(", "right", ")", "[", "1", "]", "]", ")", "return", "hcells", ",", "vcells" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
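To make the horizontal branch of `_get_cells` concrete, a sketch that reimplements the `_shape` arithmetic on assumed frames: total width is the sum of all table widths, total height is the tallest table. The helper name `shape_with_levels` is invented for illustration.

import pandas as pd

def shape_with_levels(df):
    # Same arithmetic as TablePlotter._shape above.
    r, c = df.shape
    return r + df.columns.nlevels, c + df.index.nlevels

left = [pd.DataFrame({"A": [1, 2]}), pd.DataFrame({"B": [3, 4, 5]})]
right = pd.DataFrame({"C": [6]})
vcells = max(shape_with_levels(df)[0] for df in left + [right])
hcells = sum(shape_with_levels(df)[1] for df in left + [right])
print(hcells, vcells)  # 6 4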
TablePlotter.plot
Plot left / right DataFrames in specified layout. Parameters ---------- left : list of DataFrames before operation is applied right : DataFrame of operation result labels : list of str to be drawn as titles of left DataFrames vertical : bool If True, use vertical layout. If False, use horizontal layout.
pandas/util/_doctools.py
def plot(self, left, right, labels=None, vertical=True): """ Plot left / right DataFrames in specified layout. Parameters ---------- left : list of DataFrames before operation is applied right : DataFrame of operation result labels : list of str to be drawn as titles of left DataFrames vertical : bool If True, use vertical layout. If False, use horizontal layout. """ import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec if not isinstance(left, list): left = [left] left = [self._conv(l) for l in left] right = self._conv(right) hcells, vcells = self._get_cells(left, right, vertical) if vertical: figsize = self.cell_width * hcells, self.cell_height * vcells else: # include margin for titles figsize = self.cell_width * hcells, self.cell_height * vcells fig = plt.figure(figsize=figsize) if vertical: gs = gridspec.GridSpec(len(left), hcells) # left max_left_cols = max(self._shape(l)[1] for l in left) max_left_rows = max(self._shape(l)[0] for l in left) for i, (l, label) in enumerate(zip(left, labels)): ax = fig.add_subplot(gs[i, 0:max_left_cols]) self._make_table(ax, l, title=label, height=1.0 / max_left_rows) # right ax = plt.subplot(gs[:, max_left_cols:]) self._make_table(ax, right, title='Result', height=1.05 / vcells) fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) else: max_rows = max(self._shape(df)[0] for df in left + [right]) height = 1.0 / np.max(max_rows) gs = gridspec.GridSpec(1, hcells) # left i = 0 for l, label in zip(left, labels): sp = self._shape(l) ax = fig.add_subplot(gs[0, i:i + sp[1]]) self._make_table(ax, l, title=label, height=height) i += sp[1] # right ax = plt.subplot(gs[0, i:]) self._make_table(ax, right, title='Result', height=height) fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) return fig
def plot(self, left, right, labels=None, vertical=True): """ Plot left / right DataFrames in specified layout. Parameters ---------- left : list of DataFrames before operation is applied right : DataFrame of operation result labels : list of str to be drawn as titles of left DataFrames vertical : bool If True, use vertical layout. If False, use horizontal layout. """ import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec if not isinstance(left, list): left = [left] left = [self._conv(l) for l in left] right = self._conv(right) hcells, vcells = self._get_cells(left, right, vertical) if vertical: figsize = self.cell_width * hcells, self.cell_height * vcells else: # include margin for titles figsize = self.cell_width * hcells, self.cell_height * vcells fig = plt.figure(figsize=figsize) if vertical: gs = gridspec.GridSpec(len(left), hcells) # left max_left_cols = max(self._shape(l)[1] for l in left) max_left_rows = max(self._shape(l)[0] for l in left) for i, (l, label) in enumerate(zip(left, labels)): ax = fig.add_subplot(gs[i, 0:max_left_cols]) self._make_table(ax, l, title=label, height=1.0 / max_left_rows) # right ax = plt.subplot(gs[:, max_left_cols:]) self._make_table(ax, right, title='Result', height=1.05 / vcells) fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) else: max_rows = max(self._shape(df)[0] for df in left + [right]) height = 1.0 / np.max(max_rows) gs = gridspec.GridSpec(1, hcells) # left i = 0 for l, label in zip(left, labels): sp = self._shape(l) ax = fig.add_subplot(gs[0, i:i + sp[1]]) self._make_table(ax, l, title=label, height=height) i += sp[1] # right ax = plt.subplot(gs[0, i:]) self._make_table(ax, right, title='Result', height=height) fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) return fig
[ "Plot", "left", "/", "right", "DataFrames", "in", "specified", "layout", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L43-L101
[ "def", "plot", "(", "self", ",", "left", ",", "right", ",", "labels", "=", "None", ",", "vertical", "=", "True", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "matplotlib", ".", "gridspec", "as", "gridspec", "if", "not", "isinstance", "(", "left", ",", "list", ")", ":", "left", "=", "[", "left", "]", "left", "=", "[", "self", ".", "_conv", "(", "l", ")", "for", "l", "in", "left", "]", "right", "=", "self", ".", "_conv", "(", "right", ")", "hcells", ",", "vcells", "=", "self", ".", "_get_cells", "(", "left", ",", "right", ",", "vertical", ")", "if", "vertical", ":", "figsize", "=", "self", ".", "cell_width", "*", "hcells", ",", "self", ".", "cell_height", "*", "vcells", "else", ":", "# include margin for titles", "figsize", "=", "self", ".", "cell_width", "*", "hcells", ",", "self", ".", "cell_height", "*", "vcells", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "if", "vertical", ":", "gs", "=", "gridspec", ".", "GridSpec", "(", "len", "(", "left", ")", ",", "hcells", ")", "# left", "max_left_cols", "=", "max", "(", "self", ".", "_shape", "(", "l", ")", "[", "1", "]", "for", "l", "in", "left", ")", "max_left_rows", "=", "max", "(", "self", ".", "_shape", "(", "l", ")", "[", "0", "]", "for", "l", "in", "left", ")", "for", "i", ",", "(", "l", ",", "label", ")", "in", "enumerate", "(", "zip", "(", "left", ",", "labels", ")", ")", ":", "ax", "=", "fig", ".", "add_subplot", "(", "gs", "[", "i", ",", "0", ":", "max_left_cols", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "l", ",", "title", "=", "label", ",", "height", "=", "1.0", "/", "max_left_rows", ")", "# right", "ax", "=", "plt", ".", "subplot", "(", "gs", "[", ":", ",", "max_left_cols", ":", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "right", ",", "title", "=", "'Result'", ",", "height", "=", "1.05", "/", "vcells", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.9", ",", "bottom", "=", "0.05", ",", "left", "=", "0.05", ",", "right", "=", "0.95", ")", "else", ":", "max_rows", "=", "max", "(", "self", ".", "_shape", "(", "df", ")", "[", "0", "]", "for", "df", "in", "left", "+", "[", "right", "]", ")", "height", "=", "1.0", "/", "np", ".", "max", "(", "max_rows", ")", "gs", "=", "gridspec", ".", "GridSpec", "(", "1", ",", "hcells", ")", "# left", "i", "=", "0", "for", "l", ",", "label", "in", "zip", "(", "left", ",", "labels", ")", ":", "sp", "=", "self", ".", "_shape", "(", "l", ")", "ax", "=", "fig", ".", "add_subplot", "(", "gs", "[", "0", ",", "i", ":", "i", "+", "sp", "[", "1", "]", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "l", ",", "title", "=", "label", ",", "height", "=", "height", ")", "i", "+=", "sp", "[", "1", "]", "# right", "ax", "=", "plt", ".", "subplot", "(", "gs", "[", "0", ",", "i", ":", "]", ")", "self", ".", "_make_table", "(", "ax", ",", "right", ",", "title", "=", "'Result'", ",", "height", "=", "height", ")", "fig", ".", "subplots_adjust", "(", "top", "=", "0.85", ",", "bottom", "=", "0.05", ",", "left", "=", "0.05", ",", "right", "=", "0.95", ")", "return", "fig" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
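A hedged usage sketch for `plot`; `TablePlotter` lives in the private `pandas.util._doctools` module (a documentation-build helper), so the import path is an internal detail and matplotlib must be installed. Note that `labels` should be supplied, since the loop zips over it; the output file name is hypothetical.

import pandas as pd
from pandas.util._doctools import TablePlotter  # private helper

left = [pd.DataFrame({"key": ["a", "b"], "x": [1, 2]})]
right = pd.DataFrame({"key": ["a", "b"], "x": [1, 2], "y": [3, 4]})
fig = TablePlotter().plot(left, right, labels=["input"], vertical=False)
fig.savefig("tables.png")  # hypothetical output path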
TablePlotter._conv
Convert each input to a form appropriate for table output
pandas/util/_doctools.py
def _conv(self, data): """Convert each input to a form appropriate for table output""" if isinstance(data, pd.Series): if data.name is None: data = data.to_frame(name='') else: data = data.to_frame() data = data.fillna('NaN') return data
def _conv(self, data): """Convert each input to a form appropriate for table output""" if isinstance(data, pd.Series): if data.name is None: data = data.to_frame(name='') else: data = data.to_frame() data = data.fillna('NaN') return data
[ "Convert", "each", "input", "to", "appropriate", "for", "table", "outplot" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_doctools.py#L103-L111
[ "def", "_conv", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "pd", ".", "Series", ")", ":", "if", "data", ".", "name", "is", "None", ":", "data", "=", "data", ".", "to_frame", "(", "name", "=", "''", ")", "else", ":", "data", "=", "data", ".", "to_frame", "(", ")", "data", "=", "data", ".", "fillna", "(", "'NaN'", ")", "return", "data" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
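What `_conv` does, in isolation: promote a Series to a one-column frame (blank column name if the Series is unnamed) and render missing values as the literal string 'NaN' so they draw in the table. A small sketch with assumed data:

import pandas as pd

s = pd.Series([1.0, None, 3.0])
# Unnamed Series -> one-column frame with an empty column label;
# missing values become the string 'NaN' for display.
print(s.to_frame(name="").fillna("NaN"))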
cut
Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.23.0 Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... 
# doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) [bad, good, medium, medium, good, bad] Categories (3, object): [bad < medium < good] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 4.0 dtype: float64, array([0, 2, 4, 6, 8])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 3.0 dtype: float64, array([0, 2, 4, 6, 8])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0, 1], NaN, (2, 3], (4, 5]] Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
pandas/core/reshape/tile.py
def cut(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False, duplicates='raise'): """ Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.23.0 Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... 
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) [bad, good, medium, medium, good, bad] Categories (3, object): [bad < medium < good] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 4.0 dtype: float64, array([0, 2, 4, 6, 8])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 3.0 dtype: float64, array([0, 2, 4, 6, 8])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. 
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0, 1], NaN, (2, 3], (4, 5]] Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]] """ # NOTE: this binning code is changed a bit from histogram for var(x) == 0 # for handling the cut for datetime and timedelta objects x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if not np.iterable(bins): if is_scalar(bins) and bins < 1: raise ValueError("`bins` should be a positive integer.") try: # for array-like sz = x.size except AttributeError: x = np.asarray(x) sz = x.size if sz == 0: raise ValueError('Cannot cut empty array') rng = (nanops.nanmin(x), nanops.nanmax(x)) mn, mx = [mi + 0.0 for mi in rng] if np.isinf(mn) or np.isinf(mx): # GH 24314 raise ValueError('cannot specify integer `bins` when input data ' 'contains infinity') elif mn == mx: # adjust end points before binning mn -= .001 * abs(mn) if mn != 0 else .001 mx += .001 * abs(mx) if mx != 0 else .001 bins = np.linspace(mn, mx, bins + 1, endpoint=True) else: # adjust end points after binning bins = np.linspace(mn, mx, bins + 1, endpoint=True) adj = (mx - mn) * 0.001 # 0.1% of the range if right: bins[0] -= adj else: bins[-1] += adj elif isinstance(bins, IntervalIndex): if bins.is_overlapping: raise ValueError('Overlapping IntervalIndex is not accepted.') else: if is_datetime64tz_dtype(bins): bins = np.asarray(bins, dtype=_NS_DTYPE) else: bins = np.asarray(bins) bins = _convert_bin_to_numeric_type(bins, dtype) # GH 26045: cast to float64 to avoid an overflow if (np.diff(bins.astype('float64')) < 0).any(): raise ValueError('bins must increase monotonically.') fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
def cut(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False, duplicates='raise'): """ Bin values into discrete intervals. Use `cut` when you need to segment and sort data values into bins. This function is also useful for going from a continuous variable to a categorical variable. For example, `cut` could convert ages to groups of age ranges. Supports binning into an equal number of bins, or a pre-specified array of bins. Parameters ---------- x : array-like The input array to be binned. Must be 1-dimensional. bins : int, sequence of scalars, or IntervalIndex The criteria to bin by. * int : Defines the number of equal-width bins in the range of `x`. The range of `x` is extended by .1% on each side to include the minimum and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. * IntervalIndex : Defines the exact bins to be used. Note that IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. If ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` indicate (1,2], (2,3], (3,4]. This argument is ignored when `bins` is an IntervalIndex. labels : array or bool, optional Specifies the labels for the returned bins. Must be the same length as the resulting bins. If False, returns only integer indicators of the bins. This affects the type of the output container (see below). This argument is ignored when `bins` is an IntervalIndex. retbins : bool, default False Whether to return the bins or not. Useful when bins is provided as a scalar. precision : int, default 3 The precision at which to store and display the bins labels. include_lowest : bool, default False Whether the first interval should be left-inclusive or not. duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.23.0 Returns ------- out : Categorical, Series, or ndarray An array-like object representing the respective bin for each value of `x`. The type depends on the value of `labels`. * True (default) : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are Interval dtype. * sequence of scalars : returns a Series for Series `x` or a Categorical for all other inputs. The values stored within are whatever the type in the sequence is. * False : returns an ndarray of integers. bins : numpy.ndarray or IntervalIndex. The computed or specified bins. Only returned when `retbins=True`. For scalar or sequence `bins`, this is an ndarray with the computed bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For an IntervalIndex `bins`, this is equal to `bins`. See Also -------- qcut : Discretize variable into equal-sized buckets based on rank or based on sample quantiles. Categorical : Array type for storing data that come from a fixed set of values. Series : One-dimensional array with axis labels (including time series). IntervalIndex : Immutable Index implementing an ordered, sliceable set. Notes ----- Any NA values will be NA in the result. Out of bounds values will be NA in the resulting Series or Categorical object. Examples -------- Discretize into three equal-sized bins. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) ... # doctest: +ELLIPSIS [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... 
>>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) ... # doctest: +ELLIPSIS ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ... array([0.994, 3. , 5. , 7. ])) Discovers the same bins, but assign them specific labels. Notice that the returned Categorical's categories are `labels` and is ordered. >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), ... 3, labels=["bad", "medium", "good"]) [bad, good, medium, medium, good, bad] Categories (3, object): [bad < medium < good] ``labels=False`` implies you just want the bins back. >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) array([0, 1, 1, 3]) Passing a Series as an input returns a Series with categorical dtype: >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, 3) ... # doctest: +ELLIPSIS a (1.992, 4.667] b (1.992, 4.667] c (4.667, 7.333] d (7.333, 10.0] e (7.333, 10.0] dtype: category Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ... Passing a Series as an input returns a Series with mapping value. It is used to map numerically to intervals based on bins. >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), ... index=['a', 'b', 'c', 'd', 'e']) >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 4.0 dtype: float64, array([0, 2, 4, 6, 8])) Use `drop` optional when bins is not unique >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, ... right=False, duplicates='drop') ... # doctest: +ELLIPSIS (a 0.0 b 1.0 c 2.0 d 3.0 e 3.0 dtype: float64, array([0, 2, 4, 6, 8])) Passing an IntervalIndex for `bins` results in those categories exactly. Notice that values not covered by the IntervalIndex are set to NaN. 0 is to the left of the first bin (which is closed on the right), and 1.5 falls between two bins. 
>>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) [NaN, (0, 1], NaN, (2, 3], (4, 5]] Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]] """ # NOTE: this binning code is changed a bit from histogram for var(x) == 0 # for handling the cut for datetime and timedelta objects x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if not np.iterable(bins): if is_scalar(bins) and bins < 1: raise ValueError("`bins` should be a positive integer.") try: # for array-like sz = x.size except AttributeError: x = np.asarray(x) sz = x.size if sz == 0: raise ValueError('Cannot cut empty array') rng = (nanops.nanmin(x), nanops.nanmax(x)) mn, mx = [mi + 0.0 for mi in rng] if np.isinf(mn) or np.isinf(mx): # GH 24314 raise ValueError('cannot specify integer `bins` when input data ' 'contains infinity') elif mn == mx: # adjust end points before binning mn -= .001 * abs(mn) if mn != 0 else .001 mx += .001 * abs(mx) if mx != 0 else .001 bins = np.linspace(mn, mx, bins + 1, endpoint=True) else: # adjust end points after binning bins = np.linspace(mn, mx, bins + 1, endpoint=True) adj = (mx - mn) * 0.001 # 0.1% of the range if right: bins[0] -= adj else: bins[-1] += adj elif isinstance(bins, IntervalIndex): if bins.is_overlapping: raise ValueError('Overlapping IntervalIndex is not accepted.') else: if is_datetime64tz_dtype(bins): bins = np.asarray(bins, dtype=_NS_DTYPE) else: bins = np.asarray(bins) bins = _convert_bin_to_numeric_type(bins, dtype) # GH 26045: cast to float64 to avoid an overflow if (np.diff(bins.astype('float64')) < 0).any(): raise ValueError('bins must increase monotonically.') fac, bins = _bins_to_cuts(x, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
[ "Bin", "values", "into", "discrete", "intervals", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L23-L245
[ "def", "cut", "(", "x", ",", "bins", ",", "right", "=", "True", ",", "labels", "=", "None", ",", "retbins", "=", "False", ",", "precision", "=", "3", ",", "include_lowest", "=", "False", ",", "duplicates", "=", "'raise'", ")", ":", "# NOTE: this binning code is changed a bit from histogram for var(x) == 0", "# for handling the cut for datetime and timedelta objects", "x_is_series", ",", "series_index", ",", "name", ",", "x", "=", "_preprocess_for_cut", "(", "x", ")", "x", ",", "dtype", "=", "_coerce_to_type", "(", "x", ")", "if", "not", "np", ".", "iterable", "(", "bins", ")", ":", "if", "is_scalar", "(", "bins", ")", "and", "bins", "<", "1", ":", "raise", "ValueError", "(", "\"`bins` should be a positive integer.\"", ")", "try", ":", "# for array-like", "sz", "=", "x", ".", "size", "except", "AttributeError", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "sz", "=", "x", ".", "size", "if", "sz", "==", "0", ":", "raise", "ValueError", "(", "'Cannot cut empty array'", ")", "rng", "=", "(", "nanops", ".", "nanmin", "(", "x", ")", ",", "nanops", ".", "nanmax", "(", "x", ")", ")", "mn", ",", "mx", "=", "[", "mi", "+", "0.0", "for", "mi", "in", "rng", "]", "if", "np", ".", "isinf", "(", "mn", ")", "or", "np", ".", "isinf", "(", "mx", ")", ":", "# GH 24314", "raise", "ValueError", "(", "'cannot specify integer `bins` when input data '", "'contains infinity'", ")", "elif", "mn", "==", "mx", ":", "# adjust end points before binning", "mn", "-=", ".001", "*", "abs", "(", "mn", ")", "if", "mn", "!=", "0", "else", ".001", "mx", "+=", ".001", "*", "abs", "(", "mx", ")", "if", "mx", "!=", "0", "else", ".001", "bins", "=", "np", ".", "linspace", "(", "mn", ",", "mx", ",", "bins", "+", "1", ",", "endpoint", "=", "True", ")", "else", ":", "# adjust end points after binning", "bins", "=", "np", ".", "linspace", "(", "mn", ",", "mx", ",", "bins", "+", "1", ",", "endpoint", "=", "True", ")", "adj", "=", "(", "mx", "-", "mn", ")", "*", "0.001", "# 0.1% of the range", "if", "right", ":", "bins", "[", "0", "]", "-=", "adj", "else", ":", "bins", "[", "-", "1", "]", "+=", "adj", "elif", "isinstance", "(", "bins", ",", "IntervalIndex", ")", ":", "if", "bins", ".", "is_overlapping", ":", "raise", "ValueError", "(", "'Overlapping IntervalIndex is not accepted.'", ")", "else", ":", "if", "is_datetime64tz_dtype", "(", "bins", ")", ":", "bins", "=", "np", ".", "asarray", "(", "bins", ",", "dtype", "=", "_NS_DTYPE", ")", "else", ":", "bins", "=", "np", ".", "asarray", "(", "bins", ")", "bins", "=", "_convert_bin_to_numeric_type", "(", "bins", ",", "dtype", ")", "# GH 26045: cast to float64 to avoid an overflow", "if", "(", "np", ".", "diff", "(", "bins", ".", "astype", "(", "'float64'", ")", ")", "<", "0", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "'bins must increase monotonically.'", ")", "fac", ",", "bins", "=", "_bins_to_cuts", "(", "x", ",", "bins", ",", "right", "=", "right", ",", "labels", "=", "labels", ",", "precision", "=", "precision", ",", "include_lowest", "=", "include_lowest", ",", "dtype", "=", "dtype", ",", "duplicates", "=", "duplicates", ")", "return", "_postprocess_for_cut", "(", "fac", ",", "bins", ",", "retbins", ",", "x_is_series", ",", "series_index", ",", "name", ",", "dtype", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
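The body of `cut` routes datetime and timedelta input through `_coerce_to_type` (described in a record further down), so an integer `bins` count also works directly on timestamps; a short sketch with assumed data:

import pandas as pd

# Datetime input is coerced to int64 nanoseconds internally, so an
# integer bin count can be applied to timestamps.
ts = pd.Series(pd.date_range("2019-01-01", periods=6, freq="D"))
print(pd.cut(ts, bins=3))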
qcut
Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.20.0 Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3])
pandas/core/reshape/tile.py
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): """ Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.20.0 Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3]) """ x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if is_integer(q): quantiles = np.linspace(0, 1, q + 1) else: quantiles = q bins = algos.quantile(x, quantiles) fac, bins = _bins_to_cuts(x, bins, labels=labels, precision=precision, include_lowest=True, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): """ Quantile-based discretization function. Discretize variable into equal-sized buckets based on rank or based on sample quantiles. For example 1000 values for 10 quantiles would produce a Categorical object indicating quantile membership for each data point. Parameters ---------- x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles labels : array or boolean, default None Used as labels for the resulting bins. Must be of the same length as the resulting bins. If False, return only integer indicators of the bins. retbins : bool, optional Whether to return the (bins, labels) or not. Can be useful if bins is given as a scalar. precision : int, optional The precision at which to store and display the bins labels duplicates : {default 'raise', 'drop'}, optional If bin edges are not unique, raise ValueError or drop non-uniques. .. versionadded:: 0.20.0 Returns ------- out : Categorical or Series or array of integers if labels is False The return type (Categorical or Series) depends on the input: a Series of type category if input is a Series else Categorical. Bins are represented as categories when categorical data is returned. bins : ndarray of floats Returned only if `retbins` is True. Notes ----- Out of bounds values will be NA in the resulting Categorical object Examples -------- >>> pd.qcut(range(5), 4) ... # doctest: +ELLIPSIS [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ... >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) ... # doctest: +SKIP [good, good, medium, bad, bad] Categories (3, object): [good < medium < bad] >>> pd.qcut(range(5), 4, labels=False) array([0, 0, 1, 2, 3]) """ x_is_series, series_index, name, x = _preprocess_for_cut(x) x, dtype = _coerce_to_type(x) if is_integer(q): quantiles = np.linspace(0, 1, q + 1) else: quantiles = q bins = algos.quantile(x, quantiles) fac, bins = _bins_to_cuts(x, bins, labels=labels, precision=precision, include_lowest=True, dtype=dtype, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype)
[ "Quantile", "-", "based", "discretization", "function", ".", "Discretize", "variable", "into", "equal", "-", "sized", "buckets", "based", "on", "rank", "or", "based", "on", "sample", "quantiles", ".", "For", "example", "1000", "values", "for", "10", "quantiles", "would", "produce", "a", "Categorical", "object", "indicating", "quantile", "membership", "for", "each", "data", "point", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L248-L317
[ "def", "qcut", "(", "x", ",", "q", ",", "labels", "=", "None", ",", "retbins", "=", "False", ",", "precision", "=", "3", ",", "duplicates", "=", "'raise'", ")", ":", "x_is_series", ",", "series_index", ",", "name", ",", "x", "=", "_preprocess_for_cut", "(", "x", ")", "x", ",", "dtype", "=", "_coerce_to_type", "(", "x", ")", "if", "is_integer", "(", "q", ")", ":", "quantiles", "=", "np", ".", "linspace", "(", "0", ",", "1", ",", "q", "+", "1", ")", "else", ":", "quantiles", "=", "q", "bins", "=", "algos", ".", "quantile", "(", "x", ",", "quantiles", ")", "fac", ",", "bins", "=", "_bins_to_cuts", "(", "x", ",", "bins", ",", "labels", "=", "labels", ",", "precision", "=", "precision", ",", "include_lowest", "=", "True", ",", "dtype", "=", "dtype", ",", "duplicates", "=", "duplicates", ")", "return", "_postprocess_for_cut", "(", "fac", ",", "bins", ",", "retbins", ",", "x_is_series", ",", "series_index", ",", "name", ",", "dtype", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
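The `duplicates` parameter of `qcut` has no doctest above; a small sketch of the case it exists for, with assumed data: repeated values make quantile edges collide, and `duplicates='drop'` keeps `qcut` from raising.

import pandas as pd

s = pd.Series([0, 0, 0, 0, 1, 2, 3])
# The 0.25 and 0.5 quantiles are both 0, so the bin edges are non-unique;
# 'drop' discards the duplicate edges instead of raising ValueError.
print(pd.qcut(s, 4, labels=False, duplicates="drop"))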
_coerce_to_type
If the passed data is of datetime/timedelta type, this method converts it to numeric so that the cut method can handle it
pandas/core/reshape/tile.py
def _coerce_to_type(x): """ If the passed data is of datetime/timedelta type, this method converts it to numeric so that the cut method can handle it """ dtype = None if is_datetime64tz_dtype(x): dtype = x.dtype elif is_datetime64_dtype(x): x = to_datetime(x) dtype = np.dtype('datetime64[ns]') elif is_timedelta64_dtype(x): x = to_timedelta(x) dtype = np.dtype('timedelta64[ns]') if dtype is not None: # GH 19768: force NaT to NaN during integer conversion x = np.where(x.notna(), x.view(np.int64), np.nan) return x, dtype
def _coerce_to_type(x): """ If the passed data is of datetime/timedelta type, this method converts it to numeric so that the cut method can handle it """ dtype = None if is_datetime64tz_dtype(x): dtype = x.dtype elif is_datetime64_dtype(x): x = to_datetime(x) dtype = np.dtype('datetime64[ns]') elif is_timedelta64_dtype(x): x = to_timedelta(x) dtype = np.dtype('timedelta64[ns]') if dtype is not None: # GH 19768: force NaT to NaN during integer conversion x = np.where(x.notna(), x.view(np.int64), np.nan) return x, dtype
[ "if", "the", "passed", "data", "is", "of", "datetime", "/", "timedelta", "type", "this", "method", "converts", "it", "to", "numeric", "so", "that", "cut", "method", "can", "handle", "it" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L377-L398
[ "def", "_coerce_to_type", "(", "x", ")", ":", "dtype", "=", "None", "if", "is_datetime64tz_dtype", "(", "x", ")", ":", "dtype", "=", "x", ".", "dtype", "elif", "is_datetime64_dtype", "(", "x", ")", ":", "x", "=", "to_datetime", "(", "x", ")", "dtype", "=", "np", ".", "dtype", "(", "'datetime64[ns]'", ")", "elif", "is_timedelta64_dtype", "(", "x", ")", ":", "x", "=", "to_timedelta", "(", "x", ")", "dtype", "=", "np", ".", "dtype", "(", "'timedelta64[ns]'", ")", "if", "dtype", "is", "not", "None", ":", "# GH 19768: force NaT to NaN during integer conversion", "x", "=", "np", ".", "where", "(", "x", ".", "notna", "(", ")", ",", "x", ".", "view", "(", "np", ".", "int64", ")", ",", "np", ".", "nan", ")", "return", "x", ",", "dtype" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
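The coercion in isolation, sketched with assumed input: datetimes become int64 nanosecond counts, and NaT is forced to NaN (the GH 19768 note in the code) so the numeric binning code treats it as missing.

import numpy as np
import pandas as pd

x = pd.Series(pd.to_datetime(["2019-01-01", None]))
# view(int64) reinterprets timestamps as nanosecond counts; NaT's
# sentinel value is replaced by NaN via the notna() mask.
print(np.where(x.notna(), x.view(np.int64), np.nan))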
_convert_bin_to_numeric_type
If the passed bins are of datetime/timedelta type, this method converts them to integers Parameters ---------- bins : list-like of bins dtype : dtype of data Raises ------ ValueError if bins are not of a dtype compatible with `dtype`
pandas/core/reshape/tile.py
def _convert_bin_to_numeric_type(bins, dtype): """ if the passed bin is of datetime/timedelta type, this method converts it to integer Parameters ---------- bins : list-like of bins dtype : dtype of data Raises ------ ValueError if bins are not of a compat dtype to dtype """ bins_dtype = infer_dtype(bins, skipna=False) if is_timedelta64_dtype(dtype): if bins_dtype in ['timedelta', 'timedelta64']: bins = to_timedelta(bins).view(np.int64) else: raise ValueError("bins must be of timedelta64 dtype") elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if bins_dtype in ['datetime', 'datetime64']: bins = to_datetime(bins).view(np.int64) else: raise ValueError("bins must be of datetime64 dtype") return bins
def _convert_bin_to_numeric_type(bins, dtype): """ if the passed bin is of datetime/timedelta type, this method converts it to integer Parameters ---------- bins : list-like of bins dtype : dtype of data Raises ------ ValueError if bins are not of a compat dtype to dtype """ bins_dtype = infer_dtype(bins, skipna=False) if is_timedelta64_dtype(dtype): if bins_dtype in ['timedelta', 'timedelta64']: bins = to_timedelta(bins).view(np.int64) else: raise ValueError("bins must be of timedelta64 dtype") elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if bins_dtype in ['datetime', 'datetime64']: bins = to_datetime(bins).view(np.int64) else: raise ValueError("bins must be of datetime64 dtype") return bins
[ "if", "the", "passed", "bin", "is", "of", "datetime", "/", "timedelta", "type", "this", "method", "converts", "it", "to", "integer" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L401-L427
[ "def", "_convert_bin_to_numeric_type", "(", "bins", ",", "dtype", ")", ":", "bins_dtype", "=", "infer_dtype", "(", "bins", ",", "skipna", "=", "False", ")", "if", "is_timedelta64_dtype", "(", "dtype", ")", ":", "if", "bins_dtype", "in", "[", "'timedelta'", ",", "'timedelta64'", "]", ":", "bins", "=", "to_timedelta", "(", "bins", ")", ".", "view", "(", "np", ".", "int64", ")", "else", ":", "raise", "ValueError", "(", "\"bins must be of timedelta64 dtype\"", ")", "elif", "is_datetime64_dtype", "(", "dtype", ")", "or", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "if", "bins_dtype", "in", "[", "'datetime'", ",", "'datetime64'", "]", ":", "bins", "=", "to_datetime", "(", "bins", ")", ".", "view", "(", "np", ".", "int64", ")", "else", ":", "raise", "ValueError", "(", "\"bins must be of datetime64 dtype\"", ")", "return", "bins" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
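A sketch of _convert_bin_to_numeric_type with the same hedged internal import, viewing datetime bin edges as int64:

import numpy as np
from pandas import to_datetime
from pandas.core.reshape.tile import _convert_bin_to_numeric_type

bins = to_datetime(['2019-01-01', '2019-06-01', '2020-01-01'])
int_bins = _convert_bin_to_numeric_type(bins, np.dtype('datetime64[ns]'))
# int_bins holds the edges as int64 nanoseconds; passing, say, numeric bins
# against a datetime64 dtype raises "bins must be of datetime64 dtype".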
train
_convert_bin_to_datelike_type
Convert bins to a DatetimeIndex or TimedeltaIndex if the original dtype is datelike Parameters ---------- bins : list-like of bins dtype : dtype of data Returns ------- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike
pandas/core/reshape/tile.py
def _convert_bin_to_datelike_type(bins, dtype): """ Convert bins to a DatetimeIndex or TimedeltaIndex if the orginal dtype is datelike Parameters ---------- bins : list-like of bins dtype : dtype of data Returns ------- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike """ if is_datetime64tz_dtype(dtype): bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz) elif is_datetime_or_timedelta_dtype(dtype): bins = Index(bins.astype(np.int64), dtype=dtype) return bins
def _convert_bin_to_datelike_type(bins, dtype): """ Convert bins to a DatetimeIndex or TimedeltaIndex if the orginal dtype is datelike Parameters ---------- bins : list-like of bins dtype : dtype of data Returns ------- bins : Array-like of bins, DatetimeIndex or TimedeltaIndex if dtype is datelike """ if is_datetime64tz_dtype(dtype): bins = to_datetime(bins.astype(np.int64), utc=True).tz_convert(dtype.tz) elif is_datetime_or_timedelta_dtype(dtype): bins = Index(bins.astype(np.int64), dtype=dtype) return bins
[ "Convert", "bins", "to", "a", "DatetimeIndex", "or", "TimedeltaIndex", "if", "the", "orginal", "dtype", "is", "datelike" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L430-L450
[ "def", "_convert_bin_to_datelike_type", "(", "bins", ",", "dtype", ")", ":", "if", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "bins", "=", "to_datetime", "(", "bins", ".", "astype", "(", "np", ".", "int64", ")", ",", "utc", "=", "True", ")", ".", "tz_convert", "(", "dtype", ".", "tz", ")", "elif", "is_datetime_or_timedelta_dtype", "(", "dtype", ")", ":", "bins", "=", "Index", "(", "bins", ".", "astype", "(", "np", ".", "int64", ")", ",", "dtype", "=", "dtype", ")", "return", "bins" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
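A sketch of the inverse direction, _convert_bin_to_datelike_type, turning integer nanosecond edges back into a DatetimeIndex (tick values are illustrative):

import numpy as np
from pandas.core.reshape.tile import _convert_bin_to_datelike_type

edges = np.array([1546300800000000000, 1577836800000000000])  # 2019-01-01, 2020-01-01
_convert_bin_to_datelike_type(edges, np.dtype('datetime64[ns]'))
# DatetimeIndex(['2019-01-01', '2020-01-01'], dtype='datetime64[ns]', freq=None)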
train
_format_labels
Based on the dtype, return our labels
pandas/core/reshape/tile.py
def _format_labels(bins, precision, right=True, include_lowest=False, dtype=None): """ based on the dtype, return our labels """ closed = 'right' if right else 'left' if is_datetime64tz_dtype(dtype): formatter = partial(Timestamp, tz=dtype.tz) adjust = lambda x: x - Timedelta('1ns') elif is_datetime64_dtype(dtype): formatter = Timestamp adjust = lambda x: x - Timedelta('1ns') elif is_timedelta64_dtype(dtype): formatter = Timedelta adjust = lambda x: x - Timedelta('1ns') else: precision = _infer_precision(precision, bins) formatter = lambda x: _round_frac(x, precision) adjust = lambda x: x - 10 ** (-precision) breaks = [formatter(b) for b in bins] labels = IntervalIndex.from_breaks(breaks, closed=closed) if right and include_lowest: # we will adjust the left hand side by precision to # account that we are all right closed v = adjust(labels[0].left) i = IntervalIndex([Interval(v, labels[0].right, closed='right')]) labels = i.append(labels[1:]) return labels
def _format_labels(bins, precision, right=True, include_lowest=False, dtype=None): """ based on the dtype, return our labels """ closed = 'right' if right else 'left' if is_datetime64tz_dtype(dtype): formatter = partial(Timestamp, tz=dtype.tz) adjust = lambda x: x - Timedelta('1ns') elif is_datetime64_dtype(dtype): formatter = Timestamp adjust = lambda x: x - Timedelta('1ns') elif is_timedelta64_dtype(dtype): formatter = Timedelta adjust = lambda x: x - Timedelta('1ns') else: precision = _infer_precision(precision, bins) formatter = lambda x: _round_frac(x, precision) adjust = lambda x: x - 10 ** (-precision) breaks = [formatter(b) for b in bins] labels = IntervalIndex.from_breaks(breaks, closed=closed) if right and include_lowest: # we will adjust the left hand side by precision to # account that we are all right closed v = adjust(labels[0].left) i = IntervalIndex([Interval(v, labels[0].right, closed='right')]) labels = i.append(labels[1:]) return labels
[ "based", "on", "the", "dtype", "return", "our", "labels" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L453-L484
[ "def", "_format_labels", "(", "bins", ",", "precision", ",", "right", "=", "True", ",", "include_lowest", "=", "False", ",", "dtype", "=", "None", ")", ":", "closed", "=", "'right'", "if", "right", "else", "'left'", "if", "is_datetime64tz_dtype", "(", "dtype", ")", ":", "formatter", "=", "partial", "(", "Timestamp", ",", "tz", "=", "dtype", ".", "tz", ")", "adjust", "=", "lambda", "x", ":", "x", "-", "Timedelta", "(", "'1ns'", ")", "elif", "is_datetime64_dtype", "(", "dtype", ")", ":", "formatter", "=", "Timestamp", "adjust", "=", "lambda", "x", ":", "x", "-", "Timedelta", "(", "'1ns'", ")", "elif", "is_timedelta64_dtype", "(", "dtype", ")", ":", "formatter", "=", "Timedelta", "adjust", "=", "lambda", "x", ":", "x", "-", "Timedelta", "(", "'1ns'", ")", "else", ":", "precision", "=", "_infer_precision", "(", "precision", ",", "bins", ")", "formatter", "=", "lambda", "x", ":", "_round_frac", "(", "x", ",", "precision", ")", "adjust", "=", "lambda", "x", ":", "x", "-", "10", "**", "(", "-", "precision", ")", "breaks", "=", "[", "formatter", "(", "b", ")", "for", "b", "in", "bins", "]", "labels", "=", "IntervalIndex", ".", "from_breaks", "(", "breaks", ",", "closed", "=", "closed", ")", "if", "right", "and", "include_lowest", ":", "# we will adjust the left hand side by precision to", "# account that we are all right closed", "v", "=", "adjust", "(", "labels", "[", "0", "]", ".", "left", ")", "i", "=", "IntervalIndex", "(", "[", "Interval", "(", "v", ",", "labels", "[", "0", "]", ".", "right", ",", "closed", "=", "'right'", ")", "]", ")", "labels", "=", "i", ".", "append", "(", "labels", "[", "1", ":", "]", ")", "return", "labels" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
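A sketch of _format_labels on plain numeric breaks; note how include_lowest nudges the first left edge down by 10**-precision so the lowest value still falls inside a right-closed interval:

import numpy as np
from pandas.core.reshape.tile import _format_labels

_format_labels(np.array([0.0, 0.5, 1.0]), precision=3, include_lowest=True)
# IntervalIndex([(-0.001, 0.5], (0.5, 1.0]], closed='right', dtype='interval[float64]')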
train
_preprocess_for_cut
Handles preprocessing for cut where we convert the passed input to an array, strip the index information and store it separately
pandas/core/reshape/tile.py
def _preprocess_for_cut(x): """ handles preprocessing for cut where we convert passed input to array, strip the index information and store it separately """ x_is_series = isinstance(x, Series) series_index = None name = None if x_is_series: series_index = x.index name = x.name # Check that the passed array is a Pandas or Numpy object # We don't want to strip away a Pandas data-type here (e.g. datetimetz) ndim = getattr(x, 'ndim', None) if ndim is None: x = np.asarray(x) if x.ndim != 1: raise ValueError("Input array must be 1 dimensional") return x_is_series, series_index, name, x
def _preprocess_for_cut(x): """ handles preprocessing for cut where we convert passed input to array, strip the index information and store it separately """ x_is_series = isinstance(x, Series) series_index = None name = None if x_is_series: series_index = x.index name = x.name # Check that the passed array is a Pandas or Numpy object # We don't want to strip away a Pandas data-type here (e.g. datetimetz) ndim = getattr(x, 'ndim', None) if ndim is None: x = np.asarray(x) if x.ndim != 1: raise ValueError("Input array must be 1 dimensional") return x_is_series, series_index, name, x
[ "handles", "preprocessing", "for", "cut", "where", "we", "convert", "passed", "input", "to", "array", "strip", "the", "index", "information", "and", "store", "it", "separately" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L487-L509
[ "def", "_preprocess_for_cut", "(", "x", ")", ":", "x_is_series", "=", "isinstance", "(", "x", ",", "Series", ")", "series_index", "=", "None", "name", "=", "None", "if", "x_is_series", ":", "series_index", "=", "x", ".", "index", "name", "=", "x", ".", "name", "# Check that the passed array is a Pandas or Numpy object", "# We don't want to strip away a Pandas data-type here (e.g. datetimetz)", "ndim", "=", "getattr", "(", "x", ",", "'ndim'", ",", "None", ")", "if", "ndim", "is", "None", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", "if", "x", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"Input array must be 1 dimensional\"", ")", "return", "x_is_series", ",", "series_index", ",", "name", ",", "x" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
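A sketch of _preprocess_for_cut, which peels the index and name off a Series so they can be reattached later:

import pandas as pd
from pandas.core.reshape.tile import _preprocess_for_cut

s = pd.Series([1, 2, 3], name='x')
x_is_series, index, name, values = _preprocess_for_cut(s)
# (True, the original index, 'x', the Series itself since it already has ndim == 1);
# a 2-D input would raise ValueError.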
train
_postprocess_for_cut
Handles post-processing for the cut method where we recombine the index information if the originally passed datatype was a Series
pandas/core/reshape/tile.py
def _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype): """ handles post processing for the cut method where we combine the index information if the originally passed datatype was a series """ if x_is_series: fac = Series(fac, index=series_index, name=name) if not retbins: return fac bins = _convert_bin_to_datelike_type(bins, dtype) return fac, bins
def _postprocess_for_cut(fac, bins, retbins, x_is_series, series_index, name, dtype): """ handles post processing for the cut method where we combine the index information if the originally passed datatype was a series """ if x_is_series: fac = Series(fac, index=series_index, name=name) if not retbins: return fac bins = _convert_bin_to_datelike_type(bins, dtype) return fac, bins
[ "handles", "post", "processing", "for", "the", "cut", "method", "where", "we", "combine", "the", "index", "information", "if", "the", "originally", "passed", "datatype", "was", "a", "series" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L512-L527
[ "def", "_postprocess_for_cut", "(", "fac", ",", "bins", ",", "retbins", ",", "x_is_series", ",", "series_index", ",", "name", ",", "dtype", ")", ":", "if", "x_is_series", ":", "fac", "=", "Series", "(", "fac", ",", "index", "=", "series_index", ",", "name", "=", "name", ")", "if", "not", "retbins", ":", "return", "fac", "bins", "=", "_convert_bin_to_datelike_type", "(", "bins", ",", "dtype", ")", "return", "fac", ",", "bins" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
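A sketch of the matching _postprocess_for_cut step, rebuilding a Series from the stored index and name (the inputs here are hand-made stand-ins for what cut produces):

import pandas as pd
from pandas.core.reshape.tile import _postprocess_for_cut

fac = pd.Categorical(['low', 'high', 'low'])
out = _postprocess_for_cut(fac, bins=None, retbins=False, x_is_series=True,
                           series_index=pd.Index([10, 20, 30]), name='x', dtype=None)
# out is a Series named 'x' indexed by [10, 20, 30]; bins is only returned
# (and dtype only consulted) when retbins=True.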
train
_round_frac
Round the fractional part of the given number
pandas/core/reshape/tile.py
def _round_frac(x, precision): """ Round the fractional part of the given number """ if not np.isfinite(x) or x == 0: return x else: frac, whole = np.modf(x) if whole == 0: digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision else: digits = precision return np.around(x, digits)
def _round_frac(x, precision): """ Round the fractional part of the given number """ if not np.isfinite(x) or x == 0: return x else: frac, whole = np.modf(x) if whole == 0: digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision else: digits = precision return np.around(x, digits)
[ "Round", "the", "fractional", "part", "of", "the", "given", "number" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L530-L542
[ "def", "_round_frac", "(", "x", ",", "precision", ")", ":", "if", "not", "np", ".", "isfinite", "(", "x", ")", "or", "x", "==", "0", ":", "return", "x", "else", ":", "frac", ",", "whole", "=", "np", ".", "modf", "(", "x", ")", "if", "whole", "==", "0", ":", "digits", "=", "-", "int", "(", "np", ".", "floor", "(", "np", ".", "log10", "(", "abs", "(", "frac", ")", ")", ")", ")", "-", "1", "+", "precision", "else", ":", "digits", "=", "precision", "return", "np", ".", "around", "(", "x", ",", "digits", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
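Two quick calls showing _round_frac's behaviour; when the whole part is zero it keeps `precision` significant fractional digits instead of plain decimal places:

from pandas.core.reshape.tile import _round_frac

_round_frac(0.000123456, precision=3)  # 0.000123 -- three significant fractional digits
_round_frac(12.3456, precision=3)      # 12.346   -- whole part present, ordinary rounding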
train
_infer_precision
Infer an appropriate precision for _round_frac
pandas/core/reshape/tile.py
def _infer_precision(base_precision, bins): """Infer an appropriate precision for _round_frac """ for precision in range(base_precision, 20): levels = [_round_frac(b, precision) for b in bins] if algos.unique(levels).size == bins.size: return precision return base_precision
def _infer_precision(base_precision, bins): """Infer an appropriate precision for _round_frac """ for precision in range(base_precision, 20): levels = [_round_frac(b, precision) for b in bins] if algos.unique(levels).size == bins.size: return precision return base_precision
[ "Infer", "an", "appropriate", "precision", "for", "_round_frac" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/reshape/tile.py#L545-L552
[ "def", "_infer_precision", "(", "base_precision", ",", "bins", ")", ":", "for", "precision", "in", "range", "(", "base_precision", ",", "20", ")", ":", "levels", "=", "[", "_round_frac", "(", "b", ",", "precision", ")", "for", "b", "in", "bins", "]", "if", "algos", ".", "unique", "(", "levels", ")", ".", "size", "==", "bins", ".", "size", ":", "return", "precision", "return", "base_precision" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
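A sketch of _infer_precision; it walks precisions upward until rounding keeps all bin edges distinct (the edge values are chosen to collide at the base precision):

import numpy as np
from pandas.core.reshape.tile import _infer_precision

_infer_precision(3, np.array([0.123, 0.1234]))
# 4 -- at precision 3 both edges round to 0.123, so one more digit is needed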
train
detect_console_encoding
Try to find the most capable encoding supported by the console. Slightly modified from the way IPython handles the same issue.
pandas/_config/display.py
def detect_console_encoding(): """ Try to find the most capable encoding supported by the console. slightly modified from the way IPython handles the same issue. """ global _initial_defencoding encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding except (AttributeError, IOError): pass # try again for something better if not encoding or 'ascii' in encoding.lower(): try: encoding = locale.getpreferredencoding() except Exception: pass # when all else fails. this will usually be "ascii" if not encoding or 'ascii' in encoding.lower(): encoding = sys.getdefaultencoding() # GH#3360, save the reported defencoding at import time # MPL backends may change it. Make available for debugging. if not _initial_defencoding: _initial_defencoding = sys.getdefaultencoding() return encoding
def detect_console_encoding(): """ Try to find the most capable encoding supported by the console. slightly modified from the way IPython handles the same issue. """ global _initial_defencoding encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding except (AttributeError, IOError): pass # try again for something better if not encoding or 'ascii' in encoding.lower(): try: encoding = locale.getpreferredencoding() except Exception: pass # when all else fails. this will usually be "ascii" if not encoding or 'ascii' in encoding.lower(): encoding = sys.getdefaultencoding() # GH#3360, save the reported defencoding at import time # MPL backends may change it. Make available for debugging. if not _initial_defencoding: _initial_defencoding = sys.getdefaultencoding() return encoding
[ "Try", "to", "find", "the", "most", "capable", "encoding", "supported", "by", "the", "console", ".", "slightly", "modified", "from", "the", "way", "IPython", "handles", "the", "same", "issue", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/_config/display.py#L14-L43
[ "def", "detect_console_encoding", "(", ")", ":", "global", "_initial_defencoding", "encoding", "=", "None", "try", ":", "encoding", "=", "sys", ".", "stdout", ".", "encoding", "or", "sys", ".", "stdin", ".", "encoding", "except", "(", "AttributeError", ",", "IOError", ")", ":", "pass", "# try again for something better", "if", "not", "encoding", "or", "'ascii'", "in", "encoding", ".", "lower", "(", ")", ":", "try", ":", "encoding", "=", "locale", ".", "getpreferredencoding", "(", ")", "except", "Exception", ":", "pass", "# when all else fails. this will usually be \"ascii\"", "if", "not", "encoding", "or", "'ascii'", "in", "encoding", ".", "lower", "(", ")", ":", "encoding", "=", "sys", ".", "getdefaultencoding", "(", ")", "# GH#3360, save the reported defencoding at import time", "# MPL backends may change it. Make available for debugging.", "if", "not", "_initial_defencoding", ":", "_initial_defencoding", "=", "sys", ".", "getdefaultencoding", "(", ")", "return", "encoding" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
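A usage sketch; the pandas._config.display import path matches the record's path field at the pinned sha:

from pandas._config.display import detect_console_encoding

detect_console_encoding()
# Typically 'utf-8' from sys.stdout; anything falsy or ascii-ish falls through to
# locale.getpreferredencoding() and finally sys.getdefaultencoding().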
train
_check_arg_length
Checks whether 'args' has at most as many elements as 'compat_args'. Raises a TypeError if that is not the case, as Python does when a function is called with too many arguments.
pandas/util/_validators.py
def _check_arg_length(fname, args, max_fname_arg_count, compat_args): """ Checks whether 'args' has length of at most 'compat_args'. Raises a TypeError if that is not the case, similar to in Python when a function is called with too many arguments. """ if max_fname_arg_count < 0: raise ValueError("'max_fname_arg_count' must be non-negative") if len(args) > len(compat_args): max_arg_count = len(compat_args) + max_fname_arg_count actual_arg_count = len(args) + max_fname_arg_count argument = 'argument' if max_arg_count == 1 else 'arguments' raise TypeError( "{fname}() takes at most {max_arg} {argument} " "({given_arg} given)".format( fname=fname, max_arg=max_arg_count, argument=argument, given_arg=actual_arg_count))
def _check_arg_length(fname, args, max_fname_arg_count, compat_args): """ Checks whether 'args' has length of at most 'compat_args'. Raises a TypeError if that is not the case, similar to in Python when a function is called with too many arguments. """ if max_fname_arg_count < 0: raise ValueError("'max_fname_arg_count' must be non-negative") if len(args) > len(compat_args): max_arg_count = len(compat_args) + max_fname_arg_count actual_arg_count = len(args) + max_fname_arg_count argument = 'argument' if max_arg_count == 1 else 'arguments' raise TypeError( "{fname}() takes at most {max_arg} {argument} " "({given_arg} given)".format( fname=fname, max_arg=max_arg_count, argument=argument, given_arg=actual_arg_count))
[ "Checks", "whether", "args", "has", "length", "of", "at", "most", "compat_args", ".", "Raises", "a", "TypeError", "if", "that", "is", "not", "the", "case", "similar", "to", "in", "Python", "when", "a", "function", "is", "called", "with", "too", "many", "arguments", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L10-L29
[ "def", "_check_arg_length", "(", "fname", ",", "args", ",", "max_fname_arg_count", ",", "compat_args", ")", ":", "if", "max_fname_arg_count", "<", "0", ":", "raise", "ValueError", "(", "\"'max_fname_arg_count' must be non-negative\"", ")", "if", "len", "(", "args", ")", ">", "len", "(", "compat_args", ")", ":", "max_arg_count", "=", "len", "(", "compat_args", ")", "+", "max_fname_arg_count", "actual_arg_count", "=", "len", "(", "args", ")", "+", "max_fname_arg_count", "argument", "=", "'argument'", "if", "max_arg_count", "==", "1", "else", "'arguments'", "raise", "TypeError", "(", "\"{fname}() takes at most {max_arg} {argument} \"", "\"({given_arg} given)\"", ".", "format", "(", "fname", "=", "fname", ",", "max_arg", "=", "max_arg_count", ",", "argument", "=", "argument", ",", "given_arg", "=", "actual_arg_count", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
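A sketch of _check_arg_length; note how max_fname_arg_count pads both counts in the message so it reads like CPython's own arity error (the function name here is made up):

from pandas.util._validators import _check_arg_length

try:
    _check_arg_length('sum', args=(1, 2), max_fname_arg_count=1,
                      compat_args={'axis': None})
except TypeError as err:
    print(err)  # sum() takes at most 2 arguments (3 given)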
train
_check_for_default_values
Check that the keys in `arg_val_dict` are mapped to their default values as specified in `compat_args`. Note that this function is to be called only when it has been checked that arg_val_dict.keys() is a subset of compat_args
pandas/util/_validators.py
def _check_for_default_values(fname, arg_val_dict, compat_args): """ Check that the keys in `arg_val_dict` are mapped to their default values as specified in `compat_args`. Note that this function is to be called only when it has been checked that arg_val_dict.keys() is a subset of compat_args """ for key in arg_val_dict: # try checking equality directly with '=' operator, # as comparison may have been overridden for the left # hand object try: v1 = arg_val_dict[key] v2 = compat_args[key] # check for None-ness otherwise we could end up # comparing a numpy array vs None if (v1 is not None and v2 is None) or \ (v1 is None and v2 is not None): match = False else: match = (v1 == v2) if not is_bool(match): raise ValueError("'match' is not a boolean") # could not compare them directly, so try comparison # using the 'is' operator except ValueError: match = (arg_val_dict[key] is compat_args[key]) if not match: raise ValueError(("the '{arg}' parameter is not " "supported in the pandas " "implementation of {fname}()". format(fname=fname, arg=key)))
def _check_for_default_values(fname, arg_val_dict, compat_args): """ Check that the keys in `arg_val_dict` are mapped to their default values as specified in `compat_args`. Note that this function is to be called only when it has been checked that arg_val_dict.keys() is a subset of compat_args """ for key in arg_val_dict: # try checking equality directly with '=' operator, # as comparison may have been overridden for the left # hand object try: v1 = arg_val_dict[key] v2 = compat_args[key] # check for None-ness otherwise we could end up # comparing a numpy array vs None if (v1 is not None and v2 is None) or \ (v1 is None and v2 is not None): match = False else: match = (v1 == v2) if not is_bool(match): raise ValueError("'match' is not a boolean") # could not compare them directly, so try comparison # using the 'is' operator except ValueError: match = (arg_val_dict[key] is compat_args[key]) if not match: raise ValueError(("the '{arg}' parameter is not " "supported in the pandas " "implementation of {fname}()". format(fname=fname, arg=key)))
[ "Check", "that", "the", "keys", "in", "arg_val_dict", "are", "mapped", "to", "their", "default", "values", "as", "specified", "in", "compat_args", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L32-L69
[ "def", "_check_for_default_values", "(", "fname", ",", "arg_val_dict", ",", "compat_args", ")", ":", "for", "key", "in", "arg_val_dict", ":", "# try checking equality directly with '=' operator,", "# as comparison may have been overridden for the left", "# hand object", "try", ":", "v1", "=", "arg_val_dict", "[", "key", "]", "v2", "=", "compat_args", "[", "key", "]", "# check for None-ness otherwise we could end up", "# comparing a numpy array vs None", "if", "(", "v1", "is", "not", "None", "and", "v2", "is", "None", ")", "or", "(", "v1", "is", "None", "and", "v2", "is", "not", "None", ")", ":", "match", "=", "False", "else", ":", "match", "=", "(", "v1", "==", "v2", ")", "if", "not", "is_bool", "(", "match", ")", ":", "raise", "ValueError", "(", "\"'match' is not a boolean\"", ")", "# could not compare them directly, so try comparison", "# using the 'is' operator", "except", "ValueError", ":", "match", "=", "(", "arg_val_dict", "[", "key", "]", "is", "compat_args", "[", "key", "]", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "(", "\"the '{arg}' parameter is not \"", "\"supported in the pandas \"", "\"implementation of {fname}()\"", ".", "format", "(", "fname", "=", "fname", ",", "arg", "=", "key", ")", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
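A sketch of _check_for_default_values with a made-up compat_args mapping; matching defaults pass silently, anything else raises:

from pandas.util._validators import _check_for_default_values

compat_args = {'out': None, 'dtype': None}
_check_for_default_values('take', {'out': None}, compat_args)   # fine
try:
    _check_for_default_values('take', {'out': []}, compat_args)
except ValueError as err:
    print(err)  # the 'out' parameter is not supported in the pandas implementation of take()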
train
validate_args
Checks whether the `*args` argument passed into a function has at most `len(compat_args)` elements and whether or not all of these elements in `args` are set to their default values. fname: str The name of the function being passed the `*args` parameter args: tuple The `*args` parameter passed into a function max_fname_arg_count: int The maximum number of arguments that the function `fname` can accept, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. compat_args: OrderedDict An ordered dictionary of keys and their associated default values. In order to accommodate buggy behaviour in some versions of `numpy`, where a signature displayed keyword arguments but then passed those arguments **positionally** internally when calling downstream implementations, an ordered dictionary ensures that the original order of the keyword arguments is enforced. Note that if there is only one key, a generic dict can be passed in as well. Raises ------ TypeError if `args` contains more values than there are `compat_args` ValueError if `args` contains values that do not correspond to those of the default values specified in `compat_args`
pandas/util/_validators.py
def validate_args(fname, args, max_fname_arg_count, compat_args): """ Checks whether the length of the `*args` argument passed into a function has at most `len(compat_args)` arguments and whether or not all of these elements in `args` are set to their default values. fname: str The name of the function being passed the `*args` parameter args: tuple The `*args` parameter passed into a function max_fname_arg_count: int The maximum number of arguments that the function `fname` can accept, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. compat_args: OrderedDict A ordered dictionary of keys and their associated default values. In order to accommodate buggy behaviour in some versions of `numpy`, where a signature displayed keyword arguments but then passed those arguments **positionally** internally when calling downstream implementations, an ordered dictionary ensures that the original order of the keyword arguments is enforced. Note that if there is only one key, a generic dict can be passed in as well. Raises ------ TypeError if `args` contains more values than there are `compat_args` ValueError if `args` contains values that do not correspond to those of the default values specified in `compat_args` """ _check_arg_length(fname, args, max_fname_arg_count, compat_args) # We do this so that we can provide a more informative # error message about the parameters that we are not # supporting in the pandas implementation of 'fname' kwargs = dict(zip(compat_args, args)) _check_for_default_values(fname, kwargs, compat_args)
def validate_args(fname, args, max_fname_arg_count, compat_args): """ Checks whether the length of the `*args` argument passed into a function has at most `len(compat_args)` arguments and whether or not all of these elements in `args` are set to their default values. fname: str The name of the function being passed the `*args` parameter args: tuple The `*args` parameter passed into a function max_fname_arg_count: int The maximum number of arguments that the function `fname` can accept, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. compat_args: OrderedDict A ordered dictionary of keys and their associated default values. In order to accommodate buggy behaviour in some versions of `numpy`, where a signature displayed keyword arguments but then passed those arguments **positionally** internally when calling downstream implementations, an ordered dictionary ensures that the original order of the keyword arguments is enforced. Note that if there is only one key, a generic dict can be passed in as well. Raises ------ TypeError if `args` contains more values than there are `compat_args` ValueError if `args` contains values that do not correspond to those of the default values specified in `compat_args` """ _check_arg_length(fname, args, max_fname_arg_count, compat_args) # We do this so that we can provide a more informative # error message about the parameters that we are not # supporting in the pandas implementation of 'fname' kwargs = dict(zip(compat_args, args)) _check_for_default_values(fname, kwargs, compat_args)
[ "Checks", "whether", "the", "length", "of", "the", "*", "args", "argument", "passed", "into", "a", "function", "has", "at", "most", "len", "(", "compat_args", ")", "arguments", "and", "whether", "or", "not", "all", "of", "these", "elements", "in", "args", "are", "set", "to", "their", "default", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L72-L111
[ "def", "validate_args", "(", "fname", ",", "args", ",", "max_fname_arg_count", ",", "compat_args", ")", ":", "_check_arg_length", "(", "fname", ",", "args", ",", "max_fname_arg_count", ",", "compat_args", ")", "# We do this so that we can provide a more informative", "# error message about the parameters that we are not", "# supporting in the pandas implementation of 'fname'", "kwargs", "=", "dict", "(", "zip", "(", "compat_args", ",", "args", ")", ")", "_check_for_default_values", "(", "fname", ",", "kwargs", ",", "compat_args", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
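A sketch of validate_args; an OrderedDict keeps the positional-to-name mapping deterministic, as the docstring above explains:

from collections import OrderedDict
from pandas.util._validators import validate_args

compat_args = OrderedDict([('axis', None), ('out', None)])
validate_args('argmax', args=(None,), max_fname_arg_count=1,
              compat_args=compat_args)   # passes: one arg, still at its default
# args=(1,) would raise ValueError, and three positional args would raise TypeError.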
train
_check_for_invalid_keys
Checks whether 'kwargs' contains any keys that are not in 'compat_args' and raises a TypeError if there is one.
pandas/util/_validators.py
def _check_for_invalid_keys(fname, kwargs, compat_args): """ Checks whether 'kwargs' contains any keys that are not in 'compat_args' and raises a TypeError if there is one. """ # set(dict) --> set of the dictionary's keys diff = set(kwargs) - set(compat_args) if diff: bad_arg = list(diff)[0] raise TypeError(("{fname}() got an unexpected " "keyword argument '{arg}'". format(fname=fname, arg=bad_arg)))
def _check_for_invalid_keys(fname, kwargs, compat_args): """ Checks whether 'kwargs' contains any keys that are not in 'compat_args' and raises a TypeError if there is one. """ # set(dict) --> set of the dictionary's keys diff = set(kwargs) - set(compat_args) if diff: bad_arg = list(diff)[0] raise TypeError(("{fname}() got an unexpected " "keyword argument '{arg}'". format(fname=fname, arg=bad_arg)))
[ "Checks", "whether", "kwargs", "contains", "any", "keys", "that", "are", "not", "in", "compat_args", "and", "raises", "a", "TypeError", "if", "there", "is", "one", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L114-L127
[ "def", "_check_for_invalid_keys", "(", "fname", ",", "kwargs", ",", "compat_args", ")", ":", "# set(dict) --> set of the dictionary's keys", "diff", "=", "set", "(", "kwargs", ")", "-", "set", "(", "compat_args", ")", "if", "diff", ":", "bad_arg", "=", "list", "(", "diff", ")", "[", "0", "]", "raise", "TypeError", "(", "(", "\"{fname}() got an unexpected \"", "\"keyword argument '{arg}'\"", ".", "format", "(", "fname", "=", "fname", ",", "arg", "=", "bad_arg", ")", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
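A sketch of _check_for_invalid_keys; the first stray key found is the one reported:

from pandas.util._validators import _check_for_invalid_keys

try:
    _check_for_invalid_keys('cumsum', {'axes': 1}, compat_args={'axis': None})
except TypeError as err:
    print(err)  # cumsum() got an unexpected keyword argument 'axes'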
train
validate_kwargs
Checks whether parameters passed to the **kwargs argument in a function `fname` are valid parameters as specified in `*compat_args` and whether or not they are set to their default values. Parameters ---------- fname: str The name of the function being passed the `**kwargs` parameter kwargs: dict The `**kwargs` parameter passed into `fname` compat_args: dict A dictionary of keys that `kwargs` is allowed to have and their associated default values Raises ------ TypeError if `kwargs` contains keys not in `compat_args` ValueError if `kwargs` contains keys in `compat_args` that do not map to the default values specified in `compat_args`
pandas/util/_validators.py
def validate_kwargs(fname, kwargs, compat_args): """ Checks whether parameters passed to the **kwargs argument in a function `fname` are valid parameters as specified in `*compat_args` and whether or not they are set to their default values. Parameters ---------- fname: str The name of the function being passed the `**kwargs` parameter kwargs: dict The `**kwargs` parameter passed into `fname` compat_args: dict A dictionary of keys that `kwargs` is allowed to have and their associated default values Raises ------ TypeError if `kwargs` contains keys not in `compat_args` ValueError if `kwargs` contains keys in `compat_args` that do not map to the default values specified in `compat_args` """ kwds = kwargs.copy() _check_for_invalid_keys(fname, kwargs, compat_args) _check_for_default_values(fname, kwds, compat_args)
def validate_kwargs(fname, kwargs, compat_args): """ Checks whether parameters passed to the **kwargs argument in a function `fname` are valid parameters as specified in `*compat_args` and whether or not they are set to their default values. Parameters ---------- fname: str The name of the function being passed the `**kwargs` parameter kwargs: dict The `**kwargs` parameter passed into `fname` compat_args: dict A dictionary of keys that `kwargs` is allowed to have and their associated default values Raises ------ TypeError if `kwargs` contains keys not in `compat_args` ValueError if `kwargs` contains keys in `compat_args` that do not map to the default values specified in `compat_args` """ kwds = kwargs.copy() _check_for_invalid_keys(fname, kwargs, compat_args) _check_for_default_values(fname, kwds, compat_args)
[ "Checks", "whether", "parameters", "passed", "to", "the", "**", "kwargs", "argument", "in", "a", "function", "fname", "are", "valid", "parameters", "as", "specified", "in", "*", "compat_args", "and", "whether", "or", "not", "they", "are", "set", "to", "their", "default", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L130-L157
[ "def", "validate_kwargs", "(", "fname", ",", "kwargs", ",", "compat_args", ")", ":", "kwds", "=", "kwargs", ".", "copy", "(", ")", "_check_for_invalid_keys", "(", "fname", ",", "kwargs", ",", "compat_args", ")", "_check_for_default_values", "(", "fname", ",", "kwds", ",", "compat_args", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
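A sketch of validate_kwargs, which chains the two checks above over a copy of kwargs:

from pandas.util._validators import validate_kwargs

compat_args = {'out': None, 'where': None}
validate_kwargs('clip', {'out': None}, compat_args)  # fine: key known, value at default
# {'bad': 1} would raise TypeError; {'out': 1} would raise ValueError.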
train
validate_args_and_kwargs
Checks whether parameters passed to the *args and **kwargs arguments in a function `fname` are valid parameters as specified in `compat_args` and whether or not they are set to their default values. Parameters ---------- fname: str The name of the function being passed the `**kwargs` parameter args: tuple The `*args` parameter passed into a function kwargs: dict The `**kwargs` parameter passed into `fname` max_fname_arg_count: int The maximum number of arguments that the function `fname` can accept, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. compat_args: OrderedDict An ordered dictionary of keys that `kwargs` is allowed to have and their associated default values. Note that if there is only one key, a generic dict can be passed in as well. Raises ------ TypeError if `args` contains more values than there are `compat_args` OR `kwargs` contains keys not in `compat_args` ValueError if `args` contains values not at the default value (`None`) OR `kwargs` contains keys in `compat_args` that do not map to the default value as specified in `compat_args` See Also -------- validate_args : Purely args validation. validate_kwargs : Purely kwargs validation.
pandas/util/_validators.py
def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args): """ Checks whether parameters passed to the *args and **kwargs argument in a function `fname` are valid parameters as specified in `*compat_args` and whether or not they are set to their default values. Parameters ---------- fname: str The name of the function being passed the `**kwargs` parameter args: tuple The `*args` parameter passed into a function kwargs: dict The `**kwargs` parameter passed into `fname` max_fname_arg_count: int The minimum number of arguments that the function `fname` requires, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. compat_args: OrderedDict A ordered dictionary of keys that `kwargs` is allowed to have and their associated default values. Note that if there is only one key, a generic dict can be passed in as well. Raises ------ TypeError if `args` contains more values than there are `compat_args` OR `kwargs` contains keys not in `compat_args` ValueError if `args` contains values not at the default value (`None`) `kwargs` contains keys in `compat_args` that do not map to the default value as specified in `compat_args` See Also -------- validate_args : Purely args validation. validate_kwargs : Purely kwargs validation. """ # Check that the total number of arguments passed in (i.e. # args and kwargs) does not exceed the length of compat_args _check_arg_length(fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args) # Check there is no overlap with the positional and keyword # arguments, similar to what is done in actual Python functions args_dict = dict(zip(compat_args, args)) for key in args_dict: if key in kwargs: raise TypeError("{fname}() got multiple values for keyword " "argument '{arg}'".format(fname=fname, arg=key)) kwargs.update(args_dict) validate_kwargs(fname, kwargs, compat_args)
def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args): """ Checks whether parameters passed to the *args and **kwargs argument in a function `fname` are valid parameters as specified in `*compat_args` and whether or not they are set to their default values. Parameters ---------- fname: str The name of the function being passed the `**kwargs` parameter args: tuple The `*args` parameter passed into a function kwargs: dict The `**kwargs` parameter passed into `fname` max_fname_arg_count: int The minimum number of arguments that the function `fname` requires, excluding those in `args`. Used for displaying appropriate error messages. Must be non-negative. compat_args: OrderedDict A ordered dictionary of keys that `kwargs` is allowed to have and their associated default values. Note that if there is only one key, a generic dict can be passed in as well. Raises ------ TypeError if `args` contains more values than there are `compat_args` OR `kwargs` contains keys not in `compat_args` ValueError if `args` contains values not at the default value (`None`) `kwargs` contains keys in `compat_args` that do not map to the default value as specified in `compat_args` See Also -------- validate_args : Purely args validation. validate_kwargs : Purely kwargs validation. """ # Check that the total number of arguments passed in (i.e. # args and kwargs) does not exceed the length of compat_args _check_arg_length(fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args) # Check there is no overlap with the positional and keyword # arguments, similar to what is done in actual Python functions args_dict = dict(zip(compat_args, args)) for key in args_dict: if key in kwargs: raise TypeError("{fname}() got multiple values for keyword " "argument '{arg}'".format(fname=fname, arg=key)) kwargs.update(args_dict) validate_kwargs(fname, kwargs, compat_args)
[ "Checks", "whether", "parameters", "passed", "to", "the", "*", "args", "and", "**", "kwargs", "argument", "in", "a", "function", "fname", "are", "valid", "parameters", "as", "specified", "in", "*", "compat_args", "and", "whether", "or", "not", "they", "are", "set", "to", "their", "default", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L160-L218
[ "def", "validate_args_and_kwargs", "(", "fname", ",", "args", ",", "kwargs", ",", "max_fname_arg_count", ",", "compat_args", ")", ":", "# Check that the total number of arguments passed in (i.e.", "# args and kwargs) does not exceed the length of compat_args", "_check_arg_length", "(", "fname", ",", "args", "+", "tuple", "(", "kwargs", ".", "values", "(", ")", ")", ",", "max_fname_arg_count", ",", "compat_args", ")", "# Check there is no overlap with the positional and keyword", "# arguments, similar to what is done in actual Python functions", "args_dict", "=", "dict", "(", "zip", "(", "compat_args", ",", "args", ")", ")", "for", "key", "in", "args_dict", ":", "if", "key", "in", "kwargs", ":", "raise", "TypeError", "(", "\"{fname}() got multiple values for keyword \"", "\"argument '{arg}'\"", ".", "format", "(", "fname", "=", "fname", ",", "arg", "=", "key", ")", ")", "kwargs", ".", "update", "(", "args_dict", ")", "validate_kwargs", "(", "fname", ",", "kwargs", ",", "compat_args", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
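A sketch of validate_args_and_kwargs showing the duplicate-argument check that runs before the defaults are validated:

from collections import OrderedDict
from pandas.util._validators import validate_args_and_kwargs

compat_args = OrderedDict([('axis', None), ('out', None)])
try:
    validate_args_and_kwargs('argmin', args=(0,), kwargs={'axis': 0},
                             max_fname_arg_count=1, compat_args=compat_args)
except TypeError as err:
    print(err)  # argmin() got multiple values for keyword argument 'axis'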
train
validate_bool_kwarg
Ensures that argument passed in arg_name is of type bool.
pandas/util/_validators.py
def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): raise ValueError('For argument "{arg}" expected type bool, received ' 'type {typ}.'.format(arg=arg_name, typ=type(value).__name__)) return value
def validate_bool_kwarg(value, arg_name): """ Ensures that argument passed in arg_name is of type bool. """ if not (is_bool(value) or value is None): raise ValueError('For argument "{arg}" expected type bool, received ' 'type {typ}.'.format(arg=arg_name, typ=type(value).__name__)) return value
[ "Ensures", "that", "argument", "passed", "in", "arg_name", "is", "of", "type", "bool", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L221-L227
[ "def", "validate_bool_kwarg", "(", "value", ",", "arg_name", ")", ":", "if", "not", "(", "is_bool", "(", "value", ")", "or", "value", "is", "None", ")", ":", "raise", "ValueError", "(", "'For argument \"{arg}\" expected type bool, received '", "'type {typ}.'", ".", "format", "(", "arg", "=", "arg_name", ",", "typ", "=", "type", "(", "value", ")", ".", "__name__", ")", ")", "return", "value" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
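A sketch of validate_bool_kwarg; True, False and None all pass through, anything else raises:

from pandas.util._validators import validate_bool_kwarg

validate_bool_kwarg(True, 'inplace')   # returns True
try:
    validate_bool_kwarg(1, 'inplace')
except ValueError as err:
    print(err)  # For argument "inplace" expected type bool, received type int.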
train
validate_axis_style_args
Argument handler for mixed index, columns / axis functions In an attempt to handle both `.method(index, columns)`, and `.method(arg, axis=.)`, we have to do some bad things to argument parsing. This translates all arguments to `{index=., columns=.}` style. Parameters ---------- data : DataFrame or Panel args : tuple All positional arguments from the user kwargs : dict All keyword arguments from the user arg_name, method_name : str Used for better error messages Returns ------- kwargs : dict A dictionary of keyword arguments. Doesn't modify ``kwargs`` inplace, so update them with the return value here. Examples -------- >>> df._validate_axis_style_args((str.upper,), {'columns': id}, ... 'mapper', 'rename') {'columns': <function id>, 'index': <method 'upper' of 'str' objects>} This emits a warning >>> df._validate_axis_style_args((str.upper, id), {}, ... 'mapper', 'rename') {'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
pandas/util/_validators.py
def validate_axis_style_args(data, args, kwargs, arg_name, method_name): """Argument handler for mixed index, columns / axis functions In an attempt to handle both `.method(index, columns)`, and `.method(arg, axis=.)`, we have to do some bad things to argument parsing. This translates all arguments to `{index=., columns=.}` style. Parameters ---------- data : DataFrame or Panel args : tuple All positional arguments from the user kwargs : dict All keyword arguments from the user arg_name, method_name : str Used for better error messages Returns ------- kwargs : dict A dictionary of keyword arguments. Doesn't modify ``kwargs`` inplace, so update them with the return value here. Examples -------- >>> df._validate_axis_style_args((str.upper,), {'columns': id}, ... 'mapper', 'rename') {'columns': <function id>, 'index': <method 'upper' of 'str' objects>} This emits a warning >>> df._validate_axis_style_args((str.upper, id), {}, ... 'mapper', 'rename') {'columns': <function id>, 'index': <method 'upper' of 'str' objects>} """ # TODO: Change to keyword-only args and remove all this out = {} # Goal: fill 'out' with index/columns-style arguments # like out = {'index': foo, 'columns': bar} # Start by validating for consistency if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS): msg = "Cannot specify both 'axis' and any of 'index' or 'columns'." raise TypeError(msg) # First fill with explicit values provided by the user... if arg_name in kwargs: if args: msg = ("{} got multiple values for argument " "'{}'".format(method_name, arg_name)) raise TypeError(msg) axis = data._get_axis_name(kwargs.get('axis', 0)) out[axis] = kwargs[arg_name] # More user-provided arguments, now from kwargs for k, v in kwargs.items(): try: ax = data._get_axis_name(k) except ValueError: pass else: out[ax] = v # All user-provided kwargs have been handled now. # Now we supplement with positional arguments, emitting warnings # when there's ambiguity and raising when there's conflicts if len(args) == 0: pass # It's up to the function to decide if this is valid elif len(args) == 1: axis = data._get_axis_name(kwargs.get('axis', 0)) out[axis] = args[0] elif len(args) == 2: if 'axis' in kwargs: # Unambiguously wrong msg = ("Cannot specify both 'axis' and any of 'index' " "or 'columns'") raise TypeError(msg) msg = ("Interpreting call\n\t'.{method_name}(a, b)' as " "\n\t'.{method_name}(index=a, columns=b)'.\nUse named " "arguments to remove any ambiguity. In the future, using " "positional arguments for 'index' or 'columns' will raise " " a 'TypeError'.") warnings.warn(msg.format(method_name=method_name,), FutureWarning, stacklevel=4) out[data._AXIS_NAMES[0]] = args[0] out[data._AXIS_NAMES[1]] = args[1] else: msg = "Cannot specify all of '{}', 'index', 'columns'." raise TypeError(msg.format(arg_name)) return out
def validate_axis_style_args(data, args, kwargs, arg_name, method_name): """Argument handler for mixed index, columns / axis functions In an attempt to handle both `.method(index, columns)`, and `.method(arg, axis=.)`, we have to do some bad things to argument parsing. This translates all arguments to `{index=., columns=.}` style. Parameters ---------- data : DataFrame or Panel args : tuple All positional arguments from the user kwargs : dict All keyword arguments from the user arg_name, method_name : str Used for better error messages Returns ------- kwargs : dict A dictionary of keyword arguments. Doesn't modify ``kwargs`` inplace, so update them with the return value here. Examples -------- >>> df._validate_axis_style_args((str.upper,), {'columns': id}, ... 'mapper', 'rename') {'columns': <function id>, 'index': <method 'upper' of 'str' objects>} This emits a warning >>> df._validate_axis_style_args((str.upper, id), {}, ... 'mapper', 'rename') {'columns': <function id>, 'index': <method 'upper' of 'str' objects>} """ # TODO: Change to keyword-only args and remove all this out = {} # Goal: fill 'out' with index/columns-style arguments # like out = {'index': foo, 'columns': bar} # Start by validating for consistency if 'axis' in kwargs and any(x in kwargs for x in data._AXIS_NUMBERS): msg = "Cannot specify both 'axis' and any of 'index' or 'columns'." raise TypeError(msg) # First fill with explicit values provided by the user... if arg_name in kwargs: if args: msg = ("{} got multiple values for argument " "'{}'".format(method_name, arg_name)) raise TypeError(msg) axis = data._get_axis_name(kwargs.get('axis', 0)) out[axis] = kwargs[arg_name] # More user-provided arguments, now from kwargs for k, v in kwargs.items(): try: ax = data._get_axis_name(k) except ValueError: pass else: out[ax] = v # All user-provided kwargs have been handled now. # Now we supplement with positional arguments, emitting warnings # when there's ambiguity and raising when there's conflicts if len(args) == 0: pass # It's up to the function to decide if this is valid elif len(args) == 1: axis = data._get_axis_name(kwargs.get('axis', 0)) out[axis] = args[0] elif len(args) == 2: if 'axis' in kwargs: # Unambiguously wrong msg = ("Cannot specify both 'axis' and any of 'index' " "or 'columns'") raise TypeError(msg) msg = ("Interpreting call\n\t'.{method_name}(a, b)' as " "\n\t'.{method_name}(index=a, columns=b)'.\nUse named " "arguments to remove any ambiguity. In the future, using " "positional arguments for 'index' or 'columns' will raise " " a 'TypeError'.") warnings.warn(msg.format(method_name=method_name,), FutureWarning, stacklevel=4) out[data._AXIS_NAMES[0]] = args[0] out[data._AXIS_NAMES[1]] = args[1] else: msg = "Cannot specify all of '{}', 'index', 'columns'." raise TypeError(msg.format(arg_name)) return out
[ "Argument", "handler", "for", "mixed", "index", "columns", "/", "axis", "functions" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L230-L322
[ "def", "validate_axis_style_args", "(", "data", ",", "args", ",", "kwargs", ",", "arg_name", ",", "method_name", ")", ":", "# TODO: Change to keyword-only args and remove all this", "out", "=", "{", "}", "# Goal: fill 'out' with index/columns-style arguments", "# like out = {'index': foo, 'columns': bar}", "# Start by validating for consistency", "if", "'axis'", "in", "kwargs", "and", "any", "(", "x", "in", "kwargs", "for", "x", "in", "data", ".", "_AXIS_NUMBERS", ")", ":", "msg", "=", "\"Cannot specify both 'axis' and any of 'index' or 'columns'.\"", "raise", "TypeError", "(", "msg", ")", "# First fill with explicit values provided by the user...", "if", "arg_name", "in", "kwargs", ":", "if", "args", ":", "msg", "=", "(", "\"{} got multiple values for argument \"", "\"'{}'\"", ".", "format", "(", "method_name", ",", "arg_name", ")", ")", "raise", "TypeError", "(", "msg", ")", "axis", "=", "data", ".", "_get_axis_name", "(", "kwargs", ".", "get", "(", "'axis'", ",", "0", ")", ")", "out", "[", "axis", "]", "=", "kwargs", "[", "arg_name", "]", "# More user-provided arguments, now from kwargs", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "try", ":", "ax", "=", "data", ".", "_get_axis_name", "(", "k", ")", "except", "ValueError", ":", "pass", "else", ":", "out", "[", "ax", "]", "=", "v", "# All user-provided kwargs have been handled now.", "# Now we supplement with positional arguments, emitting warnings", "# when there's ambiguity and raising when there's conflicts", "if", "len", "(", "args", ")", "==", "0", ":", "pass", "# It's up to the function to decide if this is valid", "elif", "len", "(", "args", ")", "==", "1", ":", "axis", "=", "data", ".", "_get_axis_name", "(", "kwargs", ".", "get", "(", "'axis'", ",", "0", ")", ")", "out", "[", "axis", "]", "=", "args", "[", "0", "]", "elif", "len", "(", "args", ")", "==", "2", ":", "if", "'axis'", "in", "kwargs", ":", "# Unambiguously wrong", "msg", "=", "(", "\"Cannot specify both 'axis' and any of 'index' \"", "\"or 'columns'\"", ")", "raise", "TypeError", "(", "msg", ")", "msg", "=", "(", "\"Interpreting call\\n\\t'.{method_name}(a, b)' as \"", "\"\\n\\t'.{method_name}(index=a, columns=b)'.\\nUse named \"", "\"arguments to remove any ambiguity. In the future, using \"", "\"positional arguments for 'index' or 'columns' will raise \"", "\" a 'TypeError'.\"", ")", "warnings", ".", "warn", "(", "msg", ".", "format", "(", "method_name", "=", "method_name", ",", ")", ",", "FutureWarning", ",", "stacklevel", "=", "4", ")", "out", "[", "data", ".", "_AXIS_NAMES", "[", "0", "]", "]", "=", "args", "[", "0", "]", "out", "[", "data", ".", "_AXIS_NAMES", "[", "1", "]", "]", "=", "args", "[", "1", "]", "else", ":", "msg", "=", "\"Cannot specify all of '{}', 'index', 'columns'.\"", "raise", "TypeError", "(", "msg", ".", "format", "(", "arg_name", ")", ")", "return", "out" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
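A sketch of validate_axis_style_args translating a positional mapper plus axis=1 into columns-style kwargs:

import pandas as pd
from pandas.util._validators import validate_axis_style_args

df = pd.DataFrame({'a': [1]})
validate_axis_style_args(df, (str.upper,), {'axis': 1}, 'mapper', 'rename')
# {'columns': <method 'upper' of 'str' objects>}
# Two positional values instead would emit the FutureWarning quoted in the code above.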
train
validate_fillna_kwargs
Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object
pandas/util/_validators.py
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True): """Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object """ from pandas.core.missing import clean_fill_method if value is None and method is None: raise ValueError("Must specify a fill 'value' or 'method'.") elif value is None and method is not None: method = clean_fill_method(method) elif value is not None and method is None: if validate_scalar_dict_value and isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__)) elif value is not None and method is not None: raise ValueError("Cannot specify both 'value' and 'method'.") return value, method
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True): """Validate the keyword arguments to 'fillna'. This checks that exactly one of 'value' and 'method' is specified. If 'method' is specified, this validates that it's a valid method. Parameters ---------- value, method : object The 'value' and 'method' keyword arguments for 'fillna'. validate_scalar_dict_value : bool, default True Whether to validate that 'value' is a scalar or dict. Specifically, validate that it is not a list or tuple. Returns ------- value, method : object """ from pandas.core.missing import clean_fill_method if value is None and method is None: raise ValueError("Must specify a fill 'value' or 'method'.") elif value is None and method is not None: method = clean_fill_method(method) elif value is not None and method is None: if validate_scalar_dict_value and isinstance(value, (list, tuple)): raise TypeError('"value" parameter must be a scalar or dict, but ' 'you passed a "{0}"'.format(type(value).__name__)) elif value is not None and method is not None: raise ValueError("Cannot specify both 'value' and 'method'.") return value, method
[ "Validate", "the", "keyword", "arguments", "to", "fillna", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/util/_validators.py#L325-L358
[ "def", "validate_fillna_kwargs", "(", "value", ",", "method", ",", "validate_scalar_dict_value", "=", "True", ")", ":", "from", "pandas", ".", "core", ".", "missing", "import", "clean_fill_method", "if", "value", "is", "None", "and", "method", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify a fill 'value' or 'method'.\"", ")", "elif", "value", "is", "None", "and", "method", "is", "not", "None", ":", "method", "=", "clean_fill_method", "(", "method", ")", "elif", "value", "is", "not", "None", "and", "method", "is", "None", ":", "if", "validate_scalar_dict_value", "and", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "TypeError", "(", "'\"value\" parameter must be a scalar or dict, but '", "'you passed a \"{0}\"'", ".", "format", "(", "type", "(", "value", ")", ".", "__name__", ")", ")", "elif", "value", "is", "not", "None", "and", "method", "is", "not", "None", ":", "raise", "ValueError", "(", "\"Cannot specify both 'value' and 'method'.\"", ")", "return", "value", ",", "method" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
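A minimal usage sketch of the validator above, assuming the import path from the record's path field; the sample values are illustrative, not part of the dataset row.

from pandas.util._validators import validate_fillna_kwargs

# Exactly one of 'value' and 'method' may be supplied.
validate_fillna_kwargs(0, None)        # -> (0, None)
validate_fillna_kwargs(None, 'ffill')  # -> (None, 'pad'): clean_fill_method normalizes the alias

# Supplying both, neither, or a list/tuple 'value' raises ValueError/TypeError.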
train
_maybe_process_deprecations
Potentially we might have a deprecation warning, show it but call the appropriate methods anyhow.
pandas/core/resample.py
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None): """ Potentially we might have a deprecation warning, show it but call the appropriate methods anyhow. """ if how is not None: # .resample(..., how='sum') if isinstance(how, str): method = "{0}()".format(how) # .resample(..., how=lambda x: ....) else: method = ".apply(<func>)" # if we have both a how and fill_method, then show # the following warning if fill_method is None: warnings.warn("how in .resample() is deprecated\n" "the new syntax is " ".resample(...).{method}".format( method=method), FutureWarning, stacklevel=3) r = r.aggregate(how) if fill_method is not None: # show the prior function call method = '.' + method if how is not None else '' args = "limit={0}".format(limit) if limit is not None else "" warnings.warn("fill_method is deprecated to .resample()\n" "the new syntax is .resample(...){method}" ".{fill_method}({args})".format( method=method, fill_method=fill_method, args=args), FutureWarning, stacklevel=3) if how is not None: r = getattr(r, fill_method)(limit=limit) else: r = r.aggregate(fill_method, limit=limit) return r
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None): """ Potentially we might have a deprecation warning, show it but call the appropriate methods anyhow. """ if how is not None: # .resample(..., how='sum') if isinstance(how, str): method = "{0}()".format(how) # .resample(..., how=lambda x: ....) else: method = ".apply(<func>)" # if we have both a how and fill_method, then show # the following warning if fill_method is None: warnings.warn("how in .resample() is deprecated\n" "the new syntax is " ".resample(...).{method}".format( method=method), FutureWarning, stacklevel=3) r = r.aggregate(how) if fill_method is not None: # show the prior function call method = '.' + method if how is not None else '' args = "limit={0}".format(limit) if limit is not None else "" warnings.warn("fill_method is deprecated to .resample()\n" "the new syntax is .resample(...){method}" ".{fill_method}({args})".format( method=method, fill_method=fill_method, args=args), FutureWarning, stacklevel=3) if how is not None: r = getattr(r, fill_method)(limit=limit) else: r = r.aggregate(fill_method, limit=limit) return r
[ "Potentially", "we", "might", "have", "a", "deprecation", "warning", "show", "it", "but", "call", "the", "appropriate", "methods", "anyhow", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L877-L922
[ "def", "_maybe_process_deprecations", "(", "r", ",", "how", "=", "None", ",", "fill_method", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "how", "is", "not", "None", ":", "# .resample(..., how='sum')", "if", "isinstance", "(", "how", ",", "str", ")", ":", "method", "=", "\"{0}()\"", ".", "format", "(", "how", ")", "# .resample(..., how=lambda x: ....)", "else", ":", "method", "=", "\".apply(<func>)\"", "# if we have both a how and fill_method, then show", "# the following warning", "if", "fill_method", "is", "None", ":", "warnings", ".", "warn", "(", "\"how in .resample() is deprecated\\n\"", "\"the new syntax is \"", "\".resample(...).{method}\"", ".", "format", "(", "method", "=", "method", ")", ",", "FutureWarning", ",", "stacklevel", "=", "3", ")", "r", "=", "r", ".", "aggregate", "(", "how", ")", "if", "fill_method", "is", "not", "None", ":", "# show the prior function call", "method", "=", "'.'", "+", "method", "if", "how", "is", "not", "None", "else", "''", "args", "=", "\"limit={0}\"", ".", "format", "(", "limit", ")", "if", "limit", "is", "not", "None", "else", "\"\"", "warnings", ".", "warn", "(", "\"fill_method is deprecated to .resample()\\n\"", "\"the new syntax is .resample(...){method}\"", "\".{fill_method}({args})\"", ".", "format", "(", "method", "=", "method", ",", "fill_method", "=", "fill_method", ",", "args", "=", "args", ")", ",", "FutureWarning", ",", "stacklevel", "=", "3", ")", "if", "how", "is", "not", "None", ":", "r", "=", "getattr", "(", "r", ",", "fill_method", ")", "(", "limit", "=", "limit", ")", "else", ":", "r", "=", "r", ".", "aggregate", "(", "fill_method", ",", "limit", "=", "limit", ")", "return", "r" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
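A sketch of the old-vs-new syntax this shim translates between, via the public API; illustrative only.

import pandas as pd

s = pd.Series(range(6), index=pd.date_range('2000-01-01', periods=6, freq='H'))

# Deprecated form (routed through the shim above, emitting a FutureWarning):
#   s.resample('2H', how='sum')
# Equivalent new form that the warning recommends:
s.resample('2H').sum()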
train
resample
Create a TimeGrouper and return our resampler.
pandas/core/resample.py
def resample(obj, kind=None, **kwds): """ Create a TimeGrouper and return our resampler. """ tg = TimeGrouper(**kwds) return tg._get_resampler(obj, kind=kind)
def resample(obj, kind=None, **kwds): """ Create a TimeGrouper and return our resampler. """ tg = TimeGrouper(**kwds) return tg._get_resampler(obj, kind=kind)
[ "Create", "a", "TimeGrouper", "and", "return", "our", "resampler", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1238-L1243
[ "def", "resample", "(", "obj", ",", "kind", "=", "None", ",", "*", "*", "kwds", ")", ":", "tg", "=", "TimeGrouper", "(", "*", "*", "kwds", ")", "return", "tg", ".", "_get_resampler", "(", "obj", ",", "kind", "=", "kind", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
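A usage sketch of the public entry point this helper backs; illustrative.

import pandas as pd

s = pd.Series(range(9), index=pd.date_range('2000-01-01', periods=9, freq='T'))

# Downsample into 3-minute bins and sum each bin.
s.resample('3T').sum()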
train
get_resampler_for_grouping
Return our appropriate resampler when grouping as well.
pandas/core/resample.py
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs): """ Return our appropriate resampler when grouping as well. """ # .resample uses 'on' similar to how .groupby uses 'key' kwargs['key'] = kwargs.pop('on', None) tg = TimeGrouper(freq=rule, **kwargs) resampler = tg._get_resampler(groupby.obj, kind=kind) r = resampler._get_resampler_for_grouping(groupby=groupby) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs): """ Return our appropriate resampler when grouping as well. """ # .resample uses 'on' similar to how .groupby uses 'key' kwargs['key'] = kwargs.pop('on', None) tg = TimeGrouper(freq=rule, **kwargs) resampler = tg._get_resampler(groupby.obj, kind=kind) r = resampler._get_resampler_for_grouping(groupby=groupby) return _maybe_process_deprecations(r, how=how, fill_method=fill_method, limit=limit)
[ "Return", "our", "appropriate", "resampler", "when", "grouping", "as", "well", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1249-L1264
[ "def", "get_resampler_for_grouping", "(", "groupby", ",", "rule", ",", "how", "=", "None", ",", "fill_method", "=", "None", ",", "limit", "=", "None", ",", "kind", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# .resample uses 'on' similar to how .groupby uses 'key'", "kwargs", "[", "'key'", "]", "=", "kwargs", ".", "pop", "(", "'on'", ",", "None", ")", "tg", "=", "TimeGrouper", "(", "freq", "=", "rule", ",", "*", "*", "kwargs", ")", "resampler", "=", "tg", ".", "_get_resampler", "(", "groupby", ".", "obj", ",", "kind", "=", "kind", ")", "r", "=", "resampler", ".", "_get_resampler_for_grouping", "(", "groupby", "=", "groupby", ")", "return", "_maybe_process_deprecations", "(", "r", ",", "how", "=", "how", ",", "fill_method", "=", "fill_method", ",", "limit", "=", "limit", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_get_timestamp_range_edges
Adjust the `first` Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Timestamps. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects.
pandas/core/resample.py
def _get_timestamp_range_edges(first, last, offset, closed='left', base=0): """ Adjust the `first` Timestamp to the preceeding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Timestamps. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. """ if isinstance(offset, Tick): if isinstance(offset, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). # So "pretend" the dates are naive when adjusting the endpoints tz = first.tz first = first.tz_localize(None) last = last.tz_localize(None) first, last = _adjust_dates_anchored(first, last, offset, closed=closed, base=base) if isinstance(offset, Day): first = first.tz_localize(tz) last = last.tz_localize(tz) return first, last else: first = first.normalize() last = last.normalize() if closed == 'left': first = Timestamp(offset.rollback(first)) else: first = Timestamp(first - offset) last = Timestamp(last + offset) return first, last
def _get_timestamp_range_edges(first, last, offset, closed='left', base=0): """ Adjust the `first` Timestamp to the preceeding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Timestamps. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. """ if isinstance(offset, Tick): if isinstance(offset, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). # So "pretend" the dates are naive when adjusting the endpoints tz = first.tz first = first.tz_localize(None) last = last.tz_localize(None) first, last = _adjust_dates_anchored(first, last, offset, closed=closed, base=base) if isinstance(offset, Day): first = first.tz_localize(tz) last = last.tz_localize(tz) return first, last else: first = first.normalize() last = last.normalize() if closed == 'left': first = Timestamp(offset.rollback(first)) else: first = Timestamp(first - offset) last = Timestamp(last + offset) return first, last
[ "Adjust", "the", "first", "Timestamp", "to", "the", "preceeding", "Timestamp", "that", "resides", "on", "the", "provided", "offset", ".", "Adjust", "the", "last", "Timestamp", "to", "the", "following", "Timestamp", "that", "resides", "on", "the", "provided", "offset", ".", "Input", "Timestamps", "that", "already", "reside", "on", "the", "offset", "will", "be", "adjusted", "depending", "on", "the", "type", "of", "offset", "and", "the", "closed", "parameter", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1582-L1634
[ "def", "_get_timestamp_range_edges", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "'left'", ",", "base", "=", "0", ")", ":", "if", "isinstance", "(", "offset", ",", "Tick", ")", ":", "if", "isinstance", "(", "offset", ",", "Day", ")", ":", "# _adjust_dates_anchored assumes 'D' means 24H, but first/last", "# might contain a DST transition (23H, 24H, or 25H).", "# So \"pretend\" the dates are naive when adjusting the endpoints", "tz", "=", "first", ".", "tz", "first", "=", "first", ".", "tz_localize", "(", "None", ")", "last", "=", "last", ".", "tz_localize", "(", "None", ")", "first", ",", "last", "=", "_adjust_dates_anchored", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "closed", ",", "base", "=", "base", ")", "if", "isinstance", "(", "offset", ",", "Day", ")", ":", "first", "=", "first", ".", "tz_localize", "(", "tz", ")", "last", "=", "last", ".", "tz_localize", "(", "tz", ")", "return", "first", ",", "last", "else", ":", "first", "=", "first", ".", "normalize", "(", ")", "last", "=", "last", ".", "normalize", "(", ")", "if", "closed", "==", "'left'", ":", "first", "=", "Timestamp", "(", "offset", ".", "rollback", "(", "first", ")", ")", "else", ":", "first", "=", "Timestamp", "(", "first", "-", "offset", ")", "last", "=", "Timestamp", "(", "last", "+", "offset", ")", "return", "first", ",", "last" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
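A sketch calling the helper directly; it is a private function at the pinned commit, and the commented result is an expectation, not verified output.

import pandas as pd
from pandas.core.resample import _get_timestamp_range_edges
from pandas.tseries.frequencies import to_offset

first = pd.Timestamp('2000-01-01 06:00')
last = pd.Timestamp('2000-01-02 18:00')

# For a daily Tick offset the endpoints should snap to midnight, roughly
# (2000-01-01 00:00, 2000-01-03 00:00) with the default closed='left'.
print(_get_timestamp_range_edges(first, last, to_offset('D')))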
train
_get_period_range_edges
Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Periods will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Periods. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects.
pandas/core/resample.py
def _get_period_range_edges(first, last, offset, closed='left', base=0): """ Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Periods will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Periods. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects. """ if not all(isinstance(obj, pd.Period) for obj in [first, last]): raise TypeError("'first' and 'last' must be instances of type Period") # GH 23882 first = first.to_timestamp() last = last.to_timestamp() adjust_first = not offset.onOffset(first) adjust_last = offset.onOffset(last) first, last = _get_timestamp_range_edges(first, last, offset, closed=closed, base=base) first = (first + adjust_first * offset).to_period(offset) last = (last - adjust_last * offset).to_period(offset) return first, last
def _get_period_range_edges(first, last, offset, closed='left', base=0): """ Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. offset : pd.DateOffset The dateoffset to which the Periods will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. base : int, default 0 The "origin" of the adjusted Periods. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects. """ if not all(isinstance(obj, pd.Period) for obj in [first, last]): raise TypeError("'first' and 'last' must be instances of type Period") # GH 23882 first = first.to_timestamp() last = last.to_timestamp() adjust_first = not offset.onOffset(first) adjust_last = offset.onOffset(last) first, last = _get_timestamp_range_edges(first, last, offset, closed=closed, base=base) first = (first + adjust_first * offset).to_period(offset) last = (last - adjust_last * offset).to_period(offset) return first, last
[ "Adjust", "the", "provided", "first", "and", "last", "Periods", "to", "the", "respective", "Period", "of", "the", "given", "offset", "that", "encompasses", "them", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1637-L1673
[ "def", "_get_period_range_edges", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "'left'", ",", "base", "=", "0", ")", ":", "if", "not", "all", "(", "isinstance", "(", "obj", ",", "pd", ".", "Period", ")", "for", "obj", "in", "[", "first", ",", "last", "]", ")", ":", "raise", "TypeError", "(", "\"'first' and 'last' must be instances of type Period\"", ")", "# GH 23882", "first", "=", "first", ".", "to_timestamp", "(", ")", "last", "=", "last", ".", "to_timestamp", "(", ")", "adjust_first", "=", "not", "offset", ".", "onOffset", "(", "first", ")", "adjust_last", "=", "offset", ".", "onOffset", "(", "last", ")", "first", ",", "last", "=", "_get_timestamp_range_edges", "(", "first", ",", "last", ",", "offset", ",", "closed", "=", "closed", ",", "base", "=", "base", ")", "first", "=", "(", "first", "+", "adjust_first", "*", "offset", ")", ".", "to_period", "(", "offset", ")", "last", "=", "(", "last", "-", "adjust_last", "*", "offset", ")", ".", "to_period", "(", "offset", ")", "return", "first", ",", "last" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
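A sketch of the Period variant; again a private helper at the pinned commit, with the expected result hedged.

import pandas as pd
from pandas.core.resample import _get_period_range_edges
from pandas.tseries.frequencies import to_offset

first = pd.Period('2000-01-01', freq='D')
last = pd.Period('2000-01-10', freq='D')

# Expect the enclosing weekly Periods; non-Period inputs raise TypeError (GH 23882).
print(_get_period_range_edges(first, last, to_offset('W')))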
train
asfreq
Utility frequency conversion method for Series/DataFrame.
pandas/core/resample.py
def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None): """ Utility frequency conversion method for Series/DataFrame. """ if isinstance(obj.index, PeriodIndex): if method is not None: raise NotImplementedError("'method' argument is not supported") if how is None: how = 'E' new_obj = obj.copy() new_obj.index = obj.index.asfreq(freq, how=how) elif len(obj.index) == 0: new_obj = obj.copy() new_obj.index = obj.index._shallow_copy(freq=to_offset(freq)) else: dti = date_range(obj.index[0], obj.index[-1], freq=freq) dti.name = obj.index.name new_obj = obj.reindex(dti, method=method, fill_value=fill_value) if normalize: new_obj.index = new_obj.index.normalize() return new_obj
def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None): """ Utility frequency conversion method for Series/DataFrame. """ if isinstance(obj.index, PeriodIndex): if method is not None: raise NotImplementedError("'method' argument is not supported") if how is None: how = 'E' new_obj = obj.copy() new_obj.index = obj.index.asfreq(freq, how=how) elif len(obj.index) == 0: new_obj = obj.copy() new_obj.index = obj.index._shallow_copy(freq=to_offset(freq)) else: dti = date_range(obj.index[0], obj.index[-1], freq=freq) dti.name = obj.index.name new_obj = obj.reindex(dti, method=method, fill_value=fill_value) if normalize: new_obj.index = new_obj.index.normalize() return new_obj
[ "Utility", "frequency", "conversion", "method", "for", "Series", "/", "DataFrame", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1734-L1759
[ "def", "asfreq", "(", "obj", ",", "freq", ",", "method", "=", "None", ",", "how", "=", "None", ",", "normalize", "=", "False", ",", "fill_value", "=", "None", ")", ":", "if", "isinstance", "(", "obj", ".", "index", ",", "PeriodIndex", ")", ":", "if", "method", "is", "not", "None", ":", "raise", "NotImplementedError", "(", "\"'method' argument is not supported\"", ")", "if", "how", "is", "None", ":", "how", "=", "'E'", "new_obj", "=", "obj", ".", "copy", "(", ")", "new_obj", ".", "index", "=", "obj", ".", "index", ".", "asfreq", "(", "freq", ",", "how", "=", "how", ")", "elif", "len", "(", "obj", ".", "index", ")", "==", "0", ":", "new_obj", "=", "obj", ".", "copy", "(", ")", "new_obj", ".", "index", "=", "obj", ".", "index", ".", "_shallow_copy", "(", "freq", "=", "to_offset", "(", "freq", ")", ")", "else", ":", "dti", "=", "date_range", "(", "obj", ".", "index", "[", "0", "]", ",", "obj", ".", "index", "[", "-", "1", "]", ",", "freq", "=", "freq", ")", "dti", ".", "name", "=", "obj", ".", "index", ".", "name", "new_obj", "=", "obj", ".", "reindex", "(", "dti", ",", "method", "=", "method", ",", "fill_value", "=", "fill_value", ")", "if", "normalize", ":", "new_obj", ".", "index", "=", "new_obj", ".", "index", ".", "normalize", "(", ")", "return", "new_obj" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
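The helper above implements Series/DataFrame.asfreq; a short public-API sketch, illustrative.

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0],
              index=pd.date_range('2000-01-01', periods=3, freq='D'))

s.asfreq('12H')                  # upsample; new slots are NaN
s.asfreq('12H', method='ffill')  # forward-fill the gaps
s.asfreq('12H', fill_value=0.0)  # or fill with a constant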
train
Resampler._from_selection
Is the resampling from a DataFrame column or MultiIndex level.
pandas/core/resample.py
def _from_selection(self): """ Is the resampling from a DataFrame column or MultiIndex level. """ # upsampling and PeriodIndex resampling do not work # with selection, this state used to catch and raise an error return (self.groupby is not None and (self.groupby.key is not None or self.groupby.level is not None))
def _from_selection(self): """ Is the resampling from a DataFrame column or MultiIndex level. """ # upsampling and PeriodIndex resampling do not work # with selection, this state used to catch and raise an error return (self.groupby is not None and (self.groupby.key is not None or self.groupby.level is not None))
[ "Is", "the", "resampling", "from", "a", "DataFrame", "column", "or", "MultiIndex", "level", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L135-L143
[ "def", "_from_selection", "(", "self", ")", ":", "# upsampling and PeriodIndex resampling do not work", "# with selection, this state used to catch and raise an error", "return", "(", "self", ".", "groupby", "is", "not", "None", "and", "(", "self", ".", "groupby", ".", "key", "is", "not", "None", "or", "self", ".", "groupby", ".", "level", "is", "not", "None", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Resampler._set_binner
Setup our binners. Cache these as we are an immutable object
pandas/core/resample.py
def _set_binner(self): """ Setup our binners. Cache these as we are an immutable object """ if self.binner is None: self.binner, self.grouper = self._get_binner()
def _set_binner(self): """ Setup our binners. Cache these as we are an immutable object """ if self.binner is None: self.binner, self.grouper = self._get_binner()
[ "Setup", "our", "binners", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L163-L170
[ "def", "_set_binner", "(", "self", ")", ":", "if", "self", ".", "binner", "is", "None", ":", "self", ".", "binner", ",", "self", ".", "grouper", "=", "self", ".", "_get_binner", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Resampler._get_binner
Create the BinGrouper, assume that self.set_grouper(obj) has already been called.
pandas/core/resample.py
def _get_binner(self): """ Create the BinGrouper, assume that self.set_grouper(obj) has already been called. """ binner, bins, binlabels = self._get_binner_for_time() bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer) return binner, bin_grouper
def _get_binner(self): """ Create the BinGrouper, assume that self.set_grouper(obj) has already been called. """ binner, bins, binlabels = self._get_binner_for_time() bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer) return binner, bin_grouper
[ "Create", "the", "BinGrouper", "assume", "that", "self", ".", "set_grouper", "(", "obj", ")", "has", "already", "been", "called", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L172-L180
[ "def", "_get_binner", "(", "self", ")", ":", "binner", ",", "bins", ",", "binlabels", "=", "self", ".", "_get_binner_for_time", "(", ")", "bin_grouper", "=", "BinGrouper", "(", "bins", ",", "binlabels", ",", "indexer", "=", "self", ".", "groupby", ".", "indexer", ")", "return", "binner", ",", "bin_grouper" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Resampler.transform
Call function producing a like-indexed Series on each group and return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. Returns ------- transformed : Series Examples -------- >>> resampled.transform(lambda x: (x - x.mean()) / x.std())
pandas/core/resample.py
def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group and return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. Returns ------- transformed : Series Examples -------- >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) """ return self._selected_obj.groupby(self.groupby).transform( arg, *args, **kwargs)
def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group and return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. Returns ------- transformed : Series Examples -------- >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) """ return self._selected_obj.groupby(self.groupby).transform( arg, *args, **kwargs)
[ "Call", "function", "producing", "a", "like", "-", "indexed", "Series", "on", "each", "group", "and", "return", "a", "Series", "with", "the", "transformed", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L280-L299
[ "def", "transform", "(", "self", ",", "arg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_selected_obj", ".", "groupby", "(", "self", ".", "groupby", ")", ".", "transform", "(", "arg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
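A sketch of the documented pattern, standardizing within bins while keeping the original index; illustrative.

import pandas as pd

s = pd.Series(range(6), index=pd.date_range('2000-01-01', periods=6, freq='H'))

# Each 2-hour bin is transformed independently; the result is like-indexed.
s.resample('2H').transform(lambda x: (x - x.mean()) / x.std())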
train
Resampler._gotitem
Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on
pandas/core/resample.py
def _gotitem(self, key, ndim, subset=None): """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ self._set_binner() grouper = self.grouper if subset is None: subset = self.obj grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis) # try the key selection try: return grouped[key] except KeyError: return grouped
def _gotitem(self, key, ndim, subset=None): """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ self._set_binner() grouper = self.grouper if subset is None: subset = self.obj grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis) # try the key selection try: return grouped[key] except KeyError: return grouped
[ "Sub", "-", "classes", "to", "define", ".", "Return", "a", "sliced", "object", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L307-L329
[ "def", "_gotitem", "(", "self", ",", "key", ",", "ndim", ",", "subset", "=", "None", ")", ":", "self", ".", "_set_binner", "(", ")", "grouper", "=", "self", ".", "grouper", "if", "subset", "is", "None", ":", "subset", "=", "self", ".", "obj", "grouped", "=", "groupby", "(", "subset", ",", "by", "=", "None", ",", "grouper", "=", "grouper", ",", "axis", "=", "self", ".", "axis", ")", "# try the key selection", "try", ":", "return", "grouped", "[", "key", "]", "except", "KeyError", ":", "return", "grouped" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Resampler._groupby_and_aggregate
Re-evaluate the obj with a groupby aggregation.
pandas/core/resample.py
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): """ Re-evaluate the obj with a groupby aggregation. """ if grouper is None: self._set_binner() grouper = self.grouper obj = self._selected_obj grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis) try: if isinstance(obj, ABCDataFrame) and callable(how): # Check if the function is reducing or not. result = grouped._aggregate_item_by_item(how, *args, **kwargs) else: result = grouped.aggregate(how, *args, **kwargs) except Exception: # we have a non-reducing function # try to evaluate result = grouped.apply(how, *args, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result)
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): """ Re-evaluate the obj with a groupby aggregation. """ if grouper is None: self._set_binner() grouper = self.grouper obj = self._selected_obj grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis) try: if isinstance(obj, ABCDataFrame) and callable(how): # Check if the function is reducing or not. result = grouped._aggregate_item_by_item(how, *args, **kwargs) else: result = grouped.aggregate(how, *args, **kwargs) except Exception: # we have a non-reducing function # try to evaluate result = grouped.apply(how, *args, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result)
[ "Re", "-", "evaluate", "the", "obj", "with", "a", "groupby", "aggregation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L331-L357
[ "def", "_groupby_and_aggregate", "(", "self", ",", "how", ",", "grouper", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "grouper", "is", "None", ":", "self", ".", "_set_binner", "(", ")", "grouper", "=", "self", ".", "grouper", "obj", "=", "self", ".", "_selected_obj", "grouped", "=", "groupby", "(", "obj", ",", "by", "=", "None", ",", "grouper", "=", "grouper", ",", "axis", "=", "self", ".", "axis", ")", "try", ":", "if", "isinstance", "(", "obj", ",", "ABCDataFrame", ")", "and", "callable", "(", "how", ")", ":", "# Check if the function is reducing or not.", "result", "=", "grouped", ".", "_aggregate_item_by_item", "(", "how", ",", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "result", "=", "grouped", ".", "aggregate", "(", "how", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "# we have a non-reducing function", "# try to evaluate", "result", "=", "grouped", ".", "apply", "(", "how", ",", "*", "args", ",", "*", "*", "kwargs", ")", "result", "=", "self", ".", "_apply_loffset", "(", "result", ")", "return", "self", ".", "_wrap_result", "(", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
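The aggregate-then-apply fallback above is what lets arbitrary callables work; a public-API sketch, illustrative.

import pandas as pd

s = pd.Series(range(6), index=pd.date_range('2000-01-01', periods=6, freq='H'))

# A reducing callable goes through grouped.aggregate(...); a non-reducing
# one would fall back to grouped.apply(...).
s.resample('3H').apply(lambda x: x.max() - x.min())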
train
Resampler._apply_loffset
If loffset is set, offset the result index. This is NOT an idempotent routine, it will be applied exactly once to the result. Parameters ---------- result : Series or DataFrame the result of resample
pandas/core/resample.py
def _apply_loffset(self, result): """ If loffset is set, offset the result index. This is NOT an idempotent routine, it will be applied exactly once to the result. Parameters ---------- result : Series or DataFrame the result of resample """ needs_offset = ( isinstance(self.loffset, (DateOffset, timedelta, np.timedelta64)) and isinstance(result.index, DatetimeIndex) and len(result.index) > 0 ) if needs_offset: result.index = result.index + self.loffset self.loffset = None return result
def _apply_loffset(self, result): """ If loffset is set, offset the result index. This is NOT an idempotent routine, it will be applied exactly once to the result. Parameters ---------- result : Series or DataFrame the result of resample """ needs_offset = ( isinstance(self.loffset, (DateOffset, timedelta, np.timedelta64)) and isinstance(result.index, DatetimeIndex) and len(result.index) > 0 ) if needs_offset: result.index = result.index + self.loffset self.loffset = None return result
[ "If", "loffset", "is", "set", "offset", "the", "result", "index", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L359-L383
[ "def", "_apply_loffset", "(", "self", ",", "result", ")", ":", "needs_offset", "=", "(", "isinstance", "(", "self", ".", "loffset", ",", "(", "DateOffset", ",", "timedelta", ",", "np", ".", "timedelta64", ")", ")", "and", "isinstance", "(", "result", ".", "index", ",", "DatetimeIndex", ")", "and", "len", "(", "result", ".", "index", ")", ">", "0", ")", "if", "needs_offset", ":", "result", ".", "index", "=", "result", ".", "index", "+", "self", ".", "loffset", "self", ".", "loffset", "=", "None", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
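A sketch of loffset in action via the public API; pd.Timedelta is a timedelta subclass, so it satisfies the isinstance check above. Illustrative.

import pandas as pd

s = pd.Series(range(4), index=pd.date_range('2000-01-01', periods=4, freq='H'))

# The bin labels are shifted by 30 minutes after aggregation.
s.resample('2H', loffset=pd.Timedelta('30min')).sum()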
train
Resampler._get_resampler_for_grouping
Return the correct class for resampling with groupby.
pandas/core/resample.py
def _get_resampler_for_grouping(self, groupby, **kwargs): """ Return the correct class for resampling with groupby. """ return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
def _get_resampler_for_grouping(self, groupby, **kwargs): """ Return the correct class for resampling with groupby. """ return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
[ "Return", "the", "correct", "class", "for", "resampling", "with", "groupby", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L385-L389
[ "def", "_get_resampler_for_grouping", "(", "self", ",", "groupby", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resampler_for_grouping", "(", "self", ",", "groupby", "=", "groupby", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Resampler._wrap_result
Potentially wrap any results.
pandas/core/resample.py
def _wrap_result(self, result): """ Potentially wrap any results. """ if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: obj = self.obj if isinstance(obj.index, PeriodIndex): result.index = obj.index.asfreq(self.freq) else: result.index = obj.index._shallow_copy(freq=self.freq) result.name = getattr(obj, 'name', None) return result
def _wrap_result(self, result): """ Potentially wrap any results. """ if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: obj = self.obj if isinstance(obj.index, PeriodIndex): result.index = obj.index.asfreq(self.freq) else: result.index = obj.index._shallow_copy(freq=self.freq) result.name = getattr(obj, 'name', None) return result
[ "Potentially", "wrap", "any", "results", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L391-L406
[ "def", "_wrap_result", "(", "self", ",", "result", ")", ":", "if", "isinstance", "(", "result", ",", "ABCSeries", ")", "and", "self", ".", "_selection", "is", "not", "None", ":", "result", ".", "name", "=", "self", ".", "_selection", "if", "isinstance", "(", "result", ",", "ABCSeries", ")", "and", "result", ".", "empty", ":", "obj", "=", "self", ".", "obj", "if", "isinstance", "(", "obj", ".", "index", ",", "PeriodIndex", ")", ":", "result", ".", "index", "=", "obj", ".", "index", ".", "asfreq", "(", "self", ".", "freq", ")", "else", ":", "result", ".", "index", "=", "obj", ".", "index", ".", "_shallow_copy", "(", "freq", "=", "self", ".", "freq", ")", "result", ".", "name", "=", "getattr", "(", "obj", ",", "'name'", ",", "None", ")", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
Resampler.interpolate
Interpolate values according to different methods. .. versionadded:: 0.18.1
pandas/core/resample.py
def interpolate(self, method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None, **kwargs): """ Interpolate values according to different methods. .. versionadded:: 0.18.1 """ result = self._upsample(None) return result.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False, limit_direction='forward', limit_area=None, downcast=None, **kwargs): """ Interpolate values according to different methods. .. versionadded:: 0.18.1 """ result = self._upsample(None) return result.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs)
[ "Interpolate", "values", "according", "to", "different", "methods", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L756-L769
[ "def", "interpolate", "(", "self", ",", "method", "=", "'linear'", ",", "axis", "=", "0", ",", "limit", "=", "None", ",", "inplace", "=", "False", ",", "limit_direction", "=", "'forward'", ",", "limit_area", "=", "None", ",", "downcast", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "self", ".", "_upsample", "(", "None", ")", "return", "result", ".", "interpolate", "(", "method", "=", "method", ",", "axis", "=", "axis", ",", "limit", "=", "limit", ",", "inplace", "=", "inplace", ",", "limit_direction", "=", "limit_direction", ",", "limit_area", "=", "limit_area", ",", "downcast", "=", "downcast", ",", "*", "*", "kwargs", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
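Since interpolate upsamples first (self._upsample(None)) and then fills, a short public-API sketch; illustrative.

import pandas as pd

s = pd.Series([0.0, 4.0],
              index=pd.to_datetime(['2000-01-01 00:00', '2000-01-01 02:00']))

# Upsample to 30-minute points, then linearly interpolate the NaN gaps.
s.resample('30T').interpolate('linear')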
train
Resampler.std
Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 Degrees of freedom.
pandas/core/resample.py
def std(self, ddof=1, *args, **kwargs): """ Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 Degrees of freedom. """ nv.validate_resampler_func('std', args, kwargs) return self._downsample('std', ddof=ddof)
def std(self, ddof=1, *args, **kwargs): """ Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 Degrees of freedom. """ nv.validate_resampler_func('std', args, kwargs) return self._downsample('std', ddof=ddof)
[ "Compute", "standard", "deviation", "of", "groups", "excluding", "missing", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L790-L800
[ "def", "std", "(", "self", ",", "ddof", "=", "1", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_resampler_func", "(", "'std'", ",", "args", ",", "kwargs", ")", "return", "self", ".", "_downsample", "(", "'std'", ",", "ddof", "=", "ddof", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
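A short sketch of the ddof parameter; illustrative.

import pandas as pd

s = pd.Series([1.0, 2.0, 4.0, 8.0],
              index=pd.date_range('2000-01-01', periods=4, freq='H'))

s.resample('2H').std()        # sample standard deviation (ddof=1) per bin
s.resample('2H').std(ddof=0)  # population standard deviation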
train
Resampler.var
Compute variance of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 degrees of freedom
pandas/core/resample.py
def var(self, ddof=1, *args, **kwargs): """ Compute variance of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 degrees of freedom """ nv.validate_resampler_func('var', args, kwargs) return self._downsample('var', ddof=ddof)
def var(self, ddof=1, *args, **kwargs): """ Compute variance of groups, excluding missing values. Parameters ---------- ddof : integer, default 1 degrees of freedom """ nv.validate_resampler_func('var', args, kwargs) return self._downsample('var', ddof=ddof)
[ "Compute", "variance", "of", "groups", "excluding", "missing", "values", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L802-L812
[ "def", "var", "(", "self", ",", "ddof", "=", "1", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "nv", ".", "validate_resampler_func", "(", "'var'", ",", "args", ",", "kwargs", ")", "return", "self", ".", "_downsample", "(", "'var'", ",", "ddof", "=", "ddof", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_GroupByMixin._apply
Dispatch to _upsample; we are stripping all of the _upsample kwargs and performing the original function call on the grouped object.
pandas/core/resample.py
def _apply(self, f, grouper=None, *args, **kwargs): """ Dispatch to _upsample; we are stripping all of the _upsample kwargs and performing the original function call on the grouped object. """ def func(x): x = self._shallow_copy(x, groupby=self.groupby) if isinstance(f, str): return getattr(x, f)(**kwargs) return x.apply(f, *args, **kwargs) result = self._groupby.apply(func) return self._wrap_result(result)
def _apply(self, f, grouper=None, *args, **kwargs): """ Dispatch to _upsample; we are stripping all of the _upsample kwargs and performing the original function call on the grouped object. """ def func(x): x = self._shallow_copy(x, groupby=self.groupby) if isinstance(f, str): return getattr(x, f)(**kwargs) return x.apply(f, *args, **kwargs) result = self._groupby.apply(func) return self._wrap_result(result)
[ "Dispatch", "to", "_upsample", ";", "we", "are", "stripping", "all", "of", "the", "_upsample", "kwargs", "and", "performing", "the", "original", "function", "call", "on", "the", "grouped", "object", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L947-L962
[ "def", "_apply", "(", "self", ",", "f", ",", "grouper", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "func", "(", "x", ")", ":", "x", "=", "self", ".", "_shallow_copy", "(", "x", ",", "groupby", "=", "self", ".", "groupby", ")", "if", "isinstance", "(", "f", ",", "str", ")", ":", "return", "getattr", "(", "x", ",", "f", ")", "(", "*", "*", "kwargs", ")", "return", "x", ".", "apply", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", "result", "=", "self", ".", "_groupby", ".", "apply", "(", "func", ")", "return", "self", ".", "_wrap_result", "(", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DatetimeIndexResampler._downsample
Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function
pandas/core/resample.py
def _downsample(self, how, **kwargs): """ Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ self._set_binner() how = self._is_cython_func(how) or how ax = self.ax obj = self._selected_obj if not len(ax): # reset to the new freq obj = obj.copy() obj.index.freq = self.freq return obj # do we have a regular frequency if ax.freq is not None or ax.inferred_freq is not None: if len(self.grouper.binlabels) > len(ax) and how is None: # let's do an asfreq return self.asfreq() # we are downsampling # we want to call the actual grouper method here result = obj.groupby( self.grouper, axis=self.axis).aggregate(how, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result)
def _downsample(self, how, **kwargs): """ Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ self._set_binner() how = self._is_cython_func(how) or how ax = self.ax obj = self._selected_obj if not len(ax): # reset to the new freq obj = obj.copy() obj.index.freq = self.freq return obj # do we have a regular frequency if ax.freq is not None or ax.inferred_freq is not None: if len(self.grouper.binlabels) > len(ax) and how is None: # let's do an asfreq return self.asfreq() # we are downsampling # we want to call the actual grouper method here result = obj.groupby( self.grouper, axis=self.axis).aggregate(how, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result)
[ "Downsample", "the", "cython", "defined", "function", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L982-L1016
[ "def", "_downsample", "(", "self", ",", "how", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_set_binner", "(", ")", "how", "=", "self", ".", "_is_cython_func", "(", "how", ")", "or", "how", "ax", "=", "self", ".", "ax", "obj", "=", "self", ".", "_selected_obj", "if", "not", "len", "(", "ax", ")", ":", "# reset to the new freq", "obj", "=", "obj", ".", "copy", "(", ")", "obj", ".", "index", ".", "freq", "=", "self", ".", "freq", "return", "obj", "# do we have a regular frequency", "if", "ax", ".", "freq", "is", "not", "None", "or", "ax", ".", "inferred_freq", "is", "not", "None", ":", "if", "len", "(", "self", ".", "grouper", ".", "binlabels", ")", ">", "len", "(", "ax", ")", "and", "how", "is", "None", ":", "# let's do an asfreq", "return", "self", ".", "asfreq", "(", ")", "# we are downsampling", "# we want to call the actual grouper method here", "result", "=", "obj", ".", "groupby", "(", "self", ".", "grouper", ",", "axis", "=", "self", ".", "axis", ")", ".", "aggregate", "(", "how", ",", "*", "*", "kwargs", ")", "result", "=", "self", ".", "_apply_loffset", "(", "result", ")", "return", "self", ".", "_wrap_result", "(", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
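A public-API sketch of the downsampling path; illustrative.

import pandas as pd

s = pd.Series(range(10), index=pd.date_range('2000-01-01', periods=10, freq='T'))

# Ten minutely points grouped into two five-minute bins; note that with
# no 'how' and more bins than points, the code above falls back to asfreq.
s.resample('5T').mean()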
train
DatetimeIndexResampler._adjust_binner_for_upsample
Adjust our binner when upsampling. The range of a new index should not be outside specified range
pandas/core/resample.py
def _adjust_binner_for_upsample(self, binner): """ Adjust our binner when upsampling. The range of a new index should not be outside specified range """ if self.closed == 'right': binner = binner[1:] else: binner = binner[:-1] return binner
def _adjust_binner_for_upsample(self, binner): """ Adjust our binner when upsampling. The range of a new index should not be outside specified range """ if self.closed == 'right': binner = binner[1:] else: binner = binner[:-1] return binner
[ "Adjust", "our", "binner", "when", "upsampling", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1018-L1028
[ "def", "_adjust_binner_for_upsample", "(", "self", ",", "binner", ")", ":", "if", "self", ".", "closed", "==", "'right'", ":", "binner", "=", "binner", "[", "1", ":", "]", "else", ":", "binner", "=", "binner", "[", ":", "-", "1", "]", "return", "binner" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
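A sketch of how the closed side trims the binner when upsampling; the exact labels are my expectation, hedged.

import pandas as pd

s = pd.Series(range(3), index=pd.date_range('2000-01-01', periods=3, freq='2H'))

s.resample('H').asfreq()                  # default closed='left': keeps the first edge
s.resample('H', closed='right').asfreq()  # trims the first edge instead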
train
DatetimeIndexResampler._upsample
Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna
pandas/core/resample.py
def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna """ self._set_binner() if self.axis: raise AssertionError('axis must be 0') if self._from_selection: raise ValueError("Upsampling from level= or on= selection" " is not supported, use .set_index(...)" " to explicitly set index to" " datetime-like") ax = self.ax obj = self._selected_obj binner = self.binner res_index = self._adjust_binner_for_upsample(binner) # if we have the same frequency as our axis, then we are equal sampling if limit is None and to_offset(ax.inferred_freq) == self.freq: result = obj.copy() result.index = res_index else: result = obj.reindex(res_index, method=method, limit=limit, fill_value=fill_value) result = self._apply_loffset(result) return self._wrap_result(result)
def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna """ self._set_binner() if self.axis: raise AssertionError('axis must be 0') if self._from_selection: raise ValueError("Upsampling from level= or on= selection" " is not supported, use .set_index(...)" " to explicitly set index to" " datetime-like") ax = self.ax obj = self._selected_obj binner = self.binner res_index = self._adjust_binner_for_upsample(binner) # if we have the same frequency as our axis, then we are equal sampling if limit is None and to_offset(ax.inferred_freq) == self.freq: result = obj.copy() result.index = res_index else: result = obj.reindex(res_index, method=method, limit=limit, fill_value=fill_value) result = self._apply_loffset(result) return self._wrap_result(result)
[ "Parameters", "----------", "method", ":", "string", "{", "backfill", "bfill", "pad", "ffill", "asfreq", "}", "method", "for", "upsampling", "limit", ":", "int", "default", "None", "Maximum", "size", "gap", "to", "fill", "when", "reindexing", "fill_value", ":", "scalar", "default", "None", "Value", "to", "use", "for", "missing", "values" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1030-L1069
[ "def", "_upsample", "(", "self", ",", "method", ",", "limit", "=", "None", ",", "fill_value", "=", "None", ")", ":", "self", ".", "_set_binner", "(", ")", "if", "self", ".", "axis", ":", "raise", "AssertionError", "(", "'axis must be 0'", ")", "if", "self", ".", "_from_selection", ":", "raise", "ValueError", "(", "\"Upsampling from level= or on= selection\"", "\" is not supported, use .set_index(...)\"", "\" to explicitly set index to\"", "\" datetime-like\"", ")", "ax", "=", "self", ".", "ax", "obj", "=", "self", ".", "_selected_obj", "binner", "=", "self", ".", "binner", "res_index", "=", "self", ".", "_adjust_binner_for_upsample", "(", "binner", ")", "# if we have the same frequency as our axis, then we are equal sampling", "if", "limit", "is", "None", "and", "to_offset", "(", "ax", ".", "inferred_freq", ")", "==", "self", ".", "freq", ":", "result", "=", "obj", ".", "copy", "(", ")", "result", ".", "index", "=", "res_index", "else", ":", "result", "=", "obj", ".", "reindex", "(", "res_index", ",", "method", "=", "method", ",", "limit", "=", "limit", ",", "fill_value", "=", "fill_value", ")", "result", "=", "self", ".", "_apply_loffset", "(", "result", ")", "return", "self", ".", "_wrap_result", "(", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
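A public-API sketch of the fill choices this method exposes; illustrative.

import pandas as pd

s = pd.Series([1.0, 2.0], index=pd.date_range('2000-01-01', periods=2, freq='D'))

s.resample('6H').ffill()         # pad forward into the new slots
s.resample('6H').bfill(limit=1)  # backfill at most one step per gap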
train
PeriodIndexResampler._downsample
Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function
pandas/core/resample.py
def _downsample(self, how, **kwargs): """ Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ # we may need to actually resample as if we are timestamps if self.kind == 'timestamp': return super()._downsample(how, **kwargs) how = self._is_cython_func(how) or how ax = self.ax if is_subperiod(ax.freq, self.freq): # Downsampling return self._groupby_and_aggregate(how, grouper=self.grouper, **kwargs) elif is_superperiod(ax.freq, self.freq): if how == 'ohlc': # GH #13083 # upsampling to subperiods is handled as an asfreq, which works # for pure aggregating/reducing methods # OHLC reduces along the time dimension, but creates multiple # values for each period -> handle by _groupby_and_aggregate() return self._groupby_and_aggregate(how, grouper=self.grouper) return self.asfreq() elif ax.freq == self.freq: return self.asfreq() raise IncompatibleFrequency( 'Frequency {} cannot be resampled to {}, as they are not ' 'sub or super periods'.format(ax.freq, self.freq))
def _downsample(self, how, **kwargs): """ Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ # we may need to actually resample as if we are timestamps if self.kind == 'timestamp': return super()._downsample(how, **kwargs) how = self._is_cython_func(how) or how ax = self.ax if is_subperiod(ax.freq, self.freq): # Downsampling return self._groupby_and_aggregate(how, grouper=self.grouper, **kwargs) elif is_superperiod(ax.freq, self.freq): if how == 'ohlc': # GH #13083 # upsampling to subperiods is handled as an asfreq, which works # for pure aggregating/reducing methods # OHLC reduces along the time dimension, but creates multiple # values for each period -> handle by _groupby_and_aggregate() return self._groupby_and_aggregate(how, grouper=self.grouper) return self.asfreq() elif ax.freq == self.freq: return self.asfreq() raise IncompatibleFrequency( 'Frequency {} cannot be resampled to {}, as they are not ' 'sub or super periods'.format(ax.freq, self.freq))
[ "Downsample", "the", "cython", "defined", "function", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1124-L1159
[ "def", "_downsample", "(", "self", ",", "how", ",", "*", "*", "kwargs", ")", ":", "# we may need to actually resample as if we are timestamps", "if", "self", ".", "kind", "==", "'timestamp'", ":", "return", "super", "(", ")", ".", "_downsample", "(", "how", ",", "*", "*", "kwargs", ")", "how", "=", "self", ".", "_is_cython_func", "(", "how", ")", "or", "how", "ax", "=", "self", ".", "ax", "if", "is_subperiod", "(", "ax", ".", "freq", ",", "self", ".", "freq", ")", ":", "# Downsampling", "return", "self", ".", "_groupby_and_aggregate", "(", "how", ",", "grouper", "=", "self", ".", "grouper", ",", "*", "*", "kwargs", ")", "elif", "is_superperiod", "(", "ax", ".", "freq", ",", "self", ".", "freq", ")", ":", "if", "how", "==", "'ohlc'", ":", "# GH #13083", "# upsampling to subperiods is handled as an asfreq, which works", "# for pure aggregating/reducing methods", "# OHLC reduces along the time dimension, but creates multiple", "# values for each period -> handle by _groupby_and_aggregate()", "return", "self", ".", "_groupby_and_aggregate", "(", "how", ",", "grouper", "=", "self", ".", "grouper", ")", "return", "self", ".", "asfreq", "(", ")", "elif", "ax", ".", "freq", "==", "self", ".", "freq", ":", "return", "self", ".", "asfreq", "(", ")", "raise", "IncompatibleFrequency", "(", "'Frequency {} cannot be resampled to {}, as they are not '", "'sub or super periods'", ".", "format", "(", "ax", ".", "freq", ",", "self", ".", "freq", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
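A sketch of the sub/super-period rule with PeriodIndex data; illustrative, and the error case is stated from the code above rather than executed.

import pandas as pd

s = pd.Series(range(12), index=pd.period_range('2000-01', periods=12, freq='M'))

# Months are sub-periods of quarters, so this is a true downsample.
s.resample('Q').sum()

# A target that is neither a sub- nor a super-period of 'M' (e.g. 'W')
# raises IncompatibleFrequency.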
train
PeriodIndexResampler._upsample
Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna
pandas/core/resample.py
def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna """ # we may need to actually resample as if we are timestamps if self.kind == 'timestamp': return super()._upsample(method, limit=limit, fill_value=fill_value) self._set_binner() ax = self.ax obj = self.obj new_index = self.binner # Start vs. end of period memb = ax.asfreq(self.freq, how=self.convention) # Get the fill indexer indexer = memb.get_indexer(new_index, method=method, limit=limit) return self._wrap_result(_take_new_index( obj, indexer, new_index, axis=self.axis))
def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna """ # we may need to actually resample as if we are timestamps if self.kind == 'timestamp': return super()._upsample(method, limit=limit, fill_value=fill_value) self._set_binner() ax = self.ax obj = self.obj new_index = self.binner # Start vs. end of period memb = ax.asfreq(self.freq, how=self.convention) # Get the fill indexer indexer = memb.get_indexer(new_index, method=method, limit=limit) return self._wrap_result(_take_new_index( obj, indexer, new_index, axis=self.axis))
[ "Parameters", "----------", "method", ":", "string", "{", "backfill", "bfill", "pad", "ffill", "}", "method", "for", "upsampling", "limit", ":", "int", "default", "None", "Maximum", "size", "gap", "to", "fill", "when", "reindexing", "fill_value", ":", "scalar", "default", "None", "Value", "to", "use", "for", "missing", "values" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1161-L1194
[ "def", "_upsample", "(", "self", ",", "method", ",", "limit", "=", "None", ",", "fill_value", "=", "None", ")", ":", "# we may need to actually resample as if we are timestamps", "if", "self", ".", "kind", "==", "'timestamp'", ":", "return", "super", "(", ")", ".", "_upsample", "(", "method", ",", "limit", "=", "limit", ",", "fill_value", "=", "fill_value", ")", "self", ".", "_set_binner", "(", ")", "ax", "=", "self", ".", "ax", "obj", "=", "self", ".", "obj", "new_index", "=", "self", ".", "binner", "# Start vs. end of period", "memb", "=", "ax", ".", "asfreq", "(", "self", ".", "freq", ",", "how", "=", "self", ".", "convention", ")", "# Get the fill indexer", "indexer", "=", "memb", ".", "get_indexer", "(", "new_index", ",", "method", "=", "method", ",", "limit", "=", "limit", ")", "return", "self", ".", "_wrap_result", "(", "_take_new_index", "(", "obj", ",", "indexer", ",", "new_index", ",", "axis", "=", "self", ".", "axis", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
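An illustrative sketch of the upsampling path (editor-added; the data values are made up): quarterly periods forward-filled to months under the default convention='start'.

import pandas as pd

# 'Q' -> 'M' is a superperiod upsample: each quarter's value is anchored at
# its starting month and forward-filled across the monthly binner.
s = pd.Series([1.0, 2.0],
              index=pd.period_range('2018Q1', periods=2, freq='Q'))
print(s.resample('M').ffill())
# 2018-01..2018-03 -> 1.0, 2018-04..2018-06 -> 2.0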
train
TimeGrouper._get_resampler
Return my resampler or raise if we have an invalid axis. Parameters ---------- obj : input object kind : string, optional 'period','timestamp','timedelta' are valid Returns ------- a Resampler Raises ------ TypeError if incompatible axis
pandas/core/resample.py
def _get_resampler(self, obj, kind=None): """ Return my resampler or raise if we have an invalid axis. Parameters ---------- obj : input object kind : string, optional 'period','timestamp','timedelta' are valid Returns ------- a Resampler Raises ------ TypeError if incompatible axis """ self._set_grouper(obj) ax = self.ax if isinstance(ax, DatetimeIndex): return DatetimeIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, PeriodIndex) or kind == 'period': return PeriodIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, TimedeltaIndex): return TimedeltaIndexResampler(obj, groupby=self, axis=self.axis) raise TypeError("Only valid with DatetimeIndex, " "TimedeltaIndex or PeriodIndex, " "but got an instance of %r" % type(ax).__name__)
def _get_resampler(self, obj, kind=None): """ Return my resampler or raise if we have an invalid axis. Parameters ---------- obj : input object kind : string, optional 'period','timestamp','timedelta' are valid Returns ------- a Resampler Raises ------ TypeError if incompatible axis """ self._set_grouper(obj) ax = self.ax if isinstance(ax, DatetimeIndex): return DatetimeIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, PeriodIndex) or kind == 'period': return PeriodIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, TimedeltaIndex): return TimedeltaIndexResampler(obj, groupby=self, axis=self.axis) raise TypeError("Only valid with DatetimeIndex, " "TimedeltaIndex or PeriodIndex, " "but got an instance of %r" % type(ax).__name__)
[ "Return", "my", "resampler", "or", "raise", "if", "we", "have", "an", "invalid", "axis", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/resample.py#L1334-L1373
[ "def", "_get_resampler", "(", "self", ",", "obj", ",", "kind", "=", "None", ")", ":", "self", ".", "_set_grouper", "(", "obj", ")", "ax", "=", "self", ".", "ax", "if", "isinstance", "(", "ax", ",", "DatetimeIndex", ")", ":", "return", "DatetimeIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "kind", "=", "kind", ",", "axis", "=", "self", ".", "axis", ")", "elif", "isinstance", "(", "ax", ",", "PeriodIndex", ")", "or", "kind", "==", "'period'", ":", "return", "PeriodIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "kind", "=", "kind", ",", "axis", "=", "self", ".", "axis", ")", "elif", "isinstance", "(", "ax", ",", "TimedeltaIndex", ")", ":", "return", "TimedeltaIndexResampler", "(", "obj", ",", "groupby", "=", "self", ",", "axis", "=", "self", ".", "axis", ")", "raise", "TypeError", "(", "\"Only valid with DatetimeIndex, \"", "\"TimedeltaIndex or PeriodIndex, \"", "\"but got an instance of %r\"", "%", "type", "(", "ax", ")", ".", "__name__", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
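The dispatch above is also the source of the familiar resample error on non-datetime-like axes; a small sketch:

import pandas as pd

# A default RangeIndex is none of DatetimeIndex/PeriodIndex/TimedeltaIndex,
# so building the resampler raises the TypeError from _get_resampler.
try:
    pd.Series(range(3)).resample('D')
except TypeError as err:
    print(err)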
train
_combine_hash_arrays
Combine a generator of uint64 hash arrays into a single hash array; the combining scheme should be the same as CPython's tupleobject.c. Parameters ---------- arrays : generator num_items : int
pandas/core/util/hashing.py
def _combine_hash_arrays(arrays, num_items): """ Parameters ---------- arrays : generator num_items : int Should be the same as CPython's tupleobject.c """ try: first = next(arrays) except StopIteration: return np.array([], dtype=np.uint64) arrays = itertools.chain([first], arrays) mult = np.uint64(1000003) out = np.zeros_like(first) + np.uint64(0x345678) for i, a in enumerate(arrays): inverse_i = num_items - i out ^= a out *= mult mult += np.uint64(82520 + inverse_i + inverse_i) assert i + 1 == num_items, 'Fed in wrong num_items' out += np.uint64(97531) return out
def _combine_hash_arrays(arrays, num_items): """ Parameters ---------- arrays : generator num_items : int Should be the same as CPython's tupleobject.c """ try: first = next(arrays) except StopIteration: return np.array([], dtype=np.uint64) arrays = itertools.chain([first], arrays) mult = np.uint64(1000003) out = np.zeros_like(first) + np.uint64(0x345678) for i, a in enumerate(arrays): inverse_i = num_items - i out ^= a out *= mult mult += np.uint64(82520 + inverse_i + inverse_i) assert i + 1 == num_items, 'Fed in wrong num_items' out += np.uint64(97531) return out
[ "Parameters", "----------", "arrays", ":", "generator", "num_items", ":", "int" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L21-L46
[ "def", "_combine_hash_arrays", "(", "arrays", ",", "num_items", ")", ":", "try", ":", "first", "=", "next", "(", "arrays", ")", "except", "StopIteration", ":", "return", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "np", ".", "uint64", ")", "arrays", "=", "itertools", ".", "chain", "(", "[", "first", "]", ",", "arrays", ")", "mult", "=", "np", ".", "uint64", "(", "1000003", ")", "out", "=", "np", ".", "zeros_like", "(", "first", ")", "+", "np", ".", "uint64", "(", "0x345678", ")", "for", "i", ",", "a", "in", "enumerate", "(", "arrays", ")", ":", "inverse_i", "=", "num_items", "-", "i", "out", "^=", "a", "out", "*=", "mult", "mult", "+=", "np", ".", "uint64", "(", "82520", "+", "inverse_i", "+", "inverse_i", ")", "assert", "i", "+", "1", "==", "num_items", ",", "'Fed in wrong num_items'", "out", "+=", "np", ".", "uint64", "(", "97531", ")", "return", "out" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
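A short demonstration of the combiner (editor-added; _combine_hash_arrays is a private helper, so the import path below is an internal detail):

import numpy as np
from pandas.core.util.hashing import _combine_hash_arrays, hash_array

a = hash_array(np.array([1, 2, 3]))
b = hash_array(np.array([4, 5, 6]))
# CPython-tuple-style mixing is order-sensitive, unlike a plain XOR.
print(_combine_hash_arrays(iter([a, b]), 2))
print(_combine_hash_arrays(iter([b, a]), 2))  # different hashes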
train
hash_pandas_object
Return a data hash of the Index/Series/DataFrame .. versionadded:: 0.19.2 Parameters ---------- index : boolean, default True include the index in the hash (if Series/DataFrame) encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- Series of uint64, same length as the object
pandas/core/util/hashing.py
def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None, categorize=True): """ Return a data hash of the Index/Series/DataFrame .. versionadded:: 0.19.2 Parameters ---------- index : boolean, default True include the index in the hash (if Series/DataFrame) encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- Series of uint64, same length as the object """ from pandas import Series if hash_key is None: hash_key = _default_hash_key if isinstance(obj, ABCMultiIndex): return Series(hash_tuples(obj, encoding, hash_key), dtype='uint64', copy=False) if isinstance(obj, ABCIndexClass): h = hash_array(obj.values, encoding, hash_key, categorize).astype('uint64', copy=False) h = Series(h, index=obj, dtype='uint64', copy=False) elif isinstance(obj, ABCSeries): h = hash_array(obj.values, encoding, hash_key, categorize).astype('uint64', copy=False) if index: index_iter = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize).values for _ in [None]) arrays = itertools.chain([h], index_iter) h = _combine_hash_arrays(arrays, 2) h = Series(h, index=obj.index, dtype='uint64', copy=False) elif isinstance(obj, ABCDataFrame): hashes = (hash_array(series.values) for _, series in obj.iteritems()) num_items = len(obj.columns) if index: index_hash_generator = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize).values # noqa for _ in [None]) num_items += 1 hashes = itertools.chain(hashes, index_hash_generator) h = _combine_hash_arrays(hashes, num_items) h = Series(h, index=obj.index, dtype='uint64', copy=False) else: raise TypeError("Unexpected type for hashing %s" % type(obj)) return h
def hash_pandas_object(obj, index=True, encoding='utf8', hash_key=None, categorize=True): """ Return a data hash of the Index/Series/DataFrame .. versionadded:: 0.19.2 Parameters ---------- index : boolean, default True include the index in the hash (if Series/DataFrame) encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- Series of uint64, same length as the object """ from pandas import Series if hash_key is None: hash_key = _default_hash_key if isinstance(obj, ABCMultiIndex): return Series(hash_tuples(obj, encoding, hash_key), dtype='uint64', copy=False) if isinstance(obj, ABCIndexClass): h = hash_array(obj.values, encoding, hash_key, categorize).astype('uint64', copy=False) h = Series(h, index=obj, dtype='uint64', copy=False) elif isinstance(obj, ABCSeries): h = hash_array(obj.values, encoding, hash_key, categorize).astype('uint64', copy=False) if index: index_iter = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize).values for _ in [None]) arrays = itertools.chain([h], index_iter) h = _combine_hash_arrays(arrays, 2) h = Series(h, index=obj.index, dtype='uint64', copy=False) elif isinstance(obj, ABCDataFrame): hashes = (hash_array(series.values) for _, series in obj.iteritems()) num_items = len(obj.columns) if index: index_hash_generator = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize).values # noqa for _ in [None]) num_items += 1 hashes = itertools.chain(hashes, index_hash_generator) h = _combine_hash_arrays(hashes, num_items) h = Series(h, index=obj.index, dtype='uint64', copy=False) else: raise TypeError("Unexpected type for hashing %s" % type(obj)) return h
[ "Return", "a", "data", "hash", "of", "the", "Index", "/", "Series", "/", "DataFrame" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L49-L117
[ "def", "hash_pandas_object", "(", "obj", ",", "index", "=", "True", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ",", "categorize", "=", "True", ")", ":", "from", "pandas", "import", "Series", "if", "hash_key", "is", "None", ":", "hash_key", "=", "_default_hash_key", "if", "isinstance", "(", "obj", ",", "ABCMultiIndex", ")", ":", "return", "Series", "(", "hash_tuples", "(", "obj", ",", "encoding", ",", "hash_key", ")", ",", "dtype", "=", "'uint64'", ",", "copy", "=", "False", ")", "if", "isinstance", "(", "obj", ",", "ABCIndexClass", ")", ":", "h", "=", "hash_array", "(", "obj", ".", "values", ",", "encoding", ",", "hash_key", ",", "categorize", ")", ".", "astype", "(", "'uint64'", ",", "copy", "=", "False", ")", "h", "=", "Series", "(", "h", ",", "index", "=", "obj", ",", "dtype", "=", "'uint64'", ",", "copy", "=", "False", ")", "elif", "isinstance", "(", "obj", ",", "ABCSeries", ")", ":", "h", "=", "hash_array", "(", "obj", ".", "values", ",", "encoding", ",", "hash_key", ",", "categorize", ")", ".", "astype", "(", "'uint64'", ",", "copy", "=", "False", ")", "if", "index", ":", "index_iter", "=", "(", "hash_pandas_object", "(", "obj", ".", "index", ",", "index", "=", "False", ",", "encoding", "=", "encoding", ",", "hash_key", "=", "hash_key", ",", "categorize", "=", "categorize", ")", ".", "values", "for", "_", "in", "[", "None", "]", ")", "arrays", "=", "itertools", ".", "chain", "(", "[", "h", "]", ",", "index_iter", ")", "h", "=", "_combine_hash_arrays", "(", "arrays", ",", "2", ")", "h", "=", "Series", "(", "h", ",", "index", "=", "obj", ".", "index", ",", "dtype", "=", "'uint64'", ",", "copy", "=", "False", ")", "elif", "isinstance", "(", "obj", ",", "ABCDataFrame", ")", ":", "hashes", "=", "(", "hash_array", "(", "series", ".", "values", ")", "for", "_", ",", "series", "in", "obj", ".", "iteritems", "(", ")", ")", "num_items", "=", "len", "(", "obj", ".", "columns", ")", "if", "index", ":", "index_hash_generator", "=", "(", "hash_pandas_object", "(", "obj", ".", "index", ",", "index", "=", "False", ",", "encoding", "=", "encoding", ",", "hash_key", "=", "hash_key", ",", "categorize", "=", "categorize", ")", ".", "values", "# noqa", "for", "_", "in", "[", "None", "]", ")", "num_items", "+=", "1", "hashes", "=", "itertools", ".", "chain", "(", "hashes", ",", "index_hash_generator", ")", "h", "=", "_combine_hash_arrays", "(", "hashes", ",", "num_items", ")", "h", "=", "Series", "(", "h", ",", "index", "=", "obj", ".", "index", ",", "dtype", "=", "'uint64'", ",", "copy", "=", "False", ")", "else", ":", "raise", "TypeError", "(", "\"Unexpected type for hashing %s\"", "%", "type", "(", "obj", ")", ")", "return", "h" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
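Typical usage through the public pandas.util namespace (an illustrative sketch with made-up data):

import pandas as pd
from pandas.util import hash_pandas_object

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
# Row hashes include the index by default; index=False hashes values only.
print(hash_pandas_object(df))
print(hash_pandas_object(df, index=False))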
train
hash_tuples
Hash a MultiIndex / list-of-tuples efficiently .. versionadded:: 0.20.0 Parameters ---------- vals : MultiIndex, list-of-tuples, or single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values
pandas/core/util/hashing.py
def hash_tuples(vals, encoding='utf8', hash_key=None): """ Hash a MultiIndex / list-of-tuples efficiently .. versionadded:: 0.20.0 Parameters ---------- vals : MultiIndex, list-of-tuples, or single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values """ is_tuple = False if isinstance(vals, tuple): vals = [vals] is_tuple = True elif not is_list_like(vals): raise TypeError("must be convertible to a list-of-tuples") from pandas import Categorical, MultiIndex if not isinstance(vals, ABCMultiIndex): vals = MultiIndex.from_tuples(vals) # create a list-of-Categoricals vals = [Categorical(vals.codes[level], vals.levels[level], ordered=False, fastpath=True) for level in range(vals.nlevels)] # hash the list-of-ndarrays hashes = (_hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in vals) h = _combine_hash_arrays(hashes, len(vals)) if is_tuple: h = h[0] return h
def hash_tuples(vals, encoding='utf8', hash_key=None): """ Hash a MultiIndex / list-of-tuples efficiently .. versionadded:: 0.20.0 Parameters ---------- vals : MultiIndex, list-of-tuples, or single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values """ is_tuple = False if isinstance(vals, tuple): vals = [vals] is_tuple = True elif not is_list_like(vals): raise TypeError("must be convertible to a list-of-tuples") from pandas import Categorical, MultiIndex if not isinstance(vals, ABCMultiIndex): vals = MultiIndex.from_tuples(vals) # create a list-of-Categoricals vals = [Categorical(vals.codes[level], vals.levels[level], ordered=False, fastpath=True) for level in range(vals.nlevels)] # hash the list-of-ndarrays hashes = (_hash_categorical(cat, encoding=encoding, hash_key=hash_key) for cat in vals) h = _combine_hash_arrays(hashes, len(vals)) if is_tuple: h = h[0] return h
[ "Hash", "an", "MultiIndex", "/", "list", "-", "of", "-", "tuples", "efficiently" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L120-L164
[ "def", "hash_tuples", "(", "vals", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ")", ":", "is_tuple", "=", "False", "if", "isinstance", "(", "vals", ",", "tuple", ")", ":", "vals", "=", "[", "vals", "]", "is_tuple", "=", "True", "elif", "not", "is_list_like", "(", "vals", ")", ":", "raise", "TypeError", "(", "\"must be convertible to a list-of-tuples\"", ")", "from", "pandas", "import", "Categorical", ",", "MultiIndex", "if", "not", "isinstance", "(", "vals", ",", "ABCMultiIndex", ")", ":", "vals", "=", "MultiIndex", ".", "from_tuples", "(", "vals", ")", "# create a list-of-Categoricals", "vals", "=", "[", "Categorical", "(", "vals", ".", "codes", "[", "level", "]", ",", "vals", ".", "levels", "[", "level", "]", ",", "ordered", "=", "False", ",", "fastpath", "=", "True", ")", "for", "level", "in", "range", "(", "vals", ".", "nlevels", ")", "]", "# hash the list-of-ndarrays", "hashes", "=", "(", "_hash_categorical", "(", "cat", ",", "encoding", "=", "encoding", ",", "hash_key", "=", "hash_key", ")", "for", "cat", "in", "vals", ")", "h", "=", "_combine_hash_arrays", "(", "hashes", ",", "len", "(", "vals", ")", ")", "if", "is_tuple", ":", "h", "=", "h", "[", "0", "]", "return", "h" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
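An illustrative check that a MultiIndex and the equivalent list of tuples hash identically (hash_tuples is internal, so the import path is an implementation detail):

import pandas as pd
from pandas.core.util.hashing import hash_tuples

mi = pd.MultiIndex.from_tuples([('a', 1), ('b', 2)])
print(hash_tuples(mi))
print(hash_tuples([('a', 1), ('b', 2)]))  # same two uint64 values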
train
hash_tuple
Hash a single tuple efficiently Parameters ---------- val : single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- hash
pandas/core/util/hashing.py
def hash_tuple(val, encoding='utf8', hash_key=None): """ Hash a single tuple efficiently Parameters ---------- val : single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- hash """ hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key) for v in val) h = _combine_hash_arrays(hashes, len(val))[0] return h
def hash_tuple(val, encoding='utf8', hash_key=None): """ Hash a single tuple efficiently Parameters ---------- val : single tuple encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- hash """ hashes = (_hash_scalar(v, encoding=encoding, hash_key=hash_key) for v in val) h = _combine_hash_arrays(hashes, len(val))[0] return h
[ "Hash", "a", "single", "tuple", "efficiently" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L167-L187
[ "def", "hash_tuple", "(", "val", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ")", ":", "hashes", "=", "(", "_hash_scalar", "(", "v", ",", "encoding", "=", "encoding", ",", "hash_key", "=", "hash_key", ")", "for", "v", "in", "val", ")", "h", "=", "_combine_hash_arrays", "(", "hashes", ",", "len", "(", "val", ")", ")", "[", "0", "]", "return", "h" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
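A sketch suggesting the scalar-tuple path should agree with the list-of-tuples path (both are private helpers; the equality is by design, not a guaranteed API):

from pandas.core.util.hashing import hash_tuple, hash_tuples

# A single tuple should hash the same through either code path.
print(hash_tuple(('a', 1)))
print(hash_tuples([('a', 1)])[0])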
train
_hash_categorical
Hash a Categorical by hashing its categories, and then mapping the codes to the hashes Parameters ---------- c : Categorical encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values array, same size as len(c)
pandas/core/util/hashing.py
def _hash_categorical(c, encoding, hash_key): """ Hash a Categorical by hashing its categories, and then mapping the codes to the hashes Parameters ---------- c : Categorical encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values array, same size as len(c) """ # Convert ExtensionArrays to ndarrays values = np.asarray(c.categories.values) hashed = hash_array(values, encoding, hash_key, categorize=False) # we have uint64, as we don't directly support missing values # we don't want to use take_nd which will coerce to float # instead, directly construct the result with a # max(np.uint64) as the missing value indicator # # TODO: GH 15362 mask = c.isna() if len(hashed): result = hashed.take(c.codes) else: result = np.zeros(len(mask), dtype='uint64') if mask.any(): result[mask] = np.iinfo(np.uint64).max return result
def _hash_categorical(c, encoding, hash_key): """ Hash a Categorical by hashing its categories, and then mapping the codes to the hashes Parameters ---------- c : Categorical encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values array, same size as len(c) """ # Convert ExtensionArrays to ndarrays values = np.asarray(c.categories.values) hashed = hash_array(values, encoding, hash_key, categorize=False) # we have uint64, as we don't directly support missing values # we don't want to use take_nd which will coerce to float # instead, directly construct the result with a # max(np.uint64) as the missing value indicator # # TODO: GH 15362 mask = c.isna() if len(hashed): result = hashed.take(c.codes) else: result = np.zeros(len(mask), dtype='uint64') if mask.any(): result[mask] = np.iinfo(np.uint64).max return result
[ "Hash", "a", "Categorical", "by", "hashing", "its", "categories", "and", "then", "mapping", "the", "codes", "to", "the", "hashes" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L190-L226
[ "def", "_hash_categorical", "(", "c", ",", "encoding", ",", "hash_key", ")", ":", "# Convert ExtensionArrays to ndarrays", "values", "=", "np", ".", "asarray", "(", "c", ".", "categories", ".", "values", ")", "hashed", "=", "hash_array", "(", "values", ",", "encoding", ",", "hash_key", ",", "categorize", "=", "False", ")", "# we have uint64, as we don't directly support missing values", "# we don't want to use take_nd which will coerce to float", "# instead, directly construct the result with a", "# max(np.uint64) as the missing value indicator", "#", "# TODO: GH 15362", "mask", "=", "c", ".", "isna", "(", ")", "if", "len", "(", "hashed", ")", ":", "result", "=", "hashed", ".", "take", "(", "c", ".", "codes", ")", "else", ":", "result", "=", "np", ".", "zeros", "(", "len", "(", "mask", ")", ",", "dtype", "=", "'uint64'", ")", "if", "mask", ".", "any", "(", ")", ":", "result", "[", "mask", "]", "=", "np", ".", "iinfo", "(", "np", ".", "uint64", ")", ".", "max", "return", "result" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
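Because only the (small) categories array is hashed and the codes are remapped, a Categorical should hash like its dense equivalent; an editor-added sketch:

import numpy as np
import pandas as pd
from pandas.util import hash_array

c = pd.Categorical(['a', 'b', 'a', None])
dense = np.array(['a', 'b', 'a', np.nan], dtype=object)
print(hash_array(c))      # the missing value maps to the uint64 max sentinel
print(hash_array(dense))  # expected to match elementwise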
train
hash_array
Given a 1d array, return an array of deterministic integers. .. versionadded:: 0.19.2 Parameters ---------- vals : ndarray, Categorical encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- 1d uint64 numpy array of hash values, same length as vals
pandas/core/util/hashing.py
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True): """ Given a 1d array, return an array of deterministic integers. .. versionadded:: 0.19.2 Parameters ---------- vals : ndarray, Categorical encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- 1d uint64 numpy array of hash values, same length as the vals """ if not hasattr(vals, 'dtype'): raise TypeError("must pass a ndarray-like") dtype = vals.dtype if hash_key is None: hash_key = _default_hash_key # For categoricals, we hash the categories, then remap the codes to the # hash values. (This check is above the complex check so that we don't ask # numpy if categorical is a subdtype of complex, as it will choke). if is_categorical_dtype(dtype): return _hash_categorical(vals, encoding, hash_key) elif is_extension_array_dtype(dtype): vals, _ = vals._values_for_factorize() dtype = vals.dtype # we'll be working with everything as 64-bit values, so handle this # 128-bit value early if np.issubdtype(dtype, np.complex128): return hash_array(vals.real) + 23 * hash_array(vals.imag) # First, turn whatever array this is into unsigned 64-bit ints, if we can # manage it. elif isinstance(dtype, np.bool): vals = vals.astype('u8') elif issubclass(dtype.type, (np.datetime64, np.timedelta64)): vals = vals.view('i8').astype('u8', copy=False) elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8: vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8') else: # With repeated values, its MUCH faster to categorize object dtypes, # then hash and rename categories. We allow skipping the categorization # when the values are known/likely to be unique. if categorize: from pandas import factorize, Categorical, Index codes, categories = factorize(vals, sort=False) cat = Categorical(codes, Index(categories), ordered=False, fastpath=True) return _hash_categorical(cat, encoding, hash_key) try: vals = hashing.hash_object_array(vals, hash_key, encoding) except TypeError: # we have mixed types vals = hashing.hash_object_array(vals.astype(str).astype(object), hash_key, encoding) # Then, redistribute these 64-bit ints within the space of 64-bit ints vals ^= vals >> 30 vals *= np.uint64(0xbf58476d1ce4e5b9) vals ^= vals >> 27 vals *= np.uint64(0x94d049bb133111eb) vals ^= vals >> 31 return vals
def hash_array(vals, encoding='utf8', hash_key=None, categorize=True): """ Given a 1d array, return an array of deterministic integers. .. versionadded:: 0.19.2 Parameters ---------- vals : ndarray, Categorical encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- 1d uint64 numpy array of hash values, same length as the vals """ if not hasattr(vals, 'dtype'): raise TypeError("must pass a ndarray-like") dtype = vals.dtype if hash_key is None: hash_key = _default_hash_key # For categoricals, we hash the categories, then remap the codes to the # hash values. (This check is above the complex check so that we don't ask # numpy if categorical is a subdtype of complex, as it will choke). if is_categorical_dtype(dtype): return _hash_categorical(vals, encoding, hash_key) elif is_extension_array_dtype(dtype): vals, _ = vals._values_for_factorize() dtype = vals.dtype # we'll be working with everything as 64-bit values, so handle this # 128-bit value early if np.issubdtype(dtype, np.complex128): return hash_array(vals.real) + 23 * hash_array(vals.imag) # First, turn whatever array this is into unsigned 64-bit ints, if we can # manage it. elif isinstance(dtype, np.bool): vals = vals.astype('u8') elif issubclass(dtype.type, (np.datetime64, np.timedelta64)): vals = vals.view('i8').astype('u8', copy=False) elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8: vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8') else: # With repeated values, its MUCH faster to categorize object dtypes, # then hash and rename categories. We allow skipping the categorization # when the values are known/likely to be unique. if categorize: from pandas import factorize, Categorical, Index codes, categories = factorize(vals, sort=False) cat = Categorical(codes, Index(categories), ordered=False, fastpath=True) return _hash_categorical(cat, encoding, hash_key) try: vals = hashing.hash_object_array(vals, hash_key, encoding) except TypeError: # we have mixed types vals = hashing.hash_object_array(vals.astype(str).astype(object), hash_key, encoding) # Then, redistribute these 64-bit ints within the space of 64-bit ints vals ^= vals >> 30 vals *= np.uint64(0xbf58476d1ce4e5b9) vals ^= vals >> 27 vals *= np.uint64(0x94d049bb133111eb) vals ^= vals >> 31 return vals
[ "Given", "a", "1d", "array", "return", "an", "array", "of", "deterministic", "integers", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L229-L305
[ "def", "hash_array", "(", "vals", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ",", "categorize", "=", "True", ")", ":", "if", "not", "hasattr", "(", "vals", ",", "'dtype'", ")", ":", "raise", "TypeError", "(", "\"must pass a ndarray-like\"", ")", "dtype", "=", "vals", ".", "dtype", "if", "hash_key", "is", "None", ":", "hash_key", "=", "_default_hash_key", "# For categoricals, we hash the categories, then remap the codes to the", "# hash values. (This check is above the complex check so that we don't ask", "# numpy if categorical is a subdtype of complex, as it will choke).", "if", "is_categorical_dtype", "(", "dtype", ")", ":", "return", "_hash_categorical", "(", "vals", ",", "encoding", ",", "hash_key", ")", "elif", "is_extension_array_dtype", "(", "dtype", ")", ":", "vals", ",", "_", "=", "vals", ".", "_values_for_factorize", "(", ")", "dtype", "=", "vals", ".", "dtype", "# we'll be working with everything as 64-bit values, so handle this", "# 128-bit value early", "if", "np", ".", "issubdtype", "(", "dtype", ",", "np", ".", "complex128", ")", ":", "return", "hash_array", "(", "vals", ".", "real", ")", "+", "23", "*", "hash_array", "(", "vals", ".", "imag", ")", "# First, turn whatever array this is into unsigned 64-bit ints, if we can", "# manage it.", "elif", "isinstance", "(", "dtype", ",", "np", ".", "bool", ")", ":", "vals", "=", "vals", ".", "astype", "(", "'u8'", ")", "elif", "issubclass", "(", "dtype", ".", "type", ",", "(", "np", ".", "datetime64", ",", "np", ".", "timedelta64", ")", ")", ":", "vals", "=", "vals", ".", "view", "(", "'i8'", ")", ".", "astype", "(", "'u8'", ",", "copy", "=", "False", ")", "elif", "issubclass", "(", "dtype", ".", "type", ",", "np", ".", "number", ")", "and", "dtype", ".", "itemsize", "<=", "8", ":", "vals", "=", "vals", ".", "view", "(", "'u{}'", ".", "format", "(", "vals", ".", "dtype", ".", "itemsize", ")", ")", ".", "astype", "(", "'u8'", ")", "else", ":", "# With repeated values, its MUCH faster to categorize object dtypes,", "# then hash and rename categories. We allow skipping the categorization", "# when the values are known/likely to be unique.", "if", "categorize", ":", "from", "pandas", "import", "factorize", ",", "Categorical", ",", "Index", "codes", ",", "categories", "=", "factorize", "(", "vals", ",", "sort", "=", "False", ")", "cat", "=", "Categorical", "(", "codes", ",", "Index", "(", "categories", ")", ",", "ordered", "=", "False", ",", "fastpath", "=", "True", ")", "return", "_hash_categorical", "(", "cat", ",", "encoding", ",", "hash_key", ")", "try", ":", "vals", "=", "hashing", ".", "hash_object_array", "(", "vals", ",", "hash_key", ",", "encoding", ")", "except", "TypeError", ":", "# we have mixed types", "vals", "=", "hashing", ".", "hash_object_array", "(", "vals", ".", "astype", "(", "str", ")", ".", "astype", "(", "object", ")", ",", "hash_key", ",", "encoding", ")", "# Then, redistribute these 64-bit ints within the space of 64-bit ints", "vals", "^=", "vals", ">>", "30", "vals", "*=", "np", ".", "uint64", "(", "0xbf58476d1ce4e5b9", ")", "vals", "^=", "vals", ">>", "27", "vals", "*=", "np", ".", "uint64", "(", "0x94d049bb133111eb", ")", "vals", "^=", "vals", ">>", "31", "return", "vals" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
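Basic usage of the public entry point (also exposed as pandas.util.hash_array); unlike Python's built-in hash of strings, the result does not depend on PYTHONHASHSEED:

import numpy as np
from pandas.util import hash_array

print(hash_array(np.array([1, 2, 3])))
print(hash_array(np.array(['a', 'b'], dtype=object)))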
train
_hash_scalar
Hash a scalar value Returns ------- 1d uint64 numpy array of length 1 holding the hash value
pandas/core/util/hashing.py
def _hash_scalar(val, encoding='utf8', hash_key=None): """ Hash scalar value Returns ------- 1d uint64 numpy array of hash value, of length 1 """ if isna(val): # this is to be consistent with the _hash_categorical implementation return np.array([np.iinfo(np.uint64).max], dtype='u8') if getattr(val, 'tzinfo', None) is not None: # for tz-aware datetimes, we need the underlying naive UTC value and # not the tz aware object or pd extension type (as # infer_dtype_from_scalar would do) if not isinstance(val, tslibs.Timestamp): val = tslibs.Timestamp(val) val = val.tz_convert(None) dtype, val = infer_dtype_from_scalar(val) vals = np.array([val], dtype=dtype) return hash_array(vals, hash_key=hash_key, encoding=encoding, categorize=False)
def _hash_scalar(val, encoding='utf8', hash_key=None): """ Hash scalar value Returns ------- 1d uint64 numpy array of hash value, of length 1 """ if isna(val): # this is to be consistent with the _hash_categorical implementation return np.array([np.iinfo(np.uint64).max], dtype='u8') if getattr(val, 'tzinfo', None) is not None: # for tz-aware datetimes, we need the underlying naive UTC value and # not the tz aware object or pd extension type (as # infer_dtype_from_scalar would do) if not isinstance(val, tslibs.Timestamp): val = tslibs.Timestamp(val) val = val.tz_convert(None) dtype, val = infer_dtype_from_scalar(val) vals = np.array([val], dtype=dtype) return hash_array(vals, hash_key=hash_key, encoding=encoding, categorize=False)
[ "Hash", "scalar", "value" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/util/hashing.py#L308-L333
[ "def", "_hash_scalar", "(", "val", ",", "encoding", "=", "'utf8'", ",", "hash_key", "=", "None", ")", ":", "if", "isna", "(", "val", ")", ":", "# this is to be consistent with the _hash_categorical implementation", "return", "np", ".", "array", "(", "[", "np", ".", "iinfo", "(", "np", ".", "uint64", ")", ".", "max", "]", ",", "dtype", "=", "'u8'", ")", "if", "getattr", "(", "val", ",", "'tzinfo'", ",", "None", ")", "is", "not", "None", ":", "# for tz-aware datetimes, we need the underlying naive UTC value and", "# not the tz aware object or pd extension type (as", "# infer_dtype_from_scalar would do)", "if", "not", "isinstance", "(", "val", ",", "tslibs", ".", "Timestamp", ")", ":", "val", "=", "tslibs", ".", "Timestamp", "(", "val", ")", "val", "=", "val", ".", "tz_convert", "(", "None", ")", "dtype", ",", "val", "=", "infer_dtype_from_scalar", "(", "val", ")", "vals", "=", "np", ".", "array", "(", "[", "val", "]", ",", "dtype", "=", "dtype", ")", "return", "hash_array", "(", "vals", ",", "hash_key", "=", "hash_key", ",", "encoding", "=", "encoding", ",", "categorize", "=", "False", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
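A sketch of the missing-value behaviour noted in the code above (private helper; internal import path):

import numpy as np
from pandas.core.util.hashing import _hash_scalar

# NaN hashes to the uint64 max sentinel, consistent with _hash_categorical.
print(_hash_scalar(np.nan))
print(np.iinfo(np.uint64).max)  # 18446744073709551615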
train
DocBuilder._process_single_doc
Make sure the provided value for --single is a path to an existing .rst/.ipynb file, or a pandas object that can be imported. For example, categorical.rst or pandas.DataFrame.head. For the latter, return the corresponding file path (e.g. reference/api/pandas.DataFrame.head.rst).
doc/make.py
def _process_single_doc(self, single_doc): """ Make sure the provided value for --single is a path to an existing .rst/.ipynb file, or a pandas object that can be imported. For example, categorical.rst or pandas.DataFrame.head. For the latter, return the corresponding file path (e.g. reference/api/pandas.DataFrame.head.rst). """ base_name, extension = os.path.splitext(single_doc) if extension in ('.rst', '.ipynb'): if os.path.exists(os.path.join(SOURCE_PATH, single_doc)): return single_doc else: raise FileNotFoundError('File {} not found'.format(single_doc)) elif single_doc.startswith('pandas.'): try: obj = pandas # noqa: F821 for name in single_doc.split('.'): obj = getattr(obj, name) except AttributeError: raise ImportError('Could not import {}'.format(single_doc)) else: return single_doc[len('pandas.'):] else: raise ValueError(('--single={} not understood. Value should be a ' 'valid path to a .rst or .ipynb file, or a ' 'valid pandas object (e.g. categorical.rst or ' 'pandas.DataFrame.head)').format(single_doc))
def _process_single_doc(self, single_doc): """ Make sure the provided value for --single is a path to an existing .rst/.ipynb file, or a pandas object that can be imported. For example, categorical.rst or pandas.DataFrame.head. For the latter, return the corresponding file path (e.g. reference/api/pandas.DataFrame.head.rst). """ base_name, extension = os.path.splitext(single_doc) if extension in ('.rst', '.ipynb'): if os.path.exists(os.path.join(SOURCE_PATH, single_doc)): return single_doc else: raise FileNotFoundError('File {} not found'.format(single_doc)) elif single_doc.startswith('pandas.'): try: obj = pandas # noqa: F821 for name in single_doc.split('.'): obj = getattr(obj, name) except AttributeError: raise ImportError('Could not import {}'.format(single_doc)) else: return single_doc[len('pandas.'):] else: raise ValueError(('--single={} not understood. Value should be a ' 'valid path to a .rst or .ipynb file, or a ' 'valid pandas object (e.g. categorical.rst or ' 'pandas.DataFrame.head)').format(single_doc))
[ "Make", "sure", "the", "provided", "value", "for", "--", "single", "is", "a", "path", "to", "an", "existing", ".", "rst", "/", ".", "ipynb", "file", "or", "a", "pandas", "object", "that", "can", "be", "imported", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L59-L88
[ "def", "_process_single_doc", "(", "self", ",", "single_doc", ")", ":", "base_name", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "single_doc", ")", "if", "extension", "in", "(", "'.rst'", ",", "'.ipynb'", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "SOURCE_PATH", ",", "single_doc", ")", ")", ":", "return", "single_doc", "else", ":", "raise", "FileNotFoundError", "(", "'File {} not found'", ".", "format", "(", "single_doc", ")", ")", "elif", "single_doc", ".", "startswith", "(", "'pandas.'", ")", ":", "try", ":", "obj", "=", "pandas", "# noqa: F821", "for", "name", "in", "single_doc", ".", "split", "(", "'.'", ")", ":", "obj", "=", "getattr", "(", "obj", ",", "name", ")", "except", "AttributeError", ":", "raise", "ImportError", "(", "'Could not import {}'", ".", "format", "(", "single_doc", ")", ")", "else", ":", "return", "single_doc", "[", "len", "(", "'pandas.'", ")", ":", "]", "else", ":", "raise", "ValueError", "(", "(", "'--single={} not understood. Value should be a '", "'valid path to a .rst or .ipynb file, or a '", "'valid pandas object (e.g. categorical.rst or '", "'pandas.DataFrame.head)'", ")", ".", "format", "(", "single_doc", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DocBuilder._run_os
Execute a command as an OS terminal. Parameters ---------- *args : list of str Command and parameters to be executed Examples -------- >>> DocBuilder()._run_os('python', '--version')
doc/make.py
def _run_os(*args): """ Execute a command as an OS terminal. Parameters ---------- *args : list of str Command and parameters to be executed Examples -------- >>> DocBuilder()._run_os('python', '--version') """ subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
def _run_os(*args): """ Execute a command as an OS terminal. Parameters ---------- *args : list of str Command and parameters to be executed Examples -------- >>> DocBuilder()._run_os('python', '--version') """ subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
[ "Execute", "a", "command", "as", "a", "OS", "terminal", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L91-L104
[ "def", "_run_os", "(", "*", "args", ")", ":", "subprocess", ".", "check_call", "(", "args", ",", "stdout", "=", "sys", ".", "stdout", ",", "stderr", "=", "sys", ".", "stderr", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DocBuilder._sphinx_build
Call sphinx to build documentation. Attribute `num_jobs` from the class is used. Parameters ---------- kind : {'html', 'latex'} Examples -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html')
doc/make.py
def _sphinx_build(self, kind): """ Call sphinx to build documentation. Attribute `num_jobs` from the class is used. Parameters ---------- kind : {'html', 'latex'} Examples -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html') """ if kind not in ('html', 'latex'): raise ValueError('kind must be html or latex, ' 'not {}'.format(kind)) cmd = ['sphinx-build', '-b', kind] if self.num_jobs: cmd += ['-j', str(self.num_jobs)] if self.warnings_are_errors: cmd += ['-W', '--keep-going'] if self.verbosity: cmd.append('-{}'.format('v' * self.verbosity)) cmd += ['-d', os.path.join(BUILD_PATH, 'doctrees'), SOURCE_PATH, os.path.join(BUILD_PATH, kind)] return subprocess.call(cmd)
def _sphinx_build(self, kind): """ Call sphinx to build documentation. Attribute `num_jobs` from the class is used. Parameters ---------- kind : {'html', 'latex'} Examples -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html') """ if kind not in ('html', 'latex'): raise ValueError('kind must be html or latex, ' 'not {}'.format(kind)) cmd = ['sphinx-build', '-b', kind] if self.num_jobs: cmd += ['-j', str(self.num_jobs)] if self.warnings_are_errors: cmd += ['-W', '--keep-going'] if self.verbosity: cmd.append('-{}'.format('v' * self.verbosity)) cmd += ['-d', os.path.join(BUILD_PATH, 'doctrees'), SOURCE_PATH, os.path.join(BUILD_PATH, kind)] return subprocess.call(cmd)
[ "Call", "sphinx", "to", "build", "documentation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L106-L133
[ "def", "_sphinx_build", "(", "self", ",", "kind", ")", ":", "if", "kind", "not", "in", "(", "'html'", ",", "'latex'", ")", ":", "raise", "ValueError", "(", "'kind must be html or latex, '", "'not {}'", ".", "format", "(", "kind", ")", ")", "cmd", "=", "[", "'sphinx-build'", ",", "'-b'", ",", "kind", "]", "if", "self", ".", "num_jobs", ":", "cmd", "+=", "[", "'-j'", ",", "str", "(", "self", ".", "num_jobs", ")", "]", "if", "self", ".", "warnings_are_errors", ":", "cmd", "+=", "[", "'-W'", ",", "'--keep-going'", "]", "if", "self", ".", "verbosity", ":", "cmd", ".", "append", "(", "'-{}'", ".", "format", "(", "'v'", "*", "self", ".", "verbosity", ")", ")", "cmd", "+=", "[", "'-d'", ",", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'doctrees'", ")", ",", "SOURCE_PATH", ",", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "kind", ")", "]", "return", "subprocess", ".", "call", "(", "cmd", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
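For illustration, the command assembled above for an html build with num_jobs=4 and verbosity=2, mirrored as standalone code (the two path constants are placeholders, not the real BUILD_PATH/SOURCE_PATH):

import os

num_jobs, verbosity, warnings_are_errors = 4, 2, False
BUILD_PATH, SOURCE_PATH = 'doc/build', 'doc/source'  # placeholder values

cmd = ['sphinx-build', '-b', 'html']
if num_jobs:
    cmd += ['-j', str(num_jobs)]
if warnings_are_errors:
    cmd += ['-W', '--keep-going']
if verbosity:
    cmd.append('-{}'.format('v' * verbosity))
cmd += ['-d', os.path.join(BUILD_PATH, 'doctrees'),
        SOURCE_PATH, os.path.join(BUILD_PATH, 'html')]
print(' '.join(cmd))
# sphinx-build -b html -j 4 -vv -d doc/build/doctrees doc/source doc/build/html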
train
DocBuilder._open_browser
Open a browser tab showing the built html page for the single document.
doc/make.py
def _open_browser(self, single_doc_html): """ Open a browser tab showing the built html page for the single document. """ url = os.path.join('file://', DOC_PATH, 'build', 'html', single_doc_html) webbrowser.open(url, new=2)
def _open_browser(self, single_doc_html): """ Open a browser tab showing the built html page for the single document. """ url = os.path.join('file://', DOC_PATH, 'build', 'html', single_doc_html) webbrowser.open(url, new=2)
[ "Open", "a", "browser", "tab", "showing", "single" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L135-L141
[ "def", "_open_browser", "(", "self", ",", "single_doc_html", ")", ":", "url", "=", "os", ".", "path", ".", "join", "(", "'file://'", ",", "DOC_PATH", ",", "'build'", ",", "'html'", ",", "single_doc_html", ")", "webbrowser", ".", "open", "(", "url", ",", "new", "=", "2", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DocBuilder._get_page_title
Open the rst file `page` and extract its title.
doc/make.py
def _get_page_title(self, page): """ Open the rst file `page` and extract its title. """ fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page)) option_parser = docutils.frontend.OptionParser( components=(docutils.parsers.rst.Parser,)) doc = docutils.utils.new_document( '<doc>', option_parser.get_default_values()) with open(fname) as f: data = f.read() parser = docutils.parsers.rst.Parser() # do not generate any warning when parsing the rst with open(os.devnull, 'a') as f: doc.reporter.stream = f parser.parse(data, doc) section = next(node for node in doc.children if isinstance(node, docutils.nodes.section)) title = next(node for node in section.children if isinstance(node, docutils.nodes.title)) return title.astext()
def _get_page_title(self, page): """ Open the rst file `page` and extract its title. """ fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page)) option_parser = docutils.frontend.OptionParser( components=(docutils.parsers.rst.Parser,)) doc = docutils.utils.new_document( '<doc>', option_parser.get_default_values()) with open(fname) as f: data = f.read() parser = docutils.parsers.rst.Parser() # do not generate any warning when parsing the rst with open(os.devnull, 'a') as f: doc.reporter.stream = f parser.parse(data, doc) section = next(node for node in doc.children if isinstance(node, docutils.nodes.section)) title = next(node for node in section.children if isinstance(node, docutils.nodes.title)) return title.astext()
[ "Open", "the", "rst", "file", "page", "and", "extract", "its", "title", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L143-L167
[ "def", "_get_page_title", "(", "self", ",", "page", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "SOURCE_PATH", ",", "'{}.rst'", ".", "format", "(", "page", ")", ")", "option_parser", "=", "docutils", ".", "frontend", ".", "OptionParser", "(", "components", "=", "(", "docutils", ".", "parsers", ".", "rst", ".", "Parser", ",", ")", ")", "doc", "=", "docutils", ".", "utils", ".", "new_document", "(", "'<doc>'", ",", "option_parser", ".", "get_default_values", "(", ")", ")", "with", "open", "(", "fname", ")", "as", "f", ":", "data", "=", "f", ".", "read", "(", ")", "parser", "=", "docutils", ".", "parsers", ".", "rst", ".", "Parser", "(", ")", "# do not generate any warning when parsing the rst", "with", "open", "(", "os", ".", "devnull", ",", "'a'", ")", "as", "f", ":", "doc", ".", "reporter", ".", "stream", "=", "f", "parser", ".", "parse", "(", "data", ",", "doc", ")", "section", "=", "next", "(", "node", "for", "node", "in", "doc", ".", "children", "if", "isinstance", "(", "node", ",", "docutils", ".", "nodes", ".", "section", ")", ")", "title", "=", "next", "(", "node", "for", "node", "in", "section", ".", "children", "if", "isinstance", "(", "node", ",", "docutils", ".", "nodes", ".", "title", ")", ")", "return", "title", ".", "astext", "(", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
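The same docutils machinery applied to an inline rst string, as a self-contained sketch (the sample text and the '<doc>' source name are made up):

import docutils.frontend
import docutils.nodes
import docutils.parsers.rst
import docutils.utils

settings = docutils.frontend.OptionParser(
    components=(docutils.parsers.rst.Parser,)).get_default_values()
doc = docutils.utils.new_document('<doc>', settings)
docutils.parsers.rst.Parser().parse('Title\n=====\n\nBody text.\n', doc)
section = next(n for n in doc.children
               if isinstance(n, docutils.nodes.section))
print(next(n for n in section.children
           if isinstance(n, docutils.nodes.title)).astext())  # Title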
train
DocBuilder._add_redirects
For every row in REDIRECTS_FILE, create an html file in the build directory with a redirect.
doc/make.py
def _add_redirects(self): """ For every row in REDIRECTS_FILE, create an html file in the build directory with a redirect. """ html = ''' <html> <head> <meta http-equiv="refresh" content="0;URL={url}"/> </head> <body> <p> The page has been moved to <a href="{url}">{title}</a> </p> </body> </html> ''' with open(REDIRECTS_FILE) as mapping_fd: reader = csv.reader(mapping_fd) for row in reader: if not row or row[0].strip().startswith('#'): continue path = os.path.join(BUILD_PATH, 'html', *row[0].split('/')) + '.html' try: title = self._get_page_title(row[1]) except Exception: # the file can be an ipynb and not an rst, or docutils # may not be able to read the rst because it has some # sphinx specific stuff title = 'this page' if os.path.exists(path): raise RuntimeError(( 'Redirection would overwrite an existing file: ' '{}').format(path)) with open(path, 'w') as moved_page_fd: moved_page_fd.write( html.format(url='{}.html'.format(row[1]), title=title))
def _add_redirects(self): """ For every row in REDIRECTS_FILE, create an html file in the build directory with a redirect. """ html = ''' <html> <head> <meta http-equiv="refresh" content="0;URL={url}"/> </head> <body> <p> The page has been moved to <a href="{url}">{title}</a> </p> </body> </html> ''' with open(REDIRECTS_FILE) as mapping_fd: reader = csv.reader(mapping_fd) for row in reader: if not row or row[0].strip().startswith('#'): continue path = os.path.join(BUILD_PATH, 'html', *row[0].split('/')) + '.html' try: title = self._get_page_title(row[1]) except Exception: # the file can be an ipynb and not an rst, or docutils # may not be able to read the rst because it has some # sphinx specific stuff title = 'this page' if os.path.exists(path): raise RuntimeError(( 'Redirection would overwrite an existing file: ' '{}').format(path)) with open(path, 'w') as moved_page_fd: moved_page_fd.write( html.format(url='{}.html'.format(row[1]), title=title))
[ "Create", "in", "the", "build", "directory", "an", "html", "file", "with", "a", "redirect", "for", "every", "row", "in", "REDIRECTS_FILE", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L169-L212
[ "def", "_add_redirects", "(", "self", ")", ":", "html", "=", "'''\n <html>\n <head>\n <meta http-equiv=\"refresh\" content=\"0;URL={url}\"/>\n </head>\n <body>\n <p>\n The page has been moved to <a href=\"{url}\">{title}</a>\n </p>\n </body>\n <html>\n '''", "with", "open", "(", "REDIRECTS_FILE", ")", "as", "mapping_fd", ":", "reader", "=", "csv", ".", "reader", "(", "mapping_fd", ")", "for", "row", "in", "reader", ":", "if", "not", "row", "or", "row", "[", "0", "]", ".", "strip", "(", ")", ".", "startswith", "(", "'#'", ")", ":", "continue", "path", "=", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'html'", ",", "*", "row", "[", "0", "]", ".", "split", "(", "'/'", ")", ")", "+", "'.html'", "try", ":", "title", "=", "self", ".", "_get_page_title", "(", "row", "[", "1", "]", ")", "except", "Exception", ":", "# the file can be an ipynb and not an rst, or docutils", "# may not be able to read the rst because it has some", "# sphinx specific stuff", "title", "=", "'this page'", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "RuntimeError", "(", "(", "'Redirection would overwrite an existing file: '", "'{}'", ")", ".", "format", "(", "path", ")", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "moved_page_fd", ":", "moved_page_fd", ".", "write", "(", "html", ".", "format", "(", "url", "=", "'{}.html'", ".", "format", "(", "row", "[", "1", "]", ")", ",", "title", "=", "title", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DocBuilder.html
Build HTML documentation.
doc/make.py
def html(self): """ Build HTML documentation. """ ret_code = self._sphinx_build('html') zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip') if os.path.exists(zip_fname): os.remove(zip_fname) if self.single_doc_html is not None: self._open_browser(self.single_doc_html) else: self._add_redirects() return ret_code
def html(self): """ Build HTML documentation. """ ret_code = self._sphinx_build('html') zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip') if os.path.exists(zip_fname): os.remove(zip_fname) if self.single_doc_html is not None: self._open_browser(self.single_doc_html) else: self._add_redirects() return ret_code
[ "Build", "HTML", "documentation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L214-L227
[ "def", "html", "(", "self", ")", ":", "ret_code", "=", "self", ".", "_sphinx_build", "(", "'html'", ")", "zip_fname", "=", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'html'", ",", "'pandas.zip'", ")", "if", "os", ".", "path", ".", "exists", "(", "zip_fname", ")", ":", "os", ".", "remove", "(", "zip_fname", ")", "if", "self", ".", "single_doc_html", "is", "not", "None", ":", "self", ".", "_open_browser", "(", "self", ".", "single_doc_html", ")", "else", ":", "self", ".", "_add_redirects", "(", ")", "return", "ret_code" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DocBuilder.latex
Build PDF documentation.
doc/make.py
def latex(self, force=False): """ Build PDF documentation. """ if sys.platform == 'win32': sys.stderr.write('latex build has not been tested on windows\n') else: ret_code = self._sphinx_build('latex') os.chdir(os.path.join(BUILD_PATH, 'latex')) if force: for i in range(3): self._run_os('pdflatex', '-interaction=nonstopmode', 'pandas.tex') raise SystemExit('You should check the file ' '"build/latex/pandas.pdf" for problems.') else: self._run_os('make') return ret_code
def latex(self, force=False): """ Build PDF documentation. """ if sys.platform == 'win32': sys.stderr.write('latex build has not been tested on windows\n') else: ret_code = self._sphinx_build('latex') os.chdir(os.path.join(BUILD_PATH, 'latex')) if force: for i in range(3): self._run_os('pdflatex', '-interaction=nonstopmode', 'pandas.tex') raise SystemExit('You should check the file ' '"build/latex/pandas.pdf" for problems.') else: self._run_os('make') return ret_code
[ "Build", "PDF", "documentation", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L229-L247
[ "def", "latex", "(", "self", ",", "force", "=", "False", ")", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "sys", ".", "stderr", ".", "write", "(", "'latex build has not been tested on windows\\n'", ")", "else", ":", "ret_code", "=", "self", ".", "_sphinx_build", "(", "'latex'", ")", "os", ".", "chdir", "(", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'latex'", ")", ")", "if", "force", ":", "for", "i", "in", "range", "(", "3", ")", ":", "self", ".", "_run_os", "(", "'pdflatex'", ",", "'-interaction=nonstopmode'", ",", "'pandas.tex'", ")", "raise", "SystemExit", "(", "'You should check the file '", "'\"build/latex/pandas.pdf\" for problems.'", ")", "else", ":", "self", ".", "_run_os", "(", "'make'", ")", "return", "ret_code" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DocBuilder.clean
Clean documentation generated files.
doc/make.py
def clean(): """ Clean documentation generated files. """ shutil.rmtree(BUILD_PATH, ignore_errors=True) shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'), ignore_errors=True)
def clean(): """ Clean documentation generated files. """ shutil.rmtree(BUILD_PATH, ignore_errors=True) shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'), ignore_errors=True)
[ "Clean", "documentation", "generated", "files", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L256-L262
[ "def", "clean", "(", ")", ":", "shutil", ".", "rmtree", "(", "BUILD_PATH", ",", "ignore_errors", "=", "True", ")", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "SOURCE_PATH", ",", "'reference'", ",", "'api'", ")", ",", "ignore_errors", "=", "True", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
DocBuilder.zip_html
Compress HTML documentation into a zip file.
doc/make.py
def zip_html(self):
        """
        Compress HTML documentation into a zip file.
        """
        zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
        if os.path.exists(zip_fname):
            os.remove(zip_fname)
        dirname = os.path.join(BUILD_PATH, 'html')
        fnames = os.listdir(dirname)
        os.chdir(dirname)
        self._run_os('zip', zip_fname, '-r', '-q', *fnames)
def zip_html(self):
        """
        Compress HTML documentation into a zip file.
        """
        zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
        if os.path.exists(zip_fname):
            os.remove(zip_fname)
        dirname = os.path.join(BUILD_PATH, 'html')
        fnames = os.listdir(dirname)
        os.chdir(dirname)
        self._run_os('zip', zip_fname, '-r', '-q', *fnames)
[ "Compress", "HTML", "documentation", "into", "a", "zip", "file", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L264-L278
[ "def", "zip_html", "(", "self", ")", ":", "zip_fname", "=", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'html'", ",", "'pandas.zip'", ")", "if", "os", ".", "path", ".", "exists", "(", "zip_fname", ")", ":", "os", ".", "remove", "(", "zip_fname", ")", "dirname", "=", "os", ".", "path", ".", "join", "(", "BUILD_PATH", ",", "'html'", ")", "fnames", "=", "os", ".", "listdir", "(", "dirname", ")", "os", ".", "chdir", "(", "dirname", ")", "self", ".", "_run_os", "(", "'zip'", ",", "zip_fname", ",", "'-r'", ",", "'-q'", ",", "*", "fnames", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
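The zip_html record above shells out to an external `zip` binary; a portable standard-library sketch of the same effect (the BUILD_PATH value is an assumption standing in for the module constant):

    import os
    import shutil

    BUILD_PATH = 'doc/build'  # assumed; plays the role of the module constant

    # Write the archive next to the html tree rather than inside it,
    # so the zip does not try to include itself while being written.
    shutil.make_archive(os.path.join(BUILD_PATH, 'pandas'), 'zip',
                        os.path.join(BUILD_PATH, 'html'))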
train
LatexFormatter.write_result
Render a DataFrame to a LaTeX tabular/longtable environment output.
pandas/io/formats/latex.py
def write_result(self, buf):
        """
        Render a DataFrame to a LaTeX tabular/longtable environment output.
        """

        # string representation of the columns
        if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
            info_line = ('Empty {name}\nColumns: {col}\nIndex: {idx}'
                         .format(name=type(self.frame).__name__,
                                 col=self.frame.columns,
                                 idx=self.frame.index))
            strcols = [[info_line]]
        else:
            strcols = self.fmt._to_str_columns()

        def get_col_type(dtype):
            if issubclass(dtype.type, np.number):
                return 'r'
            else:
                return 'l'

        # reestablish the MultiIndex that has been joined by _to_str_column
        if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
            out = self.frame.index.format(
                adjoin=False, sparsify=self.fmt.sparsify,
                names=self.fmt.has_index_names, na_rep=self.fmt.na_rep
            )

            # index.format will sparsify repeated entries with empty strings
            # so pad these with some empty space
            def pad_empties(x):
                for pad in reversed(x):
                    if pad:
                        break
                return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]]
            out = (pad_empties(i) for i in out)

            # Add empty spaces for each column level
            clevels = self.frame.columns.nlevels
            out = [[' ' * len(i[-1])] * clevels + i for i in out]

            # Add the column names to the last index column
            cnames = self.frame.columns.names
            if any(cnames):
                new_names = [i if i else '{}' for i in cnames]
                out[self.frame.index.nlevels - 1][:clevels] = new_names

            # Get rid of old multiindex column and add new ones
            strcols = out + strcols[1:]

        column_format = self.column_format
        if column_format is None:
            dtypes = self.frame.dtypes._values
            column_format = ''.join(map(get_col_type, dtypes))

            if self.fmt.index:
                index_format = 'l' * self.frame.index.nlevels
                column_format = index_format + column_format
        elif not isinstance(column_format, str):  # pragma: no cover
            raise AssertionError('column_format must be str or unicode, '
                                 'not {typ}'.format(typ=type(column_format)))

        if not self.longtable:
            buf.write('\\begin{{tabular}}{{{fmt}}}\n'
                      .format(fmt=column_format))
            buf.write('\\toprule\n')
        else:
            buf.write('\\begin{{longtable}}{{{fmt}}}\n'
                      .format(fmt=column_format))
            buf.write('\\toprule\n')

        ilevels = self.frame.index.nlevels
        clevels = self.frame.columns.nlevels
        nlevels = clevels
        if self.fmt.has_index_names and self.fmt.show_index_names:
            nlevels += 1
        strrows = list(zip(*strcols))
        self.clinebuf = []

        for i, row in enumerate(strrows):
            if i == nlevels and self.fmt.header:
                buf.write('\\midrule\n')  # End of header
                if self.longtable:
                    buf.write('\\endhead\n')
                    buf.write('\\midrule\n')
                    buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next '
                              'page}}}} \\\\\n'.format(n=len(row)))
                    buf.write('\\midrule\n')
                    buf.write('\\endfoot\n\n')
                    buf.write('\\bottomrule\n')
                    buf.write('\\endlastfoot\n')
            if self.fmt.kwds.get('escape', True):
                # escape backslashes first
                crow = [(x.replace('\\', '\\textbackslash ')
                         .replace('_', '\\_')
                         .replace('%', '\\%').replace('$', '\\$')
                         .replace('#', '\\#').replace('{', '\\{')
                         .replace('}', '\\}').replace('~', '\\textasciitilde ')
                         .replace('^', '\\textasciicircum ')
                         .replace('&', '\\&')
                         if (x and x != '{}') else '{}') for x in row]
            else:
                crow = [x if x else '{}' for x in row]
            if self.bold_rows and self.fmt.index:
                # bold row labels
                crow = ['\\textbf{{{x}}}'.format(x=x)
                        if j < ilevels and x.strip() not in ['', '{}']
                        else x for j, x in enumerate(crow)]
            if i < clevels and self.fmt.header and self.multicolumn:
                # sum up columns to multicolumns
                crow = self._format_multicolumn(crow, ilevels)
            if (i >= nlevels and self.fmt.index and
                    self.multirow and ilevels > 1):
                # sum up rows to multirows
                crow = self._format_multirow(crow, ilevels, i, strrows)
            buf.write(' & '.join(crow))
            buf.write(' \\\\\n')
            if self.multirow and i < len(strrows) - 1:
                self._print_cline(buf, i, len(strcols))

        if not self.longtable:
            buf.write('\\bottomrule\n')
            buf.write('\\end{tabular}\n')
        else:
            buf.write('\\end{longtable}\n')
def write_result(self, buf):
        """
        Render a DataFrame to a LaTeX tabular/longtable environment output.
        """

        # string representation of the columns
        if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
            info_line = ('Empty {name}\nColumns: {col}\nIndex: {idx}'
                         .format(name=type(self.frame).__name__,
                                 col=self.frame.columns,
                                 idx=self.frame.index))
            strcols = [[info_line]]
        else:
            strcols = self.fmt._to_str_columns()

        def get_col_type(dtype):
            if issubclass(dtype.type, np.number):
                return 'r'
            else:
                return 'l'

        # reestablish the MultiIndex that has been joined by _to_str_column
        if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
            out = self.frame.index.format(
                adjoin=False, sparsify=self.fmt.sparsify,
                names=self.fmt.has_index_names, na_rep=self.fmt.na_rep
            )

            # index.format will sparsify repeated entries with empty strings
            # so pad these with some empty space
            def pad_empties(x):
                for pad in reversed(x):
                    if pad:
                        break
                return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]]
            out = (pad_empties(i) for i in out)

            # Add empty spaces for each column level
            clevels = self.frame.columns.nlevels
            out = [[' ' * len(i[-1])] * clevels + i for i in out]

            # Add the column names to the last index column
            cnames = self.frame.columns.names
            if any(cnames):
                new_names = [i if i else '{}' for i in cnames]
                out[self.frame.index.nlevels - 1][:clevels] = new_names

            # Get rid of old multiindex column and add new ones
            strcols = out + strcols[1:]

        column_format = self.column_format
        if column_format is None:
            dtypes = self.frame.dtypes._values
            column_format = ''.join(map(get_col_type, dtypes))

            if self.fmt.index:
                index_format = 'l' * self.frame.index.nlevels
                column_format = index_format + column_format
        elif not isinstance(column_format, str):  # pragma: no cover
            raise AssertionError('column_format must be str or unicode, '
                                 'not {typ}'.format(typ=type(column_format)))

        if not self.longtable:
            buf.write('\\begin{{tabular}}{{{fmt}}}\n'
                      .format(fmt=column_format))
            buf.write('\\toprule\n')
        else:
            buf.write('\\begin{{longtable}}{{{fmt}}}\n'
                      .format(fmt=column_format))
            buf.write('\\toprule\n')

        ilevels = self.frame.index.nlevels
        clevels = self.frame.columns.nlevels
        nlevels = clevels
        if self.fmt.has_index_names and self.fmt.show_index_names:
            nlevels += 1
        strrows = list(zip(*strcols))
        self.clinebuf = []

        for i, row in enumerate(strrows):
            if i == nlevels and self.fmt.header:
                buf.write('\\midrule\n')  # End of header
                if self.longtable:
                    buf.write('\\endhead\n')
                    buf.write('\\midrule\n')
                    buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next '
                              'page}}}} \\\\\n'.format(n=len(row)))
                    buf.write('\\midrule\n')
                    buf.write('\\endfoot\n\n')
                    buf.write('\\bottomrule\n')
                    buf.write('\\endlastfoot\n')
            if self.fmt.kwds.get('escape', True):
                # escape backslashes first
                crow = [(x.replace('\\', '\\textbackslash ')
                         .replace('_', '\\_')
                         .replace('%', '\\%').replace('$', '\\$')
                         .replace('#', '\\#').replace('{', '\\{')
                         .replace('}', '\\}').replace('~', '\\textasciitilde ')
                         .replace('^', '\\textasciicircum ')
                         .replace('&', '\\&')
                         if (x and x != '{}') else '{}') for x in row]
            else:
                crow = [x if x else '{}' for x in row]
            if self.bold_rows and self.fmt.index:
                # bold row labels
                crow = ['\\textbf{{{x}}}'.format(x=x)
                        if j < ilevels and x.strip() not in ['', '{}']
                        else x for j, x in enumerate(crow)]
            if i < clevels and self.fmt.header and self.multicolumn:
                # sum up columns to multicolumns
                crow = self._format_multicolumn(crow, ilevels)
            if (i >= nlevels and self.fmt.index and
                    self.multirow and ilevels > 1):
                # sum up rows to multirows
                crow = self._format_multirow(crow, ilevels, i, strrows)
            buf.write(' & '.join(crow))
            buf.write(' \\\\\n')
            if self.multirow and i < len(strrows) - 1:
                self._print_cline(buf, i, len(strcols))

        if not self.longtable:
            buf.write('\\bottomrule\n')
            buf.write('\\end{tabular}\n')
        else:
            buf.write('\\end{longtable}\n')
[ "Render", "a", "DataFrame", "to", "a", "LaTeX", "tabular", "/", "longtable", "environment", "output", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L40-L163
[ "def", "write_result", "(", "self", ",", "buf", ")", ":", "# string representation of the columns", "if", "len", "(", "self", ".", "frame", ".", "columns", ")", "==", "0", "or", "len", "(", "self", ".", "frame", ".", "index", ")", "==", "0", ":", "info_line", "=", "(", "'Empty {name}\\nColumns: {col}\\nIndex: {idx}'", ".", "format", "(", "name", "=", "type", "(", "self", ".", "frame", ")", ".", "__name__", ",", "col", "=", "self", ".", "frame", ".", "columns", ",", "idx", "=", "self", ".", "frame", ".", "index", ")", ")", "strcols", "=", "[", "[", "info_line", "]", "]", "else", ":", "strcols", "=", "self", ".", "fmt", ".", "_to_str_columns", "(", ")", "def", "get_col_type", "(", "dtype", ")", ":", "if", "issubclass", "(", "dtype", ".", "type", ",", "np", ".", "number", ")", ":", "return", "'r'", "else", ":", "return", "'l'", "# reestablish the MultiIndex that has been joined by _to_str_column", "if", "self", ".", "fmt", ".", "index", "and", "isinstance", "(", "self", ".", "frame", ".", "index", ",", "ABCMultiIndex", ")", ":", "out", "=", "self", ".", "frame", ".", "index", ".", "format", "(", "adjoin", "=", "False", ",", "sparsify", "=", "self", ".", "fmt", ".", "sparsify", ",", "names", "=", "self", ".", "fmt", ".", "has_index_names", ",", "na_rep", "=", "self", ".", "fmt", ".", "na_rep", ")", "# index.format will sparsify repeated entries with empty strings", "# so pad these with some empty space", "def", "pad_empties", "(", "x", ")", ":", "for", "pad", "in", "reversed", "(", "x", ")", ":", "if", "pad", ":", "break", "return", "[", "x", "[", "0", "]", "]", "+", "[", "i", "if", "i", "else", "' '", "*", "len", "(", "pad", ")", "for", "i", "in", "x", "[", "1", ":", "]", "]", "out", "=", "(", "pad_empties", "(", "i", ")", "for", "i", "in", "out", ")", "# Add empty spaces for each column level", "clevels", "=", "self", ".", "frame", ".", "columns", ".", "nlevels", "out", "=", "[", "[", "' '", "*", "len", "(", "i", "[", "-", "1", "]", ")", "]", "*", "clevels", "+", "i", "for", "i", "in", "out", "]", "# Add the column names to the last index column", "cnames", "=", "self", ".", "frame", ".", "columns", ".", "names", "if", "any", "(", "cnames", ")", ":", "new_names", "=", "[", "i", "if", "i", "else", "'{}'", "for", "i", "in", "cnames", "]", "out", "[", "self", ".", "frame", ".", "index", ".", "nlevels", "-", "1", "]", "[", ":", "clevels", "]", "=", "new_names", "# Get rid of old multiindex column and add new ones", "strcols", "=", "out", "+", "strcols", "[", "1", ":", "]", "column_format", "=", "self", ".", "column_format", "if", "column_format", "is", "None", ":", "dtypes", "=", "self", ".", "frame", ".", "dtypes", ".", "_values", "column_format", "=", "''", ".", "join", "(", "map", "(", "get_col_type", ",", "dtypes", ")", ")", "if", "self", ".", "fmt", ".", "index", ":", "index_format", "=", "'l'", "*", "self", ".", "frame", ".", "index", ".", "nlevels", "column_format", "=", "index_format", "+", "column_format", "elif", "not", "isinstance", "(", "column_format", ",", "str", ")", ":", "# pragma: no cover", "raise", "AssertionError", "(", "'column_format must be str or unicode, '", "'not {typ}'", ".", "format", "(", "typ", "=", "type", "(", "column_format", ")", ")", ")", "if", "not", "self", ".", "longtable", ":", "buf", ".", "write", "(", "'\\\\begin{{tabular}}{{{fmt}}}\\n'", ".", "format", "(", "fmt", "=", "column_format", ")", ")", "buf", ".", "write", "(", "'\\\\toprule\\n'", ")", "else", ":", "buf", ".", "write", "(", "'\\\\begin{{longtable}}{{{fmt}}}\\n'", ".", "format", "(", "fmt", 
"=", "column_format", ")", ")", "buf", ".", "write", "(", "'\\\\toprule\\n'", ")", "ilevels", "=", "self", ".", "frame", ".", "index", ".", "nlevels", "clevels", "=", "self", ".", "frame", ".", "columns", ".", "nlevels", "nlevels", "=", "clevels", "if", "self", ".", "fmt", ".", "has_index_names", "and", "self", ".", "fmt", ".", "show_index_names", ":", "nlevels", "+=", "1", "strrows", "=", "list", "(", "zip", "(", "*", "strcols", ")", ")", "self", ".", "clinebuf", "=", "[", "]", "for", "i", ",", "row", "in", "enumerate", "(", "strrows", ")", ":", "if", "i", "==", "nlevels", "and", "self", ".", "fmt", ".", "header", ":", "buf", ".", "write", "(", "'\\\\midrule\\n'", ")", "# End of header", "if", "self", ".", "longtable", ":", "buf", ".", "write", "(", "'\\\\endhead\\n'", ")", "buf", ".", "write", "(", "'\\\\midrule\\n'", ")", "buf", ".", "write", "(", "'\\\\multicolumn{{{n}}}{{r}}{{{{Continued on next '", "'page}}}} \\\\\\\\\\n'", ".", "format", "(", "n", "=", "len", "(", "row", ")", ")", ")", "buf", ".", "write", "(", "'\\\\midrule\\n'", ")", "buf", ".", "write", "(", "'\\\\endfoot\\n\\n'", ")", "buf", ".", "write", "(", "'\\\\bottomrule\\n'", ")", "buf", ".", "write", "(", "'\\\\endlastfoot\\n'", ")", "if", "self", ".", "fmt", ".", "kwds", ".", "get", "(", "'escape'", ",", "True", ")", ":", "# escape backslashes first", "crow", "=", "[", "(", "x", ".", "replace", "(", "'\\\\'", ",", "'\\\\textbackslash '", ")", ".", "replace", "(", "'_'", ",", "'\\\\_'", ")", ".", "replace", "(", "'%'", ",", "'\\\\%'", ")", ".", "replace", "(", "'$'", ",", "'\\\\$'", ")", ".", "replace", "(", "'#'", ",", "'\\\\#'", ")", ".", "replace", "(", "'{'", ",", "'\\\\{'", ")", ".", "replace", "(", "'}'", ",", "'\\\\}'", ")", ".", "replace", "(", "'~'", ",", "'\\\\textasciitilde '", ")", ".", "replace", "(", "'^'", ",", "'\\\\textasciicircum '", ")", ".", "replace", "(", "'&'", ",", "'\\\\&'", ")", "if", "(", "x", "and", "x", "!=", "'{}'", ")", "else", "'{}'", ")", "for", "x", "in", "row", "]", "else", ":", "crow", "=", "[", "x", "if", "x", "else", "'{}'", "for", "x", "in", "row", "]", "if", "self", ".", "bold_rows", "and", "self", ".", "fmt", ".", "index", ":", "# bold row labels", "crow", "=", "[", "'\\\\textbf{{{x}}}'", ".", "format", "(", "x", "=", "x", ")", "if", "j", "<", "ilevels", "and", "x", ".", "strip", "(", ")", "not", "in", "[", "''", ",", "'{}'", "]", "else", "x", "for", "j", ",", "x", "in", "enumerate", "(", "crow", ")", "]", "if", "i", "<", "clevels", "and", "self", ".", "fmt", ".", "header", "and", "self", ".", "multicolumn", ":", "# sum up columns to multicolumns", "crow", "=", "self", ".", "_format_multicolumn", "(", "crow", ",", "ilevels", ")", "if", "(", "i", ">=", "nlevels", "and", "self", ".", "fmt", ".", "index", "and", "self", ".", "multirow", "and", "ilevels", ">", "1", ")", ":", "# sum up rows to multirows", "crow", "=", "self", ".", "_format_multirow", "(", "crow", ",", "ilevels", ",", "i", ",", "strrows", ")", "buf", ".", "write", "(", "' & '", ".", "join", "(", "crow", ")", ")", "buf", ".", "write", "(", "' \\\\\\\\\\n'", ")", "if", "self", ".", "multirow", "and", "i", "<", "len", "(", "strrows", ")", "-", "1", ":", "self", ".", "_print_cline", "(", "buf", ",", "i", ",", "len", "(", "strcols", ")", ")", "if", "not", "self", ".", "longtable", ":", "buf", ".", "write", "(", "'\\\\bottomrule\\n'", ")", "buf", ".", "write", "(", "'\\\\end{tabular}\\n'", ")", "else", ":", "buf", ".", "write", "(", "'\\\\end{longtable}\\n'", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
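write_result is the engine behind DataFrame.to_latex; a minimal sketch exercising both branches above (output comments abbreviated, not verbatim):

    import pandas as pd

    df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
    print(df.to_latex())                # tabular branch: \begin{tabular}{lrl} ...
    print(df.to_latex(longtable=True))  # longtable branch with \endhead/\endfoot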
train
LatexFormatter._format_multicolumn
r""" Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format

e.g.:
a &  &  & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
pandas/io/formats/latex.py
def _format_multicolumn(self, row, ilevels):
        r"""
        Combine columns belonging to a group to a single multicolumn entry
        according to self.multicolumn_format

        e.g.:
        a &  &  & b & c &
        will become
        \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
        """
        row2 = list(row[:ilevels])
        ncol = 1
        coltext = ''

        def append_col():
            # write multicolumn if needed
            if ncol > 1:
                row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
                            .format(ncol=ncol, fmt=self.multicolumn_format,
                                    txt=coltext.strip()))
            # don't modify where not needed
            else:
                row2.append(coltext)
        for c in row[ilevels:]:
            # if next col has text, write the previous
            if c.strip():
                if coltext:
                    append_col()
                coltext = c
                ncol = 1
            # if not, add it to the previous multicolumn
            else:
                ncol += 1
        # write last column name
        if coltext:
            append_col()
        return row2
def _format_multicolumn(self, row, ilevels):
        r"""
        Combine columns belonging to a group to a single multicolumn entry
        according to self.multicolumn_format

        e.g.:
        a &  &  & b & c &
        will become
        \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
        """
        row2 = list(row[:ilevels])
        ncol = 1
        coltext = ''

        def append_col():
            # write multicolumn if needed
            if ncol > 1:
                row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
                            .format(ncol=ncol, fmt=self.multicolumn_format,
                                    txt=coltext.strip()))
            # don't modify where not needed
            else:
                row2.append(coltext)
        for c in row[ilevels:]:
            # if next col has text, write the previous
            if c.strip():
                if coltext:
                    append_col()
                coltext = c
                ncol = 1
            # if not, add it to the previous multicolumn
            else:
                ncol += 1
        # write last column name
        if coltext:
            append_col()
        return row2
[ "r", "Combine", "columns", "belonging", "to", "a", "group", "to", "a", "single", "multicolumn", "entry", "according", "to", "self", ".", "multicolumn_format" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L165-L201
[ "def", "_format_multicolumn", "(", "self", ",", "row", ",", "ilevels", ")", ":", "row2", "=", "list", "(", "row", "[", ":", "ilevels", "]", ")", "ncol", "=", "1", "coltext", "=", "''", "def", "append_col", "(", ")", ":", "# write multicolumn if needed", "if", "ncol", ">", "1", ":", "row2", ".", "append", "(", "'\\\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'", ".", "format", "(", "ncol", "=", "ncol", ",", "fmt", "=", "self", ".", "multicolumn_format", ",", "txt", "=", "coltext", ".", "strip", "(", ")", ")", ")", "# don't modify where not needed", "else", ":", "row2", ".", "append", "(", "coltext", ")", "for", "c", "in", "row", "[", "ilevels", ":", "]", ":", "# if next col has text, write the previous", "if", "c", ".", "strip", "(", ")", ":", "if", "coltext", ":", "append_col", "(", ")", "coltext", "=", "c", "ncol", "=", "1", "# if not, add it to the previous multicolumn", "else", ":", "ncol", "+=", "1", "# write last column name", "if", "coltext", ":", "append_col", "(", ")", "return", "row2" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
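A sketch that reaches the multicolumn path above: repeated top-level labels in MultiIndex columns are merged into \multicolumn cells (multicolumn and multicolumn_format are to_latex keywords at this revision):

    import pandas as pd

    cols = pd.MultiIndex.from_product([['a', 'c'], [1, 2]])
    df = pd.DataFrame([[0, 1, 2, 3]], columns=cols)
    print(df.to_latex(multicolumn=True, multicolumn_format='c'))
    # header contains \multicolumn{2}{c}{a} & \multicolumn{2}{c}{c}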
train
LatexFormatter._format_multirow
r""" Check following rows, whether row should be a multirow

e.g.:     becomes:
a & 0 &   \multirow{2}{*}{a} & 0 &
  & 1 &     & 1 &
b & 0 &   \cline{1-2}
          b & 0 &
pandas/io/formats/latex.py
def _format_multirow(self, row, ilevels, i, rows):
        r"""
        Check following rows, whether row should be a multirow

        e.g.:     becomes:
        a & 0 &   \multirow{2}{*}{a} & 0 &
          & 1 &     & 1 &
        b & 0 &   \cline{1-2}
                  b & 0 &
        """
        for j in range(ilevels):
            if row[j].strip():
                nrow = 1
                for r in rows[i + 1:]:
                    if not r[j].strip():
                        nrow += 1
                    else:
                        break
                if nrow > 1:
                    # overwrite non-multirow entry
                    row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
                        nrow=nrow, row=row[j].strip())
                    # save when to end the current block with \cline
                    self.clinebuf.append([i + nrow - 1, j + 1])
        return row
def _format_multirow(self, row, ilevels, i, rows):
        r"""
        Check following rows, whether row should be a multirow

        e.g.:     becomes:
        a & 0 &   \multirow{2}{*}{a} & 0 &
          & 1 &     & 1 &
        b & 0 &   \cline{1-2}
                  b & 0 &
        """
        for j in range(ilevels):
            if row[j].strip():
                nrow = 1
                for r in rows[i + 1:]:
                    if not r[j].strip():
                        nrow += 1
                    else:
                        break
                if nrow > 1:
                    # overwrite non-multirow entry
                    row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
                        nrow=nrow, row=row[j].strip())
                    # save when to end the current block with \cline
                    self.clinebuf.append([i + nrow - 1, j + 1])
        return row
[ "r", "Check", "following", "rows", "whether", "row", "should", "be", "a", "multirow" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L203-L227
[ "def", "_format_multirow", "(", "self", ",", "row", ",", "ilevels", ",", "i", ",", "rows", ")", ":", "for", "j", "in", "range", "(", "ilevels", ")", ":", "if", "row", "[", "j", "]", ".", "strip", "(", ")", ":", "nrow", "=", "1", "for", "r", "in", "rows", "[", "i", "+", "1", ":", "]", ":", "if", "not", "r", "[", "j", "]", ".", "strip", "(", ")", ":", "nrow", "+=", "1", "else", ":", "break", "if", "nrow", ">", "1", ":", "# overwrite non-multirow entry", "row", "[", "j", "]", "=", "'\\\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'", ".", "format", "(", "nrow", "=", "nrow", ",", "row", "=", "row", "[", "j", "]", ".", "strip", "(", ")", ")", "# save when to end the current block with \\cline", "self", ".", "clinebuf", ".", "append", "(", "[", "i", "+", "nrow", "-", "1", ",", "j", "+", "1", "]", ")", "return", "row" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
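Likewise for the multirow path, which needs a sparsified MultiIndex on the rows:

    import pandas as pd

    idx = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [0, 1, 0]])
    df = pd.DataFrame({'x': [1, 2, 3]}, index=idx)
    print(df.to_latex(multirow=True))
    # 'a' renders as \multirow{2}{*}{a}; a \cline{1-2} closes the block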
train
LatexFormatter._print_cline
Print clines after multirow-blocks are finished
pandas/io/formats/latex.py
def _print_cline(self, buf, i, icol):
        """
        Print clines after multirow-blocks are finished
        """
        for cl in self.clinebuf:
            if cl[0] == i:
                buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
                          .format(cl=cl[1], icol=icol))
        # remove entries that have been written to buffer
        self.clinebuf = [x for x in self.clinebuf if x[0] != i]
def _print_cline(self, buf, i, icol):
        """
        Print clines after multirow-blocks are finished
        """
        for cl in self.clinebuf:
            if cl[0] == i:
                buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
                          .format(cl=cl[1], icol=icol))
        # remove entries that have been written to buffer
        self.clinebuf = [x for x in self.clinebuf if x[0] != i]
[ "Print", "clines", "after", "multirow", "-", "blocks", "are", "finished" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/formats/latex.py#L229-L238
[ "def", "_print_cline", "(", "self", ",", "buf", ",", "i", ",", "icol", ")", ":", "for", "cl", "in", "self", ".", "clinebuf", ":", "if", "cl", "[", "0", "]", "==", "i", ":", "buf", ".", "write", "(", "'\\\\cline{{{cl:d}-{icol:d}}}\\n'", ".", "format", "(", "cl", "=", "cl", "[", "1", "]", ",", "icol", "=", "icol", ")", ")", "# remove entries that have been written to buffer", "self", ".", "clinebuf", "=", "[", "x", "for", "x", "in", "self", ".", "clinebuf", "if", "x", "[", "0", "]", "!=", "i", "]" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_validate_integer
Checks whether the 'name' parameter for parsing is either
an integer OR float that can SAFELY be cast to an integer
without losing accuracy. Raises a ValueError if that is
not the case.

Parameters
----------
name : string
    Parameter name (used for error reporting)
val : int or float
    The value to check
min_val : int
    Minimum allowed value (val < min_val will result in a ValueError)
pandas/io/parsers.py
def _validate_integer(name, val, min_val=0):
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : string
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)
    """
    msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
                                                               min_val=min_val)

    if val is not None:
        if is_float(val):
            if int(val) != val:
                raise ValueError(msg)
            val = int(val)
        elif not (is_integer(val) and val >= min_val):
            raise ValueError(msg)

    return val
def _validate_integer(name, val, min_val=0):
    """
    Checks whether the 'name' parameter for parsing is either
    an integer OR float that can SAFELY be cast to an integer
    without losing accuracy. Raises a ValueError if that is
    not the case.

    Parameters
    ----------
    name : string
        Parameter name (used for error reporting)
    val : int or float
        The value to check
    min_val : int
        Minimum allowed value (val < min_val will result in a ValueError)
    """
    msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name,
                                                               min_val=min_val)

    if val is not None:
        if is_float(val):
            if int(val) != val:
                raise ValueError(msg)
            val = int(val)
        elif not (is_integer(val) and val >= min_val):
            raise ValueError(msg)

    return val
[ "Checks", "whether", "the", "name", "parameter", "for", "parsing", "is", "either", "an", "integer", "OR", "float", "that", "can", "SAFELY", "be", "cast", "to", "an", "integer", "without", "losing", "accuracy", ".", "Raises", "a", "ValueError", "if", "that", "is", "not", "the", "case", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L349-L376
[ "def", "_validate_integer", "(", "name", ",", "val", ",", "min_val", "=", "0", ")", ":", "msg", "=", "\"'{name:s}' must be an integer >={min_val:d}\"", ".", "format", "(", "name", "=", "name", ",", "min_val", "=", "min_val", ")", "if", "val", "is", "not", "None", ":", "if", "is_float", "(", "val", ")", ":", "if", "int", "(", "val", ")", "!=", "val", ":", "raise", "ValueError", "(", "msg", ")", "val", "=", "int", "(", "val", ")", "elif", "not", "(", "is_integer", "(", "val", ")", "and", "val", ">=", "min_val", ")", ":", "raise", "ValueError", "(", "msg", ")", "return", "val" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
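Behavior sketch for the validator above (a private helper, so the import path may shift between pandas versions):

    from pandas.io.parsers import _validate_integer

    _validate_integer('nrows', 3)    # -> 3
    _validate_integer('nrows', 3.0)  # lossless float, cast to -> 3
    try:
        _validate_integer('nrows', 3.5)
    except ValueError as err:
        print(err)  # 'nrows' must be an integer >=0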
train
_validate_names
Check if the `names` parameter contains duplicates.

If duplicates are found, we issue a warning before returning.

Parameters
----------
names : array-like or None
    An array containing a list of the names used for the output DataFrame.

Returns
-------
names : array-like or None
    The original `names` parameter.
pandas/io/parsers.py
def _validate_names(names):
    """
    Check if the `names` parameter contains duplicates.

    If duplicates are found, we issue a warning before returning.

    Parameters
    ----------
    names : array-like or None
        An array containing a list of the names used for the output DataFrame.

    Returns
    -------
    names : array-like or None
        The original `names` parameter.
    """

    if names is not None:
        if len(names) != len(set(names)):
            msg = ("Duplicate names specified. This "
                   "will raise an error in the future.")
            warnings.warn(msg, UserWarning, stacklevel=3)

    return names
def _validate_names(names):
    """
    Check if the `names` parameter contains duplicates.

    If duplicates are found, we issue a warning before returning.

    Parameters
    ----------
    names : array-like or None
        An array containing a list of the names used for the output DataFrame.

    Returns
    -------
    names : array-like or None
        The original `names` parameter.
    """

    if names is not None:
        if len(names) != len(set(names)):
            msg = ("Duplicate names specified. This "
                   "will raise an error in the future.")
            warnings.warn(msg, UserWarning, stacklevel=3)

    return names
[ "Check", "if", "the", "names", "parameter", "contains", "duplicates", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L379-L402
[ "def", "_validate_names", "(", "names", ")", ":", "if", "names", "is", "not", "None", ":", "if", "len", "(", "names", ")", "!=", "len", "(", "set", "(", "names", ")", ")", ":", "msg", "=", "(", "\"Duplicate names specified. This \"", "\"will raise an error in the future.\"", ")", "warnings", ".", "warn", "(", "msg", ",", "UserWarning", ",", "stacklevel", "=", "3", ")", "return", "names" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
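At this revision duplicate names only warn (later pandas versions raise instead); a sketch of triggering the check through read_csv:

    import warnings
    from io import StringIO
    import pandas as pd

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        pd.read_csv(StringIO('1,2\n3,4'), names=['a', 'a'])
    print(caught[-1].message)  # Duplicate names specified. ...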
train
_read
Generic reader of line files.
pandas/io/parsers.py
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
    """Generic reader of line files."""
    encoding = kwds.get('encoding', None)
    if encoding is not None:
        encoding = re.sub('_', '-', encoding).lower()
        kwds['encoding'] = encoding

    compression = kwds.get('compression', 'infer')
    compression = _infer_compression(filepath_or_buffer, compression)

    # TODO: get_filepath_or_buffer could return
    # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
    # though mypy handling of conditional imports is difficult.
    # See https://github.com/python/mypy/issues/1297
    fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
        filepath_or_buffer, encoding, compression)
    kwds['compression'] = compression

    if kwds.get('date_parser', None) is not None:
        if isinstance(kwds['parse_dates'], bool):
            kwds['parse_dates'] = True

    # Extract some of the arguments (pass chunksize on).
    iterator = kwds.get('iterator', False)
    chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
    nrows = kwds.get('nrows', None)

    # Check for duplicates in names.
    _validate_names(kwds.get("names", None))

    # Create the parser.
    parser = TextFileReader(fp_or_buf, **kwds)

    if chunksize or iterator:
        return parser

    try:
        data = parser.read(nrows)
    finally:
        parser.close()

    if should_close:
        try:
            fp_or_buf.close()
        except ValueError:
            pass

    return data
def _read(filepath_or_buffer: FilePathOrBuffer, kwds):
    """Generic reader of line files."""
    encoding = kwds.get('encoding', None)
    if encoding is not None:
        encoding = re.sub('_', '-', encoding).lower()
        kwds['encoding'] = encoding

    compression = kwds.get('compression', 'infer')
    compression = _infer_compression(filepath_or_buffer, compression)

    # TODO: get_filepath_or_buffer could return
    # Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]
    # though mypy handling of conditional imports is difficult.
    # See https://github.com/python/mypy/issues/1297
    fp_or_buf, _, compression, should_close = get_filepath_or_buffer(
        filepath_or_buffer, encoding, compression)
    kwds['compression'] = compression

    if kwds.get('date_parser', None) is not None:
        if isinstance(kwds['parse_dates'], bool):
            kwds['parse_dates'] = True

    # Extract some of the arguments (pass chunksize on).
    iterator = kwds.get('iterator', False)
    chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)
    nrows = kwds.get('nrows', None)

    # Check for duplicates in names.
    _validate_names(kwds.get("names", None))

    # Create the parser.
    parser = TextFileReader(fp_or_buf, **kwds)

    if chunksize or iterator:
        return parser

    try:
        data = parser.read(nrows)
    finally:
        parser.close()

    if should_close:
        try:
            fp_or_buf.close()
        except ValueError:
            pass

    return data
[ "Generic", "reader", "of", "line", "files", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L405-L452
[ "def", "_read", "(", "filepath_or_buffer", ":", "FilePathOrBuffer", ",", "kwds", ")", ":", "encoding", "=", "kwds", ".", "get", "(", "'encoding'", ",", "None", ")", "if", "encoding", "is", "not", "None", ":", "encoding", "=", "re", ".", "sub", "(", "'_'", ",", "'-'", ",", "encoding", ")", ".", "lower", "(", ")", "kwds", "[", "'encoding'", "]", "=", "encoding", "compression", "=", "kwds", ".", "get", "(", "'compression'", ",", "'infer'", ")", "compression", "=", "_infer_compression", "(", "filepath_or_buffer", ",", "compression", ")", "# TODO: get_filepath_or_buffer could return", "# Union[FilePathOrBuffer, s3fs.S3File, gcsfs.GCSFile]", "# though mypy handling of conditional imports is difficult.", "# See https://github.com/python/mypy/issues/1297", "fp_or_buf", ",", "_", ",", "compression", ",", "should_close", "=", "get_filepath_or_buffer", "(", "filepath_or_buffer", ",", "encoding", ",", "compression", ")", "kwds", "[", "'compression'", "]", "=", "compression", "if", "kwds", ".", "get", "(", "'date_parser'", ",", "None", ")", "is", "not", "None", ":", "if", "isinstance", "(", "kwds", "[", "'parse_dates'", "]", ",", "bool", ")", ":", "kwds", "[", "'parse_dates'", "]", "=", "True", "# Extract some of the arguments (pass chunksize on).", "iterator", "=", "kwds", ".", "get", "(", "'iterator'", ",", "False", ")", "chunksize", "=", "_validate_integer", "(", "'chunksize'", ",", "kwds", ".", "get", "(", "'chunksize'", ",", "None", ")", ",", "1", ")", "nrows", "=", "kwds", ".", "get", "(", "'nrows'", ",", "None", ")", "# Check for duplicates in names.", "_validate_names", "(", "kwds", ".", "get", "(", "\"names\"", ",", "None", ")", ")", "# Create the parser.", "parser", "=", "TextFileReader", "(", "fp_or_buf", ",", "*", "*", "kwds", ")", "if", "chunksize", "or", "iterator", ":", "return", "parser", "try", ":", "data", "=", "parser", ".", "read", "(", "nrows", ")", "finally", ":", "parser", ".", "close", "(", ")", "if", "should_close", ":", "try", ":", "fp_or_buf", ".", "close", "(", ")", "except", "ValueError", ":", "pass", "return", "data" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
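The chunksize/iterator branch above is why read_csv can hand back a TextFileReader instead of a DataFrame:

    from io import StringIO
    import pandas as pd

    reader = pd.read_csv(StringIO('a,b\n1,2\n3,4\n5,6'), chunksize=2)
    for chunk in reader:       # each chunk is a DataFrame of <= 2 rows
        print(chunk.shape)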
train
read_fwf
r""" Read a table of fixed-width formatted lines into DataFrame.

Also supports optionally iterating or breaking of the file
into chunks.

Additional help can be found in the `online docs for IO Tools
<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.

Parameters
----------
filepath_or_buffer : str, path object, or file-like object
    Any valid string path is acceptable. The string could be a URL. Valid
    URL schemes include http, ftp, s3, and file. For file URLs, a host is
    expected. A local file could be: file://localhost/path/to/table.csv.

    If you want to pass in a path object, pandas accepts either
    ``pathlib.Path`` or ``py._path.local.LocalPath``.

    By file-like object, we refer to objects with a ``read()`` method,
    such as a file handler (e.g. via builtin ``open`` function)
    or ``StringIO``.
colspecs : list of tuple (int, int) or 'infer'. optional
    A list of tuples giving the extents of the fixed-width
    fields of each line as half-open intervals (i.e.,  [from, to[ ).
    String value 'infer' can be used to instruct the parser to try
    detecting the column specifications from the first 100 rows of
    the data which are not being skipped via skiprows (default='infer').
widths : list of int, optional
    A list of field widths which can be used instead of 'colspecs' if
    the intervals are contiguous.
infer_nrows : int, default 100
    The number of rows to consider when letting the parser determine the
    `colspecs`.

    .. versionadded:: 0.24.0
**kwds : optional
    Optional keyword arguments can be passed to ``TextFileReader``.

Returns
-------
DataFrame or TextParser
    A comma-separated values (csv) file is returned as two-dimensional
    data structure with labeled axes.

See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.

Examples
--------
>>> pd.read_fwf('data.csv')  # doctest: +SKIP
pandas/io/parsers.py
def read_fwf(filepath_or_buffer: FilePathOrBuffer,
             colspecs='infer',
             widths=None,
             infer_nrows=100,
             **kwds):
    r"""
    Read a table of fixed-width formatted lines into DataFrame.

    Also supports optionally iterating or breaking of the file
    into chunks.

    Additional help can be found in the `online docs for IO Tools
    <http://pandas.pydata.org/pandas-docs/stable/io.html>`_.

    Parameters
    ----------
    filepath_or_buffer : str, path object, or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be: file://localhost/path/to/table.csv.

        If you want to pass in a path object, pandas accepts either
        ``pathlib.Path`` or ``py._path.local.LocalPath``.

        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handler (e.g. via builtin ``open`` function)
        or ``StringIO``.
    colspecs : list of tuple (int, int) or 'infer'. optional
        A list of tuples giving the extents of the fixed-width
        fields of each line as half-open intervals (i.e.,  [from, to[ ).
        String value 'infer' can be used to instruct the parser to try
        detecting the column specifications from the first 100 rows of
        the data which are not being skipped via skiprows (default='infer').
    widths : list of int, optional
        A list of field widths which can be used instead of 'colspecs' if
        the intervals are contiguous.
    infer_nrows : int, default 100
        The number of rows to consider when letting the parser determine the
        `colspecs`.

        .. versionadded:: 0.24.0
    **kwds : optional
        Optional keyword arguments can be passed to ``TextFileReader``.

    Returns
    -------
    DataFrame or TextParser
        A comma-separated values (csv) file is returned as two-dimensional
        data structure with labeled axes.

    See Also
    --------
    to_csv : Write DataFrame to a comma-separated values (csv) file.
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Examples
    --------
    >>> pd.read_fwf('data.csv')  # doctest: +SKIP
    """

    # Check input arguments.
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    elif colspecs not in (None, 'infer') and widths is not None:
        raise ValueError("You must specify only one of 'widths' and "
                         "'colspecs'")

    # Compute 'colspecs' from 'widths', if specified.
    if widths is not None:
        colspecs, col = [], 0
        for w in widths:
            colspecs.append((col, col + w))
            col += w

    kwds['colspecs'] = colspecs
    kwds['infer_nrows'] = infer_nrows
    kwds['engine'] = 'python-fwf'
    return _read(filepath_or_buffer, kwds)
def read_fwf(filepath_or_buffer: FilePathOrBuffer,
             colspecs='infer',
             widths=None,
             infer_nrows=100,
             **kwds):
    r"""
    Read a table of fixed-width formatted lines into DataFrame.

    Also supports optionally iterating or breaking of the file
    into chunks.

    Additional help can be found in the `online docs for IO Tools
    <http://pandas.pydata.org/pandas-docs/stable/io.html>`_.

    Parameters
    ----------
    filepath_or_buffer : str, path object, or file-like object
        Any valid string path is acceptable. The string could be a URL. Valid
        URL schemes include http, ftp, s3, and file. For file URLs, a host is
        expected. A local file could be: file://localhost/path/to/table.csv.

        If you want to pass in a path object, pandas accepts either
        ``pathlib.Path`` or ``py._path.local.LocalPath``.

        By file-like object, we refer to objects with a ``read()`` method,
        such as a file handler (e.g. via builtin ``open`` function)
        or ``StringIO``.
    colspecs : list of tuple (int, int) or 'infer'. optional
        A list of tuples giving the extents of the fixed-width
        fields of each line as half-open intervals (i.e.,  [from, to[ ).
        String value 'infer' can be used to instruct the parser to try
        detecting the column specifications from the first 100 rows of
        the data which are not being skipped via skiprows (default='infer').
    widths : list of int, optional
        A list of field widths which can be used instead of 'colspecs' if
        the intervals are contiguous.
    infer_nrows : int, default 100
        The number of rows to consider when letting the parser determine the
        `colspecs`.

        .. versionadded:: 0.24.0
    **kwds : optional
        Optional keyword arguments can be passed to ``TextFileReader``.

    Returns
    -------
    DataFrame or TextParser
        A comma-separated values (csv) file is returned as two-dimensional
        data structure with labeled axes.

    See Also
    --------
    to_csv : Write DataFrame to a comma-separated values (csv) file.
    read_csv : Read a comma-separated values (csv) file into DataFrame.

    Examples
    --------
    >>> pd.read_fwf('data.csv')  # doctest: +SKIP
    """

    # Check input arguments.
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    elif colspecs not in (None, 'infer') and widths is not None:
        raise ValueError("You must specify only one of 'widths' and "
                         "'colspecs'")

    # Compute 'colspecs' from 'widths', if specified.
    if widths is not None:
        colspecs, col = [], 0
        for w in widths:
            colspecs.append((col, col + w))
            col += w

    kwds['colspecs'] = colspecs
    kwds['infer_nrows'] = infer_nrows
    kwds['engine'] = 'python-fwf'
    return _read(filepath_or_buffer, kwds)
[ "r", "Read", "a", "table", "of", "fixed", "-", "width", "formatted", "lines", "into", "DataFrame", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L735-L813
[ "def", "read_fwf", "(", "filepath_or_buffer", ":", "FilePathOrBuffer", ",", "colspecs", "=", "'infer'", ",", "widths", "=", "None", ",", "infer_nrows", "=", "100", ",", "*", "*", "kwds", ")", ":", "# Check input arguments.", "if", "colspecs", "is", "None", "and", "widths", "is", "None", ":", "raise", "ValueError", "(", "\"Must specify either colspecs or widths\"", ")", "elif", "colspecs", "not", "in", "(", "None", ",", "'infer'", ")", "and", "widths", "is", "not", "None", ":", "raise", "ValueError", "(", "\"You must specify only one of 'widths' and \"", "\"'colspecs'\"", ")", "# Compute 'colspecs' from 'widths', if specified.", "if", "widths", "is", "not", "None", ":", "colspecs", ",", "col", "=", "[", "]", ",", "0", "for", "w", "in", "widths", ":", "colspecs", ".", "append", "(", "(", "col", ",", "col", "+", "w", ")", ")", "col", "+=", "w", "kwds", "[", "'colspecs'", "]", "=", "colspecs", "kwds", "[", "'infer_nrows'", "]", "=", "infer_nrows", "kwds", "[", "'engine'", "]", "=", "'python-fwf'", "return", "_read", "(", "filepath_or_buffer", ",", "kwds", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
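A self-contained read_fwf sketch showing the widths-to-colspecs translation performed above:

    from io import StringIO
    import pandas as pd

    data = 'id  name\n001 anna\n002 bob '
    df = pd.read_fwf(StringIO(data), widths=[4, 4])
    # widths=[4, 4] becomes colspecs=[(0, 4), (4, 8)] before parsing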
train
_is_potential_multi_index
Check whether or not the `columns` parameter
could be converted into a MultiIndex.

Parameters
----------
columns : array-like
    Object which may or may not be convertible into a MultiIndex

Returns
-------
boolean : Whether or not columns could become a MultiIndex
pandas/io/parsers.py
def _is_potential_multi_index(columns):
    """
    Check whether or not the `columns` parameter
    could be converted into a MultiIndex.

    Parameters
    ----------
    columns : array-like
        Object which may or may not be convertible into a MultiIndex

    Returns
    -------
    boolean : Whether or not columns could become a MultiIndex
    """
    return (len(columns) and not isinstance(columns, MultiIndex) and
            all(isinstance(c, tuple) for c in columns))
def _is_potential_multi_index(columns):
    """
    Check whether or not the `columns` parameter
    could be converted into a MultiIndex.

    Parameters
    ----------
    columns : array-like
        Object which may or may not be convertible into a MultiIndex

    Returns
    -------
    boolean : Whether or not columns could become a MultiIndex
    """
    return (len(columns) and not isinstance(columns, MultiIndex) and
            all(isinstance(c, tuple) for c in columns))
[ "Check", "whether", "or", "not", "the", "columns", "parameter", "could", "be", "converted", "into", "a", "MultiIndex", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1190-L1205
[ "def", "_is_potential_multi_index", "(", "columns", ")", ":", "return", "(", "len", "(", "columns", ")", "and", "not", "isinstance", "(", "columns", ",", "MultiIndex", ")", "and", "all", "(", "isinstance", "(", "c", ",", "tuple", ")", "for", "c", "in", "columns", ")", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
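A quick behavior sketch (private helper; note it can return a truthy/falsy value rather than a strict boolean for empty input):

    from pandas.io.parsers import _is_potential_multi_index

    _is_potential_multi_index([('a', 1), ('a', 2)])  # True: all tuples
    _is_potential_multi_index(['a', 'b'])            # False: plain labels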
train
_evaluate_usecols
Check whether or not the 'usecols' parameter
is a callable.  If so, enumerates the 'names'
parameter and returns a set of indices for
each entry in 'names' that evaluates to True.
If not a callable, returns 'usecols'.
pandas/io/parsers.py
def _evaluate_usecols(usecols, names):
    """
    Check whether or not the 'usecols' parameter
    is a callable.  If so, enumerates the 'names'
    parameter and returns a set of indices for
    each entry in 'names' that evaluates to True.
    If not a callable, returns 'usecols'.
    """
    if callable(usecols):
        return {i for i, name in enumerate(names) if usecols(name)}
    return usecols
def _evaluate_usecols(usecols, names):
    """
    Check whether or not the 'usecols' parameter
    is a callable.  If so, enumerates the 'names'
    parameter and returns a set of indices for
    each entry in 'names' that evaluates to True.
    If not a callable, returns 'usecols'.
    """
    if callable(usecols):
        return {i for i, name in enumerate(names) if usecols(name)}
    return usecols
[ "Check", "whether", "or", "not", "the", "usecols", "parameter", "is", "a", "callable", ".", "If", "so", "enumerates", "the", "names", "parameter", "and", "returns", "a", "set", "of", "indices", "for", "each", "entry", "in", "names", "that", "evaluates", "to", "True", ".", "If", "not", "a", "callable", "returns", "usecols", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1208-L1218
[ "def", "_evaluate_usecols", "(", "usecols", ",", "names", ")", ":", "if", "callable", "(", "usecols", ")", ":", "return", "{", "i", "for", "i", ",", "name", "in", "enumerate", "(", "names", ")", "if", "usecols", "(", "name", ")", "}", "return", "usecols" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
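The callable branch is what lets read_csv filter columns by name; internally the callable is reduced to a set of positional indices as above:

    from io import StringIO
    import pandas as pd

    csv = StringIO('col1,col2,extra\n1,2,3')
    df = pd.read_csv(csv, usecols=lambda name: name.startswith('col'))
    # keeps col1 and col2; _evaluate_usecols turns the lambda into {0, 1}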
train
_validate_usecols_names
Validates that all usecols are present in a given
list of names. If not, raise a ValueError that
shows what usecols are missing.

Parameters
----------
usecols : iterable of usecols
    The columns to validate are present in names.
names : iterable of names
    The column names to check against.

Returns
-------
usecols : iterable of usecols
    The `usecols` parameter if the validation succeeds.

Raises
------
ValueError : Columns were missing. Error message will list them.
pandas/io/parsers.py
def _validate_usecols_names(usecols, names):
    """
    Validates that all usecols are present in a given
    list of names. If not, raise a ValueError that
    shows what usecols are missing.

    Parameters
    ----------
    usecols : iterable of usecols
        The columns to validate are present in names.
    names : iterable of names
        The column names to check against.

    Returns
    -------
    usecols : iterable of usecols
        The `usecols` parameter if the validation succeeds.

    Raises
    ------
    ValueError : Columns were missing. Error message will list them.
    """
    missing = [c for c in usecols if c not in names]
    if len(missing) > 0:
        raise ValueError(
            "Usecols do not match columns, "
            "columns expected but not found: {missing}".format(missing=missing)
        )

    return usecols
def _validate_usecols_names(usecols, names):
    """
    Validates that all usecols are present in a given
    list of names. If not, raise a ValueError that
    shows what usecols are missing.

    Parameters
    ----------
    usecols : iterable of usecols
        The columns to validate are present in names.
    names : iterable of names
        The column names to check against.

    Returns
    -------
    usecols : iterable of usecols
        The `usecols` parameter if the validation succeeds.

    Raises
    ------
    ValueError : Columns were missing. Error message will list them.
    """
    missing = [c for c in usecols if c not in names]
    if len(missing) > 0:
        raise ValueError(
            "Usecols do not match columns, "
            "columns expected but not found: {missing}".format(missing=missing)
        )

    return usecols
[ "Validates", "that", "all", "usecols", "are", "present", "in", "a", "given", "list", "of", "names", ".", "If", "not", "raise", "a", "ValueError", "that", "shows", "what", "usecols", "are", "missing", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1221-L1250
[ "def", "_validate_usecols_names", "(", "usecols", ",", "names", ")", ":", "missing", "=", "[", "c", "for", "c", "in", "usecols", "if", "c", "not", "in", "names", "]", "if", "len", "(", "missing", ")", ">", "0", ":", "raise", "ValueError", "(", "\"Usecols do not match columns, \"", "\"columns expected but not found: {missing}\"", ".", "format", "(", "missing", "=", "missing", ")", ")", "return", "usecols" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_validate_usecols_arg
Validate the 'usecols' parameter.

Checks whether or not the 'usecols' parameter contains all integers
(column selection by index), strings (column by name) or is a callable.
Raises a ValueError if that is not the case.

Parameters
----------
usecols : list-like, callable, or None
    List of columns to use when parsing or a callable that can be used
    to filter a list of table columns.

Returns
-------
usecols_tuple : tuple
    A tuple of (verified_usecols, usecols_dtype).

    'verified_usecols' is either a set if an array-like is passed in or
    'usecols' if a callable or None is passed in.

    'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
    is passed in or None if a callable or None is passed in.
pandas/io/parsers.py
def _validate_usecols_arg(usecols):
    """
    Validate the 'usecols' parameter.

    Checks whether or not the 'usecols' parameter contains all integers
    (column selection by index), strings (column by name) or is a callable.
    Raises a ValueError if that is not the case.

    Parameters
    ----------
    usecols : list-like, callable, or None
        List of columns to use when parsing or a callable that can be used
        to filter a list of table columns.

    Returns
    -------
    usecols_tuple : tuple
        A tuple of (verified_usecols, usecols_dtype).

        'verified_usecols' is either a set if an array-like is passed in or
        'usecols' if a callable or None is passed in.

        'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
        is passed in or None if a callable or None is passed in.
    """
    msg = ("'usecols' must either be list-like of all strings, all unicode, "
           "all integers or a callable.")
    if usecols is not None:
        if callable(usecols):
            return usecols, None

        if not is_list_like(usecols):
            # see gh-20529
            #
            # Ensure it is iterable container but not string.
            raise ValueError(msg)

        usecols_dtype = lib.infer_dtype(usecols, skipna=False)

        if usecols_dtype not in ("empty", "integer",
                                 "string", "unicode"):
            raise ValueError(msg)

        usecols = set(usecols)

        return usecols, usecols_dtype
    return usecols, None
def _validate_usecols_arg(usecols):
    """
    Validate the 'usecols' parameter.

    Checks whether or not the 'usecols' parameter contains all integers
    (column selection by index), strings (column by name) or is a callable.
    Raises a ValueError if that is not the case.

    Parameters
    ----------
    usecols : list-like, callable, or None
        List of columns to use when parsing or a callable that can be used
        to filter a list of table columns.

    Returns
    -------
    usecols_tuple : tuple
        A tuple of (verified_usecols, usecols_dtype).

        'verified_usecols' is either a set if an array-like is passed in or
        'usecols' if a callable or None is passed in.

        'usecols_dtype` is the inferred dtype of 'usecols' if an array-like
        is passed in or None if a callable or None is passed in.
    """
    msg = ("'usecols' must either be list-like of all strings, all unicode, "
           "all integers or a callable.")
    if usecols is not None:
        if callable(usecols):
            return usecols, None

        if not is_list_like(usecols):
            # see gh-20529
            #
            # Ensure it is iterable container but not string.
            raise ValueError(msg)

        usecols_dtype = lib.infer_dtype(usecols, skipna=False)

        if usecols_dtype not in ("empty", "integer",
                                 "string", "unicode"):
            raise ValueError(msg)

        usecols = set(usecols)

        return usecols, usecols_dtype
    return usecols, None
[ "Validate", "the", "usecols", "parameter", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1284-L1330
[ "def", "_validate_usecols_arg", "(", "usecols", ")", ":", "msg", "=", "(", "\"'usecols' must either be list-like of all strings, all unicode, \"", "\"all integers or a callable.\"", ")", "if", "usecols", "is", "not", "None", ":", "if", "callable", "(", "usecols", ")", ":", "return", "usecols", ",", "None", "if", "not", "is_list_like", "(", "usecols", ")", ":", "# see gh-20529", "#", "# Ensure it is iterable container but not string.", "raise", "ValueError", "(", "msg", ")", "usecols_dtype", "=", "lib", ".", "infer_dtype", "(", "usecols", ",", "skipna", "=", "False", ")", "if", "usecols_dtype", "not", "in", "(", "\"empty\"", ",", "\"integer\"", ",", "\"string\"", ",", "\"unicode\"", ")", ":", "raise", "ValueError", "(", "msg", ")", "usecols", "=", "set", "(", "usecols", ")", "return", "usecols", ",", "usecols_dtype", "return", "usecols", ",", "None" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_validate_parse_dates_arg
Check whether or not the 'parse_dates' parameter is a non-boolean scalar. Raises a ValueError if that is the case.
pandas/io/parsers.py
def _validate_parse_dates_arg(parse_dates):
    """
    Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a ValueError if
    that is the case.
    """
    msg = ("Only booleans, lists, and "
           "dictionaries are accepted "
           "for the 'parse_dates' parameter")

    if parse_dates is not None:
        if is_scalar(parse_dates):
            if not lib.is_bool(parse_dates):
                raise TypeError(msg)

        elif not isinstance(parse_dates, (list, dict)):
            raise TypeError(msg)

    return parse_dates
def _validate_parse_dates_arg(parse_dates):
    """
    Check whether or not the 'parse_dates' parameter
    is a non-boolean scalar. Raises a ValueError if
    that is the case.
    """
    msg = ("Only booleans, lists, and "
           "dictionaries are accepted "
           "for the 'parse_dates' parameter")

    if parse_dates is not None:
        if is_scalar(parse_dates):
            if not lib.is_bool(parse_dates):
                raise TypeError(msg)

        elif not isinstance(parse_dates, (list, dict)):
            raise TypeError(msg)

    return parse_dates
[ "Check", "whether", "or", "not", "the", "parse_dates", "parameter", "is", "a", "non", "-", "boolean", "scalar", ".", "Raises", "a", "ValueError", "if", "that", "is", "the", "case", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L1333-L1351
[ "def", "_validate_parse_dates_arg", "(", "parse_dates", ")", ":", "msg", "=", "(", "\"Only booleans, lists, and \"", "\"dictionaries are accepted \"", "\"for the 'parse_dates' parameter\"", ")", "if", "parse_dates", "is", "not", "None", ":", "if", "is_scalar", "(", "parse_dates", ")", ":", "if", "not", "lib", ".", "is_bool", "(", "parse_dates", ")", ":", "raise", "TypeError", "(", "msg", ")", "elif", "not", "isinstance", "(", "parse_dates", ",", "(", "list", ",", "dict", ")", ")", ":", "raise", "TypeError", "(", "msg", ")", "return", "parse_dates" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
train
_stringify_na_values
return a stringified and numeric for these values
pandas/io/parsers.py
def _stringify_na_values(na_values):
    """ return a stringified and numeric for these values """
    result = []
    for x in na_values:
        result.append(str(x))
        result.append(x)
        try:
            v = float(x)

            # we are like 999 here
            if v == int(v):
                v = int(v)
                result.append("{value}.0".format(value=v))
                result.append(str(v))

            result.append(v)
        except (TypeError, ValueError, OverflowError):
            pass
        try:
            result.append(int(x))
        except (TypeError, ValueError, OverflowError):
            pass
    return set(result)
def _stringify_na_values(na_values):
    """ return a stringified and numeric for these values """
    result = []
    for x in na_values:
        result.append(str(x))
        result.append(x)
        try:
            v = float(x)

            # we are like 999 here
            if v == int(v):
                v = int(v)
                result.append("{value}.0".format(value=v))
                result.append(str(v))

            result.append(v)
        except (TypeError, ValueError, OverflowError):
            pass
        try:
            result.append(int(x))
        except (TypeError, ValueError, OverflowError):
            pass
    return set(result)
[ "return", "a", "stringified", "and", "numeric", "for", "these", "values" ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L3425-L3447
[ "def", "_stringify_na_values", "(", "na_values", ")", ":", "result", "=", "[", "]", "for", "x", "in", "na_values", ":", "result", ".", "append", "(", "str", "(", "x", ")", ")", "result", ".", "append", "(", "x", ")", "try", ":", "v", "=", "float", "(", "x", ")", "# we are like 999 here", "if", "v", "==", "int", "(", "v", ")", ":", "v", "=", "int", "(", "v", ")", "result", ".", "append", "(", "\"{value}.0\"", ".", "format", "(", "value", "=", "v", ")", ")", "result", ".", "append", "(", "str", "(", "v", ")", ")", "result", ".", "append", "(", "v", ")", "except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "pass", "try", ":", "result", ".", "append", "(", "int", "(", "x", ")", ")", "except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "pass", "return", "set", "(", "result", ")" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
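Tracing the expansion above for a single input (999 and 999.0 hash equal in Python, so the numeric forms fold into one set member):

    from pandas.io.parsers import _stringify_na_values

    _stringify_na_values(['999'])
    # -> {'999', 999, '999.0'}  (int/float variants collapse together)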
train
_get_na_values
Get the NaN values for a given column.

Parameters
----------
col : str
    The name of the column.
na_values : array-like, dict
    The object listing the NaN values as strings.
na_fvalues : array-like, dict
    The object listing the NaN values as floats.
keep_default_na : bool
    If `na_values` is a dict, and the column is not mapped in the
    dictionary, whether to return the default NaN values or the empty set.

Returns
-------
nan_tuple : A length-two tuple composed of

    1) na_values : the string NaN values for that column.
    2) na_fvalues : the float NaN values for that column.
pandas/io/parsers.py
def _get_na_values(col, na_values, na_fvalues, keep_default_na):
    """
    Get the NaN values for a given column.

    Parameters
    ----------
    col : str
        The name of the column.
    na_values : array-like, dict
        The object listing the NaN values as strings.
    na_fvalues : array-like, dict
        The object listing the NaN values as floats.
    keep_default_na : bool
        If `na_values` is a dict, and the column is not mapped in the
        dictionary, whether to return the default NaN values or the empty set.

    Returns
    -------
    nan_tuple : A length-two tuple composed of

        1) na_values : the string NaN values for that column.
        2) na_fvalues : the float NaN values for that column.
    """

    if isinstance(na_values, dict):
        if col in na_values:
            return na_values[col], na_fvalues[col]
        else:
            if keep_default_na:
                return _NA_VALUES, set()

            return set(), set()
    else:
        return na_values, na_fvalues
def _get_na_values(col, na_values, na_fvalues, keep_default_na):
    """
    Get the NaN values for a given column.

    Parameters
    ----------
    col : str
        The name of the column.
    na_values : array-like, dict
        The object listing the NaN values as strings.
    na_fvalues : array-like, dict
        The object listing the NaN values as floats.
    keep_default_na : bool
        If `na_values` is a dict, and the column is not mapped in the
        dictionary, whether to return the default NaN values or the empty set.

    Returns
    -------
    nan_tuple : A length-two tuple composed of

        1) na_values : the string NaN values for that column.
        2) na_fvalues : the float NaN values for that column.
    """

    if isinstance(na_values, dict):
        if col in na_values:
            return na_values[col], na_fvalues[col]
        else:
            if keep_default_na:
                return _NA_VALUES, set()

            return set(), set()
    else:
        return na_values, na_fvalues
[ "Get", "the", "NaN", "values", "for", "a", "given", "column", "." ]
pandas-dev/pandas
python
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L3450-L3483
[ "def", "_get_na_values", "(", "col", ",", "na_values", ",", "na_fvalues", ",", "keep_default_na", ")", ":", "if", "isinstance", "(", "na_values", ",", "dict", ")", ":", "if", "col", "in", "na_values", ":", "return", "na_values", "[", "col", "]", ",", "na_fvalues", "[", "col", "]", "else", ":", "if", "keep_default_na", ":", "return", "_NA_VALUES", ",", "set", "(", ")", "return", "set", "(", ")", ",", "set", "(", ")", "else", ":", "return", "na_values", ",", "na_fvalues" ]
9feb3ad92cc0397a04b665803a49299ee7aa1037
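The dict branch above backs per-column na_values in read_csv; unmapped columns fall back to the defaults when keep_default_na is True:

    from io import StringIO
    import pandas as pd

    csv = StringIO('a,b\nNA,-1\n-1,NA')
    df = pd.read_csv(csv, na_values={'b': ['-1']})
    # column 'b': both 'NA' (default) and '-1' parse as NaN;
    # column 'a': only default strings such as 'NA' do, so its -1 survives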