| _id (string, lengths 2–7) | title (string, lengths 1–88) | partition (string, 3 classes) | text (string, lengths 75–19.8k) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q19500
|
liquid_precip_ratio
|
train
|
def liquid_precip_ratio(pr, prsn=None, tas=None, freq='QS-DEC'):
r"""Ratio of rainfall to total precipitation
The ratio of total liquid precipitation over the total precipitation. If solid precipitation is not provided,
then precipitation is assumed solid if the temperature is below 0°C.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [Kg m-2 s-1] or [mm].
prsn : xarray.DataArray
Mean daily solid precipitation flux [Kg m-2 s-1] or [mm].
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
freq : str
Resampling frequency
Returns
-------
xarray.DataArray
Ratio of rainfall to total precipitation
Notes
-----
Let :math:`PR_i` be the mean daily precipitation of day :math:`i`, then for a period :math:`j` starting at
day :math:`a` and finishing on day :math:`b`:
.. math::
PR_{ij} = \sum_{i=a}^{b} PR_i
PRrain_{ij} = PR_{ij} - \sum_{i=a}^{b} PRsn_i
ratio_{ij} = \frac{PRrain_{ij}}{PR_{ij}}
See also
--------
winter_rain_ratio
"""
if prsn is None:
tu = units.parse_units(tas.attrs['units'].replace('-', '**-'))
fu = 'degC'
frz = 0
if fu != tu:
frz = units.convert(frz, fu, tu)
prsn = pr.where(tas < frz, 0)
tot = pr.resample(time=freq).sum(dim='time')
rain = tot - prsn.resample(time=freq).sum(dim='time')
ratio = rain / tot
return ratio
|
python
|
{
"resource": ""
}
|
q19501
|
tn_days_below
|
train
|
def tn_days_below(tasmin, thresh='-10.0 degC', freq='YS'):
r"""Number of days with tmin below a threshold in
Number of days where daily minimum temperature is below a threshold.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default: '-10 degC'.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Number of days Tmin < threshold.
Notes
-----
Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
TN_{ij} < Threshold [℃]
"""
thresh = utils.convert_units_to(thresh, tasmin)
f1 = utils.threshold_count(tasmin, '<', thresh, freq)
return f1
|
python
|
{
"resource": ""
}
|
q19502
|
tx_days_above
|
train
|
def tx_days_above(tasmax, thresh='25.0 degC', freq='YS'):
r"""Number of summer days
Number of days where daily maximum temperature exceeds a threshold.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default: '25 degC'.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Number of summer days.
Notes
-----
Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
TX_{ij} > Threshold [℃]
"""
thresh = utils.convert_units_to(thresh, tasmax)
f = (tasmax > (thresh)) * 1
return f.resample(time=freq).sum(dim='time')
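# --- Usage sketch (not part of the original snippet) ---
# `utils.convert_units_to` is an xclim-internal helper, so this sketch assumes
# the threshold is already in the data's units (25 degC = 298.15 K) and uses
# synthetic data; all names below are illustrative.
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2000-01-01', periods=365, freq='D')
tasmax = xr.DataArray(270 + 30 * np.random.rand(365), dims='time',
                      coords={'time': time}, attrs={'units': 'K'})
summer_days = ((tasmax > 298.15) * 1).resample(time='YS').sum(dim='time')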
|
python
|
{
"resource": ""
}
|
q19503
|
max_n_day_precipitation_amount
|
train
|
def max_n_day_precipitation_amount(pr, window=1, freq='YS'):
r"""Highest precipitation amount cumulated over a n-day moving window.
Calculate the n-day rolling sum of the original daily total precipitation series
and determine the maximum value over each period.
Parameters
----------
pr : xarray.DataArray
Daily precipitation values [Kg m-2 s-1] or [mm]
window : int
Window size in days.
freq : str, optional
Resampling frequency : default 'YS' (yearly)
Returns
-------
xarray.DataArray
The highest cumulated n-day precipitation value at the given time frequency.
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the highest 5-day total precipitation
at an annual frequency:
>>> da = xr.open_dataset('pr.day.nc').pr
>>> window = 5
>>> output = max_n_day_precipitation_amount(da, window, freq="YS")
"""
# rolling sum of the values
arr = pr.rolling(time=window, center=False).sum()
out = arr.resample(time=freq).max(dim='time', keep_attrs=True)
out.attrs['units'] = pr.units
# Adjust values and units to make sure they are daily
return utils.pint_multiply(out, 1 * units.day, 'mm')
|
python
|
{
"resource": ""
}
|
q19504
|
rain_on_frozen_ground_days
|
train
|
def rain_on_frozen_ground_days(pr, tas, thresh='1 mm/d', freq='YS'):
"""Number of rain on frozen ground events
Number of days with rain above a threshold after a series of seven days below freezing temperature.
Precipitation is assumed to be rain when the temperature is above 0℃.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [Kg m-2 s-1] or [mm]
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
thresh : str
Precipitation threshold to consider a day as a rain event. Default : '1 mm/d'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The number of rain on frozen ground events per period [days]
Notes
-----
Let :math:`PR_i` be the mean daily precipitation and :math:`TG_i` be the mean daily temperature of day :math:`i`.
Then for a period :math:`j`, rain on frozen grounds days are counted where:
.. math::
PR_{i} > Threshold [mm]
and where
.. math::
TG_{i} \leq 0℃
is true for continuous periods where :math:`i \geq 7`
"""
t = utils.convert_units_to(thresh, pr)
frz = utils.convert_units_to('0 C', tas)
def func(x, axis):
"""Check that temperature conditions are below 0 for seven days and above after."""
frozen = x == np.array([0, 0, 0, 0, 0, 0, 0, 1], bool)
return frozen.all(axis=axis)
tcond = (tas > frz).rolling(time=8).reduce(func)
pcond = (pr > t)
return (tcond * pcond * 1).resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19505
|
tg90p
|
train
|
def tg90p(tas, t90, freq='YS'):
r"""Number of days with daily mean temperature over the 90th percentile.
Number of days with daily mean temperature over the 90th percentile.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
t90 : xarray.DataArray
90th percentile of daily mean temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with daily mean temperature over the 90th percentile [days]
Notes
-----
The 90th percentile should be computed for a 5-day window centered on each calendar day for a reference period.
Example
-------
>>> t90 = percentile_doy(historical_tas, per=0.9)
>>> hot_days = tg90p(tas, t90)
"""
if 'dayofyear' not in t90.coords.keys():
raise AttributeError("t10 should have dayofyear coordinates.")
t90 = utils.convert_units_to(t90, tas)
# adjustment of t90 to tas doy range
t90 = utils.adjust_doy_calendar(t90, tas)
# create array of percentile with tas shape and coords
thresh = xr.full_like(tas, np.nan)
doy = thresh.time.dt.dayofyear.values
thresh.data = t90.sel(dayofyear=doy)
# compute the warm days
over = (tas > thresh)
return over.resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19506
|
tg10p
|
train
|
def tg10p(tas, t10, freq='YS'):
r"""Number of days with daily mean temperature below the 10th percentile.
Number of days with daily mean temperature below the 10th percentile.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
t10 : xarray.DataArray
10th percentile of daily mean temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with daily mean temperature below the 10th percentile [days]
Notes
-----
The 10th percentile should be computed for a 5-day window centered on each calendar day for a reference period.
Example
-------
>>> t10 = percentile_doy(historical_tas, per=0.1)
>>> cold_days = tg10p(tas, t10)
"""
if 'dayofyear' not in t10.coords.keys():
raise AttributeError("t10 should have dayofyear coordinates.")
t10 = utils.convert_units_to(t10, tas)
# adjustment of t10 to tas doy range
t10 = utils.adjust_doy_calendar(t10, tas)
# create array of percentile with tas shape and coords
thresh = xr.full_like(tas, np.nan)
doy = thresh.time.dt.dayofyear.values
thresh.data = t10.sel(dayofyear=doy)
# compute the cold days
below = (tas < thresh)
return below.resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19507
|
tg_max
|
train
|
def tg_max(tas, freq='YS'):
r"""Highest mean temperature.
The maximum of daily mean temperature.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Maximum of daily mean temperature.
Notes
-----
Let :math:`TG_{ij}` be the mean temperature at day :math:`i` of period :math:`j`. Then the maximum
daily mean temperature for period :math:`j` is:
.. math::
TGx_j = max(TG_{ij})
"""
return tas.resample(time=freq).max(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19508
|
tg_mean
|
train
|
def tg_mean(tas, freq='YS'):
r"""Mean of daily average temperature.
Resample the original daily mean temperature series by taking the mean over each period.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The mean daily temperature at the given time frequency
Notes
-----
Let :math:`TG_i` be the mean daily temperature of day :math:`i`, then for a period :math:`p` starting at
day :math:`a` and finishing on day :math:`b`:
.. math::
TG_p = \frac{\sum_{i=a}^{b} TG_i}{b - a + 1}
Examples
--------
The following would compute for each grid cell of file `tas.day.nc` the mean temperature
at the seasonal frequency, i.e. DJF, MAM, JJA, SON, etc.:
>>> t = xr.open_dataset('tas.day.nc').tas
>>> tg = tg_mean(t, freq="QS-DEC")
"""
arr = tas.resample(time=freq) if freq else tas
return arr.mean(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19509
|
tg_min
|
train
|
def tg_min(tas, freq='YS'):
r"""Lowest mean temperature
Minimum of daily mean temperature.
Parameters
----------
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Minimum of daily mean temperature.
Notes
-----
Let :math:`TG_{ij}` be the mean temperature at day :math:`i` of period :math:`j`. Then the minimum
daily mean temperature for period :math:`j` is:
.. math::
TGn_j = min(TG_{ij})
"""
return tas.resample(time=freq).min(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19510
|
tn_max
|
train
|
def tn_max(tasmin, freq='YS'):
r"""Highest minimum temperature.
The maximum of daily minimum temperature.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Maximum of daily minimum temperature.
Notes
-----
Let :math:`TN_{ij}` be the minimum temperature at day :math:`i` of period :math:`j`. Then the maximum
daily minimum temperature for period :math:`j` is:
.. math::
TNx_j = max(TN_{ij})
"""
return tasmin.resample(time=freq).max(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19511
|
tn_mean
|
train
|
def tn_mean(tasmin, freq='YS'):
r"""Mean minimum temperature.
Mean of daily minimum temperature.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Mean of daily minimum temperature.
Notes
-----
Let :math:`TN_{ij}` be the minimum temperature at day :math:`i` of period :math:`j`. Then mean
values in period :math:`j` are given by:
.. math::
TNm_j = \frac{ \sum_{i=1}^{I} TN_{ij} }{I}
"""
arr = tasmin.resample(time=freq) if freq else tasmin
return arr.mean(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19512
|
tn_min
|
train
|
def tn_min(tasmin, freq='YS'):
r"""Lowest minimum temperature
Minimum of daily minimum temperature.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [β] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Minimum of daily minimum temperature.
Notes
-----
Let :math:`TN_{ij}` be the minimum temperature at day :math:`i` of period :math:`j`. Then the minimum
daily minimum temperature for period :math:`j` is:
.. math::
TNn_j = min(TN_{ij})
"""
return tasmin.resample(time=freq).min(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19513
|
tropical_nights
|
train
|
def tropical_nights(tasmin, thresh='20.0 degC', freq='YS'):
r"""Tropical nights
The number of days with minimum daily temperature above threshold.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default: '20 degC'.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Number of days with minimum daily temperature above threshold.
Notes
-----
Let :math:`TN_{ij}` be the daily minimum temperature at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
TN_{ij} > Threshold [℃]
"""
thresh = utils.convert_units_to(thresh, tasmin)
return tasmin.pipe(lambda x: (tasmin > thresh) * 1) \
.resample(time=freq) \
.sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19514
|
tx_max
|
train
|
def tx_max(tasmax, freq='YS'):
r"""Highest max temperature
The maximum value of daily maximum temperature.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Maximum value of daily maximum temperature.
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the maximum
daily maximum temperature for period :math:`j` is:
.. math::
TXx_j = max(TX_{ij})
"""
return tasmax.resample(time=freq).max(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19515
|
tx_mean
|
train
|
def tx_mean(tasmax, freq='YS'):
r"""Mean max temperature
The mean of daily maximum temperature.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Mean of daily maximum temperature.
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then mean
values in period :math:`j` are given by:
.. math::
TXm_j = \frac{ \sum_{i=1}^{I} TX_{ij} }{I}
"""
arr = tasmax.resample(time=freq) if freq else tasmax
return arr.mean(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19516
|
tx_min
|
train
|
def tx_min(tasmax, freq='YS'):
r"""Lowest max temperature
The minimum of daily maximum temperature.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Minimum of daily maximum temperature.
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`. Then the minimum
daily maximum temperature for period :math:`j` is:
.. math::
TXn_j = min(TX_{ij})
"""
return tasmax.resample(time=freq).min(dim='time', keep_attrs=True)
|
python
|
{
"resource": ""
}
|
q19517
|
warm_day_frequency
|
train
|
def warm_day_frequency(tasmax, thresh='30 degC', freq='YS'):
r"""Frequency of extreme warm days
Return the number of days with tasmax > thresh per period
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default : '30 degC'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Number of days exceeding threshold.
Notes
-----
Let :math:`TX_{ij}` be the daily maximum temperature at day :math:`i` of period :math:`j`. Then
counted is the number of days where:
.. math::
TX_{ij} > Threshold [℃]
"""
thresh = utils.convert_units_to(thresh, tasmax)
events = (tasmax > thresh) * 1
return events.resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19518
|
tx_tn_days_above
|
train
|
def tx_tn_days_above(tasmin, tasmax, thresh_tasmin='22 degC',
thresh_tasmax='30 degC', freq='YS'):
r"""Number of days with both hot maximum and minimum daily temperatures.
The number of days per period with tasmin above a threshold and tasmax above another threshold.
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
thresh_tasmin : str
Threshold temperature for tasmin on which to base evaluation [℃] or [K]. Default : '22 degC'
thresh_tasmax : str
Threshold temperature for tasmax on which to base evaluation [℃] or [K]. Default : '30 degC'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The number of days with tasmin > thresh_tasmin and
tasmax > thresh_tasmax per period
Notes
-----
Let :math:`TX_{ij}` be the maximum temperature at day :math:`i` of period :math:`j`, :math:`TN_{ij}`
the daily minimum temperature at day :math:`i` of period :math:`j`, :math:`TX_{thresh}` the threshold for maximum
daily temperature, and :math:`TN_{thresh}` the threshold for minimum daily temperature. Then counted is the number
of days where:
.. math::
TX_{ij} > TX_{thresh} [℃]
and where:
.. math::
TN_{ij} > TN_{thresh} [℃]
"""
thresh_tasmax = utils.convert_units_to(thresh_tasmax, tasmax)
thresh_tasmin = utils.convert_units_to(thresh_tasmin, tasmin)
events = ((tasmin > (thresh_tasmin)) & (tasmax > (thresh_tasmax))) * 1
return events.resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19519
|
warm_night_frequency
|
train
|
def warm_night_frequency(tasmin, thresh='22 degC', freq='YS'):
r"""Frequency of extreme warm nights
Return the number of days with tasmin > thresh per period
Parameters
----------
tasmin : xarray.DataArray
Minimum daily temperature [℃] or [K]
thresh : str
Threshold temperature on which to base evaluation [℃] or [K]. Default : '22 degC'
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
The number of days with tasmin > thresh per period
"""
thresh = utils.convert_units_to(thresh, tasmin, )
events = (tasmin > thresh) * 1
return events.resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19520
|
warm_spell_duration_index
|
train
|
def warm_spell_duration_index(tasmax, tx90, window=6, freq='YS'):
r"""Warm spell duration index
Number of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile. The 90th percentile should be computed for a 5-day window centred on each calendar day in the
1961-1990 period.
Parameters
----------
tasmax : xarray.DataArray
Maximum daily temperature [℃] or [K]
tx90 : xarray.DataArray
90th percentile of daily maximum temperature [℃] or [K]
window : int
Minimum number of days with temperature above threshold to qualify as a warm spell.
freq : str, optional
Resampling frequency
Returns
-------
xarray.DataArray
Count of days with at least six consecutive days where the daily maximum temperature is above the 90th
percentile [days].
References
----------
From the Expert Team on Climate Change Detection, Monitoring and Indices (ETCCDMI).
Used in Alexander, L. V., et al. (2006), Global observed changes in daily climate extremes of temperature and
precipitation, J. Geophys. Res., 111, D05109, doi: 10.1029/2005JD006290.
"""
if 'dayofyear' not in tx90.coords.keys():
raise AttributeError("tx90 should have dayofyear coordinates.")
# The day of year value of the tasmax series.
doy = tasmax.indexes['time'].dayofyear
# adjustment of tx90 to tasmax doy range
tx90 = utils.adjust_doy_calendar(tx90, tasmax)
# Create an array with the shape and coords of tasmax, but with values set to tx90 according to the doy index.
thresh = xr.full_like(tasmax, np.nan)
thresh.data = tx90.sel(dayofyear=doy)
above = (tasmax > thresh)
return above.resample(time=freq).apply(rl.windowed_run_count, window=window, dim='time')
|
python
|
{
"resource": ""
}
|
q19521
|
wetdays
|
train
|
def wetdays(pr, thresh='1.0 mm/day', freq='YS'):
r"""Wet days
Return the total number of days during period with precipitation over threshold.
Parameters
----------
pr : xarray.DataArray
Daily precipitation [mm]
thresh : str
Precipitation value over which a day is considered wet. Default: '1 mm/day'.
freq : str, optional
Resampling frequency defining the periods
defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.
Returns
-------
xarray.DataArray
The number of wet days for each period [day]
Examples
--------
The following would compute for each grid cell of file `pr.day.nc` the number days
with precipitation over 5 mm at the seasonal frequency, i.e. DJF, MAM, JJA, SON, etc.:
>>> pr = xr.open_dataset('pr.day.nc').pr
>>> wd = wetdays(pr, thresh='5 mm/day', freq="QS-DEC")
"""
thresh = utils.convert_units_to(thresh, pr, 'hydro')
wd = (pr >= thresh) * 1
return wd.resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19522
|
winter_rain_ratio
|
train
|
def winter_rain_ratio(pr, prsn=None, tas=None):
"""Ratio of rainfall to total precipitation during winter
The ratio of total liquid precipitation over the total precipitation over the winter months (DJF). If solid
precipitation is not provided, then precipitation is assumed solid if the temperature is below 0°C.
Parameters
----------
pr : xarray.DataArray
Mean daily precipitation flux [Kg m-2 s-1] or [mm].
prsn : xarray.DataArray
Mean daily solid precipitation flux [Kg m-2 s-1] or [mm].
tas : xarray.DataArray
Mean daily temperature [℃] or [K]
Returns
-------
xarray.DataArray
Ratio of rainfall to total precipitation during winter months (DJF)
"""
ratio = liquid_precip_ratio(pr, prsn, tas, freq='QS-DEC')
winter = ratio.indexes['time'].month == 12
return ratio[winter]
|
python
|
{
"resource": ""
}
|
q19523
|
select_time
|
train
|
def select_time(da, **indexer):
"""Select entries according to a time period.
Parameters
----------
da : xarray.DataArray
Input data.
**indexer : {dim: indexer, }, optional
Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,
month=1 to select January, or month=[6,7,8] to select summer months. If no indexer is given, all values are
considered.
Returns
-------
xr.DataArray
Selected input values.
"""
if not indexer:
selected = da
else:
key, val = indexer.popitem()
time_att = getattr(da.time.dt, key)
selected = da.sel(time=time_att.isin(val)).dropna(dim='time')
return selected
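# --- Usage sketch (not part of the original snippet; synthetic data) ---
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2000-01-01', '2001-12-31', freq='D')
da = xr.DataArray(np.arange(time.size), dims='time', coords={'time': time})
djf = select_time(da, season='DJF')  # winter (Dec-Jan-Feb) values only
jan = select_time(da, month=1)       # January values only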
|
python
|
{
"resource": ""
}
|
q19524
|
select_resample_op
|
train
|
def select_resample_op(da, op, freq="YS", **indexer):
"""Apply operation over each period that is part of the index selection.
Parameters
----------
da : xarray.DataArray
Input data.
op : str {'min', 'max', 'mean', 'std', 'var', 'count', 'sum', 'argmax', 'argmin'} or func
Reduce operation. Can either be a DataArray method or a function that can be applied to a DataArray.
freq : str
Resampling frequency defining the periods
defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.
**indexer : {dim: indexer, }, optional
Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,
month=1 to select January, or month=[6,7,8] to select summer months. If no indexer is given, all values are
considered.
Returns
-------
xarray.DataArray
The result of the operation applied over each period.
"""
da = select_time(da, **indexer)
r = da.resample(time=freq, keep_attrs=True)
if isinstance(op, str):
return getattr(r, op)(dim='time', keep_attrs=True)
return r.apply(op)
|
python
|
{
"resource": ""
}
|
q19525
|
doymax
|
train
|
def doymax(da):
"""Return the day of year of the maximum value."""
i = da.argmax(dim='time')
out = da.time.dt.dayofyear[i]
out.attrs['units'] = ''
return out
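# --- Usage sketch (not part of the original snippet; synthetic data) ---
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2001-01-01', periods=365, freq='D')
da = xr.DataArray(np.sin(np.linspace(0, np.pi, 365)), dims='time',
                  coords={'time': time})
doymax(da)  # the sine peaks mid-year, so dayofyear ~ 183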
|
python
|
{
"resource": ""
}
|
q19526
|
fit
|
train
|
def fit(arr, dist='norm'):
"""Fit an array to a univariate distribution along the time dimension.
Parameters
----------
arr : xarray.DataArray
Time series to be fitted along the time dimension.
dist : str
Name of the univariate distribution, such as beta, expon, genextreme, gamma, gumbel_r, lognorm, norm
(see scipy.stats).
Returns
-------
xarray.DataArray
An array of distribution parameters fitted using the method of Maximum Likelihood.
"""
# Get the distribution
dc = get_dist(dist)
# Fit the parameters (lazy computation)
data = dask.array.apply_along_axis(dc.fit, arr.get_axis_num('time'), arr)
# Count the number of values used for the fit.
# n = arr.count(dim='time')
# Create a view to a DataArray with the desired dimensions to copy them over to the parameter array.
mean = arr.mean(dim='time', keep_attrs=True)
# Create coordinate for the distribution parameters
coords = dict(mean.coords.items())
coords['dparams'] = ([] if dc.shapes is None else dc.shapes.split(',')) + ['loc', 'scale']
# TODO: add time and time_bnds coordinates (Low will work on this)
# time.attrs['climatology'] = 'climatology_bounds'
# coords['time'] =
# coords['climatology_bounds'] =
out = xr.DataArray(data=data, coords=coords, dims=(u'dparams',) + mean.dims)
out.attrs = arr.attrs
out.attrs['original_name'] = getattr(arr, 'standard_name', '')
out.attrs['standard_name'] = '{0} distribution parameters'.format(dist)
out.attrs['long_name'] = '{0} distribution parameters for {1}'.format(dist, getattr(arr, 'standard_name', ''))
out.attrs['estimator'] = 'Maximum likelihood'
out.attrs['cell_methods'] = (out.attrs.get('cell_methods', '') + ' time: fit').strip()
out.attrs['units'] = ''
msg = '\nData fitted with {0} statistical distribution using a Maximum Likelihood Estimator'
out.attrs['history'] = out.attrs.get('history', '') + msg.format(dist)
return out
|
python
|
{
"resource": ""
}
|
q19527
|
fa
|
train
|
def fa(arr, t, dist='norm', mode='high'):
"""Return the value corresponding to the given return period.
Parameters
----------
arr : xarray.DataArray
Maximized/minimized input data with a `time` dimension.
t : int or sequence
Return period. The period depends on the resolution of the input data. If the input array's resolution is
yearly, then the return period is in years.
dist : str
Name of the univariate distribution, such as beta, expon, genextreme, gamma, gumbel_r, lognorm, norm
(see scipy.stats).
mode : {'min', 'max'}
Whether we are looking for a probability of exceedance (max) or a probability of non-exceedance (min).
Returns
-------
xarray.DataArray
An array of values with a 1/t probability of exceedance (if mode=='max').
"""
t = np.atleast_1d(t)
# Get the distribution
dc = get_dist(dist)
# Fit the parameters of the distribution
p = fit(arr, dist)
# Create a lambda function to facilitate passing arguments to dask. There is probably a better way to do this.
if mode in ['max', 'high']:
def func(x):
return dc.isf(1./t, *x)
elif mode in ['min', 'low']:
def func(x):
return dc.ppf(1./t, *x)
else:
raise ValueError("mode `{}` should be either 'max' or 'min'".format(mode))
data = dask.array.apply_along_axis(func, p.get_axis_num('dparams'), p)
# Create coordinate for the return periods
coords = dict(p.coords.items())
coords.pop('dparams')
coords['return_period'] = t
# Create dimensions
dims = list(p.dims)
dims.remove('dparams')
dims.insert(0, u'return_period')
# TODO: add time and time_bnds coordinates (Low will work on this)
# time.attrs['climatology'] = 'climatology_bounds'
# coords['time'] =
# coords['climatology_bounds'] =
out = xr.DataArray(data=data, coords=coords, dims=dims)
out.attrs = p.attrs
out.attrs['standard_name'] = '{0} quantiles'.format(dist)
out.attrs['long_name'] = '{0} return period values for {1}'.format(dist, getattr(arr, 'standard_name', ''))
out.attrs['cell_methods'] = (out.attrs.get('cell_methods', '') + ' dparams: ppf').strip()
out.attrs['units'] = arr.attrs.get('units', '')
out.attrs['mode'] = mode
out.attrs['history'] = out.attrs.get('history', '') + "Compute values corresponding to return periods."
return out
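# --- Conceptual sketch (not part of the original snippet) ---
# The same fit -> isf chain with plain scipy/numpy, skipping the dask/xarray
# wrapping; the synthetic `annual_max` series is illustrative only.
import numpy as np
from scipy import stats

annual_max = np.random.gumbel(loc=20, scale=5, size=50)  # synthetic yearly maxima
params = stats.genextreme.fit(annual_max)                # maximum likelihood fit
t = np.array([2, 20, 100])                               # return periods
xt = stats.genextreme.isf(1.0 / t, *params)              # values with 1/t exceedance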
|
python
|
{
"resource": ""
}
|
q19528
|
frequency_analysis
|
train
|
def frequency_analysis(da, mode, t, dist, window=1, freq=None, **indexer):
"""Return the value corresponding to a return period.
Parameters
----------
da : xarray.DataArray
Input data.
t : int or sequence
Return period. The period depends on the resolution of the input data. If the input array's resolution is
yearly, then the return period is in years.
dist : str
Name of the univariate distribution, such as beta, expon, genextreme, gamma, gumbel_r, lognorm, norm
(see scipy.stats).
mode : {'min', 'max'}
Whether we are looking for a probability of exceedance (max) or a probability of non-exceedance (min).
window : int
Averaging window length (days).
freq : str
Resampling frequency. If None, the frequency is assumed to be 'YS' unless the indexer is season='DJF',
in which case `freq` would be set to 'AS-DEC'.
**indexer : {dim: indexer, }, optional
Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,
month=1 to select January, or month=[6,7,8] to select summer months. If no indexer is given, all values are
considered.
Returns
-------
xarray.DataArray
An array of values with a 1/t probability of exceedance or non-exceedance when mode is high or low respectively.
"""
# Apply rolling average
if window > 1:
da = da.rolling(time=window, center=False).mean()
# Assign default resampling frequency if not provided
freq = freq or default_freq(**indexer)
# Extract the time series of min or max over the period
sel = select_resample_op(da, op=mode, freq=freq, **indexer).dropna(dim='time')
# Frequency analysis
return fa(sel, t, dist, mode)
|
python
|
{
"resource": ""
}
|
q19529
|
default_freq
|
train
|
def default_freq(**indexer):
"""Return the default frequency."""
freq = 'AS-JAN'
if indexer:
if 'DJF' in indexer.values():
freq = 'AS-DEC'
if 'month' in indexer and sorted(indexer.values()) != list(indexer.values()):
raise NotImplementedError
return freq
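# --- Usage sketch (not part of the original snippet) ---
default_freq()              # -> 'AS-JAN' (annual, starting in January)
default_freq(season='DJF')  # -> 'AS-DEC' (winter spans the year boundary)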
|
python
|
{
"resource": ""
}
|
q19530
|
get_dist
|
train
|
def get_dist(dist):
"""Return a distribution object from scipy.stats.
"""
from scipy import stats
dc = getattr(stats, dist, None)
if dc is None:
e = "Statistical distribution `{}` is not in scipy.stats.".format(dist)
raise ValueError(e)
return dc
|
python
|
{
"resource": ""
}
|
q19531
|
_get_indicators
|
train
|
def _get_indicators(modules):
"""For all modules or classes listed, return the children that are instances of xclim.utils.Indicator.
Parameters
----------
modules : sequence
Sequence of modules to inspect.
"""
out = []
for obj in modules:
for key, val in obj.__dict__.items():
if isinstance(val, xcu.Indicator):
out.append(val)
return out
|
python
|
{
"resource": ""
}
|
q19532
|
_indicator_table
|
train
|
def _indicator_table():
"""Return a sequence of dicts storing metadata about all available indices."""
from xclim import temperature, precip
import inspect
inds = _get_indicators([temperature, precip])
table = []
for ind in inds:
# Apply default values
args = {name: p.default for (name, p) in ind._sig.parameters.items() if p.default != inspect._empty}
table.append(ind.json(args))
return table
|
python
|
{
"resource": ""
}
|
q19533
|
longest_run
|
train
|
def longest_run(da, dim='time'):
"""Return the length of the longest consecutive run of True values.
Parameters
----------
da : N-dimensional array (boolean)
Input array
dim : Xarray dimension (default = 'time')
Dimension along which to calculate consecutive run
Returns
-------
N-dimensional array (int)
Length of longest run of True values along dimension
"""
d = rle(da, dim=dim)
rl_long = d.max(dim=dim)
return rl_long
|
python
|
{
"resource": ""
}
|
q19534
|
rle_1d
|
train
|
def rle_1d(arr):
"""Return the length, starting position and value of consecutive identical values.
Parameters
----------
arr : sequence
Array of values to be parsed.
Returns
-------
(values, run lengths, start positions)
values : np.array
The values taken by arr over each run
run lengths : np.array
The length of each run
start position : np.array
The starting index of each run
Examples
--------
>>> a = [1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3]
>>> rle_1d(a)
(array([1, 2, 3]), array([2, 4, 6]), array([0, 2, 6]))
"""
ia = np.asarray(arr)
n = len(ia)
if n == 0:
e = 'run length array empty'
warn(e)
return None, None, None
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
i = np.append(np.where(y), n - 1) # must include last element position
rl = np.diff(np.append(-1, i)) # run lengths
pos = np.cumsum(np.append(0, rl))[:-1] # positions
return ia[i], rl, pos
|
python
|
{
"resource": ""
}
|
q19535
|
longest_run_1d
|
train
|
def longest_run_1d(arr):
"""Return the length of the longest consecutive run of identical values.
Parameters
----------
arr : bool array
Input array
Returns
-------
int
Length of longest run.
"""
v, rl = rle_1d(arr)[:2]
return np.where(v, rl, 0).max()
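# --- Usage sketch (not part of the original snippet; assumes rle_1d from the
# same module is in scope) ---
import numpy as np
longest_run_1d(np.array([True, True, False, True, True, True]))  # -> 3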
|
python
|
{
"resource": ""
}
|
q19536
|
windowed_run_count_ufunc
|
train
|
def windowed_run_count_ufunc(x, window):
"""Dask-parallel version of windowed_run_count_1d, ie the number of consecutive true values in
array for runs at least as long as given duration.
Parameters
----------
x : bool array
Input array
window : int
Minimum duration of consecutive run to accumulate values.
Returns
-------
out : func
A function operating along the time dimension of a dask-array.
"""
return xr.apply_ufunc(windowed_run_count_1d,
x,
input_core_dims=[['time'], ],
vectorize=True,
dask='parallelized',
output_dtypes=[np.int, ],
keep_attrs=True,
kwargs={'window': window})
|
python
|
{
"resource": ""
}
|
q19537
|
windowed_run_events_ufunc
|
train
|
def windowed_run_events_ufunc(x, window):
"""Dask-parallel version of windowed_run_events_1d, ie the number of runs at least as long as given duration.
Parameters
----------
x : bool array
Input array
window : int
Minimum run length
Returns
-------
out : func
A function operating along the time dimension of a dask-array.
"""
return xr.apply_ufunc(windowed_run_events_1d,
x,
input_core_dims=[['time'], ],
vectorize=True,
dask='parallelized',
output_dtypes=[np.int, ],
keep_attrs=True,
kwargs={'window': window})
|
python
|
{
"resource": ""
}
|
q19538
|
longest_run_ufunc
|
train
|
def longest_run_ufunc(x):
"""Dask-parallel version of longest_run_1d, ie the maximum number of consecutive true values in
array.
Parameters
----------
x : bool array
Input array
Returns
-------
out : func
A function operating along the time dimension of a dask-array.
"""
return xr.apply_ufunc(longest_run_1d,
x,
input_core_dims=[['time'], ],
vectorize=True,
dask='parallelized',
output_dtypes=[np.int, ],
keep_attrs=True,
)
|
python
|
{
"resource": ""
}
|
q19539
|
check_valid
|
train
|
def check_valid(var, key, expected):
r"""Check that a variable's attribute has the expected value. Warn user otherwise."""
att = getattr(var, key, None)
if att is None:
e = 'Variable does not have a `{}` attribute.'.format(key)
warn(e)
elif att != expected:
e = 'Variable has a non-conforming {}. Got `{}`, expected `{}`'.format(key, att, expected)
warn(e)
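# --- Usage sketch (not part of the original snippet; synthetic data, and it
# assumes the module's `from warnings import warn` import) ---
import xarray as xr
tas = xr.DataArray([280.0], attrs={'standard_name': 'air_temperature', 'units': 'K'})
check_valid(tas, 'standard_name', 'air_temperature')  # silent: value matches
check_valid(tas, 'units', 'degC')  # warns: got `K`, expected `degC`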
|
python
|
{
"resource": ""
}
|
q19540
|
check_valid_temperature
|
train
|
def check_valid_temperature(var, units):
r"""Check that variable is air temperature."""
check_valid(var, 'standard_name', 'air_temperature')
check_valid(var, 'units', units)
assert_daily(var)
|
python
|
{
"resource": ""
}
|
q19541
|
valid_daily_max_min_temperature
|
train
|
def valid_daily_max_min_temperature(comp, units='K'):
r"""Decorator to check that a computation runs on valid min and max temperature datasets."""
@wraps(comp)
def func(tasmax, tasmin, **kwds):
valid_daily_max_temperature(tasmax, units)
valid_daily_min_temperature(tasmin, units)
return comp(tasmax, tasmin, **kwds)
return func
|
python
|
{
"resource": ""
}
|
q19542
|
valid_daily_mean_discharge
|
train
|
def valid_daily_mean_discharge(comp):
r"""Decorator to check that a computation runs on valid discharge data."""
@wraps(comp)
def func(q, **kwds):
check_valid_discharge(q)
return comp(q, **kwds)
return func
|
python
|
{
"resource": ""
}
|
q19543
|
check_is_dataarray
|
train
|
def check_is_dataarray(comp):
r"""Decorator to check that a computation has an instance of xarray.DataArray
as first argument."""
@wraps(comp)
def func(data_array, *args, **kwds):
assert isinstance(data_array, xr.DataArray)
return comp(data_array, *args, **kwds)
return func
|
python
|
{
"resource": ""
}
|
q19544
|
missing_any
|
train
|
def missing_any(da, freq, **kwds):
r"""Return a boolean DataArray indicating whether there are missing days in the resampled array.
Parameters
----------
da : DataArray
Input array at daily frequency.
freq : str
Resampling frequency.
Returns
-------
out : DataArray
A boolean array set to True if any month or year has missing values.
"""
c = da.notnull().resample(time=freq).sum(dim='time')
if '-' in freq:
pfreq, anchor = freq.split('-')
else:
pfreq = freq
if pfreq.endswith('S'):
start_time = c.indexes['time']
end_time = start_time.shift(1, freq=freq)
else:
end_time = c.indexes['time']
start_time = end_time.shift(-1, freq=freq)
n = (end_time - start_time).days
nda = xr.DataArray(n.values, coords={'time': c.time}, dims='time')
return c != nda
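# --- Usage sketch (not part of the original snippet; synthetic data) ---
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2000-01-01', '2000-12-31', freq='D')
vals = np.ones(time.size)
vals[40:45] = np.nan  # knock out a few February days
da = xr.DataArray(vals, dims='time', coords={'time': time})
missing_any(da, 'MS')  # True for February, False for the other months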
|
python
|
{
"resource": ""
}
|
q19545
|
rstjinja
|
train
|
def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
src = source[0]
rendered = app.builder.templates.render_string(
src, app.config.html_context
)
source[0] = rendered
|
python
|
{
"resource": ""
}
|
q19546
|
units2pint
|
train
|
def units2pint(value):
"""Return the pint Unit for the DataArray units.
Parameters
----------
value : xr.DataArray or string
Input data array or expression.
Returns
-------
pint.Unit
Units of the data array.
"""
def _transform(s):
"""Convert a CF-unit string to a pint expression."""
return re.subn(r'\^?(-?\d)', r'**\g<1>', s)[0]
if isinstance(value, str):
unit = value
elif isinstance(value, xr.DataArray):
unit = value.attrs['units']
elif isinstance(value, units.Quantity):
return value.units
else:
raise NotImplementedError("Value of type {} not supported.".format(type(value)))
try: # Pint compatible
return units.parse_expression(unit).units
except (pint.UndefinedUnitError, pint.DimensionalityError): # Convert from CF-units to pint-compatible
return units.parse_expression(_transform(unit)).units
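# --- Illustration (not part of the original snippet) ---
# The CF-to-pint fallback rewrites bare exponents so pint can parse them:
import re
re.subn(r'\^?(-?\d)', r'**\g<1>', 'kg m-2 s-1')[0]  # -> 'kg m**-2 s**-1'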
|
python
|
{
"resource": ""
}
|
q19547
|
pint2cfunits
|
train
|
def pint2cfunits(value):
"""Return a CF-Convention unit string from a `pint` unit.
Parameters
----------
value : pint.Unit
Input unit.
Returns
-------
out : str
Units following CF-Convention.
"""
# Print units using abbreviations (millimeter -> mm)
s = "{:~}".format(value)
# Search and replace patterns
pat = r'(?P<inverse>/ )?(?P<unit>\w+)(?: \*\* (?P<pow>\d))?'
def repl(m):
i, u, p = m.groups()
p = p or (1 if i else '')
neg = '-' if i else ('^' if p else '')
return "{}{}{}".format(u, neg, p)
out, n = re.subn(pat, repl, s)
return out
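# --- Usage sketch (not part of the original snippet; assumes pint is installed
# and the module imports `re`) ---
import pint
ureg = pint.UnitRegistry()
u = (ureg.kg / ureg.m ** 2 / ureg.s).units
pint2cfunits(u)  # -> 'kg m-2 s-1'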
|
python
|
{
"resource": ""
}
|
q19548
|
pint_multiply
|
train
|
def pint_multiply(da, q, out_units=None):
"""Multiply xarray.DataArray by pint.Quantity.
Parameters
----------
da : xr.DataArray
Input array.
q : pint.Quantity
Multiplicating factor.
out_units : str
Units the output array should be converted into.
"""
a = 1 * units2pint(da)
f = a * q.to_base_units()
if out_units:
f = f.to(out_units)
out = da * f.magnitude
out.attrs['units'] = pint2cfunits(f.units)
return out
|
python
|
{
"resource": ""
}
|
q19549
|
convert_units_to
|
train
|
def convert_units_to(source, target, context=None):
"""
Convert a mathematical expression into a value with the same units as a DataArray.
Parameters
----------
source : str, pint.Quantity or xr.DataArray
The value to be converted, e.g. '4C' or '1 mm/d'.
target : str, pint.Unit or DataArray
Target array of values to which units must conform.
context : str
Optional unit-conversion context, e.g. 'hydro' for precipitation units.
Returns
-------
out
The source value converted to target's units.
"""
# Target units
if isinstance(target, units.Unit):
tu = target
elif isinstance(target, (str, xr.DataArray)):
tu = units2pint(target)
else:
raise NotImplementedError
if isinstance(source, str):
q = units.parse_expression(source)
# Return magnitude of converted quantity. This is going to fail if units are not compatible.
return q.to(tu).m
if isinstance(source, units.Quantity):
return source.to(tu).m
if isinstance(source, xr.DataArray):
fu = units2pint(source)
if fu == tu:
return source
tu_u = pint2cfunits(tu)
with units.context(context or 'none'):
out = units.convert(source, fu, tu)
out.attrs['units'] = tu_u
return out
# TODO remove backwards compatibility of int/float thresholds after v1.0 release
if isinstance(source, (float, int)):
if context == 'hydro':
fu = units.mm / units.day
else:
fu = units.degC
warnings.warn("Future versions of XCLIM will require explicit unit specifications.", FutureWarning)
return (source * fu).to(tu).m
raise NotImplementedError("source of type {} is not supported.".format(type(source)))
|
python
|
{
"resource": ""
}
|
q19550
|
threshold_count
|
train
|
def threshold_count(da, op, thresh, freq):
"""Count number of days above or below threshold.
Parameters
----------
da : xarray.DataArray
Input data.
op : {>, <, >=, <=, gt, lt, ge, le }
Logical operator, e.g. arr > thresh.
thresh : float
Threshold value.
freq : str
Resampling frequency defining the periods
defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.
Returns
-------
xarray.DataArray
The number of days meeting the constraints for each period.
"""
from xarray.core.ops import get_op
if op in binary_ops:
op = binary_ops[op]
elif op in binary_ops.values():
pass
else:
raise ValueError("Operation `{}` not recognized.".format(op))
func = getattr(da, '_binary_op')(get_op(op))
c = func(da, thresh) * 1
return c.resample(time=freq).sum(dim='time')
|
python
|
{
"resource": ""
}
|
q19551
|
percentile_doy
|
train
|
def percentile_doy(arr, window=5, per=.1):
"""Percentile value for each day of the year
Return the climatological percentile over a moving window around each day of the year.
Parameters
----------
arr : xarray.DataArray
Input data.
window : int
Number of days around each day of the year to include in the calculation.
per : float
Percentile between [0,1]
Returns
-------
xarray.DataArray
The percentiles indexed by the day of the year.
"""
# TODO: Support percentile array, store percentile in coordinates.
# This is supported by DataArray.quantile, but not by groupby.reduce.
rr = arr.rolling(min_periods=1, center=True, time=window).construct('window')
# Create empty percentile array
g = rr.groupby('time.dayofyear')
p = g.reduce(np.nanpercentile, dim=('time', 'window'), q=per * 100)
# The percentile for the 366th day has a sample size of 1/4 of the other days.
# To have the same sample size, we interpolate the percentile from 1-365 doy range to 1-366
if p.dayofyear.max() == 366:
p = adjust_doy_calendar(p.loc[p.dayofyear < 366], arr)
p.attrs.update(arr.attrs.copy())
return p
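# --- Usage sketch (not part of the original snippet; synthetic data) ---
# Non-leap years are used so the doy-366 interpolation branch is skipped.
import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2001-01-01', '2003-12-31', freq='D')
tas = xr.DataArray(280 + 10 * np.random.rand(time.size), dims='time',
                   coords={'time': time})
t90 = percentile_doy(tas, window=5, per=0.9)  # one value per calendar day (1-365)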
|
python
|
{
"resource": ""
}
|
q19552
|
infer_doy_max
|
train
|
def infer_doy_max(arr):
"""Return the largest doy allowed by calendar.
Parameters
----------
arr : xarray.DataArray
Array with `time` coordinate.
Returns
-------
int
The largest day of the year found in calendar.
"""
cal = arr.time.encoding.get('calendar', None)
if cal in calendars:
doy_max = calendars[cal]
else:
# If the source is an array with no calendar information and its length does not span at least
# a full year, then this inference could be wrong.
doy_max = arr.time.dt.dayofyear.max().data
if len(arr.time) < 360:
raise ValueError("Cannot infer the calendar from a series less than a year long.")
if doy_max not in [360, 365, 366]:
raise ValueError("The target array's calendar is not recognized")
return doy_max
|
python
|
{
"resource": ""
}
|
q19553
|
_interpolate_doy_calendar
|
train
|
def _interpolate_doy_calendar(source, doy_max):
"""Interpolate from one set of dayofyear range to another
Interpolate an array defined over a `dayofyear` range (say 1 to 360) to another `dayofyear` range (say 1
to 365).
Parameters
----------
source : xarray.DataArray
Array with `dayofyear` coordinates.
doy_max : int
Largest day of the year allowed by calendar.
Returns
-------
xarray.DataArray
Interpolated source array over coordinates spanning the target `dayofyear` range.
"""
if 'dayofyear' not in source.coords.keys():
raise AttributeError("source should have dayofyear coordinates.")
# Interpolation of source to target dayofyear range
doy_max_source = source.dayofyear.max()
# Interpolate to fill na values
tmp = source.interpolate_na(dim='dayofyear')
# Interpolate to target dayofyear range
tmp.coords['dayofyear'] = np.linspace(start=1, stop=doy_max, num=doy_max_source)
return tmp.interp(dayofyear=range(1, doy_max + 1))
|
python
|
{
"resource": ""
}
|
q19554
|
adjust_doy_calendar
|
train
|
def adjust_doy_calendar(source, target):
"""Interpolate from one set of dayofyear range to another calendar.
Interpolate an array defined over a `dayofyear` range (say 1 to 360) to another `dayofyear` range (say 1
to 365).
Parameters
----------
source : xarray.DataArray
Array with `dayofyear` coordinates.
target : xarray.DataArray
Array with `time` coordinate.
Returns
-------
xarray.DataArray
Interpolated source array over coordinates spanning the target `dayofyear` range.
"""
doy_max_source = source.dayofyear.max()
doy_max = infer_doy_max(target)
if doy_max_source == doy_max:
return source
return _interpolate_doy_calendar(source, doy_max)
|
python
|
{
"resource": ""
}
|
q19555
|
daily_downsampler
|
train
|
def daily_downsampler(da, freq='YS'):
r"""Daily climate data downsampler
Parameters
----------
da : xarray.DataArray
Daily input data.
freq : str
Resampling frequency: 'YS', 'MS' or 'QS-DEC'.
Returns
-------
xarray.core.groupby.DataArrayGroupBy
Groupby object over the generated time tags.
Note
----
Usage Example
grouper = daily_downsampler(da_std, freq='YS')
x2 = grouper.mean()
# add time coords to x2 and change dimension tags to time
time1 = daily_downsampler(da_std.time, freq=freq).first()
x2.coords['time'] = ('tags', time1.values)
x2 = x2.swap_dims({'tags': 'time'})
x2 = x2.sortby('time')
"""
# generate tags from da.time and freq
if isinstance(da.time.values[0], np.datetime64):
years = ['{:04d}'.format(y) for y in da.time.dt.year.values]
months = ['{:02d}'.format(m) for m in da.time.dt.month.values]
else:
# cannot use year, month, season attributes, not available for all calendars ...
years = ['{:04d}'.format(v.year) for v in da.time.values]
months = ['{:02d}'.format(v.month) for v in da.time.values]
seasons = ['DJF DJF MAM MAM MAM JJA JJA JJA SON SON SON DJF'.split()[int(m) - 1] for m in months]
n_t = da.time.size
if freq == 'YS':
# year start frequency
l_tags = years
elif freq == 'MS':
# month start frequency
l_tags = [years[i] + months[i] for i in range(n_t)]
elif freq == 'QS-DEC':
# DJF, MAM, JJA, SON seasons
# construct tags from list of season+year, increasing year for December
ys = []
for i in range(n_t):
m = months[i]
s = seasons[i]
y = years[i]
if m == '12':
y = str(int(y) + 1)
ys.append(y + s)
l_tags = ys
else:
raise RuntimeError('frequency {:s} not implemented'.format(freq))
# add tags to buffer DataArray
buffer = da.copy()
buffer.coords['tags'] = ('time', l_tags)
# return groupby according to tags
return buffer.groupby('tags')
|
python
|
{
"resource": ""
}
|
q19556
|
walk_map
|
train
|
def walk_map(d, func):
"""Apply a function recursively to values of dictionary.
Parameters
----------
d : dict
Input dictionary, possibly nested.
func : function
Function to apply to dictionary values.
Returns
-------
dict
Dictionary whose values are the output of the given function.
"""
out = {}
for k, v in d.items():
if isinstance(v, (dict, defaultdict)):
out[k] = walk_map(v, func)
else:
out[k] = func(v)
return out
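# --- Usage sketch (not part of the original snippet) ---
walk_map({'a': 1, 'b': {'c': 2}}, lambda v: v * 10)  # -> {'a': 10, 'b': {'c': 20}}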
|
python
|
{
"resource": ""
}
|
q19557
|
parse_doc
|
train
|
def parse_doc(doc):
"""Crude regex parsing."""
if doc is None:
return {}
out = {}
sections = re.split(r'(\w+)\n\s+-{4,50}', doc) # obj.__doc__.split('\n\n')
intro = sections.pop(0)
if intro:
content = list(map(str.strip, intro.strip().split('\n\n')))
if len(content) == 1:
out['title'] = content[0]
elif len(content) == 2:
out['title'], out['abstract'] = content
for i in range(0, len(sections), 2):
header, content = sections[i:i + 2]
if header in ['Notes', 'References']:
out[header.lower()] = content.replace('\n ', '\n')
elif header == 'Parameters':
pass
elif header == 'Returns':
match = re.search(r'xarray\.DataArray\s*(.*)', content)
if match:
out['long_name'] = match.groups()[0]
return out
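# --- Usage sketch (not part of the original snippet) ---
doc = """Mean temperature.

    The mean of daily temperature.

    Returns
    -------
    xarray.DataArray
        The mean daily temperature
    """
parse_doc(doc)
# -> {'title': 'Mean temperature.', 'abstract': 'The mean of daily temperature.',
#     'long_name': 'The mean daily temperature'}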
|
python
|
{
"resource": ""
}
|
q19558
|
format_kwargs
|
train
|
def format_kwargs(attrs, params):
"""Modify attribute with argument values.
Parameters
----------
attrs : dict
Attributes to be assigned to function output. The values of the attributes in braces will be replaced by
the corresponding argument values.
params : dict
A BoundArguments.arguments dictionary storing a function's arguments.
"""
attrs_mapping = {'cell_methods': {'YS': 'years', 'MS': 'months'},
'long_name': {'YS': 'Annual', 'MS': 'Monthly'}}
for key, val in attrs.items():
mba = {}
# Add formatting {} around values to be able to replace them with _attrs_mapping using format.
for k, v in params.items():
if isinstance(v, six.string_types) and v in attrs_mapping.get(key, {}).keys():
mba[k] = '{' + v + '}'
else:
mba[k] = v
attrs[key] = val.format(**mba).format(**attrs_mapping.get(key, {}))
|
python
|
{
"resource": ""
}
|
q19559
|
Indicator.json
|
train
|
def json(self, args=None):
"""Return a dictionary representation of the class.
Notes
-----
This is meant to be used by a third-party library wanting to wrap this class into another interface.
"""
names = ['identifier', 'abstract', 'keywords']
out = {key: getattr(self, key) for key in names}
out.update(self.cf_attrs)
out = self.format(out, args)
out['notes'] = self.notes
out['parameters'] = str({key: {'default': p.default if p.default != p.empty else None, 'desc': ''}
for (key, p) in self._sig.parameters.items()})
if six.PY2:
out = walk_map(out, lambda x: x.decode('utf8') if isinstance(x, six.string_types) else x)
return out
|
python
|
{
"resource": ""
}
|
q19560
|
Indicator.factory
|
train
|
def factory(cls, attrs):
"""Create a subclass from the attributes dictionary."""
name = attrs['identifier'].capitalize()
return type(name, (cls,), attrs)
|
python
|
{
"resource": ""
}
|
q19561
|
sent2features
|
train
|
def sent2features(sentence, template):
""" extract features in a sentence
:type sentence: list of token, each token is a list of tag
"""
return [word2features(sentence, i, template) for i in range(len(sentence))]
|
python
|
{
"resource": ""
}
|
q19562
|
tokenize
|
train
|
def tokenize(text, format=None):
"""
Tokenize text for word segmentation
:param text: raw text input
:return: list of tokens, or a space-joined string when format == "text"
"""
text = Text(text)
text = text.replace("\t", " ")
tokens = re.findall(patterns, text)
tokens = [token[0] for token in tokens]
if format == "text":
return " ".join(tokens)
else:
return tokens
|
python
|
{
"resource": ""
}
|
q19563
|
pos_tag
|
train
|
def pos_tag(sentence, format=None):
"""
Vietnamese POS tagging
Parameters
==========
sentence: {unicode, str}
Raw sentence
Returns
=======
tokens: list of tuple with word, pos tag
tagged sentence
Examples
--------
>>> # -*- coding: utf-8 -*-
>>> from underthesea import pos_tag
>>> sentence = "Chợ thα»t chΓ³ nα»i tiαΊΏng α» TPHCM bα» truy quΓ©t"
>>> pos_tag(sentence)
[('Chợ', 'N'),
('thịt', 'N'),
('chó', 'N'),
('nổi tiếng', 'A'),
('ở', 'E'),
('TPHCM', 'Np'),
('bị', 'V'),
('truy quét', 'V')]
"""
sentence = word_tokenize(sentence)
crf_model = CRFPOSTagPredictor.Instance()
result = crf_model.predict(sentence, format)
return result
|
python
|
{
"resource": ""
}
|
q19564
|
WSCorpus.save
|
train
|
def save(self, folder, format):
"""save wscorpus to files
:param str folder: path to directory
:type folder: string
:param str format: either TEXT or COLUMN
:type format: str
"""
try:
mkdir(folder)
except Exception:
pass
for document in self.documents:
f = join(folder, document.id)
content = u"\n".join(document.sentences)
write(f, content)
|
python
|
{
"resource": ""
}
|
q19565
|
ner
|
train
|
def ner(sentence, format=None):
"""
Locate and classify named entities in text
Parameters
==========
sentence: {unicode, str}
raw sentence
Returns
=======
tokens: list of tuple with word, pos tag, chunking tag, ner tag
tagged sentence
Examples
--------
>>> # -*- coding: utf-8 -*-
>>> from underthesea import ner
>>> sentence = "Γng Putin ca ngợi nhα»―ng thΓ nh tα»±u vΔ© ΔαΊ‘i cα»§a LiΓͺn XΓ΄"
>>> ner(sentence)
[('Ông', 'Nc', 'B-NP', 'O'),
('Putin', 'Np', 'B-NP', 'B-PER'),
('ca ngợi', 'V', 'B-VP', 'O'),
('những', 'L', 'B-NP', 'O'),
('thành tựu', 'N', 'B-NP', 'O'),
('vĩ đại', 'A', 'B-AP', 'O'),
('của', 'E', 'B-PP', 'O'),
('Liên Xô', 'Np', 'B-NP', 'B-LOC')]
"""
sentence = chunk(sentence)
crf_model = CRFNERPredictor.Instance()
result = crf_model.predict(sentence, format)
return result
|
python
|
{
"resource": ""
}
|
q19566
|
encrypt
|
train
|
def encrypt(pubkey, password):
"""Encrypt password using given RSA public key and encode it with base64.
The encrypted password can only be decrypted by someone with the
private key (in this case, only Travis).
"""
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
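# --- Usage sketch (not part of the original snippet) ---
# `load_key` is defined elsewhere in the original module; this sketch generates
# a throwaway RSA key with the `cryptography` package instead of a real one.
import base64
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
token = base64.b64encode(key.public_key().encrypt(b'secret-password', PKCS1v15()))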
|
python
|
{
"resource": ""
}
|
q19567
|
prepend_line
|
train
|
def prepend_line(filepath, line):
"""Rewrite a file adding a line to its beginning.
"""
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
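# --- Usage sketch (not part of the original snippet; uses a temp file) ---
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'demo.txt')
with open(path, 'w') as f:
    f.write('world\n')
prepend_line(path, 'hello\n')
open(path).read()  # -> 'hello\nworld\n'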
|
python
|
{
"resource": ""
}
|
q19568
|
word_tokenize
|
train
|
def word_tokenize(sentence, format=None):
"""
Vietnamese word segmentation
Parameters
==========
sentence: {unicode, str}
raw sentence
Returns
=======
tokens: list of text
tagged sentence
Examples
--------
>>> # -*- coding: utf-8 -*-
>>> from underthesea import word_tokenize
>>> sentence = "BΓ‘c sΔ© bΓ’y giα» cΓ³ thα» thαΊ£n nhiΓͺn bΓ‘o tin bα»nh nhΓ’n bα» ung thΖ°"
>>> word_tokenize(sentence)
['Bác sĩ', 'bây giờ', 'có thể', 'thản nhiên', 'báo tin', 'bệnh nhân', 'bị', 'ung thư']
>>> word_tokenize(sentence, format="text")
'Bác_sĩ bây_giờ có_thể thản_nhiên báo_tin bệnh_nhân bị ung_thư'
"""
tokens = tokenize(sentence)
crf_model = CRFModel.instance()
output = crf_model.predict(tokens, format)
tokens = [token[0] for token in output]
tags = [token[1] for token in output]
output = []
for tag, token in zip(tags, tokens):
if tag == "I-W":
output[-1] = output[-1] + u" " + token
else:
output.append(token)
if format == "text":
output = u" ".join([item.replace(" ", "_") for item in output])
return output
|
python
|
{
"resource": ""
}
|
q19569
|
FastTextClassifier.fit
|
train
|
def fit(self, X, y, model_filename=None):
"""Fit FastText according to X, y
Parameters:
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
"""
train_file = "temp.train"
X = [x.replace("\n", " ") for x in X]
y = [item[0] for item in y]
y = [_.replace(" ", "-") for _ in y]
lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)]
content = "\n".join(lines)
write(train_file, content)
if model_filename:
self.estimator = fasttext.supervised(train_file, model_filename)
else:
self.estimator = fasttext.supervised(train_file)
os.remove(train_file)
|
python
|
{
"resource": ""
}
|
q19570
|
ConfigDriver.create_driver
|
train
|
def create_driver(self):
"""Create a selenium driver using specified config properties
:returns: a new selenium driver
:rtype: selenium.webdriver.remote.webdriver.WebDriver
"""
driver_type = self.config.get('Driver', 'type')
try:
if self.config.getboolean_optional('Server', 'enabled'):
self.logger.info("Creating remote driver (type = %s)", driver_type)
driver = self._create_remote_driver()
else:
self.logger.info("Creating local driver (type = %s)", driver_type)
driver = self._create_local_driver()
except Exception as exc:
error_message = get_error_message_from_exception(exc)
self.logger.error("%s driver can not be launched: %s", driver_type.capitalize(), error_message)
raise
return driver
|
python
|
{
"resource": ""
}
|
q19571
|
ConfigDriver._create_local_driver
|
train
|
def _create_local_driver(self):
"""Create a driver in local machine
:returns: a new local selenium driver
"""
driver_type = self.config.get('Driver', 'type')
driver_name = driver_type.split('-')[0]
if driver_name in ('android', 'ios', 'iphone'):
# Create local appium driver
driver = self._setup_appium()
else:
driver_setup = {
'firefox': self._setup_firefox,
'chrome': self._setup_chrome,
'safari': self._setup_safari,
'opera': self._setup_opera,
'iexplore': self._setup_explorer,
'edge': self._setup_edge,
'phantomjs': self._setup_phantomjs
}
driver_setup_method = driver_setup.get(driver_name)
if not driver_setup_method:
raise Exception('Unknown driver {0}'.format(driver_name))
# Get driver capabilities
capabilities = self._get_capabilities_from_driver_type(driver_name)
self._add_capabilities_from_properties(capabilities, 'Capabilities')
# Create local selenium driver
driver = driver_setup_method(capabilities)
return driver
|
python
|
{
"resource": ""
}
|
q19572
|
ConfigDriver._get_capabilities_from_driver_type
|
train
|
def _get_capabilities_from_driver_type(driver_name):
"""Create initial driver capabilities
    :param driver_name: name of selected driver
:returns: capabilities dictionary
"""
if driver_name == 'firefox':
return DesiredCapabilities.FIREFOX.copy()
elif driver_name == 'chrome':
return DesiredCapabilities.CHROME.copy()
elif driver_name == 'safari':
return DesiredCapabilities.SAFARI.copy()
elif driver_name == 'opera':
return DesiredCapabilities.OPERA.copy()
elif driver_name == 'iexplore':
return DesiredCapabilities.INTERNETEXPLORER.copy()
elif driver_name == 'edge':
return DesiredCapabilities.EDGE.copy()
elif driver_name == 'phantomjs':
return DesiredCapabilities.PHANTOMJS.copy()
elif driver_name in ('android', 'ios', 'iphone'):
return {}
raise Exception('Unknown driver {0}'.format(driver_name))
|
python
|
{
"resource": ""
}
|
q19573
|
ConfigDriver._add_capabilities_from_properties
|
train
|
def _add_capabilities_from_properties(self, capabilities, section):
"""Add capabilities from properties file
:param capabilities: capabilities object
:param section: properties section
"""
cap_type = {'Capabilities': 'server', 'AppiumCapabilities': 'Appium server'}
try:
for cap, cap_value in dict(self.config.items(section)).items():
self.logger.debug("Added %s capability: %s = %s", cap_type[section], cap, cap_value)
capabilities[cap] = cap_value if cap == 'version' else self._convert_property_type(cap_value)
except NoSectionError:
pass
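
# Hedged example of a properties section this method consumes; every value is
# type-converted except 'version', which is kept as a string:
#
#   [Capabilities]
#   version = 11
#   javascriptEnabled = true
#
# would leave capabilities == {'version': '11', 'javascriptEnabled': True}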
|
python
|
{
"resource": ""
}
|
q19574
|
ConfigDriver._setup_firefox
|
train
|
def _setup_firefox(self, capabilities):
"""Setup Firefox webdriver
:param capabilities: capabilities object
:returns: a new local Firefox driver
"""
if capabilities.get("marionette"):
gecko_driver = self.config.get('Driver', 'gecko_driver_path')
self.logger.debug("Gecko driver path given in properties: %s", gecko_driver)
else:
gecko_driver = None
# Get Firefox binary
firefox_binary = self.config.get_optional('Firefox', 'binary')
firefox_options = Options()
if self.config.getboolean_optional('Driver', 'headless'):
self.logger.debug("Running Firefox in headless mode")
firefox_options.add_argument('-headless')
self._add_firefox_arguments(firefox_options)
if firefox_binary:
firefox_options.binary = firefox_binary
log_path = os.path.join(DriverWrappersPool.output_directory, 'geckodriver.log')
try:
# Selenium 3
return webdriver.Firefox(firefox_profile=self._create_firefox_profile(), capabilities=capabilities,
executable_path=gecko_driver, firefox_options=firefox_options, log_path=log_path)
except TypeError:
# Selenium 2
return webdriver.Firefox(firefox_profile=self._create_firefox_profile(), capabilities=capabilities,
executable_path=gecko_driver, firefox_options=firefox_options)
|
python
|
{
"resource": ""
}
|
q19575
|
ConfigDriver._create_firefox_profile
|
train
|
def _create_firefox_profile(self):
"""Create and configure a firefox profile
:returns: firefox profile
"""
# Get Firefox profile
profile_directory = self.config.get_optional('Firefox', 'profile')
if profile_directory:
self.logger.debug("Using firefox profile: %s", profile_directory)
# Create Firefox profile
profile = webdriver.FirefoxProfile(profile_directory=profile_directory)
profile.native_events_enabled = True
# Add Firefox preferences
try:
for pref, pref_value in dict(self.config.items('FirefoxPreferences')).items():
self.logger.debug("Added firefox preference: %s = %s", pref, pref_value)
profile.set_preference(pref, self._convert_property_type(pref_value))
profile.update_preferences()
except NoSectionError:
pass
# Add Firefox extensions
try:
for pref, pref_value in dict(self.config.items('FirefoxExtensions')).items():
self.logger.debug("Added firefox extension: %s = %s", pref, pref_value)
profile.add_extension(pref_value)
except NoSectionError:
pass
return profile
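
# Hedged example of the optional sections read above (paths hypothetical):
#
#   [Firefox]
#   profile = resources/firefox-profile
#
#   [FirefoxPreferences]
#   browser.download.folderList = 2
#   browser.download.dir = /tmp/downloads
#
#   [FirefoxExtensions]
#   firebug = resources/firebug-3.0.0.xpi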
|
python
|
{
"resource": ""
}
|
q19576
|
ConfigDriver._convert_property_type
|
train
|
def _convert_property_type(value):
"""Converts the string value in a boolean, integer or string
:param value: string value
:returns: boolean, integer or string value
"""
if value in ('true', 'True'):
return True
elif value in ('false', 'False'):
return False
elif str(value).startswith('{') and str(value).endswith('}'):
return ast.literal_eval(value)
else:
try:
return int(value)
except ValueError:
return value
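
# Illustrative conversions, runnable against the function above. The
# literal_eval branch requires ast, which the surrounding module is assumed
# to import; it is imported here so the demo is self-contained.
import ast

assert _convert_property_type('true') is True
assert _convert_property_type('False') is False
assert _convert_property_type('1920') == 1920
assert _convert_property_type("{'intl.accept_languages': 'es'}") == {'intl.accept_languages': 'es'}
assert _convert_property_type('chrome') == 'chrome'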
|
python
|
{
"resource": ""
}
|
q19577
|
ConfigDriver._setup_chrome
|
train
|
def _setup_chrome(self, capabilities):
"""Setup Chrome webdriver
:param capabilities: capabilities object
:returns: a new local Chrome driver
"""
chrome_driver = self.config.get('Driver', 'chrome_driver_path')
self.logger.debug("Chrome driver path given in properties: %s", chrome_driver)
return webdriver.Chrome(chrome_driver, chrome_options=self._create_chrome_options(),
desired_capabilities=capabilities)
|
python
|
{
"resource": ""
}
|
q19578
|
ConfigDriver._create_chrome_options
|
train
|
def _create_chrome_options(self):
"""Create and configure a chrome options object
:returns: chrome options object
"""
# Create Chrome options
options = webdriver.ChromeOptions()
if self.config.getboolean_optional('Driver', 'headless'):
self.logger.debug("Running Chrome in headless mode")
options.add_argument('--headless')
if os.name == 'nt': # Temporarily needed if running on Windows.
options.add_argument('--disable-gpu')
# Add Chrome preferences, mobile emulation options and chrome arguments
self._add_chrome_options(options, 'prefs')
self._add_chrome_options(options, 'mobileEmulation')
self._add_chrome_arguments(options)
return options
|
python
|
{
"resource": ""
}
|
q19579
|
ConfigDriver._add_chrome_options
|
train
|
def _add_chrome_options(self, options, option_name):
"""Add Chrome options from properties file
:param options: chrome options object
:param option_name: chrome option name
"""
options_conf = {'prefs': {'section': 'ChromePreferences', 'message': 'preference'},
'mobileEmulation': {'section': 'ChromeMobileEmulation', 'message': 'mobile emulation option'}}
option_value = dict()
try:
for key, value in dict(self.config.items(options_conf[option_name]['section'])).items():
self.logger.debug("Added chrome %s: %s = %s", options_conf[option_name]['message'], key, value)
option_value[key] = self._convert_property_type(value)
if len(option_value) > 0:
options.add_experimental_option(option_name, option_value)
except NoSectionError:
pass
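
# Hedged example: a section like
#
#   [ChromeMobileEmulation]
#   deviceName = Google Nexus 5
#
# results in options.add_experimental_option('mobileEmulation',
# {'deviceName': 'Google Nexus 5'}).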
|
python
|
{
"resource": ""
}
|
q19580
|
ConfigDriver._add_chrome_arguments
|
train
|
def _add_chrome_arguments(self, options):
"""Add Chrome arguments from properties file
:param options: chrome options object
"""
try:
for pref, pref_value in dict(self.config.items('ChromeArguments')).items():
pref_value = '={}'.format(pref_value) if pref_value else ''
self.logger.debug("Added chrome argument: %s%s", pref, pref_value)
options.add_argument('{}{}'.format(pref, self._convert_property_type(pref_value)))
except NoSectionError:
pass
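
# Hedged example: a section like
#
#   [ChromeArguments]
#   lang = es
#   incognito =
#
# adds the arguments 'lang=es' and 'incognito' (an empty property value adds
# just the key, since pref_value stays '').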
|
python
|
{
"resource": ""
}
|
q19581
|
ConfigDriver._setup_opera
|
train
|
def _setup_opera(self, capabilities):
"""Setup Opera webdriver
:param capabilities: capabilities object
:returns: a new local Opera driver
"""
opera_driver = self.config.get('Driver', 'opera_driver_path')
self.logger.debug("Opera driver path given in properties: %s", opera_driver)
return webdriver.Opera(executable_path=opera_driver, desired_capabilities=capabilities)
|
python
|
{
"resource": ""
}
|
q19582
|
ConfigDriver._setup_explorer
|
train
|
def _setup_explorer(self, capabilities):
"""Setup Internet Explorer webdriver
:param capabilities: capabilities object
:returns: a new local Internet Explorer driver
"""
explorer_driver = self.config.get('Driver', 'explorer_driver_path')
self.logger.debug("Explorer driver path given in properties: %s", explorer_driver)
return webdriver.Ie(explorer_driver, capabilities=capabilities)
|
python
|
{
"resource": ""
}
|
q19583
|
ConfigDriver._setup_edge
|
train
|
def _setup_edge(self, capabilities):
"""Setup Edge webdriver
:param capabilities: capabilities object
:returns: a new local Edge driver
"""
edge_driver = self.config.get('Driver', 'edge_driver_path')
self.logger.debug("Edge driver path given in properties: %s", edge_driver)
return webdriver.Edge(edge_driver, capabilities=capabilities)
|
python
|
{
"resource": ""
}
|
q19584
|
ConfigDriver._setup_phantomjs
|
train
|
def _setup_phantomjs(self, capabilities):
"""Setup phantomjs webdriver
:param capabilities: capabilities object
:returns: a new local phantomjs driver
"""
phantomjs_driver = self.config.get('Driver', 'phantomjs_driver_path')
self.logger.debug("Phantom driver path given in properties: %s", phantomjs_driver)
return webdriver.PhantomJS(executable_path=phantomjs_driver, desired_capabilities=capabilities)
|
python
|
{
"resource": ""
}
|
q19585
|
ConfigDriver._setup_appium
|
train
|
def _setup_appium(self):
"""Setup Appium webdriver
:returns: a new remote Appium driver
"""
self.config.set('Server', 'host', '127.0.0.1')
self.config.set('Server', 'port', '4723')
return self._create_remote_driver()
|
python
|
{
"resource": ""
}
|
q19586
|
Utils.set_implicitly_wait
|
train
|
def set_implicitly_wait(self):
"""Read implicitly timeout from configuration properties and configure driver implicitly wait"""
implicitly_wait = self.driver_wrapper.config.get_optional('Driver', 'implicitly_wait')
if implicitly_wait:
self.driver_wrapper.driver.implicitly_wait(implicitly_wait)
|
python
|
{
"resource": ""
}
|
q19587
|
Utils.capture_screenshot
|
train
|
def capture_screenshot(self, name):
"""Capture screenshot and save it in screenshots folder
:param name: screenshot name suffix
:returns: screenshot path
"""
filename = '{0:0=2d}_{1}'.format(DriverWrappersPool.screenshots_number, name)
filename = '{}.png'.format(get_valid_filename(filename))
filepath = os.path.join(DriverWrappersPool.screenshots_directory, filename)
if not os.path.exists(DriverWrappersPool.screenshots_directory):
os.makedirs(DriverWrappersPool.screenshots_directory)
if self.driver_wrapper.driver.get_screenshot_as_file(filepath):
self.logger.info('Screenshot saved in %s', filepath)
DriverWrappersPool.screenshots_number += 1
return filepath
return None
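
# Minimal usage sketch (hypothetical wrapper and screenshot name):
#
#   utils = Utils(driver_wrapper)
#   path = utils.capture_screenshot('login_page')
#   # e.g. <output>/screenshots/01_login_page.png, or None if the capture failed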
|
python
|
{
"resource": ""
}
|
q19588
|
Utils.save_webdriver_logs
|
train
|
def save_webdriver_logs(self, test_name):
"""Get webdriver logs and write them to log files
:param test_name: test that has generated these logs
"""
try:
log_types = self.driver_wrapper.driver.log_types
except Exception:
# geckodriver does not implement log_types, but it implements get_log for client and server
log_types = ['client', 'server']
self.logger.debug("Reading logs from '%s' and writing them to log files", ', '.join(log_types))
for log_type in log_types:
try:
self.save_webdriver_logs_by_type(log_type, test_name)
except Exception:
# Capture exceptions to avoid errors in teardown method
pass
|
python
|
{
"resource": ""
}
|
q19589
|
Utils.save_webdriver_logs_by_type
|
train
|
def save_webdriver_logs_by_type(self, log_type, test_name):
"""Get webdriver logs of the specified type and write them to a log file
:param log_type: browser, client, driver, performance, server, syslog, crashlog or logcat
:param test_name: test that has generated these logs
"""
try:
logs = self.driver_wrapper.driver.get_log(log_type)
except Exception:
return
if len(logs) > 0:
log_file_name = '{}_{}.txt'.format(get_valid_filename(test_name), log_type)
log_file_name = os.path.join(DriverWrappersPool.logs_directory, log_file_name)
with open(log_file_name, 'a+', encoding='utf-8') as log_file:
driver_type = self.driver_wrapper.config.get('Driver', 'type')
log_file.write(
u"\n{} '{}' test logs with driver = {}\n\n".format(datetime.now(), test_name, driver_type))
for entry in logs:
timestamp = datetime.fromtimestamp(float(entry['timestamp']) / 1000.).strftime(
'%Y-%m-%d %H:%M:%S.%f')
log_file.write(u'{}\t{}\t{}\n'.format(timestamp, entry['level'], entry['message'].rstrip()))
|
python
|
{
"resource": ""
}
|
q19590
|
Utils.discard_logcat_logs
|
train
|
def discard_logcat_logs(self):
"""Discard previous logcat logs"""
if self.driver_wrapper.is_android_test():
try:
self.driver_wrapper.driver.get_log('logcat')
except Exception:
pass
|
python
|
{
"resource": ""
}
|
q19591
|
Utils._expected_condition_find_element
|
train
|
def _expected_condition_find_element(self, element):
"""Tries to find the element, but does not thrown an exception if the element is not found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:returns: the web element if it has been found or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
from toolium.pageelements.page_element import PageElement
web_element = False
try:
if isinstance(element, PageElement):
# Use _find_web_element() instead of web_element to avoid logging error message
element._web_element = None
element._find_web_element()
web_element = element._web_element
elif isinstance(element, tuple):
web_element = self.driver_wrapper.driver.find_element(*element)
except NoSuchElementException:
pass
return web_element
|
python
|
{
"resource": ""
}
|
q19592
|
Utils._expected_condition_find_first_element
|
train
|
def _expected_condition_find_first_element(self, elements):
"""Try to find sequentially the elements of the list and return the first element found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:returns: first element found or None
:rtype: toolium.pageelements.PageElement or tuple
"""
from toolium.pageelements.page_element import PageElement
element_found = None
for element in elements:
try:
if isinstance(element, PageElement):
element._web_element = None
element._find_web_element()
else:
self.driver_wrapper.driver.find_element(*element)
element_found = element
break
except (NoSuchElementException, TypeError):
pass
return element_found
|
python
|
{
"resource": ""
}
|
q19593
|
Utils._expected_condition_find_element_clickable
|
train
|
def _expected_condition_find_element_clickable(self, element):
"""Tries to find the element and checks that it is clickable, but does not thrown an exception if the element
is not found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:returns: the web element if it is clickable or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
web_element = self._expected_condition_find_element_visible(element)
try:
return web_element if web_element and web_element.is_enabled() else False
except StaleElementReferenceException:
return False
|
python
|
{
"resource": ""
}
|
q19594
|
Utils._expected_condition_find_element_stopped
|
train
|
def _expected_condition_find_element_stopped(self, element_times):
"""Tries to find the element and checks that it has stopped moving, but does not thrown an exception if the element
is not found
:param element_times: Tuple with 2 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] times: number of iterations checking the element's location that must be the same for all of them
in order to considering the element has stopped
:returns: the web element if it is clickable or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
element, times = element_times
web_element = self._expected_condition_find_element(element)
try:
        # Sample the element's location 'times' times; 'not time.sleep(0.001)'
        # is always True and just inserts a tiny pause between samples
        locations_list = [tuple(web_element.location.values()) for i in range(int(times)) if not time.sleep(0.001)]
        # The element has stopped if every sampled location equals the last one
        return web_element if set(locations_list) == set(locations_list[-1:]) else False
except StaleElementReferenceException:
return False
|
python
|
{
"resource": ""
}
|
q19595
|
Utils._expected_condition_find_element_containing_text
|
train
|
def _expected_condition_find_element_containing_text(self, element_text_pair):
"""Tries to find the element and checks that it contains the specified text, but does not thrown an exception if the element is
not found
:param element_text_pair: Tuple with 2 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] text: text to be contained into the element
:returns: the web element if it contains the text or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
element, text = element_text_pair
web_element = self._expected_condition_find_element(element)
try:
return web_element if web_element and text in web_element.text else False
except StaleElementReferenceException:
return False
|
python
|
{
"resource": ""
}
|
q19596
|
Utils._expected_condition_value_in_element_attribute
|
train
|
def _expected_condition_value_in_element_attribute(self, element_attribute_value):
"""Tries to find the element and checks that it contains the requested attribute with the expected value,
    but does not throw an exception if the element is not found
:param element_attribute_value: Tuple with 3 items where:
[0] element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
[1] attribute: element's attribute where to check its value
[2] value: expected value for the element's attribute
:returns: the web element if it contains the expected value for the requested attribute or False
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
"""
element, attribute, value = element_attribute_value
web_element = self._expected_condition_find_element(element)
try:
return web_element if web_element and web_element.get_attribute(attribute) == value else False
except StaleElementReferenceException:
return False
|
python
|
{
"resource": ""
}
|
q19597
|
Utils._wait_until
|
train
|
def _wait_until(self, condition_method, condition_input, timeout=None):
"""
Common method to wait until condition met
:param condition_method: method to check the condition
:param condition_input: parameter that will be passed to the condition method
:param timeout: max time to wait
:returns: condition method response
"""
    # Remove implicit wait timeout
self.driver_wrapper.driver.implicitly_wait(0)
    # Get explicit wait timeout
timeout = timeout if timeout else self.get_explicitly_wait()
# Wait for condition
condition_response = WebDriverWait(self.driver_wrapper.driver, timeout).until(
lambda s: condition_method(condition_input))
    # Restore implicit wait timeout from properties
self.set_implicitly_wait()
return condition_response
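
# Hedged sketch of how the public waits funnel through this helper
# (hypothetical 'utils' instance and locator):
#
#   from selenium.webdriver.common.by import By
#   utils._wait_until(utils._expected_condition_find_element,
#                     (By.ID, 'submit'), timeout=5)
#
# WebDriverWait raises TimeoutException if the condition never returns a
# truthy value within the timeout.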
|
python
|
{
"resource": ""
}
|
q19598
|
Utils.wait_until_element_present
|
train
|
def wait_until_element_present(self, element, timeout=None):
"""Search element and wait until it is found
:param element: PageElement or element locator as a tuple (locator_type, locator_value) to be found
:param timeout: max time to wait
:returns: the web element if it is present
:rtype: selenium.webdriver.remote.webelement.WebElement or appium.webdriver.webelement.WebElement
:raises TimeoutException: If the element is not found after the timeout
"""
return self._wait_until(self._expected_condition_find_element, element, timeout)
|
python
|
{
"resource": ""
}
|
q19599
|
Utils.wait_until_first_element_is_found
|
train
|
def wait_until_first_element_is_found(self, elements, timeout=None):
"""Search list of elements and wait until one of them is found
:param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found
sequentially
:param timeout: max time to wait
:returns: first element found
:rtype: toolium.pageelements.PageElement or tuple
:raises TimeoutException: If no element in the list is found after the timeout
"""
try:
return self._wait_until(self._expected_condition_find_first_element, elements, timeout)
except TimeoutException as exception:
msg = 'None of the page elements has been found after %s seconds'
timeout = timeout if timeout else self.get_explicitly_wait()
self.logger.error(msg, timeout)
exception.msg += "\n {}".format(msg % timeout)
raise exception
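
# Hedged usage sketch (hypothetical 'utils' instance and locators): plain
# selenium locator tuples work as list items, and the first one that matches
# is returned.
#
#   from selenium.webdriver.common.by import By
#   found = utils.wait_until_first_element_is_found(
#       [(By.ID, 'new_layout'), (By.ID, 'legacy_layout')], timeout=10)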
|
python
|
{
"resource": ""
}
|