Dataset schema (one record per code snippet):
    _id               : string (2-7 chars)
    title             : string (1-88 chars)
    partition         : string (3 classes)
    text              : string (75-19.8k chars)
    language          : string (1 class)
    meta_information  : dict
q30500
nsdiffs
train
def nsdiffs(x, m, max_D=2, test='ocsb', **kwargs):
    """Estimate the seasonal differencing term, ``D``.

    Perform a test of seasonality for different levels of ``D`` to estimate the number of seasonal differences required to make a given time series stationary. Will select the maximum value of ``D`` for which the time series is judged seasonally stationary by the statistical test.

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array to difference.

    m : int
        The number of seasonal periods (i.e., frequency of the time series)

    max_D : int, optional (default=2)
        Maximum number of seasonal differences allowed. Must be a positive integer. The estimated value of ``D`` will not exceed ``max_D``.

    test : str, optional (default='ocsb')
        Type of unit root test of seasonality to use in order to detect seasonal periodicity. Valid tests include ("ocsb", "ch"). Note that the CHTest is very slow for large data.

    Returns
    -------
    D : int
        The estimated seasonal differencing term. This is the maximum value of ``D`` such that ``D <= max_D`` and the time series is judged seasonally stationary. If the time series is constant, will return 0.
    """
    if max_D <= 0:
        raise ValueError('max_D must be a positive integer')

    # get the test - this validates m internally
    testfunc = get_callable(test, VALID_STESTS)(m, **kwargs)\
        .estimate_seasonal_differencing_term

    x = column_or_1d(
        check_array(x, ensure_2d=False, force_all_finite=True, dtype=DTYPE))

    if is_constant(x):
        return 0

    D = 0
    dodiff = testfunc(x)
    while dodiff == 1 and D < max_D:
        D += 1
        x = diff(x, lag=m)

        if is_constant(x):
            return D
        dodiff = testfunc(x)

    return D
python
{ "resource": "" }
q30501
ndiffs
train
def ndiffs(x, alpha=0.05, test='kpss', max_d=2, **kwargs):
    """Estimate ARIMA differencing term, ``d``.

    Perform a test of stationarity for different levels of ``d`` to estimate the number of differences required to make a given time series stationary. Will select the maximum value of ``d`` for which the time series is judged stationary by the statistical test.

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array (time series) to difference.

    alpha : float, optional (default=0.05)
        Level of the test. This is the value below which the P-value will be deemed significant.

    test : str, optional (default='kpss')
        Type of unit root test of stationarity to use in order to test the stationarity of the time-series. One of ('kpss', 'adf', 'pp')

    max_d : int, optional (default=2)
        Maximum number of non-seasonal differences allowed. Must be a positive integer. The estimated value of ``d`` will not exceed ``max_d``.

    Returns
    -------
    d : int
        The estimated differencing term. This is the maximum value of ``d`` such that ``d <= max_d`` and the time series is judged stationary. If the time series is constant, will return 0.

    References
    ----------
    .. [1] R's auto_arima ndiffs function: https://bit.ly/2Bu8CHN
    """
    if max_d <= 0:
        raise ValueError('max_d must be a positive integer')

    # get the test
    testfunc = get_callable(test, VALID_TESTS)(alpha, **kwargs).should_diff
    x = column_or_1d(
        check_array(x, ensure_2d=False, force_all_finite=True, dtype=DTYPE))

    # base case, if constant return 0
    d = 0
    if is_constant(x):
        return d

    # get initial diff
    pval, dodiff = testfunc(x)

    # if initially NaN, return 0
    if np.isnan(pval):
        return 0  # (d is zero, but this is more explicit to the reader)

    # Begin loop.
    while dodiff and d < max_d:
        d += 1

        # do differencing
        x = diff(x)
        if is_constant(x):
            return d

        # get new result
        pval, dodiff = testfunc(x)

        # if it's NaN now, take the last non-null one
        if np.isnan(pval):
            return d - 1

    # when d >= max_d
    return d
python
{ "resource": "" }
q30502
bind_df_model
train
def bind_df_model(model_fit, arima_results):
    """Set model degrees of freedom.

    Older versions of statsmodels don't handle this issue. Sets the model degrees of freedom in place if not already present.

    Parameters
    ----------
    model_fit : ARMA, ARIMA or SARIMAX
        The fitted model.

    arima_results : ModelResultsWrapper
        The results wrapper.
    """
    if not hasattr(arima_results, 'df_model'):
        df_model = model_fit.k_exog + model_fit.k_trend + \
            model_fit.k_ar + model_fit.k_ma + \
            model_fit.k_seasonal_ar + model_fit.k_seasonal_ma
        setattr(arima_results, 'df_model', df_model)
python
{ "resource": "" }
q30503
get_compatible_pyplot
train
def get_compatible_pyplot(backend=None, debug=True):
    """Make the backend of MPL compatible.

    In Travis Mac distributions, python is not installed as a framework. This means that using the TkAgg backend is the best solution (so it doesn't try to use the mac OS backend by default).

    Parameters
    ----------
    backend : str, optional (default=None)
        The backend to default to. If None, defaults to whatever backend is already in use.

    debug : bool, optional (default=True)
        Whether to log the existing backend to stderr.
    """
    import matplotlib

    # If the backend provided is None, just default to
    # what's already being used.
    existing_backend = matplotlib.get_backend()
    if backend is not None:
        # Can this raise?...
        matplotlib.use(backend)

        # Print out the new backend
        if debug:
            sys.stderr.write("Currently using '%s' MPL backend, "
                             "switching to '%s' backend%s"
                             % (existing_backend, backend, os.linesep))

    # If backend is not set via env variable, but debug is
    elif debug:
        sys.stderr.write("Using '%s' MPL backend%s"
                         % (existing_backend, os.linesep))

    from matplotlib import pyplot as plt
    return plt
python
{ "resource": "" }
q30504
_return_wrapper
train
def _return_wrapper(fits, return_all, start, trace):
    """If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list.

    We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`.

    Parameters
    ----------
    fits : iterable or ARIMA
        The ARIMA(s)

    return_all : bool
        Whether to return all.

    start : float
        The start time of the fit (as returned by ``time.time()``), used to report the total fit time when ``trace`` is enabled.

    trace : bool
        Whether to print the total fit time.
    """
    # make sure it's an iterable
    if not is_iterable(fits):
        fits = [fits]

    # whether to print the final runtime
    if trace:
        print('Total fit time: %.3f seconds' % (time.time() - start))

    # which to return? if not all, then first index (assume sorted)
    if not return_all:
        return fits[0]
    return fits
python
{ "resource": "" }
q30505
AutoARIMA.fit
train
def fit(self, y, exogenous=None, **fit_args):
    """Fit the auto-arima estimator

    Fit an AutoARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables.

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions.

    **fit_args : dict or kwargs
        Any keyword arguments to pass to the auto-arima function.
    """
    self.model_ = auto_arima(
        y, exogenous=exogenous, start_p=self.start_p, d=self.d,
        start_q=self.start_q, max_p=self.max_p, max_d=self.max_d,
        max_q=self.max_q, start_P=self.start_P, D=self.D,
        start_Q=self.start_Q, max_P=self.max_P, max_D=self.max_D,
        max_Q=self.max_Q, max_order=self.max_order, m=self.m,
        seasonal=self.seasonal, stationary=self.stationary,
        information_criterion=self.information_criterion,
        alpha=self.alpha, test=self.test,
        seasonal_test=self.seasonal_test, stepwise=self.stepwise,
        n_jobs=self.n_jobs, start_params=self.start_params,
        trend=self.trend, method=self.method,
        transparams=self.transparams, solver=self.solver,
        maxiter=self.maxiter, disp=self.disp, callback=self.callback,
        offset_test_args=self.offset_test_args,
        seasonal_test_args=self.seasonal_test_args,
        suppress_warnings=self.suppress_warnings,
        error_action=self.error_action, trace=self.trace,
        random=self.random, random_state=self.random_state,
        n_fits=self.n_fits,
        return_valid_fits=False,  # only return ONE
        out_of_sample_size=self.out_of_sample_size,
        scoring=self.scoring, scoring_args=self.scoring_args,
        with_intercept=self.with_intercept,
        **fit_args)

    return self
python
{ "resource": "" }
q30506
autocorr_plot
train
def autocorr_plot(series, show=True):
    """Plot a series' auto-correlation.

    A wrapper method for the Pandas ``autocorrelation_plot`` method.

    Parameters
    ----------
    series : array-like, shape=(n_samples,)
        The series or numpy array for which to plot an auto-correlation.

    show : bool, optional (default=True)
        Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead.

    Notes
    -----
    This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``.

    Examples
    --------
    >>> autocorr_plot([1, 2, 3], False)  # doctest: +SKIP
    <matplotlib.axes._subplots.AxesSubplot object at 0x127f41dd8>

    Returns
    -------
    res : Axis or None
        If ``show`` is True, does not return anything. If False, returns the Axis object.
    """
    _err_for_no_mpl()
    res = ap(series)
    return _show_or_return(res, show)
python
{ "resource": "" }
q30507
plot_acf
train
def plot_acf(series, ax=None, lags=None, alpha=None, use_vlines=True,
             unbiased=False, fft=True, title='Autocorrelation',
             zero=True, vlines_kwargs=None, show=True, **kwargs):
    """Plot a series' auto-correlation as a line plot.

    A wrapper method for the statsmodels ``plot_acf`` method.

    Parameters
    ----------
    series : array-like, shape=(n_samples,)
        The series or numpy array for which to plot an auto-correlation.

    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being created.

    lags : int, array-like or None, optional (default=None)
        int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used.

    alpha : scalar, optional (default=None)
        If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted.

    use_vlines : bool, optional (default=True)
        If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg.

    unbiased : bool, optional (default=False)
        If True, then denominators for autocovariance are n-k, otherwise n

    fft : bool, optional (default=True)
        If True, computes the ACF via FFT.

    title : str, optional (default='Autocorrelation')
        Title to place on plot. Default is 'Autocorrelation'

    zero : bool, optional (default=True)
        Flag indicating whether to include the 0-lag autocorrelation. Default is True.

    vlines_kwargs : dict, optional (default=None)
        Optional dictionary of keyword arguments that are passed to vlines.

    show : bool, optional (default=True)
        Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead.

    **kwargs : kwargs, optional
        Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions.

    Notes
    -----
    This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``.

    Examples
    --------
    >>> plot_acf([1, 2, 3], show=False)  # doctest: +SKIP
    <matplotlib.figure.Figure object at 0x122fab4e0>

    Returns
    -------
    plt : Axis or None
        If ``show`` is True, does not return anything. If False, returns the Axis object.
    """
    _err_for_no_mpl()
    res = pacf(x=series, ax=ax, lags=lags, alpha=alpha,
               use_vlines=use_vlines, unbiased=unbiased, fft=fft,
               title=title, zero=zero, vlines_kwargs=vlines_kwargs,
               **kwargs)

    return _show_or_return(res, show)
python
{ "resource": "" }
q30508
plot_pacf
train
def plot_pacf(series, ax=None, lags=None, alpha=None, method='yw',
              use_vlines=True, title='Partial Autocorrelation',
              zero=True, vlines_kwargs=None, show=True, **kwargs):
    """Plot a series' partial auto-correlation as a line plot.

    A wrapper method for the statsmodels ``plot_pacf`` method.

    Parameters
    ----------
    series : array-like, shape=(n_samples,)
        The series or numpy array for which to plot an auto-correlation.

    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being created.

    lags : int, array-like or None, optional (default=None)
        int or Array of lag values, used on horizontal axis. Uses np.arange(lags) when lags is an int. If not provided, ``lags=np.arange(len(corr))`` is used.

    alpha : scalar, optional (default=None)
        If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett's formula. If None, no confidence intervals are plotted.

    method : str, optional (default='yw')
        Specifies which method for the calculations to use. One of {'ywunbiased', 'ywmle', 'ols', 'ld', 'ldb', 'ldunbiased', 'ldbiased'}:

        - yw or ywunbiased : yule walker with bias correction in denominator for acovf. Default.
        - ywm or ywmle : yule walker without bias correction
        - ols : regression of time series on lags of it and on constant
        - ld or ldunbiased : Levinson-Durbin recursion with bias correction
        - ldb or ldbiased : Levinson-Durbin recursion without bias correction

    use_vlines : bool, optional (default=True)
        If True, vertical lines and markers are plotted. If False, only markers are plotted. The default marker is 'o'; it can be overridden with a ``marker`` kwarg.

    title : str, optional (default='Partial Autocorrelation')
        Title to place on plot. Default is 'Partial Autocorrelation'

    zero : bool, optional (default=True)
        Flag indicating whether to include the 0-lag autocorrelation. Default is True.

    vlines_kwargs : dict, optional (default=None)
        Optional dictionary of keyword arguments that are passed to vlines.

    show : bool, optional (default=True)
        Whether to show the plot after it's been created. If not, will return the plot as an Axis object instead.

    **kwargs : kwargs, optional
        Optional keyword arguments that are directly passed on to the Matplotlib ``plot`` and ``axhline`` functions.

    Notes
    -----
    This method will only show the plot if ``show=True`` (which is the default behavior). To simply get the axis back (say, to add to another canvas), use ``show=False``.

    Examples
    --------
    >>> plot_pacf([1, 2, 3, 4], show=False)  # doctest: +SKIP
    <matplotlib.figure.Figure object at 0x129df1630>

    Returns
    -------
    plt : Axis or None
        If ``show`` is True, does not return anything. If False, returns the Axis object.
    """
    _err_for_no_mpl()
    res = ppacf(x=series, ax=ax, lags=lags, alpha=alpha,
                method=method, use_vlines=use_vlines, title=title,
                zero=zero, vlines_kwargs=vlines_kwargs, **kwargs)

    return _show_or_return(res, show)
python
{ "resource": "" }
q30509
load_airpassengers
train
def load_airpassengers(as_series=False):
    """Monthly airline passengers.

    The classic Box & Jenkins airline data. Monthly totals of international airline passengers, 1949 to 1960.

    Parameters
    ----------
    as_series : bool, optional (default=False)
        Whether to return a Pandas series. If False, will return a 1d numpy array.

    Returns
    -------
    rslt : array-like, shape=(n_samples,)
        The time series vector.

    Examples
    --------
    >>> from pmdarima.datasets import load_airpassengers
    >>> load_airpassengers()  # doctest: +SKIP
    np.array([
        112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
        115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
        145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
        171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
        196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
        204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
        242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
        284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
        315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
        340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
        360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405,
        417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432])

    >>> load_airpassengers(True).head()
    0    112.0
    1    118.0
    2    132.0
    3    129.0
    4    121.0
    dtype: float64

    Notes
    -----
    This is monthly data, so *m* should be set to 12 when using in a seasonal context.

    References
    ----------
    .. [1] Box, G. E. P., Jenkins, G. M. and Reinsel, G. C. (1976) "Time Series Analysis, Forecasting and Control. Third Edition." Holden-Day. Series G.
    """
    rslt = np.array([
        112, 118, 132, 129, 121, 135, 148, 148, 136, 119, 104, 118,
        115, 126, 141, 135, 125, 149, 170, 170, 158, 133, 114, 140,
        145, 150, 178, 163, 172, 178, 199, 199, 184, 162, 146, 166,
        171, 180, 193, 181, 183, 218, 230, 242, 209, 191, 172, 194,
        196, 196, 236, 235, 229, 243, 264, 272, 237, 211, 180, 201,
        204, 188, 235, 227, 234, 264, 302, 293, 259, 229, 203, 229,
        242, 233, 267, 269, 270, 315, 364, 347, 312, 274, 237, 278,
        284, 277, 317, 313, 318, 374, 413, 405, 355, 306, 271, 306,
        315, 301, 356, 348, 355, 422, 465, 467, 404, 347, 305, 336,
        340, 318, 362, 348, 363, 435, 491, 505, 404, 359, 310, 337,
        360, 342, 406, 396, 420, 472, 548, 559, 463, 407, 362, 405,
        417, 391, 419, 461, 472, 535, 622, 606, 508, 461, 390, 432
    ]).astype(np.float64)

    if as_series:
        return pd.Series(rslt)
    return rslt
python
{ "resource": "" }
q30510
if_has_delegate
train
def if_has_delegate(delegate):
    """Wrap a delegated instance attribute function.

    Creates a decorator for methods that are delegated in the presence of a results wrapper. This enables duck-typing by ``hasattr`` returning True according to the sub-estimator.

    This function was adapted from scikit-learn, which defines ``if_delegate_has_method``, but operates differently by injecting methods not based on method presence, but by delegate presence.

    Examples
    --------
    >>> from pmdarima.utils.metaestimators import if_has_delegate
    >>>
    >>> class A(object):
    ...     @if_has_delegate('d')
    ...     def func(self):
    ...         return True
    >>>
    >>> a = A()
    >>> # the delegate does not exist yet
    >>> assert not hasattr(a, 'func')
    >>> # inject the attribute
    >>> a.d = None
    >>> assert hasattr(a, 'func') and a.func()

    Parameters
    ----------
    delegate : string, list of strings or tuple of strings
        Name of the sub-estimator that can be accessed as an attribute of the base object. If a list or a tuple of names are provided, the first sub-estimator that is an attribute of the base object will be used.
    """
    if isinstance(delegate, list):
        delegate = tuple(delegate)
    if not isinstance(delegate, tuple):
        delegate = (delegate,)

    return lambda fn: _IffHasDelegate(fn, delegate)
python
{ "resource": "" }
q30511
load_heartrate
train
def load_heartrate(as_series=False):
    """Uniform heart-rate data.

    A sample of heartrate data borrowed from an `MIT database <http://ecg.mit.edu/time-series/>`_. The sample consists of 150 evenly spaced (0.5 seconds) heartrate measurements.

    Parameters
    ----------
    as_series : bool, optional (default=False)
        Whether to return a Pandas series. If False, will return a 1d numpy array.

    Returns
    -------
    rslt : array-like, shape=(n_samples,)
        The heartrate vector.

    Examples
    --------
    >>> from pmdarima.datasets import load_heartrate
    >>> load_heartrate()
    array([84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726,
           86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605,
           93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904 , 98.3437,
           98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471,
           97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389 ,
           95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026,
           96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603,
           93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211,
           95.8501, 94.6703, 93.0993, 91.972 , 91.7821, 91.7911, 90.807 ,
           89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238,
           93.243 , 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952,
           91.771 , 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716,
           94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819,
           93.4731, 92.7997, 92.963 , 92.6996, 91.9648, 91.2417, 91.9312,
           93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065,
           92.1588, 93.6376, 94.899 , 95.1592, 95.2415, 95.5414, 95.0971,
           94.528 , 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947,
           97.4291, 98.1767, 97.0148, 96.044 , 95.9581, 96.4814, 96.5211,
           95.3629, 93.5741, 92.077 , 90.4094, 90.1751, 91.3312, 91.2883,
           89.0592, 87.052 , 86.6226, 85.7889, 85.6348, 85.3911, 83.8064,
           82.8729, 82.6266, 82.645 , 82.645 , 82.645 , 82.645 , 82.645 ,
           82.645 , 82.645 , 82.645 ])

    >>> load_heartrate(True).head()
    0    84.2697
    1    84.2697
    2    84.0619
    3    85.6542
    4    87.2093
    dtype: float64

    References
    ----------
    .. [1] Goldberger AL, Rigney DR. Nonlinear dynamics at the bedside. In: Glass L, Hunter P, McCulloch A, eds. Theory of Heart: Biomechanics, Biophysics, and Nonlinear Dynamics of Cardiac Function. New York: Springer-Verlag, 1991, pp. 583-605.
    """
    rslt = np.array([
        84.2697, 84.2697, 84.0619, 85.6542, 87.2093, 87.1246, 86.8726,
        86.7052, 87.5899, 89.1475, 89.8204, 89.8204, 90.4375, 91.7605,
        93.1081, 94.3291, 95.8003, 97.5119, 98.7457, 98.904, 98.3437,
        98.3075, 98.8313, 99.0789, 98.8157, 98.2998, 97.7311, 97.6471,
        97.7922, 97.2974, 96.2042, 95.2318, 94.9367, 95.0867, 95.389,
        95.5414, 95.2439, 94.9415, 95.3557, 96.3423, 97.1563, 97.4026,
        96.7028, 96.5516, 97.9837, 98.9879, 97.6312, 95.4064, 93.8603,
        93.0552, 94.6012, 95.8476, 95.7692, 95.9236, 95.7692, 95.9211,
        95.8501, 94.6703, 93.0993, 91.972, 91.7821, 91.7911, 90.807,
        89.3196, 88.1511, 88.7762, 90.2265, 90.8066, 91.2284, 92.4238,
        93.243, 92.8472, 92.5926, 91.7778, 91.2974, 91.6364, 91.2952,
        91.771, 93.2285, 93.3199, 91.8799, 91.2239, 92.4055, 93.8716,
        94.5825, 94.5594, 94.9453, 96.2412, 96.6879, 95.8295, 94.7819,
        93.4731, 92.7997, 92.963, 92.6996, 91.9648, 91.2417, 91.9312,
        93.9548, 95.3044, 95.2511, 94.5358, 93.8093, 93.2287, 92.2065,
        92.1588, 93.6376, 94.899, 95.1592, 95.2415, 95.5414, 95.0971,
        94.528, 95.5887, 96.4715, 96.6158, 97.0769, 96.8531, 96.3947,
        97.4291, 98.1767, 97.0148, 96.044, 95.9581, 96.4814, 96.5211,
        95.3629, 93.5741, 92.077, 90.4094, 90.1751, 91.3312, 91.2883,
        89.0592, 87.052, 86.6226, 85.7889, 85.6348, 85.3911, 83.8064,
        82.8729, 82.6266, 82.645, 82.645, 82.645, 82.645, 82.645,
        82.645, 82.645, 82.645])

    if as_series:
        return pd.Series(rslt)
    return rslt
python
{ "resource": "" }
q30512
benchmark_is_constant
train
def benchmark_is_constant():
    """This benchmarks the "is_constant" function from ``pmdarima.arima.utils``

    This was added in 0.6.2.
    """
    # WINNER!
    def is_const1(x):
        """This is the version in Pyramid 0.6.2.

        Parameters
        ----------
        x : np.ndarray
            This is the array.
        """
        return (x == x[0]).all()

    def is_const2(x):
        """This should ostensibly only take O(N) rather than O(2N) like its predecessor. But we'll see...

        Parameters
        ----------
        x : np.ndarray
            This is the array.
        """
        return np.unique(x).shape[0] == 1

    x = np.random.choice(np.arange(10), 1000000, replace=True)
    _do_time(is_const1, 25, x)
    _do_time(is_const2, 25, x)
python
{ "resource": "" }
q30513
load_woolyrnq
train
def load_woolyrnq(as_series=False):
    """Quarterly production of woollen yarn in Australia.

    This time-series records the quarterly production (in tonnes) of woollen yarn in Australia between Mar 1965 and Sep 1994.

    Parameters
    ----------
    as_series : bool, optional (default=False)
        Whether to return a Pandas series. If True, the index will be set to the observed years/quarters. If False, will return a 1d numpy array.

    Examples
    --------
    >>> from pmdarima.datasets import load_woolyrnq
    >>> load_woolyrnq()
    array([6172, 6709, 6633, 6660, 6786, 6800, 6730, 6765, 6720, 7133, 6946,
           7095, 7047, 6757, 6915, 6921, 7064, 7206, 7190, 7402, 7819, 7300,
           7105, 7259, 7001, 7475, 6840, 7061, 5845, 7529, 7819, 6943, 5714,
           6556, 7045, 5947, 5463, 6127, 5540, 4235, 3324, 4793, 5906, 5834,
           5240, 5458, 5505, 5002, 3999, 4826, 5318, 4681, 4442, 5305, 5466,
           4995, 4573, 5081, 5696, 5079, 4373, 4986, 5341, 4800, 4161, 5007,
           5464, 5127, 4240, 5338, 5129, 4437, 3642, 4602, 5524, 4895, 4380,
           5186, 6080, 5588, 5009, 5663, 6540, 6262, 5169, 5819, 6339, 5981,
           4766, 5976, 6590, 5590, 5135, 5762, 6077, 5882, 4247, 5264, 5146,
           4868, 4329, 4869, 5127, 4868, 3827, 4987, 5222, 4928, 3930, 4469,
           4954, 4752, 3888, 4588, 5309, 4732, 4837, 6135, 6396])

    >>> load_woolyrnq(True).head()
    Q1 1965    6172
    Q2 1965    6709
    Q3 1965    6633
    Q4 1965    6660
    Q1 1966    6786
    dtype: int64

    Notes
    -----
    This is quarterly data, so *m* should be set to 4 when using in a seasonal context.

    References
    ----------
    .. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/woolyrnq  # noqa: E501

    Returns
    -------
    rslt : array-like, shape=(n_samples,)
        The woolyrnq dataset. There are 119 observations.
    """
    rslt = np.array([
        6172, 6709, 6633, 6660, 6786, 6800, 6730, 6765, 6720, 7133,
        6946, 7095, 7047, 6757, 6915, 6921, 7064, 7206, 7190, 7402,
        7819, 7300, 7105, 7259, 7001, 7475, 6840, 7061, 5845, 7529,
        7819, 6943, 5714, 6556, 7045, 5947, 5463, 6127, 5540, 4235,
        3324, 4793, 5906, 5834, 5240, 5458, 5505, 5002, 3999, 4826,
        5318, 4681, 4442, 5305, 5466, 4995, 4573, 5081, 5696, 5079,
        4373, 4986, 5341, 4800, 4161, 5007, 5464, 5127, 4240, 5338,
        5129, 4437, 3642, 4602, 5524, 4895, 4380, 5186, 6080, 5588,
        5009, 5663, 6540, 6262, 5169, 5819, 6339, 5981, 4766, 5976,
        6590, 5590, 5135, 5762, 6077, 5882, 4247, 5264, 5146, 4868,
        4329, 4869, 5127, 4868, 3827, 4987, 5222, 4928, 3930, 4469,
        4954, 4752, 3888, 4588, 5309, 4732, 4837, 6135, 6396])

    if not as_series:
        return rslt

    # Otherwise we want a series and have to cleverly create the index
    # (with quarters, and we don't want Q4 in 1994)
    index = [
        "Q%i %i" % (i + 1, year)
        for year in range(1965, 1995)
        for i in range(4)
    ][:-1]  # trim off the last one.

    return pd.Series(rslt, index=index)
python
{ "resource": "" }
q30514
as_series
train
def as_series(x):
    """Cast as pandas Series.

    Cast an iterable to a Pandas Series object. Note that the index will simply be a positional ``arange`` and cannot be set in this function.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        The 1d array on which to compute the auto correlation.

    Examples
    --------
    >>> as_series([1, 2, 3])
    0    1
    1    2
    2    3
    dtype: int64

    >>> as_series(as_series((1, 2, 3)))
    0    1
    1    2
    2    3
    dtype: int64

    >>> import pandas as pd
    >>> as_series(pd.Series([4, 5, 6], index=['a', 'b', 'c']))
    a    4
    b    5
    c    6
    dtype: int64

    Returns
    -------
    s : pd.Series
        A pandas Series object.
    """
    if isinstance(x, pd.Series):
        return x
    return pd.Series(column_or_1d(x))
python
{ "resource": "" }
q30515
c
train
def c(*args):
    r"""Imitates the ``c`` function from R.

    Since this whole library is aimed at re-creating in Python what R has already done so well, the ``c`` function was created to wrap ``numpy.concatenate`` and mimic the R functionality. Similar to R, this works with scalars, iterables, and any mix therein.

    Note that using the ``c`` function on multi-nested lists or iterables will fail!

    Examples
    --------
    Using ``c`` with varargs will yield a single array:

    >>> c(1, 2, 3, 4)
    array([1, 2, 3, 4])

    Using ``c`` with nested lists and scalars will also yield a single array:

    >>> c([1, 2], 4, c(5, 4))
    array([1, 2, 4, 5, 4])

    However, using ``c`` with multi-level lists will fail!

    >>> c([1, 2, 3], [[1, 2]])  # doctest: +SKIP
    ValueError: all the input arrays must have same number of dimensions

    References
    ----------
    .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/c.html
    """
    # R returns NULL for this
    if not args:
        return None

    # just an array of len 1
    if len(args) == 1:
        element = args[0]

        # if it's iterable, make it an array
        if is_iterable(element):
            return np.asarray(element)

        # otherwise it's not iterable, put it in an array
        return np.asarray([element])

    # np.concat all. This can be slow, as noted by numerous threads on
    # numpy concat efficiency, however an alternative using recursive
    # yields was tested and performed far worse:
    #
    # >>> def timeit(func, ntimes, *args):
    # ...     times = []
    # ...     for i in range(ntimes):
    # ...         start = time.time()
    # ...         func(*args)
    # ...         times.append(time.time() - start)
    # ...     arr = np.asarray(times)
    # ...     print("%s (%i times) - Mean: %.5f sec, "
    # ...           "Min: %.5f sec, Max: %.5f" % (func.__name__, ntimes,
    # ...                                         arr.mean(), arr.min(),
    # ...                                         arr.max()))
    # >>> y = [np.arange(10000), range(500), (1000,), 100, np.arange(50000)]
    # >>> timeit(c1, 100, *y)
    # c1 (100 times) - Mean: 0.00009 sec, Min: 0.00006 sec, Max: 0.00065
    # >>> timeit(c2, 100, *y)
    # c2 (100 times) - Mean: 0.08708 sec, Min: 0.08273 sec, Max: 0.10115
    #
    # So we stick with c1, which is this variant.
    return np.concatenate([a if is_iterable(a) else [a] for a in args])
python
{ "resource": "" }
q30516
diff
train
def diff(x, lag=1, differences=1):
    """Difference an array.

    A python implementation of the R ``diff`` function [1]. This computes lag differences from an array given a ``lag`` and ``differencing`` term.

    If ``x`` is a vector of length :math:`n`, ``lag=1`` and ``differences=1``, then the computed result is equal to the successive differences ``x[lag:n] - x[:n-lag]``.

    Examples
    --------
    Where ``lag=1`` and ``differences=1``:

    >>> x = c(10, 4, 2, 9, 34)
    >>> diff(x, 1, 1)
    array([ -6.,  -2.,   7.,  25.], dtype=float32)

    Where ``lag=1`` and ``differences=2``:

    >>> x = c(10, 4, 2, 9, 34)
    >>> diff(x, 1, 2)
    array([  4.,   9.,  18.], dtype=float32)

    Where ``lag=3`` and ``differences=1``:

    >>> x = c(10, 4, 2, 9, 34)
    >>> diff(x, 3, 1)
    array([ -1.,  30.], dtype=float32)

    Where ``lag=6`` (larger than the array is) and ``differences=1``:

    >>> x = c(10, 4, 2, 9, 34)
    >>> diff(x, 6, 1)
    array([], dtype=float32)

    For a 2d array with ``lag=1`` and ``differences=1``:

    >>> import numpy as np
    >>>
    >>> x = np.arange(1, 10).reshape((3, 3)).T
    >>> diff(x, 1, 1)
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)

    Parameters
    ----------
    x : array-like, shape=(n_samples, [n_features])
        The array to difference.

    lag : int, optional (default=1)
        An integer > 0 indicating which lag to use.

    differences : int, optional (default=1)
        An integer > 0 indicating the order of the difference.

    Returns
    -------
    res : np.ndarray, shape=(n_samples, [n_features])
        The result of the differenced arrays.

    References
    ----------
    .. [1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/diff.html
    """
    if any(v < 1 for v in (lag, differences)):
        raise ValueError('lag and differences must be positive (> 0) '
                         'integers')

    x = check_array(x, ensure_2d=False, dtype=np.float32)  # type: np.ndarray
    fun = _diff_vector if x.ndim == 1 else _diff_matrix
    res = x

    # "recurse" over range of differences
    for i in range(differences):
        res = fun(res, lag)

        # if it ever comes back empty, just return it as is
        if not res.shape[0]:
            return res

    return res
python
{ "resource": "" }
q30517
_aicc
train
def _aicc(model_results, nobs):
    """Compute the corrected Akaike Information Criterion"""
    aic = model_results.aic
    df_model = model_results.df_model + 1  # add one for constant term
    return aic + 2. * df_model * (nobs / (nobs - df_model - 1.) - 1.)
python
{ "resource": "" }
q30518
_append_to_endog
train
def _append_to_endog(endog, new_y):
    """Append to the endogenous array

    Parameters
    ----------
    endog : np.ndarray, shape=(n_samples, [1])
        The existing endogenous array

    new_y : np.ndarray, shape=(n_samples)
        The new endogenous array to append
    """
    return np.concatenate((endog, new_y)) if \
        endog.ndim == 1 else \
        np.concatenate((endog.ravel(), new_y))[:, np.newaxis]
python
{ "resource": "" }
q30519
ARIMA.fit
train
def fit(self, y, exogenous=None, **fit_args):
    """Fit an ARIMA to a vector, ``y``, of observations with an optional matrix of ``exogenous`` variables.

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions.

    **fit_args : dict or kwargs
        Any keyword arguments to pass to the statsmodels ARIMA fit.
    """
    y = c1d(check_array(y, ensure_2d=False,
                        force_all_finite=False,
                        copy=True, dtype=DTYPE))  # type: np.ndarray
    n_samples = y.shape[0]

    # if exog was included, check the array...
    if exogenous is not None:
        exogenous = check_array(exogenous, ensure_2d=True,
                                force_all_finite=False,
                                copy=False, dtype=DTYPE)

    # determine the CV args, if any
    cv = self.out_of_sample_size
    scoring = get_callable(self.scoring, VALID_SCORING)

    # don't allow negative, don't allow > n_samples
    cv = max(cv, 0)

    # if cv is too big, raise
    if cv >= n_samples:
        raise ValueError("out-of-sample size must be less than number "
                         "of samples!")

    # If we want to get a score on the out-of-sample, we need to trim
    # down the size of our y vec for fitting. Addressed due to Issue #28
    cv_samples = None
    cv_exog = None
    if cv:
        cv_samples = y[-cv:]
        y = y[:-cv]

        # This also means we have to address the exogenous matrix
        if exogenous is not None:
            cv_exog = exogenous[-cv:, :]
            exogenous = exogenous[:-cv, :]

    # Internal call
    self._fit(y, exogenous, **fit_args)

    # now make a forecast if we're validating to compute the
    # out-of-sample score
    if cv_samples is not None:
        # get the predictions (use self.predict, which calls forecast
        # from statsmodels internally)
        pred = self.predict(n_periods=cv, exogenous=cv_exog)
        self.oob_ = scoring(cv_samples, pred, **self.scoring_args)
        self.oob_preds_ = pred

        # If we compute out of sample scores, we have to now update the
        # observed time points so future forecasts originate from the end
        # of our y vec
        self.update(cv_samples, cv_exog, **fit_args)
    else:
        self.oob_ = np.nan
        self.oob_preds_ = None

    return self
python
{ "resource": "" }
q30520
ARIMA.predict_in_sample
train
def predict_in_sample(self, exogenous=None, start=None, end=None,
                      dynamic=False):
    """Generate in-sample predictions from the fit ARIMA model.

    This can be useful when wanting to visualize the fit, and qualitatively inspect the efficacy of the model, or when wanting to compute the residuals of the model.

    Parameters
    ----------
    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions.

    start : int, optional (default=None)
        Zero-indexed observation number at which to start forecasting, i.e., the first forecast is ``start``.

    end : int, optional (default=None)
        Zero-indexed observation number at which to end forecasting, i.e., the last forecast is ``end``.

    dynamic : bool, optional
        The `dynamic` keyword affects in-sample prediction. If dynamic is False, then the in-sample lagged values are used for prediction. If `dynamic` is True, then in-sample forecasts are used in place of lagged dependent variables. The first forecasted value is `start`.

    Returns
    -------
    predict : array
        The predicted values.
    """
    check_is_fitted(self, 'arima_res_')

    # if we fit with exog, make sure one was passed:
    exogenous = self._check_exog(exogenous)  # type: np.ndarray
    return self.arima_res_.predict(exog=exogenous, start=start,
                                   end=end, dynamic=dynamic)
python
{ "resource": "" }
q30521
ARIMA.predict
train
def predict(self, n_periods=10, exogenous=None,
            return_conf_int=False, alpha=0.05):
    """Forecast future values

    Generate predictions (forecasts) ``n_periods`` in the future. Note that if ``exogenous`` variables were used in the model fit, they will be expected for the predict procedure and will fail otherwise.

    Parameters
    ----------
    n_periods : int, optional (default=10)
        The number of periods in the future to forecast.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions.

    return_conf_int : bool, optional (default=False)
        Whether to get the confidence intervals of the forecasts.

    alpha : float, optional (default=0.05)
        The confidence intervals for the forecasts are (1 - alpha) * 100%

    Returns
    -------
    forecasts : array-like, shape=(n_periods,)
        The array of forecasted values.

    conf_int : array-like, shape=(n_periods, 2), optional
        The confidence intervals for the forecasts. Only returned if ``return_conf_int`` is True.
    """
    check_is_fitted(self, 'arima_res_')
    if not isinstance(n_periods, (int, long)):
        raise TypeError("n_periods must be an int or a long")

    # if we fit with exog, make sure one was passed:
    exogenous = self._check_exog(exogenous)  # type: np.ndarray
    if exogenous is not None and exogenous.shape[0] != n_periods:
        raise ValueError('Exogenous array dims (n_rows) != n_periods')

    # ARIMA/ARMA predict differently...
    if not self._is_seasonal():
        # use the results wrapper to predict so it injects its own params
        # (also, if the I (differencing) term was 0, an ARMA will not have
        # a forecast method natively)
        f, _, conf_int = self.arima_res_.forecast(
            steps=n_periods, exog=exogenous, alpha=alpha)
    else:  # SARIMAX
        # Unfortunately, SARIMAX does not really provide a nice way to get
        # the confidence intervals out of the box, so we have to perform
        # the get_prediction code here and unpack the confidence intervals
        # manually.
        # f = self.arima_res_.forecast(steps=n_periods, exog=exogenous)
        arima = self.arima_res_
        end = arima.nobs + n_periods - 1
        results = arima.get_prediction(start=arima.nobs, end=end,
                                       exog=exogenous)
        f = results.predicted_mean
        conf_int = results.conf_int(alpha=alpha)

    if return_conf_int:
        # The confidence intervals may be a Pandas frame if it comes from
        # SARIMAX & we want Numpy. We want to duck type it so we don't add
        # new explicit requirements for the package
        return f, check_array(conf_int, force_all_finite=False)
    return f
python
{ "resource": "" }
q30522
ARIMA.conf_int
train
def conf_int(self, alpha=0.05, **kwargs):
    r"""Returns the confidence interval of the fitted parameters.

    Parameters
    ----------
    alpha : float, optional (default=0.05)
        The significance level for the confidence interval. i.e., the default alpha = .05 returns a 95% confidence interval.

    **kwargs : keyword args or dict
        Keyword arguments to pass to the confidence interval function. Could include 'cols' or 'method'

    Returns
    -------
    conf_int : array-like
        The confidence intervals of the fitted parameters, as computed by the underlying statsmodels results wrapper.
    """
    return self.arima_res_.conf_int(alpha=alpha, **kwargs)
python
{ "resource": "" }
q30523
ARIMA.to_dict
train
def to_dict(self):
    """Get the ARIMA model as a dictionary

    Return the dictionary representation of the ARIMA model

    Returns
    -------
    res : dictionary
        The ARIMA model as a dictionary.
    """
    return {
        'pvalues': self.pvalues(),
        'resid': self.resid(),
        'order': self.order,
        'seasonal_order': self.seasonal_order,
        'oob': self.oob(),
        'aic': self.aic(),
        'aicc': self.aicc(),
        'bic': self.bic(),
        'bse': self.bse(),
        'params': self.params()
    }
python
{ "resource": "" }
q30524
ARIMA.plot_diagnostics
train
def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):
    """Plot an ARIMA's diagnostics.

    Diagnostic plots for standardized residuals of one endogenous variable

    Parameters
    ----------
    variable : integer, optional
        Index of the endogenous variable for which the diagnostic plots should be created. Default is 0.

    lags : integer, optional
        Number of lags to include in the correlogram. Default is 10.

    fig : Matplotlib Figure instance, optional
        If given, subplots are created in this figure instead of in a new figure. Note that the 2x2 grid will be created in the provided figure using `fig.add_subplot()`.

    figsize : tuple, optional
        If a figure is created, this argument allows specifying a size. The tuple is (width, height).

    Notes
    -----
    Produces a 2x2 plot grid with the following plots (ordered clockwise from top left):

    1. Standardized residuals over time
    2. Histogram plus estimated density of standardized residuals, along with a Normal(0,1) density plotted for reference.
    3. Normal Q-Q plot, with Normal reference line.
    4. Correlogram

    See Also
    --------
    statsmodels.graphics.gofplots.qqplot
    pmdarima.utils.visualization.plot_acf

    References
    ----------
    .. [1] https://www.statsmodels.org/dev/_modules/statsmodels/tsa/statespace/mlemodel.html#MLEResults.plot_diagnostics  # noqa: E501
    """
    # implicitly checks whether installed, and does our backend magic:
    _get_plt()

    # We originally delegated down to SARIMAX model wrapper, but
    # statsmodels makes it difficult to trust their API, so we just re-
    # implemented a common method for all results wrappers.
    from statsmodels.graphics.utils import create_mpl_fig
    fig = create_mpl_fig(fig, figsize)

    res_wpr = self.arima_res_
    data = res_wpr.data

    # Eliminate residuals associated with burned or diffuse likelihoods.
    # The statsmodels code for the Kalman Filter takes the loglik_burn
    # as a parameter:
    #
    # loglikelihood_burn : int, optional
    #     The number of initial periods during which the loglikelihood
    #     is not recorded. Default is 0.
    #
    # If the class has it, it's a SARIMAX and we'll use it. Otherwise we
    # will just access the residuals as we normally would...
    if hasattr(res_wpr, 'loglikelihood_burn'):
        # This is introduced in the bleeding edge version, but is not
        # backwards compatible with 0.9.0 and less:
        d = res_wpr.loglikelihood_burn
        if hasattr(res_wpr, 'nobs_diffuse'):
            d = np.maximum(d, res_wpr.nobs_diffuse)

        resid = res_wpr.filter_results\
            .standardized_forecasts_error[variable, d:]
    else:
        # This gets the residuals, but they need to be standardized
        d = 0
        r = res_wpr.resid
        resid = (r - np.nanmean(r)) / np.nanstd(r)

    # Top-left: residuals vs time
    ax = fig.add_subplot(221)
    if hasattr(data, 'dates') and data.dates is not None:
        x = data.dates[d:]._mpl_repr()
    else:
        x = np.arange(len(resid))
    ax.plot(x, resid)
    ax.hlines(0, x[0], x[-1], alpha=0.5)
    ax.set_xlim(x[0], x[-1])
    ax.set_title('Standardized residual')

    # Top-right: histogram, Gaussian kernel density, Normal density
    # Can only do histogram and Gaussian kernel density on the non-null
    # elements
    resid_nonmissing = resid[~(np.isnan(resid))]
    ax = fig.add_subplot(222)

    # temporarily disable Deprecation warning, normed -> density
    # hist needs to use `density` in future when minimum matplotlib has it
    with warnings.catch_warnings(record=True):
        ax.hist(resid_nonmissing, normed=True, label='Hist')

    kde = gaussian_kde(resid_nonmissing)
    xlim = (-1.96 * 2, 1.96 * 2)
    x = np.linspace(xlim[0], xlim[1])
    ax.plot(x, kde(x), label='KDE')
    ax.plot(x, norm.pdf(x), label='N(0,1)')
    ax.set_xlim(xlim)
    ax.legend()
    ax.set_title('Histogram plus estimated density')

    # Bottom-left: QQ plot
    ax = fig.add_subplot(223)
    from statsmodels.graphics.gofplots import qqplot
    qqplot(resid_nonmissing, line='s', ax=ax)
    ax.set_title('Normal Q-Q')

    # Bottom-right: Correlogram
    ax = fig.add_subplot(224)
    from statsmodels.graphics.tsaplots import plot_acf
    plot_acf(resid, ax=ax, lags=lags)
    ax.set_title('Correlogram')
    ax.set_ylim(-1, 1)

    return fig
python
{ "resource": "" }
q30525
BoxCoxEndogTransformer.transform
train
def transform(self, y, exogenous=None, **_):
    """Transform the new array

    Apply the Box-Cox transformation to the array after learning the lambda parameter.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays.

    Returns
    -------
    y_transform : array-like or None
        The Box-Cox transformed y array

    exogenous : array-like or None
        The exog array
    """
    check_is_fitted(self, "lam1_")
    lam1 = self.lam1_
    lam2 = self.lam2_

    y, exog = self._check_y_exog(y, exogenous)
    y += lam2

    neg_mask = y <= 0.
    if neg_mask.any():
        action = self.neg_action
        msg = "Negative or zero values present in y"
        if action == "raise":
            raise ValueError(msg)
        elif action == "warn":
            warnings.warn(msg, UserWarning)
        y[neg_mask] = self.floor

    if lam1 == 0:
        return np.log(y), exog
    return (y ** lam1 - 1) / lam1, exog
python
{ "resource": "" }
q30526
BoxCoxEndogTransformer.inverse_transform
train
def inverse_transform(self, y, exogenous=None):
    """Inverse transform a transformed array

    Inverse the Box-Cox transformation on the transformed array. Note that if truncation happened in the ``transform`` method, invertibility will not be preserved, and the transformed array may not be perfectly inverse-transformed.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The transformed endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates. Not used for endogenous transformers. Default is None, and non-None values will serve as pass-through arrays.

    Returns
    -------
    y : array-like or None
        The inverse-transformed y array

    exogenous : array-like or None
        The inverse-transformed exogenous array
    """
    check_is_fitted(self, "lam1_")
    lam1 = self.lam1_
    lam2 = self.lam2_

    y, exog = self._check_y_exog(y, exogenous)
    if lam1 == 0:
        return np.exp(y) - lam2, exog

    numer = y * lam1               # remove denominator
    numer += 1.                    # add 1 back to it
    de_exp = numer ** (1. / lam1)  # de-exponentiate
    return de_exp - lam2, exog
python
{ "resource": "" }
q30527
_regularize
train
def _regularize(x, y, ties):
    """Regularize the values, make them ordered and remove duplicates.

    If the ``ties`` parameter is explicitly set to 'ordered' then order is already assumed. Otherwise, the removal process will happen.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        The x vector.

    y : array-like, shape=(n_samples,)
        The y vector.

    ties : str
        One of {'ordered', 'mean'}, handles the ties.
    """
    x, y = [
        column_or_1d(check_array(arr, ensure_2d=False,
                                 force_all_finite=False,
                                 dtype=DTYPE))
        for arr in (x, y)
    ]

    nx = x.shape[0]
    if nx != y.shape[0]:
        raise ValueError('array dim mismatch: %i != %i' % (nx, y.shape[0]))

    # manipulate x if needed. if ties is 'ordered' we assume that x is
    # already ordered and everything has been handled already...
    if ties != 'ordered':
        o = np.argsort(x)

        # keep ordered with one another
        x = x[o]
        y = y[o]

        # what if any are the same?
        ux = np.unique(x)
        if ux.shape[0] < nx:
            # Do we want to warn for this?
            # warnings.warn('collapsing to unique "x" values')

            # vectorize this function to apply to each "cell" in the array
            def tie_apply(f, u_val):
                vals = y[x == u_val]  # mask y where x == the unique value
                return f(vals)

            # replace the duplicates in the y array with the "tie" func
            func = VALID_TIES.get(ties, _identity)

            # maybe expensive to vectorize on the fly? Not sure; would need
            # to do some benchmarking. However, we need to in order to keep
            # y and x in scope...
            y = np.vectorize(tie_apply)(func, ux)

            # does ux need ordering? hmm..
            x = ux

    return x, y
python
{ "resource": "" }
q30528
approx
train
def approx(x, y, xout, method='linear', rule=1, f=0, yleft=None,
           yright=None, ties='mean'):
    """Linearly interpolate points.

    Return a list of points which (linearly) interpolate given data points, or a function performing the linear (or constant) interpolation.

    Parameters
    ----------
    x : array-like, shape=(n_samples,)
        Numeric vector giving the coordinates of the points to be interpolated.

    y : array-like, shape=(n_samples,)
        Numeric vector giving the coordinates of the points to be interpolated.

    xout : int, float or iterable
        A scalar or iterable of numeric values specifying where interpolation is to take place.

    method : str, optional (default='linear')
        Specifies the interpolation method to be used. Choices are "linear" or "constant".

    rule : int, optional (default=1)
        An integer describing how interpolation is to take place outside the interval ``[min(x), max(x)]``. If ``rule`` is 1 then np.nans are returned for such points and if it is 2, the value at the closest data extreme is used.

    f : int, optional (default=0)
        For ``method`` = "constant" a number between 0 and 1 inclusive, indicating a compromise between left- and right-continuous step functions. If y0 and y1 are the values to the left and right of the point then the value is y0 if f == 0, y1 if f == 1, and y0*(1-f)+y1*f for intermediate values. In this way the result is right-continuous for f == 0 and left-continuous for f == 1, even for non-finite ``y`` values.

    yleft : float, optional (default=None)
        The value to be returned when input ``x`` values are less than ``min(x)``. The default is defined by the value of ``rule``.

    yright : float, optional (default=None)
        The value to be returned when input ``x`` values are greater than ``max(x)``. The default is defined by the value of ``rule``.

    ties : str, optional (default='mean')
        Handling of tied ``x`` values. Choices are "mean" or "ordered".

    Returns
    -------
    xout : np.ndarray, shape=(n_out,)
        The interpolation coordinates, cast to a float64 array.

    yout : np.ndarray, shape=(n_out,)
        The interpolated values at ``xout``.
    """
    if method not in VALID_APPROX:
        raise ValueError('method must be one of %r' % VALID_APPROX)

    # make sure xout is an array
    xout = c(xout).astype(np.float64)  # ensure double

    # check method
    method_key = method

    # not a callable, actually, but serves the purpose..
    method = get_callable(method_key, VALID_APPROX)

    # copy/regularize vectors
    x, y = _regularize(x, y, ties)
    nx = x.shape[0]

    # if len 1? (we've already handled where the size is 0, since we check
    # that in the _regularize function when we call c1d)
    if nx == 1:
        if method_key == 'linear':
            raise ValueError('need at least two points to '
                             'linearly interpolate')

    # get yleft, yright
    if yleft is None:
        yleft = y[0] if rule != 1 else np.nan
    if yright is None:
        yright = y[-1] if rule != 1 else np.nan

    # call the C subroutine
    yout = C_Approx(x, y, xout, method, f, yleft, yright)  # MemoryView
    return xout, np.asarray(yout)
python
{ "resource": "" }
q30529
BaseTransformer.fit_transform
train
def fit_transform(self, y, exogenous=None, **transform_kwargs):
    """Fit and transform the arrays

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates.

    **transform_kwargs : keyword args
        Keyword arguments required by the transform function.
    """
    self.fit(y, exogenous)
    return self.transform(y, exogenous, **transform_kwargs)
python
{ "resource": "" }
q30530
Pipeline.fit
train
def fit(self, y, exogenous=None, **fit_kwargs):
    """Fit the pipeline of transformers and the ARIMA model

    Chain the time-series and exogenous arrays through a series of transformations, fitting each stage along the way, finally fitting an ARIMA or AutoARIMA model.

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series to which to fit the ``ARIMA`` estimator. This may either be a Pandas ``Series`` object (statsmodels can internally use the dates in the index), or a numpy array. This should be a one-dimensional array of floats, and should not contain any ``np.nan`` or ``np.inf`` values.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these variables are used as additional features in the regression operation. This should not include a constant or trend. Note that if an ``ARIMA`` is fit on exogenous features, it must be provided exogenous features for making predictions.

    **fit_kwargs : keyword args
        Extra keyword arguments used for each stage's ``fit`` stage. Similar to scikit-learn pipeline keyword args, the keys are compound, comprised of the stage name and the argument name separated by a "__". For instance, if fitting an ARIMA in stage "arima", your kwargs may resemble::

            {"arima__maxiter": 10}
    """
    # Shallow copy
    steps = self.steps_ = self._validate_steps()

    yt = y
    Xt = exogenous
    named_kwargs = self._get_kwargs(**fit_kwargs)
    for step_idx, name, transformer in self._iter(with_final=False):
        cloned_transformer = clone(transformer)
        kwargs = named_kwargs[name]
        yt, Xt = cloned_transformer.fit_transform(yt, Xt, **kwargs)

        # Replace the transformer of the step with the fitted
        # transformer.
        steps[step_idx] = (name, cloned_transformer)

    # Now fit the final estimator
    kwargs = named_kwargs[steps[-1][0]]
    self._final_estimator.fit(yt, exogenous=Xt, **kwargs)
    return self
python
{ "resource": "" }
q30531
Pipeline.update
train
def update(self, y, exogenous=None, maxiter=None, **kwargs):
    """Update an ARIMA or auto-ARIMA as well as any necessary transformers

    Passes the newly observed values through the appropriate endog transformations, and the exogenous array through the exog transformers (updating where necessary) before finally updating the ARIMA model.

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series data to add to the endogenous samples on which the ``ARIMA`` estimator was previously fit. This may either be a Pandas ``Series`` object or a numpy array. This should be a one-dimensional array of finite floats.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If the model was fit with an exogenous array of covariates, it will be required for updating the observed values.

    maxiter : int, optional (default=None)
        The number of iterations to perform when updating the model. If None, will perform ``max(5, n_samples // 10)`` iterations.

    **kwargs : keyword args
        Extra keyword arguments used for each stage's ``update`` stage. Similar to scikit-learn pipeline keyword args, the keys are compound, comprised of the stage name and the argument name separated by a "__".
    """
    check_is_fitted(self, "steps_")

    # Push the arrays through all of the transformer steps that have the
    # appropriate update_and_transform method
    yt = y
    Xt = exogenous
    named_kwargs = self._get_kwargs(**kwargs)
    for step_idx, name, transformer in self._iter(with_final=False):
        kw = named_kwargs[name]
        if hasattr(transformer, "update_and_transform"):
            yt, Xt = transformer.update_and_transform(
                y=yt, exogenous=Xt, **kw)
        else:
            yt, Xt = transformer.transform(yt, exogenous=Xt, **kw)

    # Now we can update the arima
    nm, est = self.steps_[-1]
    return est.update(
        yt, exogenous=Xt, maxiter=maxiter, **named_kwargs[nm])
python
{ "resource": "" }
q30532
load_wineind
train
def load_wineind(as_series=False):
    """Australian total wine sales by wine makers in bottles <= 1 litre.

    This time-series records wine sales by Australian wine makers between Jan 1980 -- Aug 1994. This dataset is found in the R ``forecast`` package.

    Parameters
    ----------
    as_series : bool, optional (default=False)
        Whether to return a Pandas series. If True, the index will be set to the observed years/months. If False, will return a 1d numpy array.

    Notes
    -----
    This is monthly data, so *m* should be set to 12 when using in a seasonal context.

    Examples
    --------
    >>> from pmdarima.datasets import load_wineind
    >>> load_wineind()
    array([15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133,
           22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125,
           25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533,
           23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386,
           16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475,
           23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191,
           27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488,
           22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351,
           19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689,
           26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001,
           33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562,
           27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379,
           16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166,
           26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316,
           27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198,
           19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687,
           16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156,
           25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735,
           29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565,
           26323, 23779, 27549, 29660, 23356])

    >>> load_wineind(True).head()
    Jan 1980    15136
    Feb 1980    16733
    Mar 1980    20016
    Apr 1980    17708
    May 1980    18019
    dtype: int64

    References
    ----------
    .. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/wineind  # noqa: E501

    Returns
    -------
    rslt : array-like, shape=(n_samples,)
        The wineind dataset. There are 176 observations.
    """
    rslt = np.array([
        15136, 16733, 20016, 17708, 18019, 19227, 22893, 23739, 21133,
        22591, 26786, 29740, 15028, 17977, 20008, 21354, 19498, 22125,
        25817, 28779, 20960, 22254, 27392, 29945, 16933, 17892, 20533,
        23569, 22417, 22084, 26580, 27454, 24081, 23451, 28991, 31386,
        16896, 20045, 23471, 21747, 25621, 23859, 25500, 30998, 24475,
        23145, 29701, 34365, 17556, 22077, 25702, 22214, 26886, 23191,
        27831, 35406, 23195, 25110, 30009, 36242, 18450, 21845, 26488,
        22394, 28057, 25451, 24872, 33424, 24052, 28449, 33533, 37351,
        19969, 21701, 26249, 24493, 24603, 26485, 30723, 34569, 26689,
        26157, 32064, 38870, 21337, 19419, 23166, 28286, 24570, 24001,
        33151, 24878, 26804, 28967, 33311, 40226, 20504, 23060, 23562,
        27562, 23940, 24584, 34303, 25517, 23494, 29095, 32903, 34379,
        16991, 21109, 23740, 25552, 21752, 20294, 29009, 25500, 24166,
        26960, 31222, 38641, 14672, 17543, 25453, 32683, 22449, 22316,
        27595, 25451, 25421, 25288, 32568, 35110, 16052, 22146, 21198,
        19543, 22084, 23816, 29961, 26773, 26635, 26972, 30207, 38687,
        16974, 21697, 24179, 23757, 25013, 24019, 30345, 24488, 25156,
        25650, 30923, 37240, 17466, 19463, 24352, 26805, 25236, 24735,
        29356, 31234, 22724, 28496, 32857, 37198, 13652, 22784, 23565,
        26323, 23779, 27549, 29660, 23356])

    if not as_series:
        return rslt

    # Otherwise we want a series and have to cleverly create the index
    # (we don't want after Aug in 1994, so trim Sep, Oct, Nov and Dec)
    index = [
        "%s %i" % (calendar.month_abbr[i + 1], year)
        for year in range(1980, 1995)
        for i in range(12)
    ][:-4]

    return pd.Series(rslt, index=index)
python
{ "resource": "" }
q30533
FourierFeaturizer.transform
train
def transform(self, y, exogenous=None, n_periods=0, **_):
    """Create Fourier term features

    When an ARIMA is fit with an exogenous array, it must be forecasted
    with one also. Since at ``predict`` time in a pipeline we won't have
    ``y`` (and we may not yet have an ``exog`` array), we have to know how
    far into the future to compute Fourier terms (hence ``n_periods``).

    This method will compute the Fourier features for a given frequency and
    ``k`` term. Note that the ``y`` values are not used to compute these,
    so this does not pose a risk of data leakage.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array. This is unused and technically
        optional for the Fourier terms, since it uses the pre-computed
        ``n`` to calculate the seasonal Fourier terms.

    exogenous : array-like or None, shape=(n_samples, n_features), optional
        The exogenous array of additional covariates. If specified, the
        Fourier terms will be column-bound on the right side of the matrix.
        Otherwise, the Fourier terms will be returned as the new exogenous
        array.

    n_periods : int, optional (default=0)
        The number of periods in the future to forecast. If ``n_periods``
        is 0, the Fourier features are computed for the training set.
        Otherwise, ``n_periods`` corresponds to the number of samples that
        will be returned.
    """
    check_is_fitted(self, "p_")
    _, exog = self._check_y_exog(y, exogenous, null_allowed=True)

    if n_periods and exog is not None:
        if n_periods != exog.shape[0]:
            raise ValueError("If n_periods and exog are specified, "
                             "n_periods must match dims of exogenous")

    times = np.arange(self.n_ + n_periods, dtype=np.float64) + 1
    X_fourier = _fourier_terms(self.p_, times)

    # Maybe trim if we're in predict mode... in that case, we only keep the
    # last n_periods rows in the matrix we've created
    if n_periods:
        X_fourier = X_fourier[-n_periods:, :]

    if exog is None:
        exog = X_fourier
    else:
        exog = np.hstack([exog, X_fourier])

    return y, exog
python
{ "resource": "" }
q30534
FourierFeaturizer.update_and_transform
train
def update_and_transform(self, y, exogenous, **kwargs):
    """Update the params and return the transformed arrays

    Since no parameters really get updated in the Fourier featurizer, all
    we do is compute the Fourier terms for ``n_periods=len(y)`` and then
    update ``n_``.

    Parameters
    ----------
    y : array-like or None, shape=(n_samples,)
        The endogenous (time-series) array.

    exogenous : array-like or None, shape=(n_samples, n_features)
        The exogenous array of additional covariates.

    **kwargs : keyword args
        Keyword arguments required by the transform function.
    """
    check_is_fitted(self, "p_")

    self._check_endog(y)
    _, Xt = self.transform(y, exogenous, n_periods=len(y), **kwargs)

    # Update this *after* getting the exog features
    self.n_ += len(y)

    return y, Xt
python
{ "resource": "" }
q30535
BaseARIMA.fit_predict
train
def fit_predict(self, y, exogenous=None, n_periods=10, **fit_args):
    """Fit an ARIMA to a vector, ``y``, of observations with an
    optional matrix of ``exogenous`` variables, and then generate
    predictions.

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series to which to fit the ``ARIMA`` estimator. This may
        either be a Pandas ``Series`` object (statsmodels can internally
        use the dates in the index), or a numpy array. This should be a
        one-dimensional array of floats, and should not contain any
        ``np.nan`` or ``np.inf`` values.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If provided, these
        variables are used as additional features in the regression
        operation. This should not include a constant or trend. Note that
        if an ``ARIMA`` is fit on exogenous features, it must be provided
        exogenous features for making predictions.

    n_periods : int, optional (default=10)
        The number of periods in the future to forecast.

    fit_args : dict or kwargs, optional (default=None)
        Any keyword args to pass to the fit method.
    """
    self.fit(y, exogenous, **fit_args)
    return self.predict(n_periods=n_periods, exogenous=exogenous)
python
{ "resource": "" }
q30536
inheritdoc
train
def inheritdoc(parent):
    """Inherit documentation from a parent

    Parameters
    ----------
    parent : callable
        The parent function or class that contains the sought-after
        docstring. If it doesn't have a docstring, this might behave in
        unexpected ways.

    Examples
    --------
    >>> def a(x=1):
    ...     '''This is documentation'''
    ...     return x
    ...
    >>> @inheritdoc(a)
    ... def b(x):
    ...     return 2 * a(x)
    ...
    >>> print(b.__doc__)
    This is documentation
    >>> print(b(2))
    4
    """
    def wrapper(func):
        # Assign the parent docstring to the child
        func.__doc__ = parent.__doc__

        @wraps(func)
        def caller(*args, **kwargs):
            return func(*args, **kwargs)
        return caller
    return wrapper
python
{ "resource": "" }
q30537
load_austres
train
def load_austres(as_series=False):
    """Quarterly Australian residents data.

    Numbers (in thousands) of Australian residents measured quarterly from
    March 1971 to March 1994.

    Parameters
    ----------
    as_series : bool, optional (default=False)
        Whether to return a Pandas series. If False, will return a 1d
        numpy array.

    Returns
    -------
    rslt : array-like, shape=(n_samples,)
        The austres vector.

    Examples
    --------
    >>> from pmdarima.datasets import load_austres
    >>> load_austres()
    array([13067.3, 13130.5, 13198.4, 13254.2, 13303.7, 13353.9, 13409.3,
           13459.2, 13504.5, 13552.6, 13614.3, 13669.5, 13722.6, 13772.1,
           13832.0, 13862.6, 13893.0, 13926.8, 13968.9, 14004.7, 14033.1,
           14066.0, 14110.1, 14155.6, 14192.2, 14231.7, 14281.5, 14330.3,
           14359.3, 14396.6, 14430.8, 14478.4, 14515.7, 14554.9, 14602.5,
           14646.4, 14695.4, 14746.6, 14807.4, 14874.4, 14923.3, 14988.7,
           15054.1, 15121.7, 15184.2, 15239.3, 15288.9, 15346.2, 15393.5,
           15439.0, 15483.5, 15531.5, 15579.4, 15628.5, 15677.3, 15736.7,
           15788.3, 15839.7, 15900.6, 15961.5, 16018.3, 16076.9, 16139.0,
           16203.0, 16263.3, 16327.9, 16398.9, 16478.3, 16538.2, 16621.6,
           16697.0, 16777.2, 16833.1, 16891.6, 16956.8, 17026.3, 17085.4,
           17106.9, 17169.4, 17239.4, 17292.0, 17354.2, 17414.2, 17447.3,
           17482.6, 17526.0, 17568.7, 17627.1, 17661.5])

    >>> load_austres(True).head()
    0    13067.3
    1    13130.5
    2    13198.4
    3    13254.2
    4    13303.7
    dtype: float64

    Notes
    -----
    This is quarterly data, so *m* should be set to 4 when using in a
    seasonal context.

    References
    ----------
    .. [1] P. J. Brockwell and R. A. Davis (1996) "Introduction to Time
           Series and Forecasting." Springer
    """
    rslt = np.array([
        13067.3, 13130.5, 13198.4, 13254.2, 13303.7, 13353.9, 13409.3, 13459.2,
        13504.5, 13552.6, 13614.3, 13669.5, 13722.6, 13772.1, 13832.0, 13862.6,
        13893.0, 13926.8, 13968.9, 14004.7, 14033.1, 14066.0, 14110.1, 14155.6,
        14192.2, 14231.7, 14281.5, 14330.3, 14359.3, 14396.6, 14430.8, 14478.4,
        14515.7, 14554.9, 14602.5, 14646.4, 14695.4, 14746.6, 14807.4, 14874.4,
        14923.3, 14988.7, 15054.1, 15121.7, 15184.2, 15239.3, 15288.9, 15346.2,
        15393.5, 15439.0, 15483.5, 15531.5, 15579.4, 15628.5, 15677.3, 15736.7,
        15788.3, 15839.7, 15900.6, 15961.5, 16018.3, 16076.9, 16139.0, 16203.0,
        16263.3, 16327.9, 16398.9, 16478.3, 16538.2, 16621.6, 16697.0, 16777.2,
        16833.1, 16891.6, 16956.8, 17026.3, 17085.4, 17106.9, 17169.4, 17239.4,
        17292.0, 17354.2, 17414.2, 17447.3, 17482.6, 17526.0, 17568.7, 17627.1,
        17661.5])

    if as_series:
        return pd.Series(rslt)
    return rslt
python
{ "resource": "" }
q30538
SparkJobProgressMonitorOutput.display_with_id
train
def display_with_id(self, obj, display_id, update=False):
    """Create a new display with the given id, or update an existing one
    if ``update`` is True"""
    ip = get_ipython()
    if hasattr(ip, "kernel"):
        data, md = ip.display_formatter.format(obj)
        content = {
            'data': data,
            'metadata': md,
            'transient': {'display_id': display_id},
        }
        msg_type = 'update_display_data' if update else 'display_data'
        ip.kernel.session.send(ip.kernel.iopub_socket, msg_type, content,
                               parent=ip.parent_header)
    else:
        display(obj)
python
{ "resource": "" }
q30539
get_caller_text
train
def get_caller_text(frame):
    """ Return the expression that calls the frame """
    def find_match_node(node):
        "Find a candidate ast node"
        match_node = None
        for chd in ast.iter_child_nodes(node):
            if getattr(chd, "lineno", 0) > frame.f_back.f_lineno:
                break
            match_node = node if isinstance(chd, ast.Name) and isinstance(node, ast.Call) else match_node
            match_node = find_match_node(chd) or match_node
        return match_node

    lines, _ = inspect.findsource(frame.f_back.f_code)
    match_node = find_match_node(ast.parse("".join(lines)))
    return unparse(match_node).strip().replace(', ', ',') if match_node is not None else None
python
{ "resource": "" }
q30540
get_matches_lineno
train
def get_matches_lineno(code, fn_name):
    "Return a sorted list of line numbers corresponding to the definitions of functions with the name fn_name"
    class Walker(ast.NodeVisitor):
        def __init__(self):
            self._hits = set()

        #pylint: disable=E0213,E1102
        def onvisit(fn):
            def wrap(self, node):
                fn(self, node)
                super(Walker, self).generic_visit(node)
            return wrap

        @onvisit
        def visit_FunctionDef(self, node):
            if node.name == fn_name:
                self._hits.add(node)
            return node

        @property
        def hits(self):
            return list(sorted([n.lineno + 1 for n in self._hits]))

    walker = Walker()
    walker.visit(ast.parse(code))
    return walker.hits
python
{ "resource": "" }
q30541
tts_langs
train
def tts_langs():
    """Languages Google Text-to-Speech supports.

    Returns:
        dict: A dictionary of the type `{ '<lang>': '<name>'}`

            Where `<lang>` is an IETF language tag such as `en` or `pt-br`,
            and `<name>` is the full English name of the language, such as
            `English` or `Portuguese (Brazil)`.

    The dictionary returned combines languages from two origins:

    - Languages fetched automatically from Google Translate
    - Languages that are undocumented variations that were observed to work
      and present different dialects or accents.
    """
    try:
        langs = dict()
        langs.update(_fetch_langs())
        langs.update(_extra_langs())
        log.debug("langs: %s", langs)
        return langs
    except Exception as e:
        raise RuntimeError("Unable to get language list: %s" % str(e))
python
{ "resource": "" }
q30542
tone_marks
train
def tone_marks(text):
    """Add a space after tone-modifying punctuation.

    Because the `tone_marks` tokenizer case will split after a
    tone-modifying punctuation mark, make sure there's whitespace after.
    """
    return PreProcessorRegex(
        search_args=symbols.TONE_MARKS,
        search_func=lambda x: u"(?<={})".format(x),
        repl=' ').run(text)
python
{ "resource": "" }
q30543
end_of_line
train
def end_of_line(text):
    """Re-form words cut by end-of-line hyphens.

    Remove "<hyphen><newline>".
    """
    return PreProcessorRegex(
        search_args=u'-',
        search_func=lambda x: u"{}\n".format(x),
        repl='').run(text)
python
{ "resource": "" }
q30544
abbreviations
train
def abbreviations(text):
    """Remove periods after an abbreviation from a list of known
    abbreviations that can be spoken the same without that period. This
    prevents having to handle tokenization of that period.

    Note:
        Could potentially remove the ending period of a sentence.

    Note:
        Abbreviations that Google Translate can't pronounce without (or
        even with) a period should be added as a word substitution with a
        :class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
    """
    return PreProcessorRegex(
        search_args=symbols.ABBREVIATIONS,
        search_func=lambda x: r"(?<={})(?=\.).".format(x),
        repl='', flags=re.IGNORECASE).run(text)
python
{ "resource": "" }
q30545
tone_marks
train
def tone_marks():
    """Keep tone-modifying punctuation by matching following character.

    Assumes the `tone_marks` pre-processor was run for cases where there
    might not be any space after a tone-modifying punctuation mark.
    """
    return RegexBuilder(
        pattern_args=symbols.TONE_MARKS,
        pattern_func=lambda x: u"(?<={}).".format(x)).regex
python
{ "resource": "" }
q30546
period_comma
train
def period_comma():
    """Period and comma case.

    Match if not preceded by ".<letter>" and only if followed by space.
    Won't cut in the middle/after dotted abbreviations; won't cut numbers.

    Note:
        Won't match if a dotted abbreviation ends a sentence.

    Note:
        Won't match the end of a sentence if not followed by a space.
    """
    return RegexBuilder(
        pattern_args=symbols.PERIOD_COMMA,
        pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex
python
{ "resource": "" }
q30547
colon
train
def colon():
    """Colon case.

    Match a colon ":" only if not preceded by a digit.
    Mainly to prevent a cut in the middle of time notations e.g. 10:01
    """
    return RegexBuilder(
        pattern_args=symbols.COLON,
        pattern_func=lambda x: r"(?<!\d){}".format(x)).regex
python
{ "resource": "" }
q30548
other_punctuation
train
def other_punctuation():
    """Match other punctuation.

    Match other punctuation to split on; punctuation that naturally
    inserts a break in speech.
    """
    punc = ''.join(
        set(symbols.ALL_PUNC) -
        set(symbols.TONE_MARKS) -
        set(symbols.PERIOD_COMMA) -
        set(symbols.COLON))
    return RegexBuilder(
        pattern_args=punc,
        pattern_func=lambda x: u"{}".format(x)).regex
python
{ "resource": "" }
q30549
legacy_all_punctuation
train
def legacy_all_punctuation():  # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯
    """Match all punctuation.

    Use as only tokenizer case to mimic gTTS 1.x tokenization.
    """
    punc = symbols.ALL_PUNC
    return RegexBuilder(
        pattern_args=punc,
        pattern_func=lambda x: u"{}".format(x)).regex
python
{ "resource": "" }
q30550
gTTS.write_to_fp
train
def write_to_fp(self, fp):
    """Do the TTS API request and write bytes to a file-like object.

    Args:
        fp (file object): Any file-like object to write the ``mp3`` to.

    Raises:
        :class:`gTTSError`: When there's an error with the API request.
        TypeError: When ``fp`` is not a file-like object that takes bytes.
    """
    # When disabling ssl verify in requests (for proxies and firewalls),
    # urllib3 prints an insecure warning on stdout. We disable that.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    text_parts = self._tokenize(self.text)
    log.debug("text_parts: %i", len(text_parts))
    assert text_parts, 'No text to send to TTS API'

    for idx, part in enumerate(text_parts):
        try:
            # Calculate token
            part_tk = self.token.calculate_token(part)
        except requests.exceptions.RequestException as e:  # pragma: no cover
            log.debug(str(e), exc_info=True)
            raise gTTSError(
                "Connection error during token calculation: %s" % str(e))

        payload = {'ie': 'UTF-8',
                   'q': part,
                   'tl': self.lang,
                   'ttsspeed': self.speed,
                   'total': len(text_parts),
                   'idx': idx,
                   'client': 'tw-ob',
                   'textlen': _len(part),
                   'tk': part_tk}
        log.debug("payload-%i: %s", idx, payload)

        try:
            # Request
            r = requests.get(self.GOOGLE_TTS_URL,
                             params=payload,
                             headers=self.GOOGLE_TTS_HEADERS,
                             proxies=urllib.request.getproxies(),
                             verify=False)
            log.debug("headers-%i: %s", idx, r.request.headers)
            log.debug("url-%i: %s", idx, r.request.url)
            log.debug("status-%i: %s", idx, r.status_code)
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            # Request successful, bad response
            raise gTTSError(tts=self, response=r)
        except requests.exceptions.RequestException as e:  # pragma: no cover
            # Request failed
            raise gTTSError(str(e))

        try:
            # Write
            for chunk in r.iter_content(chunk_size=1024):
                fp.write(chunk)
            log.debug("part-%i written to %s", idx, fp)
        except (AttributeError, TypeError) as e:
            raise TypeError(
                "'fp' is not a file-like object or it does not take bytes: %s"
                % str(e))
python
{ "resource": "" }
q30551
gTTS.save
train
def save(self, savefile):
    """Do the TTS API request and write result to file.

    Args:
        savefile (string): The path and file name to save the ``mp3`` to.

    Raises:
        :class:`gTTSError`: When there's an error with the API request.
    """
    with open(str(savefile), 'wb') as f:
        self.write_to_fp(f)
        log.debug("Saved to %s", savefile)
python
{ "resource": "" }
q30552
_minimize
train
def _minimize(the_string, delim, max_size):
    """Recursively split a string in the largest chunks possible from the
    highest position of a delimiter all the way to a maximum size

    Args:
        the_string (string): The string to split.
        delim (string): The delimiter to split on.
        max_size (int): The maximum size of a chunk.

    Returns:
        list: the minimized string in tokens

    Every chunk size will be at minimum `the_string[0:idx]` where `idx` is
    the highest index of `delim` found in `the_string`; and at maximum
    `the_string[0:max_size]` if no `delim` was found in `the_string`. In
    the latter case, the split will occur at `the_string[max_size]` which
    can be any character. The function runs itself again on the rest of
    `the_string` (`the_string[idx:]`) until no chunk is larger than
    `max_size`.
    """
    # Remove `delim` from start of `the_string`
    # i.e. prevent a recursive infinite loop on `the_string[0:0]`
    # if `the_string` starts with `delim` and is larger than `max_size`
    if the_string.startswith(delim):
        the_string = the_string[_len(delim):]

    if _len(the_string) > max_size:
        try:
            # Find the highest index of `delim` in `the_string[0:max_size]`
            # i.e. `the_string` will be cut in half on `delim` index
            idx = the_string.rindex(delim, 0, max_size)
        except ValueError:
            # `delim` not found in `the_string`, index becomes `max_size`
            # i.e. `the_string` will be cut in half arbitrarily on `max_size`
            idx = max_size
        # Call itself again for `the_string[idx:]`
        return [the_string[:idx]] + \
            _minimize(the_string[idx:], delim, max_size)
    else:
        return [the_string]
python
{ "resource": "" }
q30553
PreProcessorRegex.run
train
def run(self, text):
    """Run each regex substitution on ``text``.

    Args:
        text (string): the input text.

    Returns:
        string: text after all substitutions have been sequentially
            applied.
    """
    for regex in self.regexes:
        text = regex.sub(self.repl, text)
    return text
python
{ "resource": "" }
q30554
PreProcessorSub.run
train
def run(self, text):
    """Run each substitution on ``text``.

    Args:
        text (string): the input text.

    Returns:
        string: text after all substitutions have been sequentially
            applied.
    """
    for pp in self.pre_processors:
        text = pp.run(text)
    return text
python
{ "resource": "" }
q30555
Stemmer.vowel_in_stem
train
def vowel_in_stem(self):
    """ True iff 0...j contains vowel """
    for i in range(0, self.j+1):
        if not self.cons(i):
            return True
    return False
python
{ "resource": "" }
q30556
Stemmer.doublec
train
def doublec(self, j):
    """ True iff j, j-1 contains double consonant """
    if j < 1 or self.b[j] != self.b[j-1]:
        return False
    return self.cons(j)
python
{ "resource": "" }
q30557
Stemmer.ends
train
def ends(self, s):
    """ True iff 0...k ends with string s """
    length = len(s)
    res = (self.b[self.k-length+1:self.k+1] == s)
    if res:
        self.j = self.k - length
    return res
python
{ "resource": "" }
q30558
Stemmer.setto
train
def setto(self, s):
    """ set j+1...k to string s, readjusting k """
    length = len(s)
    self.b[self.j+1:self.j+1+length] = s
    self.k = self.j + length
python
{ "resource": "" }
q30559
Stemmer.step1c
train
def step1c(self):
    """ turn terminal y into i if there's a vowel in stem """
    if self.ends(['y']) and self.vowel_in_stem():
        self.b[self.k] = 'i'
python
{ "resource": "" }
q30560
Board.setup_layout
train
def setup_layout(self, board_layout):
    """
    Setup the Pin instances based on the given board layout.
    """
    # Create pin instances based on board layout
    self.analog = []
    for i in board_layout['analog']:
        self.analog.append(Pin(self, i))
    self.digital = []
    self.digital_ports = []
    for i in range(0, len(board_layout['digital']), 8):
        num_pins = len(board_layout['digital'][i:i + 8])
        port_number = int(i / 8)
        self.digital_ports.append(Port(self, port_number, num_pins))

    # Allow to access the Pin instances directly
    for port in self.digital_ports:
        self.digital += port.pins

    # Setup PWM pins
    for i in board_layout['pwm']:
        self.digital[i].PWM_CAPABLE = True

    # Disable certain ports like Rx/Tx and crystal ports
    for i in board_layout['disabled']:
        self.digital[i].mode = UNAVAILABLE

    # Create a dictionary of 'taken' pins. Used by the get_pin method
    self.taken = {'analog': dict(map(lambda p: (p.pin_number, False),
                                     self.analog)),
                  'digital': dict(map(lambda p: (p.pin_number, False),
                                      self.digital))}

    self._set_default_handlers()
python
{ "resource": "" }
q30561
Board.auto_setup
train
def auto_setup(self):
    """
    Automatic setup based on Firmata's "Capability Query"
    """
    self.add_cmd_handler(CAPABILITY_RESPONSE,
                         self._handle_report_capability_response)
    self.send_sysex(CAPABILITY_QUERY, [])
    self.pass_time(0.1)  # Serial SYNC

    while self.bytes_available():
        self.iterate()

    # handle_report_capability_response will write self._layout
    if self._layout:
        self.setup_layout(self._layout)
    else:
        raise IOError("Board detection failed.")
python
{ "resource": "" }
q30562
Board.add_cmd_handler
train
def add_cmd_handler(self, cmd, func):
    """Adds a handler function for the given command byte."""
    len_args = len(inspect.getargspec(func)[0])

    def add_meta(f):
        def decorator(*args, **kwargs):
            f(*args, **kwargs)
        decorator.bytes_needed = len_args - 1  # exclude self
        decorator.__name__ = f.__name__
        return decorator
    func = add_meta(func)
    self._command_handlers[cmd] = func
python
{ "resource": "" }
q30563
Board.get_pin
train
def get_pin(self, pin_def):
    """
    Returns the activated pin given by the pin definition.
    May raise an ``InvalidPinDefError`` or a ``PinAlreadyTakenError``.

    :arg pin_def: Pin definition as described below,
        but without the arduino name. So for example ``a:1:i``.

    'a' analog pin     Pin number   'i' for input
    'd' digital pin    Pin number   'o' for output
                                    'p' for pwm (Pulse-width modulation)
                                    's' for servo

    All separated by ``:``.
    """
    if type(pin_def) == list:
        bits = pin_def
    else:
        bits = pin_def.split(':')
    a_d = bits[0] == 'a' and 'analog' or 'digital'
    part = getattr(self, a_d)
    pin_nr = int(bits[1])
    if pin_nr >= len(part):
        raise InvalidPinDefError('Invalid pin definition: {0} at position 3 on {1}'
                                 .format(pin_def, self.name))
    if getattr(part[pin_nr], 'mode', None) == UNAVAILABLE:
        raise InvalidPinDefError('Invalid pin definition: '
                                 'UNAVAILABLE pin {0} at position on {1}'
                                 .format(pin_def, self.name))
    if self.taken[a_d][pin_nr]:
        raise PinAlreadyTakenError('{0} pin {1} is already taken on {2}'
                                   .format(a_d, bits[1], self.name))
    # ok, should be available
    pin = part[pin_nr]
    self.taken[a_d][pin_nr] = True
    if pin.type is DIGITAL:
        if bits[2] == 'p':
            pin.mode = PWM
        elif bits[2] == 's':
            pin.mode = SERVO
        elif bits[2] != 'o':
            pin.mode = INPUT
    else:
        pin.enable_reporting()
    return pin
python
{ "resource": "" }
q30564
Board.pass_time
train
def pass_time(self, t):
    """Wait for ``t`` seconds in a busy loop, calling ``time.sleep(0)`` so
    other threads get a chance to run."""
    cont = time.time() + t
    while time.time() < cont:
        time.sleep(0)
python
{ "resource": "" }
q30565
Board.send_sysex
train
def send_sysex(self, sysex_cmd, data):
    """
    Sends a SysEx message.

    :arg sysex_cmd: A sysex command byte
    :arg data: a bytearray of 7-bit bytes of arbitrary data
    """
    msg = bytearray([START_SYSEX, sysex_cmd])
    msg.extend(data)
    msg.append(END_SYSEX)
    self.sp.write(msg)
python
{ "resource": "" }
q30566
Board.servo_config
train
def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):
    """
    Configure a pin as servo with min_pulse, max_pulse and first angle.
    ``min_pulse`` and ``max_pulse`` default to the arduino defaults.
    """
    if pin >= len(self.digital) or self.digital[pin].mode == UNAVAILABLE:
        raise IOError("Pin {0} is not a valid servo pin".format(pin))

    data = bytearray([pin])
    data += to_two_bytes(min_pulse)
    data += to_two_bytes(max_pulse)
    self.send_sysex(SERVO_CONFIG, data)

    # set pin._mode to SERVO so that it sends analog messages
    # don't set pin.mode as that calls this method
    self.digital[pin]._mode = SERVO
    self.digital[pin].write(angle)
python
{ "resource": "" }
q30567
Board.exit
train
def exit(self):
    """Call this to exit cleanly."""
    # First detach all servos, otherwise the serial port may not close
    # cleanly
    if hasattr(self, 'digital'):
        for pin in self.digital:
            if pin.mode == SERVO:
                pin.mode = OUTPUT
    if hasattr(self, 'sp'):
        self.sp.close()
python
{ "resource": "" }
q30568
Board._handle_digital_message
train
def _handle_digital_message(self, port_nr, lsb, msb):
    """
    Digital messages always go by the whole port. This means we have a
    bitmask with which we update the port.
    """
    mask = (msb << 7) + lsb
    try:
        self.digital_ports[port_nr]._update(mask)
    except IndexError:
        raise ValueError('Got digital data for non-existing port {0}'
                         .format(port_nr))
python
{ "resource": "" }
q30569
Port.enable_reporting
train
def enable_reporting(self):
    """Enable reporting of values for the whole port."""
    self.reporting = True
    msg = bytearray([REPORT_DIGITAL + self.port_number, 1])
    self.board.sp.write(msg)

    for pin in self.pins:
        if pin.mode == INPUT:
            pin.reporting = True
python
{ "resource": "" }
q30570
Port.disable_reporting
train
def disable_reporting(self):
    """Disable the reporting of the port."""
    self.reporting = False
    msg = bytearray([REPORT_DIGITAL + self.port_number, 0])
    self.board.sp.write(msg)
python
{ "resource": "" }
q30571
Port.write
train
def write(self):
    """Set the output pins of the port to the correct state."""
    mask = 0
    for pin in self.pins:
        if pin.mode == OUTPUT:
            if pin.value == 1:
                pin_nr = pin.pin_number - self.port_number * 8
                mask |= 1 << int(pin_nr)
    msg = bytearray([DIGITAL_MESSAGE + self.port_number, mask % 128, mask >> 7])
    self.board.sp.write(msg)
python
{ "resource": "" }
q30572
Port._update
train
def _update(self, mask):
    """Update the values for the pins marked as input with the mask."""
    if self.reporting:
        for pin in self.pins:
            if pin.mode is INPUT:
                pin_nr = pin.pin_number - self.port_number * 8
                pin.value = (mask & (1 << pin_nr)) > 0
python
{ "resource": "" }
q30573
Pin.enable_reporting
train
def enable_reporting(self):
    """Set an input pin to report values."""
    if self.mode is not INPUT:
        raise IOError("{0} is not an input and can therefore not report"
                      .format(self))
    if self.type == ANALOG:
        self.reporting = True
        msg = bytearray([REPORT_ANALOG + self.pin_number, 1])
        self.board.sp.write(msg)
    else:
        self.port.enable_reporting()
python
{ "resource": "" }
q30574
Pin.disable_reporting
train
def disable_reporting(self):
    """Disable the reporting of an input pin."""
    if self.type == ANALOG:
        self.reporting = False
        msg = bytearray([REPORT_ANALOG + self.pin_number, 0])
        self.board.sp.write(msg)
    else:
        self.port.disable_reporting()
python
{ "resource": "" }
q30575
Pin.write
train
def write(self, value):
    """
    Output a voltage from the pin

    :arg value: Uses value as a boolean if the pin is in output mode, or
        expects a float from 0 to 1 if the pin is in PWM mode. If the pin
        is in SERVO mode, the value should be in degrees.
    """
    if self.mode is UNAVAILABLE:
        raise IOError("{0} can not be used through Firmata".format(self))
    if self.mode is INPUT:
        raise IOError("{0} is set up as an INPUT and can therefore not "
                      "be written to".format(self))
    if value != self.value:
        self.value = value
        if self.mode is OUTPUT:
            if self.port:
                self.port.write()
            else:
                msg = bytearray([DIGITAL_MESSAGE, self.pin_number, value])
                self.board.sp.write(msg)
        elif self.mode is PWM:
            value = int(round(value * 255))
            msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7])
            self.board.sp.write(msg)
        elif self.mode is SERVO:
            value = int(value)
            msg = bytearray([ANALOG_MESSAGE + self.pin_number, value % 128, value >> 7])
            self.board.sp.write(msg)
python
{ "resource": "" }
q30576
get_the_board
train
def get_the_board(
    layout=BOARDS["arduino"], base_dir="/dev/", identifier="tty.usbserial"
):
    """
    Helper function to get the one and only board connected to the computer
    running this. It assumes a normal arduino layout, but this can be
    overridden by passing a different layout dict as the ``layout``
    parameter. ``base_dir`` and ``identifier`` are overridable as well. It
    will raise an IOError if it can't find a board on a serial port, or if
    it finds more than one.
    """
    from .pyfirmata import Board  # prevent a circular import

    boards = []
    for device in os.listdir(base_dir):
        if device.startswith(identifier):
            try:
                board = Board(os.path.join(base_dir, device), layout)
            except serial.SerialException:
                pass
            else:
                boards.append(board)
    if len(boards) == 0:
        raise IOError(
            "No boards found in {0} with identifier {1}".format(base_dir, identifier)
        )
    elif len(boards) > 1:
        raise IOError("More than one board found!")
    return boards[0]
python
{ "resource": "" }
q30577
from_two_bytes
train
def from_two_bytes(bytes):
    """
    Return an integer from two 7 bit bytes.
    """
    lsb, msb = bytes
    try:
        # Usually bytes have been converted to integers with ord already
        return msb << 7 | lsb
    except TypeError:
        # But add this for easy testing
        # One of them can be a string, or both
        try:
            lsb = ord(lsb)
        except TypeError:
            pass
        try:
            msb = ord(msb)
        except TypeError:
            pass
        return msb << 7 | lsb
python
{ "resource": "" }
q30578
two_byte_iter_to_str
train
def two_byte_iter_to_str(bytes):
    """
    Return a string made from a list of two byte chars.
    """
    bytes = list(bytes)
    chars = bytearray()
    while bytes:
        lsb = bytes.pop(0)
        try:
            msb = bytes.pop(0)
        except IndexError:
            msb = 0x00
        chars.append(from_two_bytes([lsb, msb]))
    return chars.decode()
python
{ "resource": "" }
q30579
str_to_two_byte_iter
train
def str_to_two_byte_iter(string):
    """
    Return an iterable consisting of two byte chars from a string.
    """
    bstring = string.encode()
    bytes = bytearray()
    for char in bstring:
        bytes.append(char)
        bytes.append(0)
    return bytes
python
{ "resource": "" }
q30580
MockupSerial.write
train
def write(self, value):
    """
    Appends bytes flat to the deque. So iterables will be unpacked.
    """
    if hasattr(value, '__iter__'):
        bytearray(value)  # validates that every item is a byte-range int
        self.extend(value)
    else:
        bytearray([value])  # same validation for a single value
        self.append(value)
python
{ "resource": "" }
q30581
get_profile
train
def get_profile(name=None, **kwargs):
    """Get the profile by name; if no name is given, return the default
    profile.
    """
    if isinstance(name, Profile):
        return name

    clazz = get_profile_class(name or 'default')
    return clazz(**kwargs)
python
{ "resource": "" }
q30582
get_profile_class
train
def get_profile_class(name):
    """For the given profile name, load the data from the external
    database, then dynamically generate a class.
    """
    if name not in CLASS_CACHE:
        profile_data = PROFILES[name]
        profile_name = clean(name)
        class_name = '{}{}Profile'.format(
            profile_name[0].upper(), profile_name[1:])
        new_class = type(class_name, (BaseProfile,),
                         {'profile_data': profile_data})
        CLASS_CACHE[name] = new_class

    return CLASS_CACHE[name]
python
{ "resource": "" }
q30583
BaseProfile.get_font
train
def get_font(self, font):
    """Return the escpos index for `font`. Makes sure that the requested
    `font` is valid.
    """
    font = {'a': 0, 'b': 1}.get(font, font)
    if not six.text_type(font) in self.fonts:
        raise NotSupported(
            '"{}" is not a valid font in the current profile'.format(font))
    return font
python
{ "resource": "" }
q30584
BaseProfile.get_columns
train
def get_columns(self, font):
    """ Return the number of columns for the given font.
    """
    font = self.get_font(font)
    return self.fonts[six.text_type(font)]['columns']
python
{ "resource": "" }
q30585
Config._reset_config
train
def _reset_config(self):
    """ Clear the loaded configuration.

    If we are loading a changed config, we don't want to have leftover
    data.
    """
    self._has_loaded = False
    self._printer = None
    self._printer_name = None
    self._printer_config = None
python
{ "resource": "" }
q30586
Config.load
train
def load(self, config_path=None):
    """ Load and parse the configuration file using pyyaml

    :param config_path: An optional file path, file handle, or byte string
        for the configuration file.
    """
    self._reset_config()
    if not config_path:
        config_path = os.path.join(
            appdirs.user_config_dir(self._app_name),
            self._config_file
        )

    try:
        # First check if it's file like. If it is, pyyaml can load it.
        # I'm checking type instead of catching exceptions to keep the
        # exception handling simple
        if hasattr(config_path, 'read'):
            config = yaml.safe_load(config_path)
        else:
            # If it isn't, it's a path. We have to open it first, otherwise
            # pyyaml will try to read it as yaml
            with open(config_path, 'rb') as config_file:
                config = yaml.safe_load(config_file)
    except EnvironmentError:
        raise exceptions.ConfigNotFoundError(
            'Couldn\'t read config at {config_path}'.format(
                config_path=str(config_path),
            ))
    except yaml.YAMLError:
        raise exceptions.ConfigSyntaxError('Error parsing YAML')

    if 'printer' in config:
        self._printer_config = config['printer']
        self._printer_name = self._printer_config.pop('type').title()

        if not self._printer_name or not hasattr(printer, self._printer_name):
            raise exceptions.ConfigSyntaxError(
                'Printer type "{printer_name}" is invalid'.format(
                    printer_name=self._printer_name,
                )
            )

    self._has_loaded = True
python
{ "resource": "" }
q30587
Config.printer
train
def printer(self):
    """ Returns a printer that was defined in the config, or throws an
    exception.

    This method loads the default config if one hasn't already been
    loaded.
    """
    if not self._has_loaded:
        self.load()

    if not self._printer_name:
        raise exceptions.ConfigSectionMissingError('printer')

    if not self._printer:
        # We could catch init errors and make them a ConfigSyntaxError,
        # but I'll just let them pass
        self._printer = getattr(printer, self._printer_name)(**self._printer_config)

    return self._printer
python
{ "resource": "" }
q30588
Usb.open
train
def open(self, usb_args):
    """ Search device on USB tree and set it as escpos device.

    :param usb_args: USB arguments
    """
    self.device = usb.core.find(**usb_args)
    if self.device is None:
        raise USBNotFoundError("Device not found or cable not plugged in.")

    self.idVendor = self.device.idVendor
    self.idProduct = self.device.idProduct

    check_driver = None
    try:
        check_driver = self.device.is_kernel_driver_active(0)
    except NotImplementedError:
        pass

    if check_driver is None or check_driver:
        try:
            self.device.detach_kernel_driver(0)
        except usb.core.USBError as e:
            if check_driver is not None:
                print("Could not detach kernel driver: {0}".format(str(e)))

    try:
        self.device.set_configuration()
        self.device.reset()
    except usb.core.USBError as e:
        print("Could not set configuration: {0}".format(str(e)))
python
{ "resource": "" }
q30589
Usb.close
train
def close(self):
    """ Release USB interface """
    if self.device:
        usb.util.dispose_resources(self.device)
    self.device = None
python
{ "resource": "" }
q30590
Serial.close
train
def close(self):
    """ Close Serial interface """
    if self.device is not None and self.device.is_open:
        self.device.flush()
        self.device.close()
python
{ "resource": "" }
q30591
Network.open
train
def open(self):
    """ Open TCP socket with ``socket``-library and set it as escpos
    device
    """
    self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.device.settimeout(self.timeout)
    self.device.connect((self.host, self.port))

    if self.device is None:
        print("Could not open socket for {0}".format(self.host))
python
{ "resource": "" }
q30592
File.open
train
def open(self):
    """ Open system file """
    self.device = open(self.devfile, "wb")

    if self.device is None:
        print("Could not open the specified file {0}".format(self.devfile))
python
{ "resource": "" }
q30593
encode_katakana
train
def encode_katakana(text):
    """I don't think this quite works yet."""
    encoded = []
    for char in text:
        if jaconv:
            # try to convert japanese text to half-katakanas
            char = jaconv.z2h(jaconv.hira2kata(char))
            # TODO: "the conversion may result in multiple characters"
            # If that really can happen (I am not really sure), then the
            # string would have to be split and every single character
            # would have to be passed through the following lines.
        if char in TXT_ENC_KATAKANA_MAP:
            encoded.append(TXT_ENC_KATAKANA_MAP[char])
        else:
            # TODO doesn't this discard all that is not in the map? Can we
            # be sure that the input does contain only encodable
            # characters? We could at least throw an exception if encoding
            # is not possible.
            pass
    return b"".join(encoded)
python
{ "resource": "" }
q30594
EscposImage.to_column_format
train
def to_column_format(self, high_density_vertical=True):
    """
    Extract slices of an image as equal-sized blobs of column-format data.

    :param high_density_vertical: Printed line height in dots
    """
    im = self._im.transpose(Image.ROTATE_270).transpose(Image.FLIP_LEFT_RIGHT)
    line_height = 24 if high_density_vertical else 8
    width_pixels, height_pixels = im.size
    top = 0
    left = 0
    while left < width_pixels:
        box = (left, top, left + line_height, top + height_pixels)
        im_slice = im.transform((line_height, height_pixels), Image.EXTENT, box)
        im_bytes = im_slice.tobytes()
        yield(im_bytes)
        left += line_height
python
{ "resource": "" }
q30595
EscposImage.split
train
def split(self, fragment_height):
    """
    Split an image into multiple fragments after fragment_height pixels

    :param fragment_height: height of fragment
    :return: list of PIL objects
    """
    passes = int(math.ceil(self.height/fragment_height))
    fragments = []
    for n in range(0, passes):
        left = 0
        right = self.width
        upper = n * fragment_height
        lower = min((n + 1) * fragment_height, self.height)
        box = (left, upper, right, lower)
        fragments.append(self.img_original.crop(box))
    return fragments
python
{ "resource": "" }
q30596
EscposImage.center
train
def center(self, max_width):
    """In-place image centering

    :param max_width: Maximum width, used to deduce the x offset for
        centering
    :return: None
    """
    old_width, height = self._im.size
    new_size = (max_width, height)

    new_im = Image.new("1", new_size)

    paste_x = int((max_width - old_width) / 2)

    new_im.paste(self._im, (paste_x, 0))

    self._im = new_im
python
{ "resource": "" }
q30597
demo
train
def demo(printer, **kwargs):
    """ Prints specified demos. Called when CLI is passed `demo`. This
    function uses the DEMO_FUNCTIONS dictionary.

    :param printer: A printer from escpos.printer
    :param kwargs: A dict with a key for each function you want to test.
        It's in this format since it usually comes from argparse.
    """
    for demo_choice in kwargs.keys():
        command = getattr(
            printer,
            demo_choice
            .replace('barcodes_a', 'barcode')
            .replace('barcodes_b', 'barcode')
        )
        for params in DEMO_FUNCTIONS[demo_choice]:
            command(**params)
            printer.cut()
python
{ "resource": "" }
q30598
Encoder.get_encoding_name
train
def get_encoding_name(self, encoding):
    """Given an encoding provided by the user, will return a canonical
    encoding name, and also validate that the encoding is supported.

    TODO: Support encoding aliases: pc437 instead of cp437.
    """
    encoding = CodePages.get_encoding_name(encoding)
    if encoding not in self.codepages:
        raise ValueError((
            'Encoding "{}" cannot be used for the current profile. '
            'Valid encodings are: {}'
        ).format(encoding, ','.join(self.codepages.keys())))
    return encoding
python
{ "resource": "" }
q30599
Encoder._get_codepage_char_list
train
def _get_codepage_char_list(encoding):
    """Get codepage character list

    Gets characters 128-255 for a given code page, as an array.

    :param encoding: The name of the encoding. This must appear in the
        CodePage list
    """
    codepage = CodePages.get_encoding(encoding)
    if 'data' in codepage:
        encodable_chars = list("".join(codepage['data']))
        assert(len(encodable_chars) == 128)
        return encodable_chars
    elif 'python_encode' in codepage:
        encodable_chars = [u" "] * 128
        for i in range(0, 128):
            codepoint = i + 128
            try:
                encodable_chars[i] = bytes([codepoint]).decode(
                    codepage['python_encode'])
            except UnicodeDecodeError:
                # Non-encodable character, just skip it
                pass
        return encodable_chars
    raise LookupError("Can't find a known encoding for {}".format(encoding))
python
{ "resource": "" }