code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
if not item.remote_id:
    command = CreateFolderCommand(self.settings, item, parent)
    self.task_runner_add(parent, item, command)
def visit_folder(self, item, parent)
Adds create folder command to task runner if folder doesn't already exist.
11.434208
5.93142
1.927735
if item.need_to_send:
    if item.size > self.settings.config.upload_bytes_per_chunk:
        msg = "Programmer Error: Trying to upload large file as small item size:{} name:{}"
        raise ValueError(msg.format(item.size, item.name))
    else:
        command = CreateSmallFileCommand(self.settings, item, parent,
                                         self.settings.file_upload_post_processor)
        self.task_runner_add(parent, item, command)
def visit_file(self, item, parent)
If the file is small, add a create-small-file command; otherwise raise an error. Large files shouldn't be passed to SmallItemUploadTaskBuilder.
8.293347
5.93576
1.397184
parent_task_id = self.item_to_id.get(parent)
task_id = self.task_runner.add(parent_task_id, command)
self.item_to_id[item] = task_id
def task_runner_add(self, parent, item, command)
Add command to task runner with parent's task id, creating a task id for item/command. Save this item's id to a lookup. :param parent: object: parent of item :param item: object: item we are running command on :param command: parallel TaskCommand we want to have run
2.586552
2.587302
0.99971
self.local_project.set_remote_id_after_send(result_id)
self.settings.project_id = result_id
def after_run(self, result_id)
Save uuid associated with project we just created. :param result_id: str: uuid of the project
11.757857
10.224874
1.149927
params = (self.remote_folder.name, self.parent.kind, self.parent.remote_id)
return UploadContext(self.settings, params, message_queue, task_id)
def create_context(self, message_queue, task_id)
Create values to be used by upload_folder_run function. :param message_queue: Queue: queue background process can send messages to us on :param task_id: int: id of this command's task so message will be routed correctly
9.122855
9.183779
0.993366
parent_data = ParentData(self.parent.kind, self.parent.remote_id)
path_data = self.local_file.get_path_data()
params = parent_data, path_data, self.local_file.remote_id
return UploadContext(self.settings, params, message_queue, task_id)
def create_context(self, message_queue, task_id)
Create values to be used by create_small_file function. :param message_queue: Queue: queue background process can send messages to us on :param task_id: int: id of this command's task so message will be routed correctly
5.906877
6.456899
0.914816
if self.file_upload_post_processor:
    self.file_upload_post_processor.run(self.settings.data_service, remote_file_data)
remote_file_id = remote_file_data['id']
self.settings.watcher.transferring_item(self.local_file)
self.local_file.set_remote_id_after_send(remote_file_id)
def after_run(self, remote_file_data)
Save uuid of file to our LocalFile :param remote_file_data: dict: DukeDS file data
4.922233
5.026586
0.97924
watcher = self.settings.watcher
if started_waiting:
    watcher.start_waiting()
else:
    watcher.done_waiting()
def on_message(self, started_waiting)
Receives started_waiting boolean from create_small_file method and notifies project_status_monitor in settings. :param started_waiting: boolean: True when we start waiting, False when done
6.122044
4.97033
1.231718
if item.kind == KindType.file_str:
    if item.need_to_send:
        self.add_upload_item(item.path)
else:
    if item.kind == KindType.project_str:
        pass
    else:
        if not item.remote_id:
            self.add_upload_item(item.path)
    for child in item.children:
        self._visit_recur(child)
def _visit_recur(self, item)
Recursively visits children of item. :param item: object: project, folder or file we will add to upload_items if necessary.
4.497927
3.491784
1.288146
arg_parser.add_argument("-p", '--project-name', metavar='ProjectName', type=to_unicode, dest='project_name', help=help_text, required=required)
def add_project_name_arg(arg_parser, required, help_text)
Adds project_name parameter to a parser. :param arg_parser: ArgumentParser parser to add this argument to. :param required: bool: whether the argument is required :param help_text: str: label displayed in usage
3.145506
3.864977
0.813849
project_name_or_id = arg_parser.add_mutually_exclusive_group(required=required)
name_help_text = "Name of the project to {}.".format(help_text_suffix)
add_project_name_arg(project_name_or_id, required=False, help_text=name_help_text)
id_help_text = "ID of the project to {}.".format(help_text_suffix)
add_project_id_arg(project_name_or_id, required=False, help_text=id_help_text)
def add_project_name_or_id_arg(arg_parser, required=True, help_text_suffix="manage")
Adds project name or project id argument. These two are mutually exclusive. :param arg_parser: ArgumentParser parser to add this argument to. :param required: bool: whether one of the two arguments is required :param help_text_suffix: str: suffix describing the action, inserted into each argument's help text
1.568463
1.678134
0.934647
path = to_unicode(path)
if not os.path.exists(path):
    raise argparse.ArgumentTypeError("{} is not a valid file/folder.".format(path))
return path
def _paths_must_exists(path)
Raises error if path doesn't exist. :param path: str path to check :return: str same path passed in
3.598315
4.631256
0.776963
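A small usage sketch (hypothetical parser wiring, not from the source) showing how a validator like _paths_must_exists plugs into argparse through the type= hook, so invalid paths are rejected during parsing. The to_unicode conversion from the original is omitted to keep the sketch self-contained.

import argparse
import os

def _paths_must_exists(path):
    # Reject paths that do not exist on the local filesystem.
    if not os.path.exists(path):
        raise argparse.ArgumentTypeError("{} is not a valid file/folder.".format(path))
    return path

parser = argparse.ArgumentParser()
# argparse calls the validator once per value and reports
# ArgumentTypeError as a normal usage error.
parser.add_argument('folders', nargs='+', type=_paths_must_exists)
args = parser.parse_args(['/tmp'])  # errors out if the path is missing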
basename = os.path.basename(path)
if any([bad_char in basename for bad_char in INVALID_PATH_CHARS]):
    raise argparse.ArgumentTypeError("{} contains invalid characters for a directory.".format(path))
return path
def _path_has_ok_chars(path)
Validate path for invalid characters. :param path: str possible filesystem path :return: path if it was ok otherwise raises error
4.355134
4.335999
1.004413
help_text = "Specifies which project permissions to give to the user. Example: 'project_admin'. " help_text += "See command list_auth_roles for AuthRole values." arg_parser.add_argument("--auth-role", metavar='AuthRole', type=to_unicode, dest='auth_role', help=help_text, default=default_permissions)
def _add_auth_role_arg(arg_parser, default_permissions)
Adds optional auth_role parameter to a parser. :param arg_parser: ArgumentParser parser to add this argument to. :param default_permissions: default value to use for this argument
5.261264
5.510284
0.954808
help_text = "Filters project listing to just those projects with the specified role. " help_text += "See command list_auth_roles for AuthRole values." arg_parser.add_argument("--auth-role", metavar='AuthRole', type=to_unicode, dest='auth_role', help=help_text, default=None)
def _add_project_filter_auth_role_arg(arg_parser)
Adds optional auth_role filtering parameter to a parser. :param arg_parser: ArgumentParser parser to add this argument to.
5.434571
5.468652
0.993768
arg_parser.add_argument("--resend", action='store_true', default=False, dest='resend', help=resend_help)
def _add_resend_arg(arg_parser, resend_help)
Adds resend parameter to a parser. :param arg_parser: ArgumentParser parser to add this argument to. :param resend_help: str: help text for the --resend argument
2.047161
2.889515
0.708479
arg_parser.add_argument("--include", metavar='Path', action='append', type=to_unicode, dest='include_paths', help="Specifies a single path to include. This argument can be repeated.", default=[])
def _add_include_arg(arg_parser)
Adds optional repeatable include parameter to a parser. :param arg_parser: ArgumentParser parser to add this argument to.
4.49608
4.836937
0.92953
arg_parser.add_argument('--msg-file', type=argparse.FileType('r'), help=help_text)
def _add_message_file(arg_parser, help_text)
Add message file argument with help_text to arg_parser. :param arg_parser: ArgumentParser parser to add this argument to. :param help_text: str: help text for this argument
2.647993
3.314391
0.798938
description = "Uploads local files and folders to a remote host." upload_parser = self.subparsers.add_parser('upload', description=description) _add_dry_run(upload_parser, help_text="Instead of uploading displays a list of folders/files that " "need to be uploaded.") add_project_name_or_id_arg(upload_parser, help_text_suffix="upload files/folders to.") _add_folders_positional_arg(upload_parser) _add_follow_symlinks_arg(upload_parser) upload_parser.set_defaults(func=upload_func)
def register_upload_command(self, upload_func)
Add the upload command to the parser and call upload_func(project_name, folders, follow_symlinks) when chosen. :param upload_func: func Called when this option is chosen: upload_func(project_name, folders, follow_symlinks).
4.667688
4.304213
1.084446
description = "Gives user permission to access a remote project." add_user_parser = self.subparsers.add_parser('add-user', description=description) add_project_name_or_id_arg(add_user_parser, help_text_suffix="add a user to") user_or_email = add_user_parser.add_mutually_exclusive_group(required=True) add_user_arg(user_or_email) add_email_arg(user_or_email) _add_auth_role_arg(add_user_parser, default_permissions='project_admin') add_user_parser.set_defaults(func=add_user_func)
def register_add_user_command(self, add_user_func)
Add the add-user command to the parser and call add_user_func(project_name, user_full_name, auth_role) when chosen. :param add_user_func: func Called when this option is chosen: add_user_func(project_name, user_full_name, auth_role).
3.507749
3.453743
1.015637
description = "Removes user permission to access a remote project." remove_user_parser = self.subparsers.add_parser('remove-user', description=description) add_project_name_or_id_arg(remove_user_parser, help_text_suffix="remove a user from") user_or_email = remove_user_parser.add_mutually_exclusive_group(required=True) add_user_arg(user_or_email) add_email_arg(user_or_email) remove_user_parser.set_defaults(func=remove_user_func)
def register_remove_user_command(self, remove_user_func)
Add the remove-user command to the parser and call remove_user_func(project_name, user_full_name) when chosen. :param remove_user_func: func Called when this option is chosen: remove_user_func(project_name, user_full_name).
3.04007
3.076084
0.988292
description = "Download the contents of a remote remote project to a local folder." download_parser = self.subparsers.add_parser('download', description=description) add_project_name_or_id_arg(download_parser, help_text_suffix="download") _add_folder_positional_arg(download_parser) include_or_exclude = download_parser.add_mutually_exclusive_group(required=False) _add_include_arg(include_or_exclude) _add_exclude_arg(include_or_exclude) download_parser.set_defaults(func=download_func)
def register_download_command(self, download_func)
Add 'download' command for downloading a project to a directory. For non-empty directories it will download remote files, replacing local files. :param download_func: function to run when user chooses this option
3.22727
3.351289
0.962994
description = "Share a project with another user with specified permissions. " \ "Sends the other user an email message via D4S2 service. " \ "If not specified this command gives user download permissions." share_parser = self.subparsers.add_parser('share', description=description) add_project_name_or_id_arg(share_parser) user_or_email = share_parser.add_mutually_exclusive_group(required=True) add_user_arg(user_or_email) add_email_arg(user_or_email) _add_auth_role_arg(share_parser, default_permissions='file_downloader') _add_resend_arg(share_parser, "Resend share") _add_message_file(share_parser, "Filename containing a message to be sent with the share. " "Pass - to read from stdin.") share_parser.set_defaults(func=share_func)
def register_share_command(self, share_func)
Add 'share' command for adding view-only project permissions and sending email via another service. :param share_func: function to run when user chooses this option
4.99213
5.119139
0.975189
description = "Initiate delivery of a project to another user. Removes other user's current permissions. " \ "Send message to D4S2 service to send email and allow access to the project once user " \ "acknowledges receiving the data." deliver_parser = self.subparsers.add_parser('deliver', description=description) add_project_name_or_id_arg(deliver_parser) user_or_email = deliver_parser.add_mutually_exclusive_group(required=True) add_user_arg(user_or_email) add_email_arg(user_or_email) add_share_usernames_arg(deliver_parser) add_share_emails_arg(deliver_parser) _add_copy_project_arg(deliver_parser) _add_resend_arg(deliver_parser, "Resend delivery") include_or_exclude = deliver_parser.add_mutually_exclusive_group(required=False) _add_include_arg(include_or_exclude) _add_exclude_arg(include_or_exclude) _add_message_file(deliver_parser, "Filename containing a message to be sent with the delivery. " "Pass - to read from stdin.") deliver_parser.set_defaults(func=deliver_func)
def register_deliver_command(self, deliver_func)
Add 'deliver' command for transferring a project to another user. :param deliver_func: function to run when user chooses this option
3.825361
3.781087
1.011709
description = "Show a list of project names or folders/files of a single project." list_parser = self.subparsers.add_parser('list', description=description) project_name_or_auth_role = list_parser.add_mutually_exclusive_group(required=False) _add_project_filter_auth_role_arg(project_name_or_auth_role) add_project_name_or_id_arg(project_name_or_auth_role, required=False, help_text_suffix="show details for") _add_long_format_option(list_parser, 'Display long format.') list_parser.set_defaults(func=list_func)
def register_list_command(self, list_func)
Add 'list' command to get a list of projects or details about one project. :param list_func: function: run when user chooses this option.
4.320748
4.229531
1.021567
description = "Permanently delete a project." delete_parser = self.subparsers.add_parser('delete', description=description) add_project_name_or_id_arg(delete_parser, help_text_suffix="delete") _add_force_arg(delete_parser, "Do not prompt before deleting.") delete_parser.set_defaults(func=delete_func)
def register_delete_command(self, delete_func)
Add 'delete' command to delete a project from the remote store. :param delete_func: function: run when user chooses this option.
3.978061
4.435707
0.896827
description = "List authorization roles for use with add_user command." list_auth_roles_parser = self.subparsers.add_parser('list-auth-roles', description=description) list_auth_roles_parser.set_defaults(func=list_auth_roles_func)
def register_list_auth_roles_command(self, list_auth_roles_func)
Add 'list_auth_roles' command to list project authorization roles that can be used with add_user. :param list_auth_roles_func: function: run when user chooses this option.
2.994609
2.616744
1.144403
parsed_args = self.parser.parse_args(args)
if hasattr(parsed_args, 'func'):
    parsed_args.func(parsed_args)
else:
    self.parser.print_help()
def run_command(self, args)
Parse command line arguments and run function registered for the appropriate command. :param args: [str] command line arguments
2.043918
2.231087
0.916109
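The register_* methods and run_command above follow argparse's standard set_defaults(func=...) dispatch pattern; a minimal self-contained sketch with a hypothetical 'greet' command:

import argparse

def greet_func(parsed_args):
    # The handler receives the full parsed namespace.
    print("Hello, {}!".format(parsed_args.name))

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
greet_parser = subparsers.add_parser('greet')
greet_parser.add_argument('name')
greet_parser.set_defaults(func=greet_func)

parsed = parser.parse_args(['greet', 'world'])
if hasattr(parsed, 'func'):  # no subcommand chosen -> no 'func' attribute
    parsed.func(parsed)
else:
    parser.print_help()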
df_resampled = data_frame.resample(str(1 / self.sampling_frequency) + 'S').mean()
f = interpolate.interp1d(data_frame.td, data_frame.mag_sum_acc)
new_timestamp = np.arange(data_frame.td[0], data_frame.td[-1], 1.0 / self.sampling_frequency)
df_resampled.mag_sum_acc = f(new_timestamp)
logging.debug("resample signal")
return df_resampled.interpolate(method='linear')
def resample_signal(self, data_frame)
Convenience method for frequency conversion and resampling of a data frame. The object must have a DatetimeIndex. After re-sampling, this method interpolates the time magnitude sum acceleration values and the x, y, z values of the data frame acceleration. :param data_frame: the data frame to resample :type data_frame: pandas.DataFrame :return: the resampled data frame :rtype: pandas.DataFrame
3.445341
3.070565
1.122054
b, a = signal.butter(self.filter_order, 2 * self.cutoff_frequency / self.sampling_frequency,
                     'high', analog=False)
filtered_signal = signal.lfilter(b, a, data_frame[ts].values)
data_frame['filtered_signal'] = filtered_signal
logging.debug("filter signal")
return data_frame
def filter_signal(self, data_frame, ts='mag_sum_acc')
This method filters a data frame signal as suggested in :cite:`Kassavetis2015`. First step is to high \ pass filter the data frame using a \ `Butterworth <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html>`_ \ digital and analog filter. Then this method filters the data frame along one-dimension using a \ `digital filter <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html>`_. :param data_frame: the input data frame :type data_frame: pandas.DataFrame :param ts: time series name of data frame to filter :type ts: str :return data_frame: adds a column named 'filtered_signal' to the data frame :rtype data_frame: pandas.DataFrame
3.26129
3.157382
1.032909
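For reference, a stand-alone sketch of the same high-pass step with assumed values (100 Hz sampling, 2 Hz cutoff, order 2, all hypothetical): scipy.signal.butter expects the critical frequency as a fraction of the Nyquist frequency, which is exactly what the 2 * cutoff / sampling_frequency term supplies.

import numpy as np
from scipy import signal

fs = 100.0    # sampling frequency in Hz (assumed)
cutoff = 2.0  # high-pass cutoff in Hz (assumed)
order = 2     # filter order (assumed)

# The cutoff is normalized to the Nyquist frequency (fs / 2).
b, a = signal.butter(order, 2 * cutoff / fs, 'high', analog=False)

t = np.arange(0, 10, 1 / fs)
x = np.sin(2 * np.pi * 5 * t) + 3.0  # 5 Hz tone plus a DC offset
filtered = signal.lfilter(b, a, x)   # the DC offset is attenuated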
signal_length = len(data_frame.filtered_signal.values)
ll = int(signal_length / 2 - self.window / 2)
rr = int(signal_length / 2 + self.window / 2)
msa = data_frame.filtered_signal[ll:rr].values
hann_window = signal.hann(self.window)
msa_window = msa * hann_window
transformed_signal = fft(msa_window)
data = {'filtered_signal': msa_window, 'transformed_signal': transformed_signal,
        'dt': data_frame.td[ll:rr].values}
data_frame_fft = pd.DataFrame(data, index=data_frame.index[ll:rr],
                              columns=['filtered_signal', 'transformed_signal', 'dt'])
logging.debug("fft signal")
return data_frame_fft
def fft_signal(self, data_frame)
This method performs a Fast Fourier Transform on the data frame using a \ `hanning window <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.hann.html>`_ :param data_frame: the data frame :type data_frame: pandas.DataFrame :return: data frame with 'filtered_signal', 'transformed_signal' and 'dt' columns :rtype: pandas.DataFrame
3.057011
2.781974
1.098864
signal_length = len(data_frame.filtered_signal)
normalised_transformed_signal = data_frame.transformed_signal.values / signal_length
k = np.arange(signal_length)
T = signal_length / self.sampling_frequency
f = k / T  # two sides frequency range
f = f[range(int(signal_length / 2))]  # one side frequency range
ts = normalised_transformed_signal[range(int(signal_length / 2))]
ampl = sum(abs(ts[(f > self.lower_frequency) & (f < self.upper_frequency)]))
freq = f[abs(ts).argmax(axis=0)]
logging.debug("tremor ampl calculated")
return ampl, freq
def amplitude_by_fft(self, data_frame)
This method extracts the fft components and sums the ones from lower to upper frequency as per \ :cite:`Kassavetis2015` :param data_frame: the data frame :type data_frame: pandas.DataFrame :return ampl: the amplitude :rtype ampl: float :return freq: the frequency :rtype freq: float
4.226589
3.997726
1.057248
frq, Pxx_den = signal.welch(data_frame.filtered_signal.values, self.sampling_frequency,
                            nperseg=self.window)
freq = frq[Pxx_den.argmax(axis=0)]
ampl = sum(Pxx_den[(frq > self.lower_frequency) & (frq < self.upper_frequency)])
logging.debug("tremor amplitude by welch calculated")
return ampl, freq
def amplitude_by_welch(self, data_frame)
This method uses the Welch method :cite:`Welch1967` to obtain the power spectral density; this is a robust alternative to using fft_signal & amplitude :param data_frame: the data frame :type data_frame: pandas.DataFrame :return ampl: the amplitude :rtype ampl: float :return freq: the frequency :rtype freq: float
3.830013
3.953614
0.968737
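A self-contained sketch of the same Welch-based extraction on a synthetic signal (hypothetical 100 Hz sampling and a 2-10 Hz band of interest):

import numpy as np
from scipy import signal

fs = 100.0                # sampling frequency (assumed)
lower, upper = 2.0, 10.0  # band of interest (assumed)

t = np.arange(0, 20, 1 / fs)
x = np.sin(2 * np.pi * 6 * t) + 0.1 * np.random.randn(len(t))  # 6 Hz "tremor"

# Welch averages periodograms over overlapping segments, trading frequency
# resolution for a lower-variance spectral estimate.
frq, Pxx_den = signal.welch(x, fs, nperseg=256)
freq = frq[Pxx_den.argmax()]                         # dominant frequency, ~6 Hz
ampl = Pxx_den[(frq > lower) & (frq < upper)].sum()  # power in the band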
if m is None or r is None:
    m = 2
    r = 0.3
entropy = feature_calculators.approximate_entropy(x, m, r)
logging.debug("approximate entropy by tsfresh calculated")
return entropy
def approximate_entropy(self, x, m=None, r=None)
As in tsfresh \ `approximate_entropy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L1601>`_ Implements a `vectorized approximate entropy algorithm <https://en.wikipedia.org/wiki/Approximate_entropy>`_ For short time-series this method is highly dependent on the parameters, but should be stable for N > 2000, see :cite:`Yentes2013`. Other shortcomings and alternatives discussed in \ :cite:`Richman2000` :param x: the time series to calculate the feature of :type x: pandas.Series :param m: Length of compared run of data :type m: int :param r: Filtering level, must be positive :type r: float :return: Approximate entropy :rtype: float
6.397137
4.647955
1.376333
# This is important: If a series is passed, the product below is calculated
# based on the index, which corresponds to squaring the series.
if lag is None:
    lag = 0
_autoc = feature_calculators.autocorrelation(x, lag)
logging.debug("autocorrelation by tsfresh calculated")
return _autoc
def autocorrelation(self, x, lag)
As in tsfresh `autocorrelation <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L1457>`_ Calculates the autocorrelation of the specified lag, according to the `formula <https://en.wikipedia.org/wiki/\ Autocorrelation#Estimation>`_: .. math:: \\frac{1}{(n-l)\sigma^{2}} \\sum_{t=1}^{n-l}(X_{t}-\\mu )(X_{t+l}-\\mu) where :math:`n` is the length of the time series :math:`X_i`, :math:`\sigma^2` its variance and :math:`\mu` its mean. `l` denotes the lag. :param x: the time series to calculate the feature of :type x: pandas.Series :param lag: the lag :type lag: int :return: the value of this feature :rtype: float
13.510376
12.021661
1.123836
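A direct NumPy transcription of the estimator in the docstring above, an illustration of the formula rather than the tsfresh implementation:

import numpy as np

def autocorr_at_lag(x, lag):
    # R(l) = 1 / ((n - l) * sigma^2) * sum_{t=1}^{n-l} (x_t - mu) * (x_{t+l} - mu)
    x = np.asarray(x, dtype=float)
    n = len(x)
    mu, var = x.mean(), x.var()
    return ((x[:n - lag] - mu) * (x[lag:] - mu)).sum() / ((n - lag) * var)

x = np.sin(np.linspace(0, 20 * np.pi, 1000))
print(autocorr_at_lag(x, 0))  # 1.0 by construction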
if param is None:
    param = [{'lag': 3}, {'lag': 5}, {'lag': 6}]
_partialc = feature_calculators.partial_autocorrelation(x, param)
logging.debug("partial autocorrelation by tsfresh calculated")
return _partialc
def partial_autocorrelation(self, x, param=None)
As in tsfresh `partial_autocorrelation <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\ feature_extraction/feature_calculators.py#L308>`_ Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial \ autocorrelation of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation \ of :math:`x_t` and \ :math:`x_{t-k}`, adjusted for the intermediate variables \ :math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` (:cite:`Wilson2015`). \ Following `this notes <https://onlinecourses.science.psu.edu/stat510/node/62>`_, it can be defined as .. math:: \\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})} {\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}} with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})` \ being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to \ predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`.\ It is said in :cite:`Wilson2015` that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] \ will be nonzero for `k<=p` and zero for `k>p`."\ With this property, it is used to determine the lag of an AR-Process. :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned :type param: list :return: the value of this feature :rtype: float
6.914414
4.949216
1.397073
ratio = feature_calculators.ratio_value_number_to_time_series_length(x)
logging.debug("ratio value number to time series length by tsfresh calculated")
return ratio
def ratio_value_number_to_time_series_length(self, x)
As in tsfresh `ratio_value_number_to_time_series_length <https://github.com/blue-yonder/tsfresh/blob/master\ /tsfresh/feature_extraction/feature_calculators.py#L830>`_ Returns a factor which is 1 if all values in the time series occur only once, and below one if this is not the case. In principle, it just returns: # unique values / # values :param x: the time series to calculate the feature of :type x: pandas.Series :return: the value of this feature :rtype: float
6.403706
5.191501
1.233498
if ql is None or qh is None or isabs is None or f_agg is None:
    f_agg = 'mean'
    isabs = True
    qh = 0.2
    ql = 0.0
quantile = feature_calculators.change_quantiles(x, ql, qh, isabs, f_agg)
logging.debug("change_quantiles by tsfresh calculated")
return quantile
def change_quantiles(self, x, ql=None, qh=None, isabs=None, f_agg=None)
As in tsfresh `change_quantiles <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\ feature_extraction/feature_calculators.py#L1248>`_ First fixes a corridor given by the quantiles ql and qh of the distribution of x. Then calculates the \ average, absolute value of consecutive changes of the series x inside this corridor. Think about selecting \ a corridor on the y-Axis and only calculating the mean of the absolute change of the time series inside \ this corridor. :param x: the time series to calculate the feature of :type x: pandas.Series :param ql: the lower quantile of the corridor :type ql: float :param qh: the higher quantile of the corridor :type qh: float :param isabs: should the absolute differences be taken? :type isabs: bool :param f_agg: the aggregator function that is applied to the differences in the bin :type f_agg: str, name of a numpy function (e.g. mean, var, std, median) :return: the value of this feature :rtype: float
3.514225
3.19037
1.10151
if n is None:
    n = 5
peaks = feature_calculators.number_peaks(x, n)
logging.debug("number peaks by tsfresh calculated")
return peaks
def number_peaks(self, x, n=None)
As in tsfresh `number_peaks <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L1003>`_ Calculates the number of peaks of at least support n in the time series x. A peak of support n is defined \ as a subsequence of x where a value occurs, which is bigger than its n neighbours to the left and to the right. Hence in the sequence >>> x = [3, 0, 0, 4, 0, 0, 13] 4 is a peak of support 1 and 2 because in the subsequences >>> [0, 4, 0] >>> [0, 0, 4, 0, 0] 4 is still the highest value. Here, 4 is not a peak of support 3 because 13 is the 3rd neighbour to the \ right of 4 and it is bigger than 4. :param x: the time series to calculate the feature of :type x: pandas.Series :param n: the support of the peak :type n: int :return: the value of this feature :rtype: float
12.337322
10.850812
1.136995
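Running the docstring's example end to end (assuming tsfresh is installed): supports 1 and 2 both count the 4, while support 3 does not, because 13 sits within 3 neighbours to its right.

import pandas as pd
from tsfresh.feature_extraction import feature_calculators

x = pd.Series([3, 0, 0, 4, 0, 0, 13])
print(feature_calculators.number_peaks(x, n=1))  # 1: the 4 beats its 1-neighbours
print(feature_calculators.number_peaks(x, n=3))  # 0: 13 is bigger within 3 to the right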
if param is None:
    param = [{'attr': 'intercept', 'chunk_len': 5, 'f_agg': 'min'},
             {'attr': 'rvalue', 'chunk_len': 10, 'f_agg': 'var'},
             {'attr': 'intercept', 'chunk_len': 10, 'f_agg': 'min'}]
agg = feature_calculators.agg_linear_trend(x, param)
logging.debug("agg linear trend by tsfresh calculated")
return list(agg)
def agg_linear_trend(self, x, param=None)
As in tsfresh `agg_linear_trend <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\ feature_extraction/feature_calculators.py#L1727>`_ Calculates a linear least-squares regression for values of the time series that were aggregated over chunks\ versus the sequence from 0 up to the number of chunks minus one. This feature assumes the signal to be uniformly sampled. It will not use the time stamps to fit the model. The parameter attr controls which of the characteristics are returned. Possible extracted attributes are\ "pvalue", "rvalue", "intercept", "slope", "stderr", see the documentation of linregress for more \ information. The chunk size is regulated by "chunk_len". It specifies how many time series values are in each chunk. Further, the aggregation function is controlled by "f_agg", which can be "max", "min", "mean" or "median" :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"attr": x, "chunk_len": l, "f_agg": f} with x, f a str and l an int :type param: list :return: the different feature values :rtype: pandas.Series
4.724027
2.980234
1.58512
if param is None:
    param = [{'coeff': 2}, {'coeff': 5}, {'coeff': 8}]
welch = feature_calculators.spkt_welch_density(x, param)
logging.debug("spkt welch density by tsfresh calculated")
return list(welch)
def spkt_welch_density(self, x, param=None)
As in tsfresh `spkt_welch_density <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/\ feature_extraction/feature_calculators.py#L1162>`_ . This feature calculator estimates the cross power \ spectral density of the time series x at different frequencies. To do so, the time series is first shifted \ from the time domain to the frequency domain. \ The feature calculator returns the power spectrum of the different frequencies. :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"coeff": x} with x int :type param: list :return: the different feature values :rtype: pandas.Series
6.012042
4.49764
1.33671
_perc = feature_calculators.percentage_of_reoccurring_datapoints_to_all_datapoints(x)
logging.debug("percentage of reoccurring datapoints to all datapoints by tsfresh calculated")
return _perc
def percentage_of_reoccurring_datapoints_to_all_datapoints(self, x)
As in tsfresh `percentage_of_reoccurring_datapoints_to_all_datapoints <https://github.com/blue-yonder/tsfresh/\ blob/master/tsfresh/feature_extraction/feature_calculators.py#L739>`_ \ Returns the percentage of unique values, that are present in the time series more than once.\ len(different values occurring more than once) / len(different values)\ This means the percentage is normalized to the number of unique values, in contrast to the \ percentage_of_reoccurring_values_to_all_values. :param x: the time series to calculate the feature of :type x: pandas.Series :return: the value of this feature :rtype: float
4.775628
3.904019
1.223259
_energy = feature_calculators.abs_energy(x)
logging.debug("abs energy by tsfresh calculated")
return _energy
def abs_energy(self, x)
As in tsfresh `abs_energy <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L390>`_ \ Returns the absolute energy of the time series which is the sum over the squared values\ .. math:: E=\\sum_{i=1,\ldots, n}x_i^2 :param x: the time series to calculate the feature of :type x: pandas.Series :return: the value of this feature :rtype: float
15.587068
10.105529
1.54243
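Since the absolute energy is E = sum_i x_i^2, it reduces to a dot product; a quick NumPy check (illustrative only, not the tsfresh code):

import numpy as np

x = np.array([1.0, 2.0, -3.0])
abs_energy = np.dot(x, x)  # squared Euclidean norm: 1 + 4 + 9
print(abs_energy)          # 14.0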
if param is None:
    param = [{'aggtype': 'centroid'}]
_fft_agg = feature_calculators.fft_aggregated(x, param)
logging.debug("fft aggregated by tsfresh calculated")
return list(_fft_agg)
def fft_aggregated(self, x, param=None)
As in tsfresh `fft_aggregated <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L896>`_ Returns the spectral centroid (mean), variance, skew, and kurtosis of the absolute fourier transform spectrum. :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"aggtype": s} where s str and in ["centroid", "variance", "skew", "kurtosis"] :type param: list :return: the different feature values :rtype: pandas.Series
10.117872
5.02744
2.01253
if param is None:
    param = [{'attr': 'abs', 'coeff': 44}, {'attr': 'abs', 'coeff': 63},
             {'attr': 'abs', 'coeff': 0}, {'attr': 'real', 'coeff': 0},
             {'attr': 'real', 'coeff': 23}]
_fft_coef = feature_calculators.fft_coefficient(x, param)
logging.debug("fft coefficient by tsfresh calculated")
return list(_fft_coef)
def fft_coefficient(self, x, param=None)
As in tsfresh `fft_coefficient <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L852>`_ \ Calculates the fourier coefficients of the one-dimensional discrete Fourier Transform for real input by fast \ fourier transformation algorithm .. math:: A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0, \\ldots , n-1. The resulting coefficients will be complex; this feature calculator can return the real part (attr=="real"), \ the imaginary part (attr=="imag"), the absolute value (attr=="abs") and the angle in degrees (attr=="angle"). :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag"\ , "abs", "angle"] :type param: list :return: the different feature values :rtype: pandas.Series
4.849985
3.556871
1.363554
mean_signal = np.mean(data_frame.mag_sum_acc)
data_frame['dc_mag_sum_acc'] = data_frame.mag_sum_acc - mean_signal
logging.debug("dc remove signal")
return data_frame
def dc_remove_signal(self, data_frame)
Removes the dc component of the signal as per :cite:`Kassavetis2015` :param data_frame: the data frame :type data_frame: pandas.DataFrame :return: the data frame with a dc-removed signal field :rtype: pandas.DataFrame
5.212075
4.98391
1.04578
try:
    data_frame_resampled = self.resample_signal(data_frame)
    data_frame_dc = self.dc_remove_signal(data_frame_resampled)
    data_frame_filtered = self.filter_signal(data_frame_dc, 'dc_mag_sum_acc')
    if method == 'fft':
        data_frame_fft = self.fft_signal(data_frame_filtered)
        return self.amplitude_by_fft(data_frame_fft)
    else:
        return self.amplitude_by_welch(data_frame_filtered)
except ValueError as verr:
    logging.error("TremorProcessor bradykinesia ValueError ->%s", verr.message)
except:
    logging.error("Unexpected error on TremorProcessor bradykinesia: %s", sys.exc_info()[0])
def bradykinesia(self, data_frame, method='fft')
This method calculates the bradykinesia amplitude of the data frame. It accepts two different methods, \ 'fft' and 'welch'. First the signal gets re-sampled, dc removed and then high pass filtered. :param data_frame: the data frame :type data_frame: pandas.DataFrame :param method: fft or welch. :type method: str :return ampl: the amplitude of the Bradykinesia :rtype ampl: float :return freq: the frequency of the Bradykinesia :rtype freq: float
3.378204
3.043365
1.110022
if reactor is None:
    from twisted.internet import gtk2reactor
    gtk2reactor.install()
    from twisted.internet import reactor
try:
    AWSStatusIndicator(reactor)
    gobject.set_application_name("aws-status")
    reactor.run()
except ValueError:
    # In this case, the user cancelled, and the exception bubbled to here.
    pass
def main(argv, reactor=None)
Run the client GUI. Typical use: >>> sys.exit(main(sys.argv)) @param argv: The arguments to run it with, e.g. sys.argv. @param reactor: The reactor to use. Must be compatible with gtk as this module uses gtk APIs. @return exitcode: The exit code it returned, as per sys.exit.
5.49265
5.98361
0.917949
arguments = arguments[1:]
options = {}
while arguments:
    key = arguments.pop(0)
    if key in ("-h", "--help"):
        raise UsageError("Help requested.")
    if key.startswith("--"):
        key = key[2:]
        try:
            value = arguments.pop(0)
        except IndexError:
            raise OptionError("'--%s' is missing a value." % key)
        options[key] = value
    else:
        raise OptionError("Encountered unexpected value '%s'." % key)
default_key = os.environ.get("AWS_ACCESS_KEY_ID")
if "key" not in options and default_key:
    options["key"] = default_key
default_secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
if "secret" not in options and default_secret:
    options["secret"] = default_secret
default_endpoint = os.environ.get("AWS_ENDPOINT")
if "endpoint" not in options and default_endpoint:
    options["endpoint"] = default_endpoint
for name in ("key", "secret", "endpoint", "action"):
    if name not in options:
        raise OptionError(
            "The '--%s' command-line argument is required." % name)
return options
def parse_options(arguments)
Parse command line arguments. The parsing logic is fairly simple. It can only parse long-style parameters of the form:: --key value Several parameters can be defined in the environment and will be used unless explicitly overridden with command-line arguments. The access key, secret and endpoint values will be loaded from C{AWS_ACCESS_KEY_ID}, C{AWS_SECRET_ACCESS_KEY} and C{AWS_ENDPOINT} environment variables. @param arguments: A list of command-line arguments. The first item is expected to be the name of the program being run. @raises OptionError: Raised if incorrectly formed command-line arguments are specified, or if required command-line arguments are not present. @raises UsageError: Raised if C{--help} is present in command-line arguments. @return: A C{dict} with key/value pairs extracted from the argument list.
2.019266
1.857287
1.087213
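An illustrative call, mirroring the txaws-discover example from the docstring (key, secret and endpoint values are placeholders):

options = parse_options([
    "txaws-discover",  # program name; parse_options drops it
    "--key", "KEY", "--secret", "SECRET",
    "--endpoint", "https://ec2.example.com",
    "--action", "DescribeRegions",
    "--RegionName.0", "us-west-1",
])
# options == {"key": "KEY", "secret": "SECRET",
#             "endpoint": "https://ec2.example.com",
#             "action": "DescribeRegions", "RegionName.0": "us-west-1"}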
options = parse_options(arguments)
key = options.pop("key")
secret = options.pop("secret")
endpoint = options.pop("endpoint")
action = options.pop("action")
return Command(key, secret, endpoint, action, options, output)
def get_command(arguments, output=None)
Parse C{arguments} and configure a L{Command} instance. An access key, secret key, endpoint and action are required. Additional parameters included with the request are passed as parameters to the method call. For example, the following command will create a L{Command} object that can invoke the C{DescribeRegions} method with the optional C{RegionName.0} parameter included in the request:: txaws-discover --key KEY --secret SECRET --endpoint URL \ --action DescribeRegions --RegionName.0 us-west-1 @param arguments: The command-line arguments to parse. @raises OptionError: Raised if C{arguments} can't be used to create a L{Command} object. @return: A L{Command} instance configured to make an EC2 API method call.
2.967813
2.764548
1.073526
def run_command(arguments, output, reactor):
    if output is None:
        output = sys.stdout
    try:
        command = get_command(arguments, output)
    except UsageError:
        print >>output, USAGE_MESSAGE.strip()
        if reactor:
            reactor.callLater(0, reactor.stop)
    except Exception, e:
        print >>output, "ERROR:", str(e)
        if reactor:
            reactor.callLater(0, reactor.stop)
    else:
        deferred = command.run()
        if reactor:
            deferred.addCallback(lambda ignored: reactor.stop())

if not testing_mode:
    from twisted.internet import reactor
    reactor.callLater(0, run_command, arguments, output, reactor)
    reactor.run()
else:
    run_command(arguments, output, None)
def main(arguments, output=None, testing_mode=None)
Entry point parses command-line arguments, runs the specified EC2 API method and prints the response to the screen. @param arguments: Command-line arguments, typically retrieved from C{sys.argv}. @param output: Optionally, a stream to write output to. @param testing_mode: Optionally, a condition that specifies whether or not to run in test mode. When the value is true a reactor will not be run or stopped, to prevent interfering with the test suite.
2.296082
2.297863
0.999225
# determine the number of values to forecast, if necessary
self._calculate_values_to_forecast(timeSeries)

# extract the required parameters, performance improvement
alpha = self._parameters["smoothingFactor"]
valuesToForecast = self._parameters["valuesToForecast"]

# initialize some variables
resultList = []
estimator = None
lastT = None

# "It's always about performance!"
append = resultList.append

# smooth the existing TimeSeries data
for idx in xrange(len(timeSeries)):
    # get the current to increase performance
    t = timeSeries[idx]

    # get the initial estimate
    if estimator is None:
        estimator = t[1]
        continue

    # add the first value to the resultList without any correction
    if 0 == len(resultList):
        append([t[0], estimator])
        lastT = t
        continue

    # calculate the error made during the last estimation
    error = lastT[1] - estimator

    # calculate the new estimator, based on the last occurred value, the error and the smoothingFactor
    estimator = estimator + alpha * error

    # save the current value for the next iteration
    lastT = t

    # add an entry to the result
    append([t[0], estimator])

# forecast additional values if requested
if valuesToForecast > 0:
    currentTime = resultList[-1][0]
    normalizedTimeDiff = currentTime - resultList[-2][0]

    for idx in xrange(valuesToForecast):
        currentTime += normalizedTimeDiff

        # reuse everything
        error = lastT[1] - estimator
        estimator = estimator + alpha * error

        # add a forecasted value
        append([currentTime, estimator])

        # set variables for next iteration
        lastT = resultList[-1]

# return a TimeSeries, containing the result
return TimeSeries.from_twodim_list(resultList)
def execute(self, timeSeries)
Creates a new TimeSeries containing the smoothed and forecasted values. :return: TimeSeries object containing the smoothed TimeSeries, including the forecasted values. :rtype: TimeSeries :note: The first normalized value is chosen as the starting point.
5.190341
5.04413
1.028986
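Stripped of the performance tricks, the loop above is simple exponential smoothing; a compact sketch of the recurrence (hypothetical data, alpha = 0.3), noting that the class applies its update against the previous observation rather than the current one:

def simple_exponential_smoothing(values, alpha):
    # s_t = alpha * x_t + (1 - alpha) * s_{t-1}, seeded with the first value;
    # algebraically the same update as estimator += alpha * error above.
    smoothed = [values[0]]
    for x in values[1:]:
        smoothed.append(alpha * x + (1 - alpha) * smoothed[-1])
    return smoothed

print(simple_exponential_smoothing([10, 12, 11, 13, 12], alpha=0.3))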
parameterIntervals = {}
parameterIntervals["smoothingFactor"] = [0.0, 1.0, False, False]
parameterIntervals["trendSmoothingFactor"] = [0.0, 1.0, False, False]
return parameterIntervals
def _get_parameter_intervals(self)
Returns the intervals for the method's parameters. Only parameters with defined intervals can be used for optimization! :return: Returns a dictionary containing the parameter intervals, using the parameter name as key, while the value has the following format: [minValue, maxValue, minIntervalClosed, maxIntervalClosed] - minValue Minimal value for the parameter - maxValue Maximal value for the parameter - minIntervalClosed :py:const:`True`, if minValue represents a valid value for the parameter. :py:const:`False` otherwise. - maxIntervalClosed: :py:const:`True`, if maxValue represents a valid value for the parameter. :py:const:`False` otherwise. :rtype: dictionary
4.95958
4.589976
1.080524
# determine the number of values to forecast, if necessary
self._calculate_values_to_forecast(timeSeries)

# extract the required parameters, performance improvement
alpha = self._parameters["smoothingFactor"]
beta = self._parameters["trendSmoothingFactor"]

# initialize some variables
resultList = []
estimator = None
trend = None
lastT = None

# "It's always about performance!"
append = resultList.append

# smooth the existing TimeSeries data
for idx in xrange(len(timeSeries)):
    # get the current to increase performance
    t = timeSeries[idx]

    # get the initial estimate
    if estimator is None:
        estimator = t[1]
        lastT = t
        continue

    # add the first value to the resultList without any correction
    if 0 == len(resultList):
        append([t[0], estimator])
        trend = t[1] - lastT[1]

        # store current values for next iteration
        lastT = t
        lastEstimator = estimator
        continue

    # calculate the new estimator and trend, based on the last occurred value, the error and the smoothingFactor
    estimator = alpha * t[1] + (1 - alpha) * (estimator + trend)
    trend = beta * (estimator - lastEstimator) + (1 - beta) * trend

    # add an entry to the result
    append([t[0], estimator])

    # store current values for next iteration
    lastT = t
    lastEstimator = estimator

# forecast additional values if requested
if self._parameters["valuesToForecast"] > 0:
    currentTime = resultList[-1][0]
    normalizedTimeDiff = currentTime - resultList[-2][0]

    for idx in xrange(1, self._parameters["valuesToForecast"] + 1):
        currentTime += normalizedTimeDiff

        # reuse everything
        forecast = estimator + idx * trend

        # add a forecasted value
        append([currentTime, forecast])

# return a TimeSeries, containing the result
return TimeSeries.from_twodim_list(resultList)
def execute(self, timeSeries)
Creates a new TimeSeries containing the smoothed values. :return: TimeSeries object containing the smoothed TimeSeries, including the forecasted values. :rtype: TimeSeries :note: The first normalized value is chosen as the starting point.
4.789913
4.691844
1.020902
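The two coupled updates are Holt's double exponential smoothing; a compact stand-alone sketch of just the level/trend recurrences (hypothetical data and smoothing factors):

def holt_smoothing(values, alpha, beta):
    # level_t = alpha * x_t + (1 - alpha) * (level_{t-1} + trend_{t-1})
    # trend_t = beta * (level_t - level_{t-1}) + (1 - beta) * trend_{t-1}
    level, trend = values[0], values[1] - values[0]
    smoothed = [level]
    for x in values[1:]:
        last_level = level
        level = alpha * x + (1 - alpha) * (level + trend)
        trend = beta * (level - last_level) + (1 - beta) * trend
        smoothed.append(level)
    return smoothed, level, trend

smoothed, level, trend = holt_smoothing([10, 12, 14, 15, 18], alpha=0.5, beta=0.3)
print(level + 3 * trend)  # h-step-ahead forecast: level + h * trend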
# determine the number of values to forecast, if necessary
self._calculate_values_to_forecast(timeSeries)

seasonLength = self.get_parameter("seasonLength")
if len(timeSeries) < seasonLength:
    raise ValueError("The time series must contain at least one full season.")

alpha = self.get_parameter("smoothingFactor")
beta = self.get_parameter("trendSmoothingFactor")
gamma = self.get_parameter("seasonSmoothingFactor")

seasonValues = self.initSeasonFactors(timeSeries)
resultList = []
lastEstimator = 0

for idx in xrange(len(timeSeries)):
    t = timeSeries[idx][0]
    x_t = timeSeries[idx][1]
    if idx == 0:
        lastTrend = self.initialTrendSmoothingFactors(timeSeries)
        lastEstimator = x_t
        resultList.append([t, x_t])
        continue
    lastSeasonValue = seasonValues[idx % seasonLength]
    estimator = alpha * x_t / lastSeasonValue + (1 - alpha) * (lastEstimator + lastTrend)
    lastTrend = beta * (estimator - lastEstimator) + (1 - beta) * lastTrend
    seasonValues[idx % seasonLength] = gamma * x_t / estimator + (1 - gamma) * lastSeasonValue
    lastEstimator = estimator
    resultList.append([t, estimator])

resultList += self._calculate_forecast(timeSeries, resultList, seasonValues,
                                       [lastEstimator, lastSeasonValue, lastTrend])
return TimeSeries.from_twodim_list(resultList)
def execute(self, timeSeries)
Creates a new TimeSeries containing the smoothed values. :param TimeSeries timeSeries: TimeSeries containing the data. :return: TimeSeries object containing the exponentially smoothed TimeSeries, including the forecasted values. :rtype: TimeSeries :note: Currently the first normalized value is simply chosen as the starting point.
3.402178
3.333516
1.020597
forecastResults = []
lastEstimator, lastSeasonValue, lastTrend = lastSmoothingParams
seasonLength = self.get_parameter("seasonLength")

# Forecasting. Determine the time difference between two points for extrapolation
currentTime = smoothedData[-1][0]
normalizedTimeDiff = currentTime - smoothedData[-2][0]

for m in xrange(1, self._parameters["valuesToForecast"] + 1):
    currentTime += normalizedTimeDiff
    lastSeasonValue = seasonValues[(len(originalTimeSeries) + m - 2) % seasonLength]
    forecast = (lastEstimator + m * lastTrend) * lastSeasonValue
    forecastResults.append([currentTime, forecast])

return forecastResults
def _calculate_forecast(self, originalTimeSeries, smoothedData, seasonValues, lastSmoothingParams)
Calculates the actual forecast based on the input data. :param TimeSeries originalTimeSeries: TimeSeries containing the data. :param list smoothedData: Contains the smoothed time series data. :param list seasonValues: Contains the seasonal values for the forecast. :param list lastSmoothingParams: List containing the last [estimator, season value, trend] calculated while smoothing the TimeSeries. :return: Returns a list containing the forecasted values.
5.639249
5.315431
1.06092
seasonLength = self.get_parameter("seasonLength")

try:
    seasonValues = self.get_parameter("seasonValues")
    assert seasonLength == len(seasonValues), "Preset season values have to be of the season's length"
    return seasonValues
except KeyError:
    pass

seasonValues = []
completeCycles = len(timeSeries) / seasonLength

A = {}  # cache values for A_j
for i in xrange(seasonLength):
    c_i = 0
    for j in xrange(completeCycles):
        if j not in A:
            A[j] = self.computeA(j, timeSeries)
        # wikipedia suggests j-1, but we worked with indices in the first place
        c_i += timeSeries[(seasonLength * j) + i][1] / A[j]
    c_i /= completeCycles
    seasonValues.append(c_i)

return seasonValues
def initSeasonFactors(self, timeSeries)
Computes the initial season smoothing factors. :return: Returns a list of season vectors of length "seasonLength". :rtype: list
5.567059
5.336959
1.043114
result = 0.0
seasonLength = self.get_parameter("seasonLength")

# In case of only one full season, use average trend of the months that we have twice
k = min(len(timeSeries) - seasonLength, seasonLength)

for i in xrange(0, k):
    result += (timeSeries[seasonLength + i][1] - timeSeries[i][1]) / seasonLength

return result / k
def initialTrendSmoothingFactors(self, timeSeries)
Calculate the initial Trend smoothing Factor b0. Explanation: http://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing :return: Returns the initial trend smoothing factor b0
6.301237
6.796232
0.927166
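This is the standard Holt-Winters trend initialization b0 = (1/k) * sum_{i=0}^{k-1} (x_{L+i} - x_i) / L with season length L; a stand-alone sketch (hypothetical data with season length 4):

def initial_trend(values, season_length):
    # Average the per-step change between corresponding points
    # of the first two seasons.
    k = min(len(values) - season_length, season_length)
    return sum((values[season_length + i] - values[i]) / season_length
               for i in range(k)) / k

print(initial_trend([10.0, 12.0, 14.0, 16.0, 18.0, 20.0, 22.0, 24.0], 4))  # 2.0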
seasonLength = self.get_parameter("seasonLength")
A_j = 0
for i in range(seasonLength):
    A_j += timeSeries[(seasonLength * j) + i][1]
return A_j / seasonLength
def computeA(self, j, timeSeries)
Calculates A_j, the average value of x in the jth cycle of the data. :return: A_j :rtype: numeric
4.735283
4.337131
1.091801
quoted = False
escaped = False
result = []
for i, ch in enumerate(text):
    if escaped:
        escaped = False
        result.append(ch)
    elif ch == u'\\':
        escaped = True
    elif ch == u'"':
        quoted = not quoted
    elif not quoted and ch == u' ':
        return u"".join(result), text[i:].lstrip()
    else:
        result.append(ch)
return u"".join(result), u""
def _split_quoted(text)
Split a unicode string on *SPACE* characters. Splitting is not done at *SPACE* characters occurring within matched *QUOTATION MARK*s. *REVERSE SOLIDUS* can be used to remove all interpretation from the following character. :param unicode text: The string to split. :return: A two-tuple of unicode giving the two split pieces.
2.055853
2.429122
0.846336
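A few illustrative calls (expected outputs in comments, derived from the splitting rules in the docstring):

print(_split_quoted(u'hello world'))  # (u'hello', u'world')
print(_split_quoted(u'"a b" c'))      # (u'a b', u'c')   quoted space preserved
print(_split_quoted(u'a\\ b c'))      # (u'a b', u'c')   escaped space preserved
print(_split_quoted(u'single'))       # (u'single', u'')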
data = self.resample_signal(x).values
f_res = self.sampling_frequency / self.window
f_nr_LBs = int(self.loco_band[0] / f_res)
f_nr_LBe = int(self.loco_band[1] / f_res)
f_nr_FBs = int(self.freeze_band[0] / f_res)
f_nr_FBe = int(self.freeze_band[1] / f_res)

jPos = self.window + 1
i = 0

time = []
sumLocoFreeze = []
freezeIndex = []

while jPos < len(data):
    jStart = jPos - self.window
    time.append(jPos)

    y = data[int(jStart):int(jPos)]
    y = y - np.mean(y)

    Y = np.fft.fft(y, int(self.window))
    Pyy = abs(Y * Y) / self.window

    areaLocoBand = numerical_integration(Pyy[f_nr_LBs - 1: f_nr_LBe], self.sampling_frequency)
    areaFreezeBand = numerical_integration(Pyy[f_nr_FBs - 1: f_nr_FBe], self.sampling_frequency)

    sumLocoFreeze.append(areaFreezeBand + areaLocoBand)
    freezeIndex.append(areaFreezeBand / areaLocoBand)

    jPos = jPos + self.step_size
    i = i + 1

freeze_time = np.asarray(time, dtype=np.int32)
freeze_index = np.asarray(freezeIndex, dtype=np.float32)
locomotor_freeze_index = np.asarray(sumLocoFreeze, dtype=np.float32)

return freeze_time, freeze_index, locomotor_freeze_index
def freeze_of_gait(self, x)
This method assesses freeze of gait following :cite:`g-BachlinPRMHGT10`. :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :return freeze_time: The times at which freeze of gait events occur. [measured in time (h:m:s)] :rtype freeze_time: numpy.ndarray :return freeze_index: Freeze Index is defined as the power in the “freeze” band [3–8 Hz] divided by the power in the “locomotor” band [0.5–3 Hz] [3]. [measured in Hz] :rtype freeze_index: numpy.ndarray :return locomotor_freeze_index: Locomotor freeze index is the power in the “freeze” band [3–8 Hz] added to the power in the “locomotor” band [0.5–3 Hz]. [measured in Hz] :rtype locomotor_freeze_index: numpy.ndarray
2.934465
2.601249
1.128099
peaks_data = x[start_offset: -end_offset].values
maxtab, mintab = peakdet(peaks_data, self.delta)
x = np.mean(peaks_data[maxtab[1:, 0].astype(int)] - peaks_data[maxtab[:-1, 0].astype(int)])
frequency_of_peaks = abs(1 / x)
return frequency_of_peaks
def frequency_of_peaks(self, x, start_offset=100, end_offset=100)
This method assesses the frequency of the peaks on any given 1-dimensional time series. :param x: The time series to assess the frequency of peaks on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :param start_offset: Signal to leave out (of calculations) from the beginning of the time series (100 default). :type start_offset: int :param end_offset: Signal to leave out (of calculations) from the end of the time series (100 default). :type end_offset: int :return frequency_of_peaks: The frequency of peaks on the provided time series [measured in Hz]. :rtype frequency_of_peaks: float
4.401056
4.462593
0.986211
coeffs = wavedec(x.values, wavelet=wavelet_type, level=wavelet_level)
energy = [sum(coeffs[wavelet_level - i] ** 2) / len(coeffs[wavelet_level - i])
          for i in range(wavelet_level)]

WEd1 = energy[0] / (5 * np.sqrt(2))
WEd2 = energy[1] / (4 * np.sqrt(2))
WEd3 = energy[2] / (3 * np.sqrt(2))
WEd4 = energy[3] / (2 * np.sqrt(2))
WEd5 = energy[4] / np.sqrt(2)
WEd6 = energy[5] / np.sqrt(2)

gait_speed = 0.5 * np.sqrt(WEd1 + (WEd2 / 2) + (WEd3 / 3) + (WEd4 / 4) + (WEd5 / 5) + (WEd6 / 6))
return gait_speed
def speed_of_gait(self, x, wavelet_type='db3', wavelet_level=6)
This method assesses the speed of gait following :cite:`g-MartinSB11`. It extracts the gait speed from the energies of the approximation coefficients of wavelet functions. Preferably you should use the magnitude of x, y and z (mag_acc_sum) here as the time series. :param x: The time series to assess the speed of gait on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :param wavelet_type: The type of wavelet to use. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a full list ('db3' default). :type wavelet_type: str :param wavelet_level: The number of decomposition levels to use. See https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a full list (6 default). :type wavelet_level: int :return: The speed of gait [measured in meters/second]. :rtype: float
2.125065
2.064397
1.029388
def _symmetry(v):
    maxtab, _ = peakdet(v, self.delta)
    return maxtab[1][1], maxtab[2][1]

step_regularity_x, stride_regularity_x = _symmetry(autocorrelation(data_frame.x))
step_regularity_y, stride_regularity_y = _symmetry(autocorrelation(data_frame.y))
step_regularity_z, stride_regularity_z = _symmetry(autocorrelation(data_frame.z))

symmetry_x = step_regularity_x - stride_regularity_x
symmetry_y = step_regularity_y - stride_regularity_y
symmetry_z = step_regularity_z - stride_regularity_z

step_regularity = np.array([step_regularity_x, step_regularity_y, step_regularity_z])
stride_regularity = np.array([stride_regularity_x, stride_regularity_y, stride_regularity_z])
walk_symmetry = np.array([symmetry_x, symmetry_y, symmetry_z])

return step_regularity, stride_regularity, walk_symmetry
def walk_regularity_symmetry(self, data_frame)
This method extracts the step and stride regularity and also walk symmetry. :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :return step_regularity: Regularity of steps on [x, y, z] coordinates, defined as the consistency of the step-to-step pattern. :rtype step_regularity: numpy.ndarray :return stride_regularity: Regularity of stride on [x, y, z] coordinates, defined as the consistency of the stride-to-stride pattern. :rtype stride_regularity: numpy.ndarray :return walk_symmetry: Symmetry of walk on [x, y, z] coordinates, defined as the difference between step and stride regularity. :rtype walk_symmetry: numpy.ndarray
1.986672
1.757337
1.130502
# Sum of absolute values across accelerometer axes:
data = data_frame.x.abs() + data_frame.y.abs() + data_frame.z.abs()

# Find maximum peaks of smoothed data:
dummy, ipeaks_smooth = self.heel_strikes(data)
data = data.values

# Compute number of samples between peaks using the real part of the FFT:
interpeak = compute_interpeak(data, self.sampling_frequency)
decel = np.int(np.round(self.stride_fraction * interpeak))

# Find maximum peaks close to maximum peaks of smoothed data:
ipeaks = []
for ipeak_smooth in ipeaks_smooth:
    ipeak = np.argmax(data[ipeak_smooth - decel:ipeak_smooth + decel])
    ipeak += ipeak_smooth - decel
    ipeaks.append(ipeak)

# Compute the average vector for each deceleration phase:
vectors = []
for ipeak in ipeaks:
    decel_vectors = np.asarray([[data_frame.x[i], data_frame.y[i], data_frame.z[i]]
                                for i in range(ipeak - decel, ipeak)])
    vectors.append(np.mean(decel_vectors, axis=0))

# Compute the average deceleration vector and take the opposite direction:
direction = -1 * np.mean(vectors, axis=0)

# Return the unit vector in this direction:
direction /= np.sqrt(direction.dot(direction))
return direction
def walk_direction_preheel(self, data_frame)
Estimate local walk (not cardinal) direction with pre-heel strike phase. Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors" :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :return: Unit vector of local walk (not cardinal) direction. :rtype: numpy.ndarray
3.923506
3.857703
1.017058
# Demean data:
data = x.values
data -= data.mean()  # TODO: fix this

# Low-pass filter the AP accelerometer data by the 4th order zero lag
# Butterworth filter whose cut frequency is set to 5 Hz:
filtered = butter_lowpass_filter(data, self.sampling_frequency,
                                 self.cutoff_frequency, self.filter_order)

# Find transitional positions where AP accelerometer changes from
# positive to negative.
transitions = crossings_nonzero_pos2neg(filtered)

# Find the peaks of AP acceleration preceding the transitional positions,
# and greater than the product of a threshold and the maximum value of
# the AP acceleration:
strike_indices_smooth = []
filter_threshold = np.abs(self.delta * np.max(filtered))
for i in range(1, np.size(transitions)):
    segment = range(transitions[i - 1], transitions[i])
    imax = np.argmax(filtered[segment])
    if filtered[segment[imax]] > filter_threshold:
        strike_indices_smooth.append(segment[imax])

# Compute number of samples between peaks using the real part of the FFT:
interpeak = compute_interpeak(data, self.sampling_frequency)
decel = np.int(interpeak / 2)

# Find maximum peaks close to maximum peaks of smoothed data:
strikes_idx = []
for ismooth in strike_indices_smooth:
    istrike = np.argmax(data[ismooth - decel:ismooth + decel])
    istrike = istrike + ismooth - decel
    strikes_idx.append(istrike)

strikes = np.asarray(strikes_idx)
strikes -= strikes[0]
strikes = strikes / self.sampling_frequency

return strikes, np.array(strikes_idx)
def heel_strikes(self, x)
Estimate heel strike times between sign changes in accelerometer data. :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :return strikes: Heel strike timings measured in seconds. :rtype strikes: numpy.ndarray :return strikes_idx: Heel strike timing indices of the time series. :rtype strikes_idx: numpy.ndarray
5.095108
5.019917
1.014978
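A short sketch of calling heel_strikes on a single axis, reusing the hypothetical extractor from the previous example. strikes are seconds relative to the first strike; strikes_idx index into the raw samples.

strikes, strikes_idx = extractor.heel_strikes(data_frame.y)  # extractor as above
if strikes[-1] > 0:
    cadence = (len(strikes) - 1) / strikes[-1]  # steps per second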
if (average_step_duration == 'autodetect') or (average_stride_duration == 'autodetect'):
    strikes, _ = self.heel_strikes(x)
    step_durations = []
    for i in range(1, np.size(strikes)):
        step_durations.append(strikes[i] - strikes[i-1])
    average_step_duration = np.mean(step_durations)
    number_of_steps = np.size(strikes)
    # Alternate heel strikes belong to alternate feet, so strides are every
    # second strike:
    strides1 = strikes[0::2]
    strides2 = strikes[1::2]
    stride_durations1 = []
    for i in range(1, np.size(strides1)):
        stride_durations1.append(strides1[i] - strides1[i-1])
    stride_durations2 = []
    for i in range(1, np.size(strides2)):
        stride_durations2.append(strides2[i] - strides2[i-1])
    strides = [strides1, strides2]
    stride_durations = [stride_durations1, stride_durations2]
    average_stride_duration = np.mean((np.mean(stride_durations1),
                                       np.mean(stride_durations2)))
    return self.gait_regularity_symmetry(x, average_step_duration,
                                         average_stride_duration,
                                         unbias, normalize)
else:
    # Pass the unbias and normalize parameters through instead of
    # hard-coding them:
    coefficients, _ = autocorrelate(x, unbias=unbias, normalize=normalize)
    step_period = int(np.round(1 / average_step_duration))
    stride_period = int(np.round(1 / average_stride_duration))
    step_regularity = coefficients[step_period]
    stride_regularity = coefficients[stride_period]
    symmetry = np.abs(stride_regularity - step_regularity)
    return step_regularity, stride_regularity, symmetry
def gait_regularity_symmetry(self, x, average_step_duration='autodetect', average_stride_duration='autodetect', unbias=1, normalize=2)
Compute step and stride regularity and symmetry from accelerometer data, based on the detected steps and strides. :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :param average_step_duration: Average duration of each step using the same time unit as the time series. If this is set to 'autodetect' it will be inferred from the time series. :type average_step_duration: float :param average_stride_duration: Average duration of each stride using the same time unit as the time series. If this is set to 'autodetect' it will be inferred from the time series. :type average_stride_duration: float :param unbias: Unbiased autocorrelation: divide by range (unbias=1) or by weighted range (unbias=2). :type unbias: int :param normalize: Normalization: divide by 1st coefficient (normalize=1) or by maximum abs. value (normalize=2). :type normalize: int :return step_regularity: Step regularity measure along the axis. :rtype step_regularity: float :return stride_regularity: Stride regularity measure along the axis. :rtype stride_regularity: float :return symmetry: Symmetry measure along the axis. :rtype symmetry: float
1.927635
1.863248
1.034556
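A sketch of both call styles, again on the hypothetical extractor: autodetection of step/stride durations, and explicit durations, which skip the heel-strike detection entirely.

step_reg, stride_reg, symmetry = extractor.gait_regularity_symmetry(data_frame.y)
step_reg, stride_reg, symmetry = extractor.gait_regularity_symmetry(
    data_frame.y, average_step_duration=0.5, average_stride_duration=1.0)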
sections = [[]]
mask = data_frame[labels_col].apply(lambda x: x in labels_to_keep)
for i, m in enumerate(mask):
    if m:
        sections[-1].append(i)
    if not m and len(sections[-1]) > min_labels_in_sequence:
        sections.append([])
# Drop the last, still-open section:
sections.pop()
sections = [self.rebuild_indexes(data_frame.iloc[s]) for s in sections]
return sections
def separate_into_sections(self, data_frame, labels_col='anno', labels_to_keep=[1,2], min_labels_in_sequence=100)
Helper function to separate a time series into multiple sections based on a labeled column. :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :param labels_col: The column which has the labels we would like to separate the data_frame on ('anno' default). :type labels_col: str :param labels_to_keep: The unique label ids of the labels which we would like to keep, out of all the labels in the labels_col ([1, 2] default). :type labels_to_keep: list :param min_labels_in_sequence: The minimum number of samples which can make up a section (100 default). :type min_labels_in_sequence: int :return: A list of DataFrames, segmented accordingly. :rtype: list
2.562747
3.415124
0.750411
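A sketch of sectioning a labeled recording; the 'anno' column values here are made up for illustration.

# Label the first half walking (1) and the rest something else (0):
data_frame['anno'] = 0
data_frame.loc[data_frame.index[:len(data_frame) // 2], 'anno'] = 1
sections = extractor.separate_into_sections(data_frame, labels_col='anno',
                                            labels_to_keep=[1, 2],
                                            min_labels_in_sequence=100)
print([len(s) for s in sections])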
peaks, prominences = get_signal_peaks_and_prominences(x)
bellman_idx = BellmanKSegment(prominences, states)
return peaks, prominences, bellman_idx
def bellman_segmentation(self, x, states)
Divide a univariate time series, x, into a given number of contiguous segments, using the Bellman k-segmentation algorithm on the peak prominences of the data. :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :param states: Number of contiguous segments. :type states: int :return peaks: The peaks of the time series. :rtype peaks: list :return prominences: The peak prominences. :rtype prominences: list :return bellman_idx: The indices of the segments. :rtype bellman_idx: list
5.854547
3.477288
1.683653
peaks, prominences = get_signal_peaks_and_prominences(x)
# sklearn estimators expect a 2D array, so reshape the prominences to (-1, 1):
sklearn_idx = cluster_fn.fit_predict(prominences.reshape(-1, 1))
return peaks, prominences, sklearn_idx
def sklearn_segmentation(self, x, cluster_fn)
Divide a univariate time series, x, into contiguous segments, using sklearn clustering algorithms on the peak prominences of the data. :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc. :type x: pandas.Series :param cluster_fn: Any unsupervised learning algorithm from the sklearn library. It needs to have the `fit_predict` method. :type cluster_fn: sklearn estimator :return peaks: The peaks of the time series. :rtype peaks: list :return prominences: The peak prominences. :rtype prominences: list :return sklearn_idx: The indices of the segments. :rtype sklearn_idx: list
5.613963
4.447876
1.262167
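Any sklearn estimator that exposes fit_predict can be plugged in; KMeans is a common choice. A short sketch:

from sklearn.cluster import KMeans

peaks, prominences, labels = extractor.sklearn_segmentation(
    data_frame.y, cluster_fn=KMeans(n_clusters=3))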
# TODO: add checks that the dictionary is in the expected format.
data_frame['segmentation'] = 'unknown'
for i, (k, v) in enumerate(segmentation_dictionary.items()):
    for start, end in v:
        # Integer positions are translated into index values; clamp them
        # to the valid range first:
        if not isinstance(start, np.datetime64):
            if start < 0:
                start = 0
            if end >= len(data_frame):
                end = len(data_frame) - 1
            start = data_frame.index.values[start]
            end = data_frame.index.values[end]
        data_frame.loc[start:end, 'segmentation'] = k
return data_frame
def add_manual_segmentation_to_data_frame(self, data_frame, segmentation_dictionary)
Utility method to store manual segmentation of gait time series. :param data_frame: The data frame. It should have x, y, and z columns. :type data_frame: pandas.DataFrame :param segmentation_dictionary: A dictionary of the form {'signal_type': [(from, to), (from, to)], ..., 'signal_type': [(from, to), (from, to)]}. The from and to can either be of type numpy.datetime64 or int, depending on how you are segmenting the time series. :type segmentation_dictionary: dict :return: The data_frame with a new column named 'segmentation'. :rtype: pandas.DataFrame
3.401221
3.202485
1.062057
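A sketch of manual labeling with integer positions; the segment names and ranges are placeholders.

segments = {'walking': [(0, 400)], 'standing': [(401, 900)]}
labeled = extractor.add_manual_segmentation_to_data_frame(data_frame, segments)
print(labeled['segmentation'].value_counts())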
data = x
fig, ax = plt.subplots()
fig.set_size_inches(figsize[0], figsize[1])
# Fixed palette, repeated so that longer dictionaries do not run out of colors:
colors = 'bgrcmykw' * 3
data.plot(ax=ax)
for i, (k, v) in enumerate(segmentation_dictionary.items()):
    for start, end in v:
        # Translate integer positions into index values:
        if not isinstance(start, np.datetime64):
            start = data.index.values[start]
            end = data.index.values[end]
        plt.axvspan(start, end, color=colors[i], alpha=0.5)
legend = [mpatches.Patch(color=colors[i], label="{}".format(k))
          for i, k in enumerate(segmentation_dictionary.keys())]
plt.legend(handles=legend)
plt.show()
def plot_segmentation_dictionary(self, x, segmentation_dictionary, figsize=(10, 5))
Utility method used to visualize how the segmentation dictionary interacts with the time series. :param x: The time series to plot the segmentation on. :type x: pandas.Series :param segmentation_dictionary: A dictionary of the form {'signal_type': [(from, to), (from, to)], ..., 'signal_type': [(from, to), (from, to)]}. :type segmentation_dictionary: dict :param figsize: The size of the figure where we will plot the segmentation on top of the provided time series ((10, 5) default). :type figsize: tuple
2.597811
2.607108
0.996434
# Fixed palette, repeated so that many segment labels do not run out of colors:
colors = 'bgrcmykw' * 3
keys = np.unique(segmented_data_frame['segmentation'])
fig, ax = plt.subplots()
fig.set_size_inches(figsize[0], figsize[1])
segmented_data_frame[axis].plot(ax=ax)
for i, k in enumerate(keys):
    patch = segmented_data_frame['segmentation'].loc[segmented_data_frame['segmentation'] == k].index
    for p in patch:
        ax.axvline(p, color=colors[i], alpha=0.1)
legend = [mpatches.Patch(color=colors[i], label="{}".format(k))
          for i, k in enumerate(keys)]
plt.legend(handles=legend)
plt.show()
def plot_segmentation_data_frame(self, segmented_data_frame, axis='mag_sum_acc', figsize=(10, 5))
Utility method used to visualize how the segmentation column interacts with the time series. :param segmented_data_frame: The segmented data frame. It should have x, y, z and segmentation columns. :type segmented_data_frame: pandas.DataFrame :param axis: The axis which we want to plot. We can choose from x, y, z and mag_sum_acc ('mag_sum_acc' default). :type axis: str :param figsize: The size of the figure where we will plot the segmentation on top of the provided time series ((10, 5) default). :type figsize: tuple
2.760817
2.794313
0.988013
http_status = 0
if error.check(TwistedWebError):
    xml_payload = error.value.response
    if error.value.status:
        http_status = int(error.value.status)
else:
    error.raiseException()
if http_status >= 400:
    if not xml_payload:
        error.raiseException()
    try:
        fallback_error = errorClass(
            xml_payload, error.value.status, str(error.value),
            error.value.response)
    except (ParseError, AWSResponseParseError):
        error_message = http.RESPONSES.get(http_status)
        fallback_error = TwistedWebError(
            http_status, error_message, error.value.response)
    raise fallback_error
elif 200 <= http_status < 300:
    return str(error.value)
else:
    error.raiseException()
def error_wrapper(error, errorClass)
We want to see all error messages from cloud services. Amazon's EC2 says that their errors are accompanied either by a 400-series or 500-series HTTP response code. As such, the first thing we want to do is check to see if the error is in that range. If it is, we then need to see if the error message is an EC2 one. In the event that an error is not a Twisted web error nor an EC2 one, the original exception is raised.
3.274962
3.100614
1.05623
return b'/' + b'/'.join(seg.encode('utf-8') for seg in ctx.path)
def _get_joined_path(ctx)
@type ctx: L{_URLContext} @param ctx: A URL context. @return: The path component, un-urlencoded, but joined by slashes. @rtype: L{bytes}
7.369969
7.431968
0.991658
contextFactory = None
scheme, host, port, path = parse(url)
data = kwds.get('postdata', None)
self._method = method = kwds.get('method', 'GET')
self.request_headers = self._headers(kwds.get('headers', {}))
if (self.body_producer is None) and (data is not None):
    self.body_producer = FileBodyProducer(StringIO(data))
if self.endpoint.ssl_hostname_verification:
    contextFactory = None
else:
    contextFactory = WebClientContextFactory()
agent = _get_agent(scheme, host, self.reactor, contextFactory)
if scheme == "https":
    self.client.url = url
d = agent.request(method, url, self.request_headers, self.body_producer)
d.addCallback(self._handle_response)
return d
def get_page(self, url, *args, **kwds)
Define our own get_page method so that we can easily override the factory when we need to. This was copied from the following: * twisted.web.client.getPage * twisted.web.client._makeGetterFactory
3.73959
3.333441
1.121841
return Headers(dict((k,[v]) for (k,v) in headers_dict.items()))
def _headers(self, headers_dict)
Convert dictionary of headers into twisted.web.client.Headers object.
6.706185
4.200034
1.596698
return dict((k, v[0]) for (k, v) in headers.getAllRawHeaders())
def _unpack_headers(self, headers)
Unpack twisted.web.client.Headers object to a dict. This is to provide backwards compatibility.
5.467134
3.133498
1.744738
if self.request_headers:
    return self._unpack_headers(self.request_headers)
def get_request_headers(self, *args, **kwds)
A convenience method for obtaining the headers that were sent to the S3 server. The AWS S3 API depends upon setting headers. This method is provided as a convenience for debugging issues with the S3 communications.
5.440435
6.48714
0.838649
self.client.status = response.code
self.response_headers = response.headers
# XXX This is a workaround (which itself needs improvement) for a possible
# bug in Twisted's new client:
# http://twistedmatrix.com/trac/ticket/5476
if self._method.upper() == 'HEAD' or response.code == NO_CONTENT:
    return succeed('')
receiver = self.receiver_factory()
receiver.finished = d = Deferred()
receiver.content_length = response.length
response.deliverBody(receiver)
if response.code >= 400:
    d.addCallback(self._fail_response, response)
return d
def _handle_response(self, response)
Handle the HTTP response by recording the headers and then delivering the body bytes.
6.246725
5.894888
1.059685
if self.response_headers:
    return self._unpack_headers(self.response_headers)
def get_response_headers(self, *args, **kwargs)
A convenience method for obtaining the headers that were sent from the S3 server. The AWS S3 API depends upon setting headers. This method is used by the head_object API call for getting a S3 object's metadata.
5.743883
6.777877
0.847446
assert secret, "Missing secret key"
assert client_id, "Missing client id"
headers = {
    "typ": __type__,
    "alg": __algorithm__
}
claims = {
    'iss': client_id,
    'iat': epoch_seconds()
}
return jwt.encode(payload=claims, key=secret, headers=headers).decode()
def create_jwt_token(secret, client_id)
Create JWT token for GOV.UK Notify. Tokens have the standard header: { "typ": "JWT", "alg": "HS256" } Claims consist of: iss: identifier for the client iat: issued at in epoch seconds (UTC) :param secret: Application signing secret :param client_id: Identifier for the client :return: JWT token for this request
3.342669
3.284168
1.017813
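A sketch of issuing a token and inspecting it with PyJWT; the secret and client id are placeholders.

import jwt

token = create_jwt_token(secret="my-signing-secret", client_id="my-service")
print(jwt.get_unverified_header(token))  # {'typ': 'JWT', 'alg': 'HS256'}
claims = jwt.decode(token, "my-signing-secret", algorithms=["HS256"])
print(claims['iss'], claims['iat'])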
try:
    unverified = decode_token(token)
    if 'iss' not in unverified:
        raise TokenIssuerError
    return unverified.get('iss')
except jwt.DecodeError:
    raise TokenDecodeError
def get_token_issuer(token)
The issuer of a token is the identifier used to recover the secret. We need to extract this from the token to ensure we can proceed to the signature validation stage. Does not check the validity of the token. :param token: signed JWT token :return issuer: iss field of the JWT token :raises TokenIssuerError: if the iss field is not present :raises TokenDecodeError: if the token does not conform to the JWT spec
4.316354
3.715554
1.161698
try:
    # check signature of the token
    decoded_token = jwt.decode(
        token,
        key=secret.encode(),
        verify=True,
        algorithms=[__algorithm__],
        leeway=__bound__
    )
    # token has all the required fields
    if 'iss' not in decoded_token:
        raise TokenIssuerError
    if 'iat' not in decoded_token:
        raise TokenIssuedAtError
    # check iat time is within bounds
    now = epoch_seconds()
    iat = int(decoded_token['iat'])
    if now > (iat + __bound__):
        raise TokenExpiredError("Token has expired", decoded_token)
    if iat > (now + __bound__):
        raise TokenExpiredError("Token can not be in the future", decoded_token)
    return True
except jwt.InvalidIssuedAtError:
    raise TokenExpiredError("Token has invalid iat field", decode_token(token))
except jwt.DecodeError:
    raise TokenDecodeError
def decode_jwt_token(token, secret)
Validates and decodes the JWT token. The token is checked for: - signature of the JWT token - a valid token issued date :param token: jwt token :param secret: client specific secret :return boolean: True if the token is valid :raises TokenIssuerError: if the iss field is not present :raises TokenIssuedAtError: if the iat field is not present :raises TokenExpiredError: if the iat field is out of bounds :raises TokenDecodeError: if the signature validation fails
3.3895
2.982731
1.136375
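A sketch of the full round trip: recover the issuer, look up its secret, then validate. A token signed with a different secret fails signature validation and surfaces as TokenDecodeError.

token = create_jwt_token(secret="my-signing-secret", client_id="my-service")
issuer = get_token_issuer(token)  # use this to look up the client's secret
assert decode_jwt_token(token, "my-signing-secret")
try:
    decode_jwt_token(token, "a-different-secret")
except TokenDecodeError:
    print("signature check failed, as expected")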
if in_superblank:
    if char == ']':
        in_superblank = False
        text_buffer += char
    elif char == '\\':
        text_buffer += char
        text_buffer += next(stream)
    else:
        text_buffer += char
elif in_lexical_unit:
    if char == '$':
        if with_text:
            yield (text_buffer, LexicalUnit(buffer))
        else:
            yield LexicalUnit(buffer)
        buffer = ''
        text_buffer = ''
        in_lexical_unit = False
    elif char == '\\':
        buffer += char
        buffer += next(stream)
    else:
        buffer += char
else:
    if char == '[':
        in_superblank = True
        text_buffer += char
    elif char == '^':
        in_lexical_unit = True
    elif char == '\\':
        text_buffer += char
        text_buffer += next(stream)
    else:
        text_buffer += char
def parse(stream, with_text=False):
    # type: (Iterator[str], bool) -> Iterator[Union[Tuple[str, LexicalUnit], LexicalUnit]]
    buffer = ''
    text_buffer = ''
    in_lexical_unit = False
    in_superblank = False
    for char in stream
Generates lexical units from a character stream. Args: stream (Iterator[str]): A character stream containing lexical units, superblanks and other text. with_text (Optional[bool]): A boolean defining whether to output preceding text with each lexical unit. Yields: :class:`LexicalUnit`: The next lexical unit found in the character stream (if `with_text` is False). *(str, LexicalUnit)* - The next lexical unit found in the character stream and the text that separated it from the prior unit, as a tuple (if `with_text` is True).
1.902536
1.809991
1.05113
try:
    datafile = file(datafilepath, "wb")
except Exception:
    return False
if self._timestampFormat is None:
    self._timestampFormat = _STR_EPOCHS
datafile.write("# time_as_<%s> value\n" % self._timestampFormat)
convert = TimeSeries.convert_epoch_to_timestamp
for datapoint in self._timeseriesData:
    timestamp, value = datapoint
    if self._timestampFormat is not None:
        timestamp = convert(timestamp, self._timestampFormat)
    datafile.write("%s %s\n" % (timestamp, value))
datafile.close()
return True
def to_gnuplot_datafile(self, datafilepath)
Dumps the TimeSeries into a gnuplot compatible data file. :param string datafilepath: Path used to create the file. If that file already exists, it will be overwritten! :return: Returns :py:const:`True` if the data could be written, :py:const:`False` otherwise. :rtype: boolean
4.031081
3.936671
1.023982
# create and fill the given TimeSeries
ts = TimeSeries()
ts.set_timeformat(tsformat)
for entry in datalist:
    ts.add_entry(*entry[:2])
# set the normalization level
ts._normalized = ts.is_normalized()
ts.sort_timeseries()
return ts
def from_twodim_list(cls, datalist, tsformat=None)
Creates a new TimeSeries instance from the data stored inside a two dimensional list. :param list datalist: List containing multiple iterables with at least two values. The first item will always be used as timestamp in the predefined format, the second represents the value. All other items in those sublists will be ignored. :param string tsformat: Format of the given timestamp. This is used to convert the timestamp into UNIX epochs, if necessary. For valid examples take a look into the :py:func:`time.strptime` documentation. :return: Returns a TimeSeries instance containing the data from datalist. :rtype: TimeSeries
7.613647
8.080458
0.94223
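A sketch of building a TimeSeries from [timestamp, value] pairs, first with raw epochs and then with formatted timestamps.

ts = TimeSeries.from_twodim_list([[0, 1.0], [60, 2.0], [120, 1.5]])
ts = TimeSeries.from_twodim_list(
    [["2024-01-01 00:00:00", 1.0], ["2024-01-01 00:01:00", 2.0]],
    tsformat="%Y-%m-%d %H:%M:%S")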
# initialize the result
tuples = 0
# add the SQL result to the time series
data = sqlcursor.fetchmany()
while 0 < len(data):
    for entry in data:
        self.add_entry(str(entry[0]), entry[1])
        tuples += 1
    data = sqlcursor.fetchmany()
# set the normalization level (note: _check_normalization has to be
# called, not merely referenced)
self._normalized = self._check_normalization()
# return the number of tuples added to the timeseries.
return tuples
def initialize_from_sql_cursor(self, sqlcursor)
Initializes the TimeSeries's data from the given SQL cursor. You need to set the time stamp format using :py:meth:`TimeSeries.set_timeformat`. :param SQLCursor sqlcursor: Cursor that holds the SQL result for any given "SELECT timestamp, value, ... FROM ..." SQL query. Only the first two attributes of the SQL result will be used. :return: Returns the number of entries added to the TimeSeries. :rtype: integer
6.451953
5.760233
1.120085
return time.mktime(time.strptime(timestamp, tsformat))
def convert_timestamp_to_epoch(cls, timestamp, tsformat)
Converts the given timestamp into a float representing UNIX-epochs. :param string timestamp: Timestamp in the defined format. :param string tsformat: Format of the given timestamp. This is used to convert the timestamp into UNIX epochs. For valid examples take a look into the :py:func:`time.strptime` documentation. :return: Returns a float representing the UNIX-epochs for the given timestamp. :rtype: float
4.057193
6.1393
0.660856
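A worked example of the conversion; note that time.mktime interprets the parsed struct_time in local time, so the resulting epoch depends on the machine's timezone.

import time

epoch = time.mktime(time.strptime("2024-01-15 10:30:00", "%Y-%m-%d %H:%M:%S"))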
return time.strftime(tsformat, time.gmtime(timestamp))
def convert_epoch_to_timestamp(cls, timestamp, tsformat)
Converts the given float representing UNIX-epochs into an actual timestamp. :param float timestamp: Timestamp as UNIX-epochs. :param string tsformat: Format of the given timestamp. This is used to convert the timestamp from UNIX epochs. For valid examples take a look into the :py:func:`time.strptime` documentation. :return: Returns the timestamp as defined in format. :rtype: string
4.352735
5.947947
0.731805
self._normalized = self._predefinedNormalized
self._sorted = self._predefinedSorted
tsformat = self._timestampFormat
if tsformat is not None:
    timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat)
self._timeseriesData.append([float(timestamp), float(data)])
def add_entry(self, timestamp, data)
Adds a new data entry to the TimeSeries. :param timestamp: Time stamp of the data. This has either to be a float representing the UNIX epochs or a string containing a timestamp in the given format. :param numeric data: Actual data value.
7.676253
7.012821
1.094603
# the time series is sorted by default
if ascending and self._sorted:
    return self
sortorder = 1
if not ascending:
    sortorder = -1
    self._predefinedSorted = False
self._timeseriesData.sort(key=lambda i: sortorder * i[0])
self._sorted = ascending
return self
def sort_timeseries(self, ascending=True)
Sorts the data points within the TimeSeries in place according to their time stamps. :param boolean ascending: Determines if the TimeSeries will be ordered ascending or descending. If this is set to descending once, the ordered parameter defined in :py:meth:`TimeSeries.__init__` will be set to False FOREVER. :return: Returns :py:obj:`self` for convenience. :rtype: TimeSeries
5.820385
6.341153
0.917875
sortorder = 1
if not ascending:
    sortorder = -1
data = sorted(self._timeseriesData, key=lambda i: sortorder * i[0])
newTS = TimeSeries(self._normalized)
for entry in data:
    newTS.add_entry(*entry)
newTS._sorted = ascending
return newTS
def sorted_timeseries(self, ascending=True)
Returns a sorted copy of the TimeSeries, preserving the original one. The new TimeSeries is assumed to be unsorted again as soon as a new value is added. :param boolean ascending: Determines if the TimeSeries will be ordered ascending or descending. :return: Returns a new TimeSeries instance sorted in the requested order. :rtype: TimeSeries
4.795316
5.92647
0.809135
# do not normalize the TimeSeries if it is already normalized, either by
# definition or a prior call of normalize(*)
if self._normalizationLevel == normalizationLevel:
    if self._normalized:  # pragma: no cover
        return
# check if all parameters are defined correctly
if normalizationLevel not in NormalizationLevels:
    raise ValueError("Normalization level %s is unknown." % normalizationLevel)
if fusionMethod not in FusionMethods:
    raise ValueError("Fusion method %s is unknown." % fusionMethod)
if interpolationMethod not in InterpolationMethods:
    raise ValueError("Interpolation method %s is unknown." % interpolationMethod)
# (nearly) empty TimeSeries instances do not require normalization
if len(self) < 2:
    self._normalized = True
    return
# get the defined methods and parameters
self._normalizationLevel = normalizationLevel
normalizationLevel = NormalizationLevels[normalizationLevel]
fusionMethod = FusionMethods[fusionMethod]
interpolationMethod = InterpolationMethods[interpolationMethod]
# sort the TimeSeries
self.sort_timeseries()
# prepare the required buckets
start = self._timeseriesData[0][0]
end = self._timeseriesData[-1][0]
span = end - start
bucketcnt = int(span / normalizationLevel) + 1
buckethalfwidth = normalizationLevel / 2.0
bucketstart = start + buckethalfwidth
buckets = [[bucketstart + idx * normalizationLevel] for idx in xrange(bucketcnt)]
# Step One: Populate the buckets.
# Initialize the time series data iterators:
tsdStartIdx = 0
tsdEndIdx = 0
tsdlength = len(self)
for idx in xrange(bucketcnt):
    # get the bucket to avoid multiple calls of buckets.__getitem__()
    bucket = buckets[idx]
    # get the range for the given bucket
    bucketend = bucket[0] + buckethalfwidth
    while tsdEndIdx < tsdlength and self._timeseriesData[tsdEndIdx][0] < bucketend:
        tsdEndIdx += 1
    # continue, if no valid data entries exist
    if tsdStartIdx == tsdEndIdx:
        continue
    # use the given fusion method to calculate the fused value
    values = [i[1] for i in self._timeseriesData[tsdStartIdx:tsdEndIdx]]
    bucket.append(fusionMethod(values))
    # set the new timeseries data index
    tsdStartIdx = tsdEndIdx
# Step Two: Fill in missing buckets.
missingCount = 0
lastIdx = 0
for idx in xrange(bucketcnt):
    # bucket is empty
    if 1 == len(buckets[idx]):
        missingCount += 1
        continue
    # This is the first bucket. The first bucket is not empty by definition!
    if idx == 0:
        lastIdx = idx
        continue
    # update the lastIdx, if none was missing
    if 0 == missingCount:
        lastIdx = idx
        continue
    # calculate and fill in missing values
    missingValues = interpolationMethod(buckets[lastIdx][1], buckets[idx][1], missingCount)
    for idx2 in xrange(1, missingCount + 1):
        buckets[lastIdx + idx2].append(missingValues[idx2 - 1])
    lastIdx = idx
    missingCount = 0
self._timeseriesData = buckets
# at the end set self._normalized to True
self._normalized = True
def normalize(self, normalizationLevel="minute", fusionMethod="mean", interpolationMethod="linear")
Normalizes the TimeSeries data points. If this function is called, the TimeSeries gets ordered ascending automatically. The new timestamps will represent the center of each time bucket. Within a normalized TimeSeries, the temporal distance between two consecutive data points is constant. :param string normalizationLevel: Level of normalization that has to be applied. The available normalization levels are defined in :py:data:`timeseries.NormalizationLevels`. :param string fusionMethod: Normalization method that has to be used if multiple data entries exist within the same normalization bucket. The available methods are defined in :py:data:`timeseries.FusionMethods`. :param string interpolationMethod: Interpolation method that is used if a data entry at a specific time is missing. The available interpolation methods are defined in :py:data:`timeseries.InterpolationMethods`. :raise: Raises a :py:exc:`ValueError` if normalizationLevel, fusionMethod or interpolationMethod have an unknown value.
3.394744
3.347258
1.014187
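A sketch of normalizing a raw TimeSeries, fusing duplicate entries with the mean and interpolating gaps linearly; the level name assumes a key defined in timeseries.NormalizationLevels.

ts = TimeSeries.from_twodim_list([[0, 1.0], [30, 2.0], [200, 4.0]])
ts.normalize(normalizationLevel="minute", fusionMethod="mean",
             interpolationMethod="linear")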
lastDistance = None
distance = None
for idx in xrange(len(self) - 1):
    distance = self[idx+1][0] - self[idx][0]
    # first run
    if lastDistance is None:
        lastDistance = distance
        continue
    if lastDistance != distance:
        return False
    lastDistance = distance
return True
def _check_normalization(self)
Checks, if the TimeSeries is normalized. :return: Returns :py:const:`True` if all data entries of the TimeSeries have an equal temporal distance, :py:const:`False` otherwise.
3.793853
3.683921
1.029841
# check if the method's requirements are fulfilled
if method.has_to_be_normalized() and not self._normalized:
    raise StandardError("method requires a normalized TimeSeries instance.")
if method.has_to_be_sorted():
    self.sort_timeseries()
return method.execute(self)
def apply(self, method)
Applies the given ForecastingAlgorithm or SmoothingMethod from the :py:mod:`pycast.methods` module to the TimeSeries. :param BaseMethod method: Method that should be used with the TimeSeries. For more information about the methods take a look into their corresponding documentation. :raise: Raises a StandardError when the TimeSeries was not normalized and the method requires a normalized TimeSeries.
7.817977
5.506098
1.419876
if not (0.0 < percentage < 1.0):
    raise ValueError("Parameter percentage has to be in (0.0, 1.0).")
cls = self.__class__
value_count = int(len(self) * percentage)
values = random.sample(self, value_count)
sample = cls.from_twodim_list(values)
rest_values = self._timeseriesData[:]
for value in values:
    rest_values.remove(value)
rest = cls.from_twodim_list(rest_values)
return sample, rest
def sample(self, percentage)
Draws a random sample (without replacement) from the TimeSeries. Returns the sample and the remaining time series. The original time series is not changed. :param float percentage: How many percent of the original time series should be in the sample. :return: A tuple containing (sample, rest) as two TimeSeries. :rtype: tuple(TimeSeries,TimeSeries) :raise: Raises a ValueError if percentage is not in (0.0, 1.0).
3.660134
2.894112
1.264683
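A sketch of splitting a TimeSeries into a 30% sample and the remaining 70%; the entry counts add up because the sampling is without replacement.

ts = TimeSeries.from_twodim_list([[i, float(i)] for i in range(100)])
sample, rest = ts.sample(0.3)
assert len(sample) + len(rest) == len(ts)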
if not isinstance(data, list):
    data = [data]
if len(data) != self._dimensionCount:
    raise ValueError("data contains %s instead of %s dimensions.\n %s"
                     % (len(data), self._dimensionCount, data))
self._normalized = self._predefinedNormalized
self._sorted = self._predefinedSorted
tsformat = self._timestampFormat
if tsformat is not None:
    timestamp = TimeSeries.convert_timestamp_to_epoch(timestamp, tsformat)
self._timeseriesData.append([float(timestamp)] + [float(dimensionValue) for dimensionValue in data])
def add_entry(self, timestamp, data)
Adds a new data entry to the TimeSeries. :param timestamp: Time stamp of the data. This has either to be a float representing the UNIX epochs or a string containing a timestamp in the given format. :param list data: A list containing the actual dimension values. :raise: Raises a :py:exc:`ValueError` if data does not contain as many dimensions as defined in __init__.
5.159081
4.480377
1.151484
if self._timestampFormat is None:
    return self._timeseriesData
datalist = []
append = datalist.append
convert = TimeSeries.convert_epoch_to_timestamp
for entry in self._timeseriesData:
    append([convert(entry[0], self._timestampFormat), entry[1:]])
return datalist
def to_twodim_list(self)
Serializes the MultiDimensionalTimeSeries data into a two dimensional list of [timestamp, [values]] pairs. :return: Returns a two dimensional list containing [timestamp, [values]] pairs. :rtype: list
5.61811
5.021433
1.118826
# create and fill the given TimeSeries
ts = MultiDimensionalTimeSeries(dimensions=dimensions)
ts.set_timeformat(tsformat)
for entry in datalist:
    ts.add_entry(entry[0], entry[1])
# set the normalization level
ts._normalized = ts.is_normalized()
ts.sort_timeseries()
return ts
def from_twodim_list(cls, datalist, tsformat=None, dimensions=1)
Creates a new MultiDimensionalTimeSeries instance from the data stored inside a two dimensional list. :param list datalist: List containing multiple iterables with at least two values. The first item will always be used as timestamp in the predefined format, the second is a list containing the dimension values. :param string tsformat: Format of the given timestamp. This is used to convert the timestamp into UNIX epochs, if necessary. For valid examples take a look into the :py:func:`time.strptime` documentation. :param integer dimensions: Number of dimensions the MultiDimensionalTimeSeries contains. :return: Returns a MultiDimensionalTimeSeries instance containing the data from datalist. :rtype: MultiDimensionalTimeSeries
5.859243
6.169972
0.949639