Dataset schema: signature (string, 8–3.44k chars), body (string, 0–1.41M chars), docstring (string, 1–122k chars), id (string, 5–17 chars).
def _read_segment_lines(segment_lines):
segment_fields = {}
for field in SEGMENT_SPECS.index:
    segment_fields[field] = [None] * len(segment_lines)
for i in range(len(segment_lines)):
    (segment_fields['<STR_LIT>'][i],
     segment_fields['<STR_LIT>'][i]) = _rx_segment.findall(segment_lines[i])[0]
    if field == '<STR_LIT>':
        segment_fields['<STR_LIT>'][i] = int(segment_fields['<STR_LIT>'][i])
return segment_fields
Extract fields from segment line strings into a dictionary
f10212:m4
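The extraction above can be sketched standalone. The pattern below is a hypothetical stand-in for `_rx_segment` (its actual literal is masked in this corpus); in a WFDB multi-segment header, each segment line pairs a segment name with its length in samples.

import re

# Hypothetical stand-in for _rx_segment; the real pattern is masked above.
_rx_segment = re.compile(r'([-\w]+)[ \t]+(\d+)')

segment_lines = ['100s 21600', '100l 21600']
print(_rx_segment.findall(segment_lines[0])[0])  # ('100s', '21600')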
def get_write_subset(self, spec_type):
if spec_type == '<STR_LIT>':
    write_fields = []
    record_specs = RECORD_SPECS.copy()
    if not hasattr(self, '<STR_LIT>'):
        record_specs.drop('<STR_LIT>', inplace=True)
    for field in record_specs.index[-1::-1]:
        if field in write_fields:
            continue
        if (record_specs.loc[field, '<STR_LIT>']
                or getattr(self, field) is not None):
            req_field = field
            while req_field is not None:
                write_fields.append(req_field)
                req_field = record_specs.loc[req_field, '<STR_LIT>']
    if getattr(self, '<STR_LIT>') is not None:
        write_fields.append('<STR_LIT>')
elif spec_type == '<STR_LIT>':
    write_fields = []
    signal_specs = SIGNAL_SPECS.copy()
    for ch in range(self.n_sig):
        write_fields_ch = []
        for field in signal_specs.index[-1::-1]:
            if field in write_fields_ch:
                continue
            item = getattr(self, field)
            if signal_specs.loc[field, '<STR_LIT>'] or (item is not None and item[ch] is not None):
                req_field = field
                while req_field is not None:
                    write_fields_ch.append(req_field)
                    req_field = signal_specs.loc[req_field, '<STR_LIT>']
        write_fields.append(write_fields_ch)
    dict_write_fields = {}
    for field in set([i for write_fields_ch in write_fields for i in write_fields_ch]):
        dict_write_fields[field] = []
        for ch in range(self.n_sig):
            if field in write_fields[ch]:
                dict_write_fields[field].append(ch)
    write_fields = dict_write_fields
return write_fields
Get a set of fields used to write the header; either 'record' or
'signal' specification fields. Helper function for
`get_write_fields`. Gets the default required fields, the user
defined fields, and their dependencies.

Parameters
----------
spec_type : str
    The set of specification fields desired. Either 'record' or
    'signal'.

Returns
-------
write_fields : list or dict
    For record fields, returns a list of all fields needed. For
    signal fields, it returns a dictionary of all fields needed,
    with keys = field and value = list of channels that must be
    present for the field.
f10212:c0:m0
def set_defaults(self):
rfields, sfields = self.get_write_fields()
for f in rfields:
    self.set_default(f)
for f in sfields:
    self.set_default(f)
Set defaults for fields needed to write the header if they have
defaults.

Notes
-----
- This is NOT called by `rdheader`. It is only automatically
  called by the gateway `wrsamp` for convenience.
- This is also not called by `wrheader` since it is supposed to
  be an explicit function.
- This is not responsible for initializing the attributes. That
  is done by the constructor.

See also `set_p_features` and `set_d_features`.
f10212:c1:m0
def wrheader(self, write_dir='<STR_LIT>'):
rec_write_fields, sig_write_fields = self.get_write_fields()
for field in rec_write_fields:
    self.check_field(field)
for field in sig_write_fields:
    self.check_field(field, required_channels=sig_write_fields[field])
self.check_field_cohesion(rec_write_fields, list(sig_write_fields))
self.wr_header_file(rec_write_fields, sig_write_fields, write_dir)
Write a wfdb header file. The signals are not used.

Before writing:
- Get the fields used to write the header for this instance.
- Check each required field.
- Check that the fields are cohesive with one another.

Parameters
----------
write_dir : str, optional
    The output directory in which the header is written.

Notes
-----
This function does NOT call `set_defaults`. Essential fields
must be set beforehand.
f10212:c1:m1
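A minimal usage sketch, assuming the public `wfdb.Record` constructor; the record name and signal metadata below are illustrative values, not from the source.

import wfdb

# Build a Record by hand and write only its header file.
record = wfdb.Record(record_name='test01', n_sig=1, fs=250, sig_len=1000,
                     file_name=['test01.dat'], fmt=['16'], adc_gain=[200.0],
                     baseline=[0], units=['mV'], sig_name=['ch1'])
record.wrheader()  # writes 'test01.hea' into the current directory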
def get_write_fields(self):
rec_write_fields = self.get_write_subset('<STR_LIT>')
if self.comments is not None:
    rec_write_fields.append('<STR_LIT>')
self.check_field('<STR_LIT>')
if self.n_sig > 0:
    sig_write_fields = self.get_write_subset('<STR_LIT>')
else:
    sig_write_fields = None
return rec_write_fields, sig_write_fields
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields, and their
dependencies. Does NOT include `d_signal` or `e_d_signal`.

Returns
-------
rec_write_fields : list
    Record specification fields to be written. Includes
    'comment' if present.
sig_write_fields : dict
    Dictionary of signal specification fields to be written,
    with values equal to the channels that need to be present
    for each field.
f10212:c1:m2
def set_default(self, field):
if field in RECORD_SPECS.index:
    if RECORD_SPECS.loc[field, '<STR_LIT>'] is None or getattr(self, field) is not None:
        return
    setattr(self, field, RECORD_SPECS.loc[field, '<STR_LIT>'])
elif field in SIGNAL_SPECS.index:
    if field == '<STR_LIT>' and self.file_name is None:
        self.file_name = self.n_sig * [self.record_name + '<STR_LIT>']
        return
    item = getattr(self, field)
    if SIGNAL_SPECS.loc[field, '<STR_LIT>'] is None or item is not None:
        return
    if field == '<STR_LIT>' and self.fmt is not None:
        self.adc_res = _signal._fmt_res(self.fmt)
        return
    setattr(self, field,
            [SIGNAL_SPECS.loc[field, '<STR_LIT>']] * self.n_sig)
Set the object's attribute to its default value if it is missing and there is a default. Not responsible for initializing the attribute. That is done by the constructor.
f10212:c1:m3
def check_field_cohesion(self, rec_write_fields, sig_write_fields):
if self.n_sig > 0:
    for f in sig_write_fields:
        if len(getattr(self, f)) != self.n_sig:
            raise ValueError('<STR_LIT>' + f + '<STR_LIT>')
    datfmts = {}
    for ch in range(self.n_sig):
        if self.file_name[ch] not in datfmts:
            datfmts[self.file_name[ch]] = self.fmt[ch]
        else:
            if datfmts[self.file_name[ch]] != self.fmt[ch]:
                raise ValueError('<STR_LIT>')
    datoffsets = {}
    if self.byte_offset is not None:
        for ch in range(self.n_sig):
            if self.byte_offset[ch] is None:
                continue
            if self.file_name[ch] not in datoffsets:
                datoffsets[self.file_name[ch]] = self.byte_offset[ch]
            else:
                if datoffsets[self.file_name[ch]] != self.byte_offset[ch]:
                    raise ValueError('<STR_LIT>')
Check the cohesion of fields used to write the header
f10212:c1:m4
def wr_header_file(self, rec_write_fields, sig_write_fields, write_dir):
record_line = '<STR_LIT>'
for field in RECORD_SPECS.index:
    if field in rec_write_fields:
        string_field = str(getattr(self, field))
        if field == '<STR_LIT>' and isinstance(self.fs, float):
            if round(self.fs, 8) == float(int(self.fs)):
                string_field = str(int(self.fs))
        elif field == '<STR_LIT>' and '.' in string_field:
            string_field = string_field.rstrip('0')
        elif field == '<STR_LIT>':
            string_field = '/'.join((string_field[8:],
                                     string_field[5:7],
                                     string_field[:4]))
        record_line += RECORD_SPECS.loc[field, '<STR_LIT>'] + string_field
        if field == '<STR_LIT>':
            record_line += ')'
header_lines = [record_line]
if self.n_sig > 0:
    signal_lines = self.n_sig * ['<STR_LIT>']
    for ch in range(self.n_sig):
        for field in SIGNAL_SPECS.index:
            if field in sig_write_fields and ch in sig_write_fields[field]:
                signal_lines[ch] += SIGNAL_SPECS.loc[field, '<STR_LIT>'] + str(getattr(self, field)[ch])
            if field == '<STR_LIT>':
                signal_lines[ch] += ')'
    header_lines += signal_lines
if '<STR_LIT>' in rec_write_fields:
    comment_lines = ['<STR_LIT>' + comment for comment in self.comments]
    header_lines += comment_lines
lines_to_file(self.record_name + '<STR_LIT>', write_dir, header_lines)
Write a header file using the specified fields. Converts Record
attributes into appropriate wfdb format strings.

Parameters
----------
rec_write_fields : list
    List of record specification fields to write.
sig_write_fields : dict
    Dictionary of signal specification fields to write, values
    being equal to a list of channels to write for each field.
write_dir : str
    The directory in which to write the header file.
f10212:c1:m5
def set_defaults(self):
for field in self.get_write_fields():
    self.set_default(field)
Set defaults for fields needed to write the header if they have defaults. This is NOT called by rdheader. It is only called by the gateway wrsamp for convenience. It is also not called by wrheader since it is supposed to be an explicit function. Not responsible for initializing the attributes. That is done by the constructor.
f10212:c2:m0
def get_write_fields(self):
write_fields = self.get_write_subset('<STR_LIT>')
write_fields = write_fields + ['<STR_LIT>', '<STR_LIT>']
if self.comments is not None:
    write_fields.append('<STR_LIT>')
return write_fields
Get the list of fields used to write the multi-segment header. Returns the default required fields, the user defined fields, and their dependencies.
f10212:c2:m2
def wr_header_file(self, write_fields, write_dir):
record_line = '<STR_LIT>'
for field in RECORD_SPECS.index:
    if field in write_fields:
        record_line += RECORD_SPECS.loc[field, '<STR_LIT>'] + str(getattr(self, field))
header_lines = [record_line]
segment_lines = self.n_seg * ['<STR_LIT>']
for field in SEGMENT_SPECS.index:
    for seg_num in range(self.n_seg):
        segment_lines[seg_num] += SEGMENT_SPECS.loc[field, '<STR_LIT>'] + str(getattr(self, field)[seg_num])
header_lines = header_lines + segment_lines
if '<STR_LIT>' in write_fields:
    comment_lines = ['<STR_LIT>' + comment for comment in self.comments]
    header_lines += comment_lines
lines_to_file(self.record_name + '<STR_LIT>', header_lines, write_dir)
Write a header file using the specified fields
f10212:c2:m5
def get_sig_segments(self, sig_name=None):
if self.segments is None:
    raise Exception("<STR_LIT>")
if sig_name is None:
    sig_name = self.get_sig_name()
if isinstance(sig_name, list):
    sigdict = {}
    for sig in sig_name:
        sigdict[sig] = self.get_sig_segments(sig)
    return sigdict
elif isinstance(sig_name, str):
    sigsegs = []
    for i in range(self.n_seg):
        if self.seg_name[i] != '<STR_LIT>' and sig_name in self.segments[i].sig_name:
            sigsegs.append(i)
    return sigsegs
else:
    raise TypeError('<STR_LIT>')
Get a list of the segment numbers that contain a particular signal (or a dictionary of segment numbers for a list of signals). Only works if information about the segments has been read in.
f10212:c2:m6
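Usage sketch; the record path and signal name are placeholders, and this assumes `rdheader` was called with its `rd_segments` option so that the `segments` attribute is populated.

import wfdb

# Read a multi-segment header together with its segment headers.
record = wfdb.rdheader('some/multi_segment_record', rd_segments=True)
# Segment numbers containing signal 'V5', skipping empty segments.
print(record.get_sig_segments('V5'))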
def rdtff(file_name, cut_end=False):
file_size = os.path.getsize(file_name)
with open(file_name, 'rb') as fp:
    fields, file_fields = _rdheader(fp)
    signal, markers, triggers = _rdsignal(fp, file_size=file_size,
                                          header_size=file_fields['<STR_LIT>'],
                                          n_sig=file_fields['<STR_LIT>'],
                                          bit_width=file_fields['<STR_LIT>'],
                                          is_signed=file_fields['<STR_LIT>'],
                                          cut_end=cut_end)
return signal, fields, markers, triggers
Read values from a tff file.

Parameters
----------
file_name : str
    Name of the .tff file to read.
cut_end : bool, optional
    If True, cuts out the last sample for all channels. This is
    for reading files which appear to terminate with the
    incorrect number of samples (ie. sample not present for all
    channels).

Returns
-------
signal : numpy array
    A 2d numpy array storing the physical signals from the
    record.
fields : dict
    A dictionary containing several key attributes of the read
    record.
markers : numpy array
    A 1d numpy array storing the marker locations.
triggers : numpy array
    A 1d numpy array storing the trigger locations.

Notes
-----
This function is slow because tff files may contain any number
of escape sequences interspersed with the signals. There is no
way to know the number of samples/escape sequences beforehand,
so the file is inefficiently parsed a small chunk at a time.

It is recommended that you convert your tff files to wfdb format.
f10213:m0
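Usage sketch; the file path is a placeholder.

# Read a .tff file; cut_end trims a trailing partial sample frame.
signal, fields, markers, triggers = rdtff('some_record.tff', cut_end=True)
print(signal.shape, markers.shape, triggers.shape)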
def _rdheader(fp):
tag = None
while tag != 2:
    tag = struct.unpack('<STR_LIT>', fp.read(2))[0]
    data_size = struct.unpack('<STR_LIT>', fp.read(2))[0]
    pad_len = (4 - (data_size % 4)) % 4
    pos = fp.tell()
    if tag == <NUM_LIT>:
        storage_method = fs = struct.unpack('B', fp.read(1))[0]
        storage_method = {0: '<STR_LIT>', 1: '<STR_LIT>', 2: '<STR_LIT>'}[storage_method]
    elif tag == <NUM_LIT>:
        fs = struct.unpack('<STR_LIT>', fp.read(2))[0]
    elif tag == <NUM_LIT>:
        n_sig = data_size
        channel_data = struct.unpack('<STR_LIT>' % data_size, fp.read(data_size))
        channel_map = ((1, 1, '<STR_LIT>'),
                       (15, 30, '<STR_LIT>'), (<NUM_LIT>, <NUM_LIT>, '<STR_LIT>'),
                       (<NUM_LIT>, <NUM_LIT>, '<STR_LIT>'),
                       (<NUM_LIT>, <NUM_LIT>, '<STR_LIT>'), (<NUM_LIT>, <NUM_LIT>, '<STR_LIT>'),
                       (<NUM_LIT>, <NUM_LIT>, '<STR_LIT>'), (<NUM_LIT>, <NUM_LIT>, '<STR_LIT>'),
                       (<NUM_LIT>, <NUM_LIT>, '<STR_LIT>'))
        sig_name = []
        for data in channel_data:
            base_name = '<STR_LIT>'
            if data == 0:
                n_sig -= 1
                break
            for item in channel_map:
                if item[0] <= data <= item[1]:
                    base_name = item[2]
                    break
            existing_count = [base_name in name for name in sig_name].count(True)
            sig_name.append('<STR_LIT>' % (base_name, existing_count))
    elif tag == <NUM_LIT>:
        display_scale = struct.unpack('<STR_LIT>', fp.read(4))[0]
    elif tag == 3:
        sample_fmt = struct.unpack('B', fp.read(1))[0]
        is_signed = bool(sample_fmt >> 7)
        bit_width = sample_fmt & <NUM_LIT>
    elif tag == <NUM_LIT>:
        n_seconds = struct.unpack('<STR_LIT>', fp.read(4))[0]
        base_datetime = datetime.datetime.utcfromtimestamp(n_seconds)
        base_date = base_datetime.date()
        base_time = base_datetime.time()
    elif tag == <NUM_LIT>:
        n_minutes = struct.unpack('<STR_LIT>', fp.read(2))[0]
    fp.seek(pos + data_size + pad_len)
header_size = fp.tell()
fields = {'<STR_LIT>': fs, '<STR_LIT>': n_sig, '<STR_LIT>': sig_name,
          '<STR_LIT>': base_time, '<STR_LIT>': base_date}
file_fields = {'<STR_LIT>': header_size, '<STR_LIT>': n_sig,
               '<STR_LIT>': bit_width, '<STR_LIT>': is_signed}
return fields, file_fields
Read header info of the windaq file
f10213:m1
def _rdsignal(fp, file_size, header_size, n_sig, bit_width, is_signed, cut_end):
fp.seek(header_size)
signal_size = file_size - header_size
byte_width = int(bit_width / 8)
dtype = str(byte_width)
if is_signed:
    dtype = 'i' + dtype
else:
    dtype = 'u' + dtype
dtype = '>' + dtype
max_samples = int(signal_size / byte_width)
max_samples = max_samples - max_samples % n_sig
signal = np.empty(max_samples, dtype=dtype)
markers = []
triggers = []
sample_num = 0
if cut_end:
    stop_byte = file_size - n_sig * byte_width + 1
    while fp.tell() < stop_byte:
        chunk = fp.read(2)
        sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num)
else:
    while True:
        chunk = fp.read(2)
        if not chunk:
            break
        sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num)
signal = signal[:sample_num]
signal = signal.reshape((-1, n_sig))
markers = np.array(markers, dtype='int')
triggers = np.array(triggers, dtype='int')
return signal, markers, triggers
Read the signal.

Parameters
----------
cut_end : bool, optional
    If True, enables reading the end of files which appear to
    terminate with the incorrect number of samples (ie. sample
    not present for all channels), by checking and skipping the
    reading of the end of such files. Checking this option makes
    reading slower.
f10213:m2
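The dtype construction above reduces to a one-liner; for example, a 16-bit signed sample stream becomes the big-endian numpy dtype string '>i2'.

bit_width, is_signed = 16, True
# '>' = big-endian, 'i'/'u' = signed/unsigned, digit = byte width.
dtype = '>' + ('i' if is_signed else 'u') + str(bit_width // 8)
print(dtype)  # '>i2'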
def label_triplets_to_df(triplets):
label_df = pd.DataFrame({'<STR_LIT>': np.array([t[0] for t in triplets],
                                               dtype='int'),
                         '<STR_LIT>': [t[1] for t in triplets],
                         'description': [t[2] for t in triplets]})
label_df.set_index(label_df['<STR_LIT>'].values, inplace=True)
label_df = label_df[list(ann_label_fields)]
return label_df
Get a pd dataframe from a tuple of triplets used to define annotation labels. The triplets should come in the form: (label_store, symbol, description).
f10215:m0
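For example, with illustrative label_store values:

triplets = [(40, 'P1', 'Custom P wave'), (41, 'T1', 'Custom T wave')]
# Produces a dataframe indexed by label_store with columns
# ['label_store', 'symbol', 'description'].
label_df = label_triplets_to_df(triplets)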
def custom_triplet_bytes(custom_triplet):
annbytes = ([0, <NUM_LIT>, len(custom_triplet[2]) + 3 + len(str(custom_triplet[0])), <NUM_LIT>]
            + [ord(c) for c in str(custom_triplet[0])] + [32]
            + [ord(custom_triplet[1])] + [32]
            + [ord(c) for c in custom_triplet[2]])
if len(annbytes) % 2:
    annbytes.append(0)
return annbytes
Convert triplet of [label_store, symbol, description] into bytes for defining custom labels in the annotation file
f10215:m1
def compact_carry_field(full_field):
if full_field is None:
    return None
compact_field = [None] * len(full_field)
prev_field = 0
for i in range(len(full_field)):
    current_field = full_field[i]
    if current_field != prev_field:
        compact_field[i] = current_field
        prev_field = current_field
if np.array_equal(compact_field, [None] * len(full_field)):
    compact_field = None
return compact_field
Return the compact list version of a list/array of an annotation
field that has previous values carried over (chan or num).

- The first sample is 0 by default. Only set otherwise if
  necessary.
- Only set fields if they are different from their prev field.
f10215:m3
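For example, using the function above on a chan-style list whose value only changes at indices 2 and 4:

full_field = [0, 0, 1, 1, 0]
print(compact_carry_field(full_field))  # [None, None, 1, None, 0]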
def wrann(record_name, extension, sample, symbol=None, subtype=None, chan=None,
          num=None, aux_note=None, label_store=None, fs=None,
          custom_labels=None, write_dir='<STR_LIT>'):
annotation = Annotation(record_name=record_name, extension=extension,
                        sample=sample, symbol=symbol, subtype=subtype,
                        chan=chan, num=num, aux_note=aux_note,
                        label_store=label_store, fs=fs,
                        custom_labels=custom_labels)
if symbol is None:
    if label_store is None:
        raise Exception("<STR_LIT>")
else:
    if label_store is None:
        annotation.sym_to_aux()
    else:
        raise Exception("<STR_LIT>")
annotation.wrann(write_fs=True, write_dir=write_dir)
Write a WFDB annotation file.

Specify at least the following:
- The record name of the WFDB record (record_name)
- The annotation file extension (extension)
- The annotation locations in samples relative to the beginning
  of the record (sample)
- Either the numerical values used to store the labels
  (`label_store`), or more commonly, the display symbols of each
  label (`symbol`).

Parameters
----------
record_name : str
    The string name of the WFDB record to be written (without
    any file extensions).
extension : str
    The string annotation file extension.
sample : numpy array
    A numpy array containing the annotation locations in samples
    relative to the beginning of the record.
symbol : list, or numpy array, optional
    The symbols used to display the annotation labels. List or
    numpy array. If this field is present, `label_store` must
    not be present.
subtype : numpy array, optional
    A numpy array containing the marked class/category of each
    annotation.
chan : numpy array, optional
    A numpy array containing the signal channel associated with
    each annotation.
num : numpy array, optional
    A numpy array containing the labelled annotation number for
    each annotation.
aux_note : list, optional
    A list containing the auxiliary information string (or None
    for annotations without notes) for each annotation.
label_store : numpy array, optional
    A numpy array containing the integer values used to store
    the annotation labels. If this field is present, `symbol`
    must not be present.
fs : int, or float, optional
    The numerical sampling frequency of the record to be written
    to the file.
custom_labels : pandas dataframe, optional
    The map of custom defined annotation labels used for this
    annotation, in addition to the standard WFDB annotation
    labels. Custom labels are defined by two or three fields:
    - The integer values used to store custom annotation labels
      in the file (optional)
    - Their short display symbols
    - Their long descriptions.
    This input argument may come in four formats:
    1. A pandas.DataFrame object with columns:
       ['label_store', 'symbol', 'description']
    2. A pandas.DataFrame object with columns:
       ['symbol', 'description']. If this option is chosen,
       label_store values are automatically chosen.
    3. A list or tuple of tuple triplets, with triplet elements
       representing: (label_store, symbol, description).
    4. A list or tuple of tuple pairs, with pair elements
       representing: (symbol, description). If this option is
       chosen, label_store values are automatically chosen.
    If the `label_store` field is given for this function, and
    `custom_labels` is defined, `custom_labels` must contain
    `label_store` in its mapping. ie. it must come in format 1
    or 3 above.
write_dir : str, optional
    The directory in which to write the annotation file.

Notes
-----
This is a gateway function, written as a simple way to write
WFDB annotation files without needing to explicitly create an
Annotation object. You may also create an Annotation object,
manually set its attributes, and call its `wrann` instance
method.

Each annotation stored in a WFDB annotation file contains a
sample field and a label field. All other fields may or may not
be present.

Examples
--------
>>> # Read an annotation as an Annotation object
>>> annotation = wfdb.rdann('b001', 'atr', pb_dir='cebsdb')
>>> # Write a copy of the annotation file
>>> wfdb.wrann('b001', 'cpy', annotation.sample, annotation.symbol)
f10215:m5
def show_ann_labels():
print(ann_label_table)
Display the standard wfdb annotation label mapping.

Examples
--------
>>> show_ann_labels()
f10215:m6
def show_ann_classes():
print(ann_class_table)
Display the standard wfdb annotation classes.

Examples
--------
>>> show_ann_classes()
f10215:m7
def rdann(record_name, extension, sampfrom=0, sampto=None, shift_samps=False,
          pb_dir=None, return_label_elements=['<STR_LIT>'],
          summarize_labels=False):
return_label_elements = check_read_inputs(sampfrom, sampto,
                                          return_label_elements)
filebytes = load_byte_pairs(record_name, extension, pb_dir)
(sample, label_store, subtype,
 chan, num, aux_note) = proc_ann_bytes(filebytes, sampto)
potential_definition_inds, rm_inds = get_special_inds(sample, label_store,
                                                      aux_note)
(fs,
 custom_labels) = interpret_defintion_annotations(potential_definition_inds,
                                                  aux_note)
(sample, label_store, subtype,
 chan, num, aux_note) = rm_empty_indices(rm_inds, sample, label_store,
                                         subtype, chan, num, aux_note)
(sample, label_store, subtype,
 chan, num) = lists_to_int_arrays(sample, label_store, subtype, chan, num)
if fs is None:
    try:
        rec = record.rdheader(record_name, pb_dir)
        fs = rec.fs
    except:
        pass
annotation = Annotation(record_name=os.path.split(record_name)[1],
                        extension=extension, sample=sample,
                        label_store=label_store, subtype=subtype,
                        chan=chan, num=num, aux_note=aux_note, fs=fs,
                        custom_labels=custom_labels)
if sampfrom > 0 and sampto is not None:
    annotation.apply_range(sampfrom=sampfrom, sampto=sampto)
if shift_samps and len(sample) > 0 and sampfrom:
    annotation.sample = annotation.sample - sampfrom
if summarize_labels:
    annotation.get_contained_labels(inplace=True)
annotation.set_label_elements(return_label_elements)
return annotation
Read a WFDB annotation file record_name.extension and return an
Annotation object.

Parameters
----------
record_name : str
    The record name of the WFDB annotation file. ie. for file
    '100.atr', record_name='100'.
extension : str
    The annotator extension of the annotation file. ie. for
    file '100.atr', extension='atr'.
sampfrom : int, optional
    The minimum sample number for annotations to be returned.
sampto : int, optional
    The maximum sample number for annotations to be returned.
shift_samps : bool, optional
    Specifies whether to return the sample indices relative to
    `sampfrom` (True), or sample 0 (False).
pb_dir : str, optional
    Option used to stream data from Physiobank. The Physiobank
    database directory from which to find the required
    annotation file. eg. For record '100' in
    'http://physionet.org/physiobank/database/mitdb':
    pb_dir='mitdb'.
return_label_elements : list, optional
    The label elements that are to be returned from reading the
    annotation file. A list with at least one of the following
    options: 'symbol', 'label_store', 'description'.
summarize_labels : bool, optional
    If True, assign a summary table of the set of annotation
    labels contained in the file to the 'contained_labels'
    attribute of the returned object. This table will contain
    the columns:
    ['label_store', 'symbol', 'description', 'n_occurrences']

Returns
-------
annotation : Annotation
    The Annotation object. Call help(wfdb.Annotation) for the
    attribute descriptions.

Notes
-----
For every annotation sample, the annotation file explicitly
stores the 'sample' and 'symbol' fields, but not necessarily the
others. When reading annotation files using this function,
fields which are not stored in the file will either take their
default values of 0 or None, or will be carried over from their
previous values if any.

Examples
--------
>>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=300000)
f10215:m8
def proc_extra_field(label_store, filebytes, bpi, subtype, chan, num, aux_note, update):
if label_store == <NUM_LIT>:
    subtype.append(filebytes[bpi, 0].astype('<STR_LIT>'))
    update['<STR_LIT>'] = False
    bpi = bpi + 1
elif label_store == <NUM_LIT>:
    chan.append(filebytes[bpi, 0])
    update['<STR_LIT>'] = False
    bpi = bpi + 1
elif label_store == <NUM_LIT>:
    num.append(filebytes[bpi, 0].astype('<STR_LIT>'))
    update['<STR_LIT>'] = False
    bpi = bpi + 1
elif label_store == <NUM_LIT>:
    aux_notelen = filebytes[bpi, 0]
    aux_notebytes = filebytes[bpi + 1:bpi + 1 + int(np.ceil(aux_notelen / <NUM_LIT>)), :].flatten()
    if aux_notelen & 1:
        aux_notebytes = aux_notebytes[:-1]
    aux_note.append("<STR_LIT>".join([chr(char) for char in aux_notebytes]))
    update['<STR_LIT>'] = False
    bpi = bpi + 1 + int(np.ceil(aux_notelen / <NUM_LIT>))
return subtype, chan, num, aux_note, update, bpi
Process extra fields belonging to the current annotation. Potential updated fields: subtype, chan, num, aux_note
f10215:m13
def update_extra_fields(subtype, chan, num, aux_note, update):
if update['<STR_LIT>']:
    subtype.append(0)
if update['<STR_LIT>']:
    if chan == []:
        chan.append(0)
    else:
        chan.append(chan[-1])
if update['<STR_LIT>']:
    if num == []:
        num.append(0)
    else:
        num.append(num[-1])
if update['<STR_LIT>']:
    aux_note.append('<STR_LIT>')
return subtype, chan, num, aux_note
Update the field if the current annotation did not provide a
value.

- aux_note and sub are set to default values if missing.
- chan and num copy over previous value if missing.
f10215:m14
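A small sketch of the carry-over semantics, assuming the masked dict keys are the field names 'subtype', 'chan', 'num', and 'aux_note':

# Assumed key names; the literals are masked in the corpus above.
update = {'subtype': True, 'chan': True, 'num': True, 'aux_note': True}
subtype, chan, num, aux_note = [], [3], [], []
update_extra_fields(subtype, chan, num, aux_note, update)
# subtype and aux_note fall back to defaults; chan carries its last value.
print(subtype, chan, num, aux_note)  # [0] [3, 3] [0] ['']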
def get_special_inds(sample, label_store, aux_note):
s0_inds = np.where(sample == np.int64(0))[0]
note_inds = np.where(label_store == np.int64(<NUM_LIT>))[0]
potential_definition_inds = set(s0_inds).intersection(note_inds)
notann_inds = np.where(label_store == np.int64(0))[0]
rm_inds = potential_definition_inds.union(set(notann_inds))
return potential_definition_inds, rm_inds
Get the indices of annotations that hold definition information
about the entire annotation file, and other empty annotations to
be removed.

Note: There is no need to deal with SKIP annotations
(label_store=59) which were already dealt with in
proc_core_fields and hence not included here.
f10215:m15
def interpret_defintion_annotations(potential_definition_inds, aux_note):
fs = None
custom_labels = []
if len(potential_definition_inds) > 0:
    i = 0
    while i < len(potential_definition_inds):
        if aux_note[i].startswith('<STR_LIT>'):
            if not fs:
                search_fs = rx_fs.findall(aux_note[i])
                if search_fs:
                    fs = float(search_fs[0])
                    if round(fs, 8) == float(int(fs)):
                        fs = int(fs)
                    i += 1
                    continue
            if aux_note[i] == '<STR_LIT>':
                i += 1
                while aux_note[i] != '<STR_LIT>':
                    label_store, symbol, description = rx_custom_label.findall(aux_note[i])[0]
                    custom_labels.append((int(label_store), symbol, description))
                    i += 1
                i += 1
        else:
            i += 1
if not custom_labels:
    custom_labels = None
return fs, custom_labels
Try to extract annotation definition information from annotation
notes. Information that may be contained:
- fs - sample=0, label_store=22, aux_note='## time resolution: XXX'
- custom annotation label definitions
f10215:m16
def rm_empty_indices(*args):
rm_inds = args[0]
if not rm_inds:
    return args[1:]
keep_inds = [i for i in range(len(args[1])) if i not in rm_inds]
return [[a[i] for i in keep_inds] for a in args[1:]]
Remove unwanted list indices. First argument is the list of indices to remove. Other elements are the lists to trim.
f10215:m17
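For example, dropping indices 1 and 3 from two parallel lists:

sample, chan = rm_empty_indices({1, 3}, [10, 20, 30, 40], [0, 0, 1, 1])
print(sample, chan)  # [10, 30] [0, 1]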
def lists_to_int_arrays(*args):
return [np.array(a, dtype='int') for a in args]
Convert lists to numpy int arrays
f10215:m18
def rm_last(*args):
if len(args) == 1:
    return args[0][:-1]
else:
    return [a[:-1] for a in args]
Remove the last index from each list
f10215:m19
def __init__(self, record_name, extension, sample, symbol=None,
             subtype=None, chan=None, num=None, aux_note=None, fs=None,
             label_store=None, description=None, custom_labels=None,
             contained_labels=None):
self.record_name = record_name
self.extension = extension
self.sample = sample
self.symbol = symbol
self.subtype = subtype
self.chan = chan
self.num = num
self.aux_note = aux_note
self.fs = fs
self.label_store = label_store
self.description = description
self.custom_labels = custom_labels
self.contained_labels = contained_labels
self.ann_len = len(self.sample)
Parameters
----------
record_name : str
    The base file name (without extension) of the record that
    the annotation is associated with.
extension : str
    The file extension of the file the annotation is stored in.
sample : numpy array
    A numpy array containing the annotation locations in samples
    relative to the beginning of the record.
symbol : list, or numpy array, optional
    The symbols used to display the annotation labels. List or
    numpy array. If this field is present, `label_store` must
    not be present.
subtype : numpy array, optional
    A numpy array containing the marked class/category of each
    annotation.
chan : numpy array, optional
    A numpy array containing the signal channel associated with
    each annotation.
num : numpy array, optional
    A numpy array containing the labelled annotation number for
    each annotation.
aux_note : list, optional
    A list containing the auxiliary information string (or None
    for annotations without notes) for each annotation.
fs : int, or float, optional
    The sampling frequency of the record.
label_store : numpy array, optional
    The integer value used to store/encode each annotation label.
description : list, optional
    A list containing the descriptive string of each annotation
    label.
custom_labels : pandas dataframe, optional
    The custom annotation labels defined in the annotation file.
    Maps the relationship between the three label fields. The
    data type is a pandas DataFrame with three columns:
    ['label_store', 'symbol', 'description'].
contained_labels : pandas dataframe, optional
    The unique labels contained in this annotation. Same
    structure as `custom_labels`.
f10215:c0:m0
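Constructing an object directly, a sketch with illustrative values:

import numpy as np
from wfdb import Annotation

ann = Annotation(record_name='100', extension='test',
                 sample=np.array([100, 340, 570]),
                 symbol=['N', 'N', 'V'], fs=360)
print(ann.ann_len)  # 3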
def apply_range(self, sampfrom=0, sampto=None):
sampto = sampto or self.sample[-1]
kept_inds = np.intersect1d(np.where(self.sample >= sampfrom),
                           np.where(self.sample <= sampto))
for field in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:
    setattr(self, field, getattr(self, field)[kept_inds])
self.aux_note = [self.aux_note[i] for i in kept_inds]
self.ann_len = len(self.sample)
Filter the annotation attributes to keep only items between the desired sample values
f10215:c0:m2
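For example, after reading an annotation (record path as in the examples above):

import wfdb

ann = wfdb.rdann('sample-data/100', 'atr')
ann.apply_range(sampfrom=1000, sampto=2000)  # keep samples in [1000, 2000]
print(ann.ann_len)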
def wrann(self, write_fs=False, write_dir='<STR_LIT>'):
for field in ['<STR_LIT>', '<STR_LIT>']:
    if getattr(self, field) is None:
        raise Exception('<STR_LIT>', field)
present_label_fields = self.get_label_fields()
if not present_label_fields:
    raise Exception('<STR_LIT>', ann_label_fields)
self.check_fields()
self.standardize_custom_labels()
self.create_label_map()
self.check_field_cohesion(present_label_fields)
if '<STR_LIT>' not in present_label_fields:
    self.convert_label_attribute(source_field=present_label_fields[0],
                                 target_field='<STR_LIT>')
self.wr_ann_file(write_fs=write_fs, write_dir=write_dir)
return
Write a WFDB annotation file from this object.

Parameters
----------
write_fs : bool, optional
    Whether to write the `fs` attribute to the file.
f10215:c0:m3
def get_label_fields(self):
present_label_fields = []
for field in ann_label_fields:
    if getattr(self, field) is not None:
        present_label_fields.append(field)
return present_label_fields
Get the present label fields in the object
f10215:c0:m4
def check_field(self, field):
item = getattr(self, field)
if not isinstance(item, ALLOWED_TYPES[field]):
    raise TypeError('<STR_LIT>' + field + '<STR_LIT>', ALLOWED_TYPES[field])
if ALLOWED_TYPES[field] == (np.ndarray):
    record.check_np_array(item=item, field_name=field, ndim=1,
                          parent_class=np.integer, channel_num=None)
if field == '<STR_LIT>':
    if bool(re.search('<STR_LIT>', self.record_name)):
        raise ValueError('<STR_LIT>')
elif field == '<STR_LIT>':
    if bool(re.search('<STR_LIT>', self.extension)):
        raise ValueError('<STR_LIT>')
elif field == '<STR_LIT>':
    if self.fs <= 0:
        raise ValueError('<STR_LIT>')
elif field == '<STR_LIT>':
    """<STR_LIT>"""
    if isinstance(item, pd.DataFrame):
        column_names = list(item)
        if '<STR_LIT>' in column_names and 'description' in column_names:
            if '<STR_LIT>' in column_names:
                label_store = list(item['<STR_LIT>'].values)
            else:
                label_store = None
            symbol = item['<STR_LIT>'].values
            description = item['description'].values
        else:
            raise ValueError('<STR_LIT>'.join(['<STR_LIT>' + field + '<STR_LIT>',
                                               '<STR_LIT>',
                                               '<STR_LIT>']))
    else:
        if set([len(i) for i in item]) == {2}:
            label_store = None
            symbol = [i[0] for i in item]
            description = [i[1] for i in item]
        elif set([len(i) for i in item]) == {3}:
            label_store = [i[0] for i in item]
            symbol = [i[1] for i in item]
            description = [i[2] for i in item]
        else:
            raise ValueError('<STR_LIT>'.join(['<STR_LIT>' + field + '<STR_LIT>',
                                               '<STR_LIT>',
                                               '<STR_LIT>',
                                               '<STR_LIT>']))
    if label_store:
        if len(item) != len(set(label_store)):
            raise ValueError('<STR_LIT>' + field + '<STR_LIT>')
        if min(label_store) < 1 or max(label_store) > <NUM_LIT>:
            raise ValueError('<STR_LIT>')
    if len(item) != len(set(symbol)):
        raise ValueError('<STR_LIT>' + field + '<STR_LIT>')
    for i in range(len(item)):
        if label_store:
            if not hasattr(label_store[i], '<STR_LIT>'):
                raise TypeError('<STR_LIT>' + field + '<STR_LIT>')
        if not isinstance(symbol[i], str_types) or len(symbol[i]) not in [1, 2, 3]:
            raise ValueError('<STR_LIT>' + field + '<STR_LIT>')
        if bool(re.search('<STR_LIT>', symbol[i])):
            raise ValueError('<STR_LIT>' + field + '<STR_LIT>')
        if not isinstance(description[i], str_types):
            raise TypeError('<STR_LIT>' + field + '<STR_LIT>')
elif field in ['<STR_LIT>', 'description', '<STR_LIT>']:
    uniq_elements = set(item)
    for e in uniq_elements:
        if not isinstance(e, str_types):
            raise TypeError('<STR_LIT>' + field + '<STR_LIT>')
    if field == '<STR_LIT>':
        for e in uniq_elements:
            if len(e) not in [1, 2, 3]:
                raise ValueError('<STR_LIT>' + field + '<STR_LIT>')
            if bool(re.search('<STR_LIT>', e)):
                raise ValueError('<STR_LIT>' + field + '<STR_LIT>')
    else:
        for e in uniq_elements:
            if bool(re.search('<STR_LIT>', e)):
                raise ValueError('<STR_LIT>' + field + '<STR_LIT>')
elif field == '<STR_LIT>':
    if len(self.sample) == 1:
        sampdiffs = np.array([self.sample[0]])
    elif len(self.sample) > 1:
        sampdiffs = np.concatenate(([self.sample[0]], np.diff(self.sample)))
    else:
        raise ValueError("<STR_LIT>")
    if min(self.sample) < 0:
        raise ValueError("<STR_LIT>")
    if min(sampdiffs) < 0:
        raise ValueError("<STR_LIT>")
    if max(sampdiffs) > <NUM_LIT>:
        raise ValueError('<STR_LIT>')
elif field == '<STR_LIT>':
    if min(item) < 1 or max(item) > <NUM_LIT>:
        raise ValueError('<STR_LIT>')
elif field == '<STR_LIT>':
    if min(self.subtype) < 0 or max(self.subtype) > <NUM_LIT>:
        raise ValueError("<STR_LIT>")
elif field == '<STR_LIT>':
    if min(self.chan) < 0 or max(self.chan) > 255:
        raise ValueError("<STR_LIT>")
elif field == '<STR_LIT>':
    if min(self.num) < 0 or max(self.num) > <NUM_LIT>:
        raise ValueError("<STR_LIT>")
return
Check a particular annotation field
f10215:c0:m6
def check_field_cohesion(self, present_label_fields):
nannots = len(self.sample)
for field in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'] + present_label_fields:
    if getattr(self, field) is not None:
        if len(getattr(self, field)) != nannots:
            raise ValueError("<STR_LIT>" + field + "<STR_LIT>")
for field in present_label_fields:
    defined_values = self.__label_map__[field].values
    if set(getattr(self, field)) - set(defined_values) != set():
        raise ValueError('\n'.join(['<STR_LIT>' + field + '<STR_LIT>',
                                    '<STR_LIT>',
                                    '<STR_LIT>',
                                    '<STR_LIT>']))
return
Check that the content and structure of different fields are consistent with one another.
f10215:c0:m7
def standardize_custom_labels(self):
custom_labels = self.custom_labels
if custom_labels is None:
    return
self.check_field('<STR_LIT>')
if not isinstance(custom_labels, pd.DataFrame):
    if len(self.custom_labels[0]) == 2:
        symbol = self.get_custom_label_attribute('<STR_LIT>')
        description = self.get_custom_label_attribute('description')
        custom_labels = pd.DataFrame({'<STR_LIT>': symbol,
                                      'description': description})
    else:
        label_store = self.get_custom_label_attribute('<STR_LIT>')
        symbol = self.get_custom_label_attribute('<STR_LIT>')
        description = self.get_custom_label_attribute('description')
        custom_labels = pd.DataFrame({'<STR_LIT>': label_store,
                                      '<STR_LIT>': symbol,
                                      'description': description})
if '<STR_LIT>' not in list(custom_labels):
    undefined_label_stores = self.get_undefined_label_stores()
    if len(custom_labels) > len(undefined_label_stores):
        available_label_stores = self.get_available_label_stores()
    else:
        available_label_stores = undefined_label_stores
    n_custom_labels = custom_labels.shape[0]
    if n_custom_labels > len(available_label_stores):
        raise ValueError('<STR_LIT>')
    custom_labels['<STR_LIT>'] = available_label_stores[:n_custom_labels]
custom_labels.set_index(custom_labels['<STR_LIT>'].values, inplace=True)
custom_labels = custom_labels[list(ann_label_fields)]
self.custom_labels = custom_labels
return
Set the custom_labels field of the object to a standardized
format: 3 column pandas df with ann_label_fields as columns.

Does nothing if there are no custom labels defined. Does nothing
if custom_labels is already a df with all 3 columns.

If custom_labels is an iterable of pairs/triplets, this function
will convert it into a df.

If the label_store attribute is not already defined, this
function will automatically choose values by trying to use:
1. The undefined store values from the standard wfdb annotation
   label map.
2. The unused label store values. This is extracted by finding
   the set of all labels contained in this annotation object and
   seeing which symbols/descriptions are not used.

If more custom labels are defined than there are available
spaces, even in condition 2 above, this function will raise an
error.

This function must work when called as a standalone.
f10215:c0:m8
def get_undefined_label_stores(self):
return list(set(range(50)) - set(ann_label_table['<STR_LIT>']))
Get the label_store values not defined in the standard wfdb annotation labels.
f10215:c0:m9
def get_available_label_stores(self, usefield='<STR_LIT>'):
if usefield == '<STR_LIT>':
    if self.label_store is not None:
        usefield = '<STR_LIT>'
    elif self.symbol is not None:
        usefield = '<STR_LIT>'
    elif self.description is not None:
        usefield = 'description'
    else:
        raise ValueError('<STR_LIT>', ann_label_fields)
    return self.get_available_label_stores(usefield=usefield)
else:
    contained_field = getattr(self, usefield)
    if usefield == '<STR_LIT>':
        unused_label_stores = set(ann_label_table['<STR_LIT>'].values) - contained_field
    else:
        unused_field = set(ann_label_table[usefield].values) - contained_field
        unused_label_stores = ann_label_table.loc[ann_label_table[usefield] in unused_field, '<STR_LIT>'].values
    if self.custom_symbols is not None:
        custom_field = set(self.get_custom_label_attribute(usefield))
        if usefield == '<STR_LIT>':
            overwritten_label_stores = set(custom_field).intersection(set(ann_label_table['<STR_LIT>']))
        else:
            overwritten_fields = set(custom_field).intersection(set(ann_label_table[usefield]))
            overwritten_label_stores = ann_label_table.loc[ann_label_table[usefield] in overwritten_fields, '<STR_LIT>'].values
    else:
        overwritten_label_stores = set()
    undefined_label_stores = self.get_undefined_label_stores()
    available_label_stores = set(undefined_label_stores).union(set(unused_label_stores)).union(overwritten_label_stores)
    return available_label_stores
Get the label store values that may be used for writing this
annotation.

Available store values include:
- the undefined values in the standard wfdb labels
- the store values not used in the current annotation object
- the store values whose standard wfdb symbols/descriptions
  match those of the custom labels (if custom_labels exists)

If 'usefield' is explicitly specified, the function will use
that field to figure out available label stores. If 'usefield'
is set to 'tryall', the function will choose one of the
contained attributes by checking availability in the order:
label_store, symbol, description.
f10215:c0:m10
def get_custom_label_attribute(self, attribute):
if attribute not in ann_label_fields:
    raise ValueError('<STR_LIT>')
if isinstance(self.custom_labels, pd.DataFrame):
    if '<STR_LIT>' not in list(self.custom_labels):
        raise ValueError('<STR_LIT>')
    a = list(self.custom_labels[attribute].values)
else:
    if len(self.custom_labels[0]) == 2:
        if attribute == '<STR_LIT>':
            raise ValueError('<STR_LIT>')
        elif attribute == '<STR_LIT>':
            a = [l[0] for l in self.custom_labels]
        elif attribute == 'description':
            a = [l[1] for l in self.custom_labels]
    else:
        if attribute == '<STR_LIT>':
            a = [l[0] for l in self.custom_labels]
        elif attribute == '<STR_LIT>':
            a = [l[1] for l in self.custom_labels]
        elif attribute == 'description':
            a = [l[2] for l in self.custom_labels]
return a
Get a list of the custom_labels attribute, ie. label_store, symbol, or description. The custom_labels variable could be in a number of formats.
f10215:c0:m11
def create_label_map(self, inplace=True):
label_map = ann_label_table.copy()
if self.custom_labels is not None:
    self.standardize_custom_labels()
    for i in self.custom_labels.index:
        label_map.loc[i] = self.custom_labels.loc[i]
if inplace:
    self.__label_map__ = label_map
else:
    return label_map
Creates mapping df based on ann_label_table and self.custom_labels. Table composed of entire WFDB standard annotation table, overwritten/appended with custom_labels if any. Sets __label_map__ attribute, or returns value.
f10215:c0:m12
def wr_ann_file(self, write_fs, write_dir='<STR_LIT>'):
if write_fs:
    fs_bytes = self.calc_fs_bytes()
else:
    fs_bytes = []
cl_bytes = self.calc_cl_bytes()
core_bytes = self.calc_core_bytes()
if fs_bytes == [] and cl_bytes == []:
    end_special_bytes = []
else:
    end_special_bytes = [0, <NUM_LIT>, 255, 255, 255, 255, 1, 0]
with open(os.path.join(write_dir, self.record_name + '.' + self.extension),
          'wb') as f:
    np.concatenate((fs_bytes, cl_bytes, end_special_bytes, core_bytes,
                    np.array([0, 0]))).astype('<STR_LIT>').tofile(f)
return
Calculate the bytes used to encode an annotation set and write them to an annotation file
f10215:c0:m13
def calc_core_bytes(self):
if len(self.sample) == 1:
    sampdiff = np.array([self.sample[0]])
else:
    sampdiff = np.concatenate(([self.sample[0]], np.diff(self.sample)))
compact_annotation = copy.deepcopy(self)
compact_annotation.compact_fields()
extra_write_fields = []
for field in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:
    if not isblank(getattr(compact_annotation, field)):
        extra_write_fields.append(field)
data_bytes = []
for i in range(len(sampdiff)):
    data_bytes.append(field2bytes('<STR_LIT>', [sampdiff[i], self.symbol[i]]))
    for field in extra_write_fields:
        value = getattr(compact_annotation, field)[i]
        if value is not None:
            data_bytes.append(field2bytes(field, value))
data_bytes = np.array([item for sublist in data_bytes for item in sublist]).astype('<STR_LIT>')
return data_bytes
Convert all used annotation fields into bytes to write
f10215:c0:m16
def get_contained_labels(self, inplace=True):
if self.custom_labels is not None:
    self.check_field('<STR_LIT>')
label_map = ann_label_table.copy()
if isinstance(self.custom_labels, (list, tuple)):
    custom_labels = label_triplets_to_df(self.custom_labels)
elif isinstance(self.custom_labels, pd.DataFrame):
    self.custom_labels.set_index(
        self.custom_labels['<STR_LIT>'].values, inplace=True)
    custom_labels = self.custom_labels
else:
    custom_labels = None
if custom_labels is not None:
    for i in custom_labels.index:
        label_map.loc[i] = custom_labels.loc[i]
if self.label_store is not None:
    index_vals = set(self.label_store)
    reset_index = False
    counts = np.unique(self.label_store, return_counts=True)
elif self.symbol is not None:
    index_vals = set(self.symbol)
    label_map.set_index(label_map['<STR_LIT>'].values, inplace=True)
    reset_index = True
    counts = np.unique(self.symbol, return_counts=True)
elif self.description is not None:
    index_vals = set(self.description)
    label_map.set_index(label_map['description'].values, inplace=True)
    reset_index = True
    counts = np.unique(self.description, return_counts=True)
else:
    raise Exception('<STR_LIT>')
contained_labels = label_map.loc[index_vals, :]
for i in range(len(counts[0])):
    contained_labels.loc[counts[0][i], '<STR_LIT>'] = counts[1][i]
contained_labels['<STR_LIT>'] = pd.to_numeric(contained_labels['<STR_LIT>'], downcast='<STR_LIT>')
if reset_index:
    contained_labels.set_index(contained_labels['<STR_LIT>'].values,
                               inplace=True)
if inplace:
    self.contained_labels = contained_labels
    return
else:
    return contained_labels
Get the set of unique labels contained in this annotation.
Returns a pandas dataframe or sets the contained_labels
attribute of the object.

Requires the label_store field to be set.

Function will try to use attributes contained in the order:
1. label_store
2. symbol
3. description

This function should also be called to summarize information
about an annotation after it has been read. Should not be a
helper function to others except rdann.
f10215:c0:m19
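The usual entry point is `rdann` with `summarize_labels=True`, which calls this method and stores the resulting table:

import wfdb

ann = wfdb.rdann('sample-data/100', 'atr', summarize_labels=True)
# Columns: ['label_store', 'symbol', 'description', 'n_occurrences']
print(ann.contained_labels)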
def set_label_elements(self, wanted_label_elements):
if isinstance(wanted_label_elements, str):
    wanted_label_elements = [wanted_label_elements]
missing_elements = [e for e in wanted_label_elements if getattr(self, e) is None]
contained_elements = [e for e in ann_label_fields if getattr(self, e) is not None]
if not contained_elements:
    raise Exception('<STR_LIT>')
for e in missing_elements:
    self.convert_label_attribute(contained_elements[0], e)
unwanted_label_elements = list(set(ann_label_fields)
                               - set(wanted_label_elements))
self.rm_attributes(unwanted_label_elements)
return
Set one or more label elements based on at least one of the others
f10215:c0:m20
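This is what `rdann`'s `return_label_elements` argument drives; requesting elements the file did not store converts them from one that is present:

import wfdb

ann = wfdb.rdann('sample-data/100', 'atr',
                 return_label_elements=['symbol', 'description'])
print(ann.symbol[:5], ann.description[:5])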
def convert_label_attribute(self, source_field, target_field, inplace=True,
                            overwrite=True):
if inplace and not overwrite:
    if getattr(self, target_field) is not None:
        return
label_map = self.create_label_map(inplace=False)
label_map.set_index(label_map[source_field].values, inplace=True)
target_item = label_map.loc[getattr(self, source_field), target_field].values
if target_field != '<STR_LIT>':
    target_item = list(target_item)
if inplace:
    setattr(self, target_field, target_item)
else:
    return target_item
Convert one label attribute (label_store, symbol, or
description) to another.

Input arguments:
- inplace - If True, sets the object attribute. If False,
  returns the value.
- overwrite - If True, performs the conversion and replaces the
  target field attribute even if the target attribute already
  has a value. If False, does not perform the conversion in the
  aforementioned case. Set to True (do conversion) if
  inplace=False.

Creates the mapping df on the fly based on ann_label_table and
self.custom_labels.
f10215:c0:m22
def plot_items(signal=None, ann_samp=None, ann_sym=None, fs=None,
               time_units='<STR_LIT>', sig_name=None, sig_units=None,
               ylabel=None, title=None, sig_style=['<STR_LIT>'], ann_style=['<STR_LIT>'],
               ecg_grids=[], figsize=None, return_fig=False):
sig_len, n_sig, n_annot, n_subplots = get_plot_dims(signal, ann_samp)
fig, axes = create_figure(n_subplots, figsize)
if signal is not None:
    plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes)
if ann_samp is not None:
    plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs,
                    time_units, ann_style, axes)
if ecg_grids:
    plot_ecg_grids(ecg_grids, fs, sig_units, time_units, axes)
label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel,
             title)
plt.show(fig)
if return_fig:
    return fig
Subplot individual channels of signals and/or annotations. Parameters ---------- signal : 1d or 2d numpy array, optional The uniformly sampled signal to be plotted. If signal.ndim is 1, it is assumed to be a one channel signal. If it is 2, axes 0 and 1 must represent time and channel number respectively. ann_samp: list, optional A list of annotation locations to plot, with each list item corresponding to a different channel. List items may be: - 1d numpy array, with values representing sample indices. Empty arrays are skipped. - list, with values representing sample indices. Empty lists are skipped. - None. For channels in which nothing is to be plotted. If `signal` is defined, the annotation locations will be overlaid on the signals, with the list index corresponding to the signal channel. The length of `ann_samp` does not have to match the number of channels of `signal`. ann_sym: list, optional A list of annotation symbols to plot, with each list item corresponding to a different channel. List items should be lists of strings. The symbols are plotted over the corresponding `ann_samp` index locations. fs : int or float, optional The sampling frequency of the signals and/or annotations. Used to calculate time intervals if `time_units` is not 'samples'. Also required for plotting ecg grids. time_units : str, optional The x axis unit. Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'. sig_name : list, optional A list of strings specifying the signal names. Used with `sig_units` to form y labels, if `ylabel` is not set. sig_units : list, optional A list of strings specifying the units of each signal channel. Used with `sig_name` to form y labels, if `ylabel` is not set. This parameter is required for plotting ecg grids. ylabel : list, optional A list of strings specifying the final y labels. If this option is present, `sig_name` and `sig_units` will not be used for labels. title : str, optional The title of the graph. sig_style : list, optional A list of strings, specifying the style of the matplotlib plot for each signal channel. The list length should match the number of signal channels. If the list has a length of 1, the style will be used for all channels. ann_style : list, optional A list of strings, specifying the style of the matplotlib plot for each annotation channel. If the list has a length of 1, the style will be used for all channels. ecg_grids : list, optional A list of integers specifying channels in which to plot ecg grids. May also be set to 'all' for all channels. Major grids at 0.5mV, and minor grids at 0.125mV. All channels to be plotted with grids must have `sig_units` equal to 'uV', 'mV', or 'V'. figsize : tuple, optional Tuple pair specifying the width and height of the figure. It is the 'figsize' argument passed into matplotlib.pyplot's `figure` function. return_fig : bool, optional Whether the figure is to be returned as an output argument. Returns ------- figure : matplotlib figure, optional The matplotlib figure generated. Only returned if the 'return_fig' parameter is set to True. Examples -------- >>> record = wfdb.rdrecord('sample-data/100', sampto=3000) >>> ann = wfdb.rdann('sample-data/100', 'atr', sampto=3000) >>> wfdb.plot_items(signal=record.p_signal, ann_samp=[ann.sample, ann.sample], title='MIT-BIH Record 100', time_units='seconds', figsize=(10,4), ecg_grids='all')
f10216:m0
def get_plot_dims(signal, ann_samp):
if signal is not None:<EOL><INDENT>if signal.ndim == <NUM_LIT:1>:<EOL><INDENT>sig_len = len(signal)<EOL>n_sig = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>sig_len = signal.shape[<NUM_LIT:0>]<EOL>n_sig = signal.shape[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sig_len = <NUM_LIT:0><EOL>n_sig = <NUM_LIT:0><EOL><DEDENT>if ann_samp is not None:<EOL><INDENT>n_annot = len(ann_samp)<EOL><DEDENT>else:<EOL><INDENT>n_annot = <NUM_LIT:0><EOL><DEDENT>return sig_len, n_sig, n_annot, max(n_sig, n_annot)<EOL>
Figure out the number of plot channels
f10216:m1
def create_figure(n_subplots, figsize):
fig = plt.figure(figsize=figsize)<EOL>axes = []<EOL>for i in range(n_subplots):<EOL><INDENT>axes.append(fig.add_subplot(n_subplots, <NUM_LIT:1>, i+<NUM_LIT:1>))<EOL><DEDENT>return fig, axes<EOL>
Create the plot figure and subplot axes
f10216:m2
def plot_signal(signal, sig_len, n_sig, fs, time_units, sig_style, axes):
<EOL>if len(sig_style) == <NUM_LIT:1>:<EOL><INDENT>sig_style = n_sig * sig_style<EOL><DEDENT>if time_units == '<STR_LIT>':<EOL><INDENT>t = np.linspace(<NUM_LIT:0>, sig_len-<NUM_LIT:1>, sig_len)<EOL><DEDENT>else:<EOL><INDENT>downsample_factor = {'<STR_LIT>':fs, '<STR_LIT>':fs * <NUM_LIT>,<EOL>'<STR_LIT>':fs * <NUM_LIT>}<EOL>t = np.linspace(<NUM_LIT:0>, sig_len-<NUM_LIT:1>, sig_len) / downsample_factor[time_units]<EOL><DEDENT>if signal.ndim == <NUM_LIT:1>:<EOL><INDENT>axes[<NUM_LIT:0>].plot(t, signal, sig_style[<NUM_LIT:0>], zorder=<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>for ch in range(n_sig):<EOL><INDENT>axes[ch].plot(t, signal[:,ch], sig_style[ch], zorder=<NUM_LIT:3>)<EOL><DEDENT><DEDENT>
Plot signal channels
f10216:m3
def plot_annotation(ann_samp, n_annot, ann_sym, signal, n_sig, fs, time_units,<EOL>ann_style, axes):
<EOL>if len(ann_style) == <NUM_LIT:1>:<EOL><INDENT>ann_style = n_annot * ann_style<EOL><DEDENT>if time_units == '<STR_LIT>':<EOL><INDENT>downsample_factor = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>downsample_factor = {'<STR_LIT>':float(fs), '<STR_LIT>':float(fs)*<NUM_LIT>,<EOL>'<STR_LIT>':float(fs)*<NUM_LIT>}[time_units]<EOL><DEDENT>for ch in range(n_annot):<EOL><INDENT>if ann_samp[ch] is not None and len(ann_samp[ch]):<EOL><INDENT>if n_sig > ch:<EOL><INDENT>if signal.ndim == <NUM_LIT:1>:<EOL><INDENT>y = signal[ann_samp[ch]]<EOL><DEDENT>else:<EOL><INDENT>y = signal[ann_samp[ch], ch]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>y = np.zeros(len(ann_samp[ch]))<EOL><DEDENT>axes[ch].plot(ann_samp[ch] / downsample_factor, y, ann_style[ch])<EOL>if ann_sym is not None and ann_sym[ch] is not None:<EOL><INDENT>for i, s in enumerate(ann_sym[ch]):<EOL><INDENT>axes[ch].annotate(s, (ann_samp[ch][i] / downsample_factor,<EOL>y[i]))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
Plot annotations, possibly overlaid on signals
f10216:m4
def plot_ecg_grids(ecg_grids, fs, units, time_units, axes):
if ecg_grids == '<STR_LIT:all>':<EOL><INDENT>ecg_grids = range(<NUM_LIT:0>, len(axes))<EOL><DEDENT>for ch in ecg_grids:<EOL><INDENT>auto_xlims = axes[ch].get_xlim()<EOL>auto_ylims= axes[ch].get_ylim()<EOL>(major_ticks_x, minor_ticks_x, major_ticks_y,<EOL>minor_ticks_y) = calc_ecg_grids(auto_ylims[<NUM_LIT:0>], auto_ylims[<NUM_LIT:1>],<EOL>units[ch], fs, auto_xlims[<NUM_LIT:1>],<EOL>time_units)<EOL>min_x, max_x = np.min(minor_ticks_x), np.max(minor_ticks_x)<EOL>min_y, max_y = np.min(minor_ticks_y), np.max(minor_ticks_y)<EOL>for tick in minor_ticks_x:<EOL><INDENT>axes[ch].plot([tick, tick], [min_y, max_y], c='<STR_LIT>',<EOL>marker='<STR_LIT:|>', zorder=<NUM_LIT:1>)<EOL><DEDENT>for tick in major_ticks_x:<EOL><INDENT>axes[ch].plot([tick, tick], [min_y, max_y], c='<STR_LIT>',<EOL>marker='<STR_LIT:|>', zorder=<NUM_LIT:2>)<EOL><DEDENT>for tick in minor_ticks_y:<EOL><INDENT>axes[ch].plot([min_x, max_x], [tick, tick], c='<STR_LIT>',<EOL>marker='<STR_LIT:_>', zorder=<NUM_LIT:1>)<EOL><DEDENT>for tick in major_ticks_y:<EOL><INDENT>axes[ch].plot([min_x, max_x], [tick, tick], c='<STR_LIT>',<EOL>marker='<STR_LIT:_>', zorder=<NUM_LIT:2>)<EOL><DEDENT>axes[ch].set_xlim(auto_xlims)<EOL>axes[ch].set_ylim(auto_ylims)<EOL><DEDENT>
Add ecg grids to the axes
f10216:m5
def calc_ecg_grids(minsig, maxsig, sig_units, fs, maxt, time_units):
<EOL>if time_units == '<STR_LIT>':<EOL><INDENT>majorx = <NUM_LIT> * fs<EOL>minorx = <NUM_LIT> * fs<EOL><DEDENT>elif time_units == '<STR_LIT>':<EOL><INDENT>majorx = <NUM_LIT><EOL>minorx = <NUM_LIT><EOL><DEDENT>elif time_units == '<STR_LIT>':<EOL><INDENT>majorx = <NUM_LIT> / <NUM_LIT><EOL>minorx = <NUM_LIT>/<NUM_LIT><EOL><DEDENT>elif time_units == '<STR_LIT>':<EOL><INDENT>majorx = <NUM_LIT> / <NUM_LIT><EOL>minorx = <NUM_LIT> / <NUM_LIT><EOL><DEDENT>if sig_units.lower()=='<STR_LIT>':<EOL><INDENT>majory = <NUM_LIT><EOL>minory = <NUM_LIT><EOL><DEDENT>elif sig_units.lower()=='<STR_LIT>':<EOL><INDENT>majory = <NUM_LIT:0.5><EOL>minory = <NUM_LIT><EOL><DEDENT>elif sig_units.lower()=='<STR_LIT:v>':<EOL><INDENT>majory = <NUM_LIT><EOL>minory = <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>major_ticks_x = np.arange(<NUM_LIT:0>, upround(maxt, majorx) + <NUM_LIT>, majorx)<EOL>minor_ticks_x = np.arange(<NUM_LIT:0>, upround(maxt, majorx) + <NUM_LIT>, minorx)<EOL>major_ticks_y = np.arange(downround(minsig, majory),<EOL>upround(maxsig, majory) + <NUM_LIT>, majory)<EOL>minor_ticks_y = np.arange(downround(minsig, majory),<EOL>upround(maxsig, majory) + <NUM_LIT>, minory)<EOL>return (major_ticks_x, minor_ticks_x, major_ticks_y, minor_ticks_y)<EOL>
Calculate tick intervals for ecg grids. Major gridlines are placed every 0.2s (5mm) in time and every 0.5mV in voltage; minor gridlines every 0.04s (1mm) and 0.125mV. On standard ecg paper, 10mm corresponds to 1mV.
f10216:m6
def label_figure(axes, n_subplots, time_units, sig_name, sig_units, ylabel,<EOL>title):
if title:<EOL><INDENT>axes[<NUM_LIT:0>].set_title(title)<EOL><DEDENT>if not ylabel:<EOL><INDENT>ylabel = []<EOL>if not sig_name:<EOL><INDENT>sig_name = ['<STR_LIT>'+str(i) for i in range(n_subplots)]<EOL><DEDENT>if not sig_units:<EOL><INDENT>sig_units = n_subplots * ['<STR_LIT>']<EOL><DEDENT>ylabel = ['<STR_LIT:/>'.join(pair) for pair in zip(sig_name, sig_units)]<EOL>n_missing_labels = n_subplots - len(ylabel)<EOL>if n_missing_labels:<EOL><INDENT>ylabel = ylabel + ['<STR_LIT>' % i for i in range(len(ylabel),<EOL>n_subplots)]<EOL><DEDENT><DEDENT>for ch in range(n_subplots):<EOL><INDENT>axes[ch].set_ylabel(ylabel[ch])<EOL><DEDENT>axes[-<NUM_LIT:1>].set_xlabel('<STR_LIT:/>'.join(['<STR_LIT:time>', time_units[:-<NUM_LIT:1>]]))<EOL>
Add title and axes labels
f10216:m7
def plot_wfdb(record=None, annotation=None, plot_sym=False,<EOL>time_units='<STR_LIT>', title=None, sig_style=['<STR_LIT>'],<EOL>ann_style=['<STR_LIT>'], ecg_grids=[], figsize=None, return_fig=False):
(signal, ann_samp, ann_sym, fs,<EOL>ylabel, record_name) = get_wfdb_plot_items(record=record,<EOL>annotation=annotation,<EOL>plot_sym=plot_sym)<EOL>return plot_items(signal=signal, ann_samp=ann_samp, ann_sym=ann_sym, fs=fs,<EOL>time_units=time_units, ylabel=ylabel,<EOL>title=(title or record_name),<EOL>sig_style=sig_style,<EOL>ann_style=ann_style, ecg_grids=ecg_grids,<EOL>figsize=figsize, return_fig=return_fig)<EOL>
Subplot individual channels of a wfdb record and/or annotation. This function implements the base functionality of the `plot_items` function, while allowing direct input of wfdb objects. If the record object is input, the function will extract from it: - signal values, from the `p_signal` (priority) or `d_signal` attribute - sampling frequency, from the `fs` attribute - signal names, from the `sig_name` attribute - signal units, from the `units` attribute If the annotation object is input, the function will extract from it: - sample locations, from the `sample` attribute - symbols, from the `symbol` attribute - the annotation channels, from the `chan` attribute - the sampling frequency, from the `fs` attribute if present, and if fs was not already extracted from the `record` argument. Parameters ---------- record : wfdb Record, optional The Record object to be plotted annotation : wfdb Annotation, optional The Annotation object to be plotted plot_sym : bool, optional Whether to plot the annotation symbols on the graph. time_units : str, optional The x axis unit. Allowed options are: 'samples', 'seconds', 'minutes', and 'hours'. title : str, optional The title of the graph. sig_style : list, optional A list of strings, specifying the style of the matplotlib plot for each signal channel. The list length should match the number of signal channels. If the list has a length of 1, the style will be used for all channels. ann_style : list, optional A list of strings, specifying the style of the matplotlib plot for each annotation channel. The list length should match the number of annotation channels. If the list has a length of 1, the style will be used for all channels. ecg_grids : list, optional A list of integers specifying channels in which to plot ecg grids. May also be set to 'all' for all channels. Major grids at 0.5mV, and minor grids at 0.125mV. All channels to be plotted with grids must have `sig_units` equal to 'uV', 'mV', or 'V'. figsize : tuple, optional Tuple pair specifying the width and height of the figure. It is the 'figsize' argument passed into matplotlib.pyplot's `figure` function. return_fig : bool, optional Whether the figure is to be returned as an output argument. Returns ------- figure : matplotlib figure, optional The matplotlib figure generated. Only returned if the 'return_fig' option is set to True. Examples -------- >>> record = wfdb.rdrecord('sample-data/100', sampto=3000) >>> annotation = wfdb.rdann('sample-data/100', 'atr', sampto=3000) >>> wfdb.plot_wfdb(record=record, annotation=annotation, plot_sym=True, time_units='seconds', title='MIT-BIH Record 100', figsize=(10,4), ecg_grids='all')
f10216:m8
def get_wfdb_plot_items(record, annotation, plot_sym):
<EOL>if record:<EOL><INDENT>if record.p_signal is not None:<EOL><INDENT>signal = record.p_signal<EOL><DEDENT>elif record.d_signal is not None:<EOL><INDENT>signal = record.d_signal<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>fs = record.fs<EOL>sig_name = record.sig_name<EOL>sig_units = record.units<EOL>record_name = '<STR_LIT>' % record.record_name<EOL>ylabel = ['<STR_LIT:/>'.join(pair) for pair in zip(sig_name, sig_units)]<EOL><DEDENT>else:<EOL><INDENT>signal = fs = ylabel = record_name = None<EOL><DEDENT>if annotation:<EOL><INDENT>ann_chans = set(annotation.chan)<EOL>n_ann_chans = max(ann_chans) + <NUM_LIT:1><EOL>chan_inds = n_ann_chans * [np.empty(<NUM_LIT:0>, dtype='<STR_LIT:int>')]<EOL>for chan in ann_chans:<EOL><INDENT>chan_inds[chan] = np.where(annotation.chan == chan)[<NUM_LIT:0>]<EOL><DEDENT>ann_samp = [annotation.sample[ci] for ci in chan_inds]<EOL>if plot_sym:<EOL><INDENT>ann_sym = n_ann_chans * [None]<EOL>for ch in ann_chans:<EOL><INDENT>ann_sym[ch] = [annotation.symbol[ci] for ci in chan_inds[ch]]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>ann_sym = None<EOL><DEDENT>if fs is None:<EOL><INDENT>fs = annotation.fs<EOL><DEDENT>record_name = record_name or annotation.record_name<EOL><DEDENT>else:<EOL><INDENT>ann_samp = None<EOL>ann_sym = None<EOL><DEDENT>if record and annotation:<EOL><INDENT>sig_chans = set(range(signal.shape[<NUM_LIT:1>]))<EOL>all_chans = sorted(sig_chans.union(ann_chans))<EOL>if sig_chans != set(all_chans):<EOL><INDENT>compact_ann_samp = []<EOL>if plot_sym:<EOL><INDENT>compact_ann_sym = []<EOL><DEDENT>else:<EOL><INDENT>compact_ann_sym = None<EOL><DEDENT>ylabel = []<EOL>for ch in all_chans:<EOL><INDENT>if ch in ann_chans:<EOL><INDENT>compact_ann_samp.append(ann_samp[ch])<EOL>if plot_sym:<EOL><INDENT>compact_ann_sym.append(ann_sym[ch])<EOL><DEDENT><DEDENT>if ch in sig_chans:<EOL><INDENT>ylabel.append('<STR_LIT>'.join([sig_name[ch], sig_units[ch]]))<EOL><DEDENT>else:<EOL><INDENT>ylabel.append('<STR_LIT>' % ch)<EOL><DEDENT><DEDENT>ann_samp = compact_ann_samp<EOL>ann_sym = compact_ann_sym<EOL><DEDENT>else:<EOL><INDENT>ylabel = ['<STR_LIT:/>'.join(pair) for pair in zip(sig_name, sig_units)]<EOL><DEDENT><DEDENT>elif annotation:<EOL><INDENT>ann_samp = [a for a in ann_samp if a.size]<EOL>if ann_sym is not None:<EOL><INDENT>ann_sym = [a for a in ann_sym if a]<EOL><DEDENT>ylabel = ['<STR_LIT>' % ch for ch in ann_chans]<EOL><DEDENT>return signal, ann_samp, ann_sym, fs, ylabel, record_name<EOL>
Get items to plot from wfdb objects
f10216:m9
def plot_all_records(directory='<STR_LIT>'):
directory = directory or os.getcwd()<EOL>headers = [f for f in os.listdir(directory) if os.path.isfile(<EOL>os.path.join(directory, f))]<EOL>headers = [f for f in headers if f.endswith('<STR_LIT>')]<EOL>records = [h.split('<STR_LIT>')[<NUM_LIT:0>] for h in headers]<EOL>records.sort()<EOL>for record_name in records:<EOL><INDENT>record = rdrecord(os.path.join(directory, record_name))<EOL>plot_wfdb(record, title='<STR_LIT>' % record.record_name)<EOL>input('<STR_LIT>')<EOL><DEDENT>
Plot all wfdb records in a directory (by finding header files), one at a time, until the 'enter' key is pressed. Parameters ---------- directory : str, optional The directory in which to search for WFDB records. Defaults to current working directory.
f10216:m10
def find_peaks(sig):
if len(sig) == <NUM_LIT:0>:<EOL><INDENT>return np.empty([<NUM_LIT:0>]), np.empty([<NUM_LIT:0>])<EOL><DEDENT>tmp = sig[<NUM_LIT:1>:]<EOL>tmp = np.append(tmp, [sig[-<NUM_LIT:1>]])<EOL>tmp = sig - tmp<EOL>tmp[np.where(tmp><NUM_LIT:0>)] = <NUM_LIT:1><EOL>tmp[np.where(tmp==<NUM_LIT:0>)] = <NUM_LIT:0><EOL>tmp[np.where(tmp<<NUM_LIT:0>)] = -<NUM_LIT:1><EOL>tmp2 = tmp[<NUM_LIT:1>:]<EOL>tmp2 = np.append(tmp2, [<NUM_LIT:0>])<EOL>tmp = tmp-tmp2<EOL>hard_peaks = np.where(np.logical_or(tmp==-<NUM_LIT:2>, tmp==+<NUM_LIT:2>))[<NUM_LIT:0>] + <NUM_LIT:1><EOL>soft_peaks = []<EOL>for iv in np.where(np.logical_or(tmp==-<NUM_LIT:1>,tmp==+<NUM_LIT:1>))[<NUM_LIT:0>]:<EOL><INDENT>t = tmp[iv]<EOL>i = iv+<NUM_LIT:1><EOL>while True:<EOL><INDENT>if i==len(tmp) or tmp[i] == -t or tmp[i] == -<NUM_LIT:2> or tmp[i] == <NUM_LIT:2>:<EOL><INDENT>break<EOL><DEDENT>if tmp[i] == t:<EOL><INDENT>soft_peaks.append(int(iv + (i - iv)/<NUM_LIT:2>))<EOL>break<EOL><DEDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>soft_peaks = np.array(soft_peaks, dtype='<STR_LIT:int>') + <NUM_LIT:1><EOL>return hard_peaks, soft_peaks<EOL>
Find hard peaks and soft peaks in a signal, defined as follows: - Hard peak: a peak that is either /\ or \/ - Soft peak: a peak that is either /-*\ or \-*/ In the soft case, the middle of the plateau is defined as the peak Parameters ---------- sig : numpy array The 1d signal array Returns ------- hard_peaks : numpy array Array containing the indices of the hard peaks. soft_peaks : numpy array Array containing the indices of the soft peaks.
f10219:m0
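A small worked example of the definitions above, traced by hand from the code: index 1 is a hard peak (/\), index 2 a hard valley (\/), and the plateau at indices 3-4 yields a soft peak at its middle:
>>> import numpy as np
>>> from wfdb.processing import find_peaks
>>> hard_peaks, soft_peaks = find_peaks(np.array([0, 2, 0, 1, 1, 0]))
>>> hard_peaks
array([1, 2])
>>> soft_peaks
array([3])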
def find_local_peaks(sig, radius):
<EOL>if np.min(sig) == np.max(sig):<EOL><INDENT>return np.empty(<NUM_LIT:0>)<EOL><DEDENT>peak_inds = []<EOL>i = <NUM_LIT:0><EOL>while i < radius + <NUM_LIT:1>:<EOL><INDENT>if sig[i] == max(sig[:i + radius]):<EOL><INDENT>peak_inds.append(i)<EOL>i += radius<EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>while i < len(sig) - radius:<EOL><INDENT>if sig[i] == max(sig[i - radius:i + radius]):<EOL><INDENT>peak_inds.append(i)<EOL>i += radius<EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>while i < len(sig):<EOL><INDENT>if sig[i] == max(sig[i - radius:]):<EOL><INDENT>peak_inds.append(i)<EOL>i += radius<EOL><DEDENT>else:<EOL><INDENT>i += <NUM_LIT:1><EOL><DEDENT><DEDENT>return (np.array(peak_inds))<EOL>
Find all local peaks in a signal. A sample is a local peak if it is the largest value within the <radius> samples on its left and right. In cases where it shares the max value with nearby samples, the middle sample is classified as the local peak. Parameters ---------- sig : numpy array 1d numpy array of the signal. radius : int The radius in which to search for defining local maxima. Returns ------- peak_inds : numpy array Array containing the indices of the local peaks.
f10219:m1
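A quick sketch: with radius=2, only samples that dominate their two-sample neighborhood on each side are returned:
>>> import numpy as np
>>> from wfdb.processing import find_local_peaks
>>> find_local_peaks(np.array([0, 1, 0, 0, 2, 0, 0]), radius=2)
array([1, 4])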
def correct_peaks(sig, peak_inds, search_radius, smooth_window_size,<EOL>peak_dir='<STR_LIT>'):
sig_len = sig.shape[<NUM_LIT:0>]<EOL>n_peaks = len(peak_inds)<EOL>sig = sig - smooth(sig=sig, window_size=smooth_window_size)<EOL>if peak_dir == '<STR_LIT>':<EOL><INDENT>shifted_peak_inds = shift_peaks(sig=sig,<EOL>peak_inds=peak_inds,<EOL>search_radius=search_radius,<EOL>peak_up=True)<EOL><DEDENT>elif peak_dir == '<STR_LIT>':<EOL><INDENT>shifted_peak_inds = shift_peaks(sig=sig,<EOL>peak_inds=peak_inds,<EOL>search_radius=search_radius,<EOL>peak_up=False)<EOL><DEDENT>elif peak_dir == '<STR_LIT>':<EOL><INDENT>shifted_peak_inds = shift_peaks(sig=np.abs(sig),<EOL>peak_inds=peak_inds,<EOL>search_radius=search_radius,<EOL>peak_up=True)<EOL><DEDENT>else:<EOL><INDENT>shifted_peak_inds_up = shift_peaks(sig=sig,<EOL>peak_inds=peak_inds,<EOL>search_radius=search_radius,<EOL>peak_up=True)<EOL>shifted_peak_inds_down = shift_peaks(sig=sig,<EOL>peak_inds=peak_inds,<EOL>search_radius=search_radius,<EOL>peak_up=False)<EOL>up_dist = np.mean(np.abs(sig[shifted_peak_inds_up]))<EOL>down_dist = np.mean(np.abs(sig[shifted_peak_inds_down]))<EOL>if up_dist >= down_dist:<EOL><INDENT>shifted_peak_inds = shifted_peak_inds_up<EOL><DEDENT>else:<EOL><INDENT>shifted_peak_inds = shifted_peak_inds_down<EOL><DEDENT><DEDENT>return shifted_peak_inds<EOL>
Adjust a set of detected peaks to coincide with local signal maxima or minima. Parameters ---------- sig : numpy array The 1d signal array peak_inds : numpy array Array of the original peak indices search_radius : int The radius within which the original peaks may be shifted. smooth_window_size : int The window size of the moving average filter applied on the signal. Peak distance is calculated on the difference between the original and smoothed signal. peak_dir : str, optional The expected peak direction: 'up', 'down', 'both', or 'compare'. - If 'up', the peaks will be shifted to local maxima - If 'down', the peaks will be shifted to local minima - If 'both', the peaks will be shifted to local maxima of the rectified signal - If 'compare', the function will try both 'up' and 'down' options, and choose the direction that gives the largest mean distance from the smoothed signal. Returns ------- corrected_peak_inds : numpy array Array of the corrected peak indices
f10219:m2
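A hedged usage sketch for correct_peaks; the detector call and parameter values are illustrative choices, not recommendations from this library:
>>> import wfdb
>>> from wfdb import processing
>>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
>>> qrs_inds = processing.xqrs_detect(sig=sig[:,0], fs=fields['fs'])
>>> corrected = processing.correct_peaks(sig[:,0], peak_inds=qrs_inds,
...     search_radius=int(0.1 * fields['fs']), smooth_window_size=150,
...     peak_dir='compare')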
def shift_peaks(sig, peak_inds, search_radius, peak_up):
sig_len = sig.shape[<NUM_LIT:0>]<EOL>n_peaks = len(peak_inds)<EOL>shift_inds = np.zeros(n_peaks, dtype='<STR_LIT:int>')<EOL>for i in range(n_peaks):<EOL><INDENT>ind = peak_inds[i]<EOL>local_sig = sig[max(<NUM_LIT:0>, ind - search_radius):min(ind + search_radius, sig_len-<NUM_LIT:1>)]<EOL>if peak_up:<EOL><INDENT>shift_inds[i] = np.argmax(local_sig)<EOL><DEDENT>else:<EOL><INDENT>shift_inds[i] = np.argmin(local_sig)<EOL><DEDENT><DEDENT>for i in range(n_peaks):<EOL><INDENT>ind = peak_inds[i]<EOL>if ind >= search_radius:<EOL><INDENT>break<EOL><DEDENT>shift_inds[i] -= search_radius - ind<EOL><DEDENT>shifted_peak_inds = peak_inds + shift_inds - search_radius<EOL>return shifted_peak_inds<EOL>
Helper function for correct_peaks. Shift peaks to the local maxima or minima within a radius. Parameters ---------- sig : numpy array The 1d signal array peak_inds : numpy array Array of the original peak indices search_radius : int The radius within which the peaks may be shifted. peak_up : bool Whether the expected peak direction is up Returns ------- shifted_peak_inds : numpy array Array of the shifted peak indices
f10219:m3
def compute_hr(sig_len, qrs_inds, fs):
heart_rate = np.full(sig_len, np.nan, dtype='<STR_LIT>')<EOL>if len(qrs_inds) < <NUM_LIT:2>:<EOL><INDENT>return heart_rate<EOL><DEDENT>for i in range(<NUM_LIT:0>, len(qrs_inds)-<NUM_LIT:2>):<EOL><INDENT>a = qrs_inds[i]<EOL>b = qrs_inds[i+<NUM_LIT:1>]<EOL>c = qrs_inds[i+<NUM_LIT:2>]<EOL>rr = (b-a) * (<NUM_LIT:1.0> / fs) * <NUM_LIT:1000><EOL>hr = <NUM_LIT> / rr<EOL>heart_rate[b+<NUM_LIT:1>:c+<NUM_LIT:1>] = hr<EOL><DEDENT>heart_rate[qrs_inds[-<NUM_LIT:1>]:] = heart_rate[qrs_inds[-<NUM_LIT:1>]]<EOL>return heart_rate<EOL>
Compute instantaneous heart rate from peak indices. Parameters ---------- sig_len : int The length of the corresponding signal qrs_inds : numpy array The qrs index locations fs : int, or float The corresponding signal's sampling frequency. Returns ------- heart_rate : numpy array An array of the instantaneous heart rate, with the length of the corresponding signal. Contains numpy.nan where heart rate could not be computed.
f10220:m0
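A worked sketch, assuming the masked constants above are the usual millisecond conversion factors (1000 and 60000): with fs=4 and beats 4 samples apart, each rr interval is 1 s, so the rate is 60 bpm, and samples up to just after the second beat stay nan:
>>> import numpy as np
>>> from wfdb.processing import compute_hr
>>> compute_hr(sig_len=10, qrs_inds=np.array([0, 4, 8]), fs=4)
array([nan, nan, nan, nan, nan, 60., 60., 60., 60., 60.])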
def calc_rr(qrs_locs, fs=None, min_rr=None, max_rr=None, qrs_units='<STR_LIT>',<EOL>rr_units='<STR_LIT>'):
rr = np.diff(qrs_locs)<EOL>if not len(rr):<EOL><INDENT>return rr<EOL><DEDENT>if qrs_units == '<STR_LIT>' and rr_units == '<STR_LIT>':<EOL><INDENT>rr = rr / fs<EOL><DEDENT>elif qrs_units == '<STR_LIT>' and rr_units == '<STR_LIT>':<EOL><INDENT>rr = rr * fs<EOL><DEDENT>if min_rr is not None:<EOL><INDENT>rr = rr[rr > min_rr]<EOL><DEDENT>if max_rr is not None:<EOL><INDENT>rr = rr[rr < max_rr]<EOL><DEDENT>return rr<EOL>
Compute rr intervals from qrs indices by extracting the time differences. Parameters ---------- qrs_locs : numpy array 1d array of qrs locations. fs : float, optional Sampling frequency of the original signal. Needed if `qrs_units` does not match `rr_units`. min_rr : float, optional The minimum allowed rr interval. Values below this are excluded from the returned rr intervals. Units are in `rr_units`. max_rr : float, optional The maximum allowed rr interval. Values above this are excluded from the returned rr intervals. Units are in `rr_units`. qrs_units : str, optional The time unit of `qrs_locs`. Must be one of: 'samples', 'seconds'. rr_units : str, optional The desired time unit of the returned rr intervals. Must be one of: 'samples', 'seconds'. Returns ------- rr : numpy array Array of rr intervals.
f10220:m1
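A worked sketch of the unit conversion and interval filtering:
>>> import numpy as np
>>> from wfdb.processing import calc_rr
>>> calc_rr(np.array([0, 100, 200, 350]), fs=100, qrs_units='samples',
...         rr_units='seconds')
array([1. , 1. , 1.5])
>>> calc_rr(np.array([0, 100, 200, 350]), fs=100, max_rr=1.2,
...         qrs_units='samples', rr_units='seconds')
array([1., 1.])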
def calc_mean_hr(rr, fs=None, min_rr=None, max_rr=None, rr_units='<STR_LIT>'):
if not len(rr):<EOL><INDENT>return <NUM_LIT:0><EOL><DEDENT>if min_rr is not None:<EOL><INDENT>rr = rr[rr > min_rr]<EOL><DEDENT>if max_rr is not None:<EOL><INDENT>rr = rr[rr < max_rr]<EOL><DEDENT>mean_rr = np.mean(rr)<EOL>mean_hr = <NUM_LIT> / mean_rr<EOL>if rr_units == '<STR_LIT>':<EOL><INDENT>mean_hr = mean_hr * fs<EOL><DEDENT>return mean_hr<EOL>
Compute mean heart rate in beats per minute, from a set of rr intervals. Returns 0 if rr is empty. Parameters ---------- rr : numpy array Array of rr intervals. fs : int, or float The corresponding signal's sampling frequency. Required if `rr_units` == 'samples'. min_rr : float, optional The minimum allowed rr interval. Values below this are excluded when calculating the heart rate. Units are in `rr_units`. max_rr : float, optional The maximum allowed rr interval. Values above this are excluded when calculating the heart rate. Units are in `rr_units`. rr_units : str, optional The time units of the input rr intervals. Must be one of: 'samples', 'seconds'. Returns ------- mean_hr : float The mean heart rate in beats per minute
f10220:m2
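A worked sketch, assuming the masked constant above is 60 (seconds per minute): rr intervals of 1 s correspond to 60 bpm, and the fs multiplication undoes a samples-based mean:
>>> import numpy as np
>>> from wfdb.processing import calc_mean_hr
>>> calc_mean_hr(np.array([1.0, 1.0]), rr_units='seconds')
60.0
>>> calc_mean_hr(np.array([100, 100]), fs=100, rr_units='samples')
60.0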
def resample_ann(resampled_t, ann_sample):
tmp = np.zeros(len(resampled_t), dtype='<STR_LIT>')<EOL>j = <NUM_LIT:0><EOL>tprec = resampled_t[j]<EOL>for i, v in enumerate(ann_sample):<EOL><INDENT>while True:<EOL><INDENT>d = False<EOL>if v < tprec:<EOL><INDENT>j -= <NUM_LIT:1><EOL>tprec = resampled_t[j]<EOL><DEDENT>if j+<NUM_LIT:1> == len(resampled_t):<EOL><INDENT>tmp[j] += <NUM_LIT:1><EOL>break<EOL><DEDENT>tnow = resampled_t[j+<NUM_LIT:1>]<EOL>if tprec <= v and v <= tnow:<EOL><INDENT>if v-tprec < tnow-v:<EOL><INDENT>tmp[j] += <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>tmp[j+<NUM_LIT:1>] += <NUM_LIT:1><EOL><DEDENT>d = True<EOL><DEDENT>j += <NUM_LIT:1><EOL>tprec = tnow<EOL>if d:<EOL><INDENT>break<EOL><DEDENT><DEDENT><DEDENT>idx = np.where(tmp><NUM_LIT:0>)[<NUM_LIT:0>].astype('<STR_LIT>')<EOL>res = []<EOL>for i in idx:<EOL><INDENT>for j in range(tmp[i]):<EOL><INDENT>res.append(i)<EOL><DEDENT><DEDENT>assert len(res) == len(ann_sample)<EOL>return np.asarray(res, dtype='<STR_LIT>')<EOL>
Compute the new annotation indices Parameters ---------- resampled_t : numpy array Array of signal locations as returned by scipy.signal.resample ann_sample : numpy array Array of annotation locations Returns ------- resampled_ann_sample : numpy array Array of resampled annotation locations
f10221:m0
def resample_sig(x, fs, fs_target):
t = np.arange(x.shape[<NUM_LIT:0>]).astype('<STR_LIT>')<EOL>if fs == fs_target:<EOL><INDENT>return x, t<EOL><DEDENT>new_length = int(x.shape[<NUM_LIT:0>]*fs_target/fs)<EOL>resampled_x, resampled_t = signal.resample(x, num=new_length, t=t)<EOL>assert resampled_x.shape == resampled_t.shape and resampled_x.shape[<NUM_LIT:0>] == new_length<EOL>assert np.all(np.diff(resampled_t) > <NUM_LIT:0>)<EOL>return resampled_x, resampled_t<EOL>
Resample a signal to a different frequency. Parameters ---------- x : numpy array Array containing the signal fs : int, or float The original sampling frequency fs_target : int, or float The target frequency Returns ------- resampled_x : numpy array Array of the resampled signal values resampled_t : numpy array Array of the resampled signal locations
f10221:m1
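A quick sketch: downsampling one second of a 360 Hz signal to 90 Hz yields 90 samples and a matching time vector:
>>> import numpy as np
>>> from wfdb.processing import resample_sig
>>> x = np.sin(2 * np.pi * np.arange(360) / 360)
>>> y, t = resample_sig(x, fs=360, fs_target=90)
>>> y.shape, t.shape
((90,), (90,))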
def resample_singlechan(x, ann, fs, fs_target):
resampled_x, resampled_t = resample_sig(x, fs, fs_target)<EOL>new_sample = resample_ann(resampled_t, ann.sample)<EOL>assert ann.sample.shape == new_sample.shape<EOL>resampled_ann = Annotation(record_name=ann.record_name,<EOL>extension=ann.extension,<EOL>sample=new_sample,<EOL>symbol=ann.symbol,<EOL>subtype=ann.subtype,<EOL>chan=ann.chan,<EOL>num=ann.num,<EOL>aux_note=ann.aux_note,<EOL>fs=fs_target)<EOL>return resampled_x, resampled_ann<EOL>
Resample a single-channel signal with its annotations Parameters ---------- x: numpy array The signal array ann : wfdb Annotation The wfdb annotation object fs : int, or float The original frequency fs_target : int, or float The target frequency Returns ------- resampled_x : numpy array Array of the resampled signal values resampled_ann : wfdb Annotation Annotation containing resampled annotation locations
f10221:m2
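A hedged sketch of resampling a record together with its annotations, reusing the sample paths that appear in the other docstrings; the target frequency is an arbitrary illustrative choice:
>>> import wfdb
>>> from wfdb.processing import resample_singlechan
>>> record = wfdb.rdrecord('sample-data/100', channels=[0])
>>> ann = wfdb.rdann('sample-data/100', 'atr')
>>> x, resampled_ann = resample_singlechan(record.p_signal[:,0], ann,
...     fs=record.fs, fs_target=128)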
def resample_multichan(xs, ann, fs, fs_target, resamp_ann_chan=<NUM_LIT:0>):
assert resamp_ann_chan < xs.shape[<NUM_LIT:1>]<EOL>lx = []<EOL>lt = None<EOL>for chan in range(xs.shape[<NUM_LIT:1>]):<EOL><INDENT>resampled_x, resampled_t = resample_sig(xs[:, chan], fs, fs_target)<EOL>lx.append(resampled_x)<EOL>if chan == resamp_ann_chan:<EOL><INDENT>lt = resampled_t<EOL><DEDENT><DEDENT>new_sample = resample_ann(lt, ann.sample)<EOL>assert ann.sample.shape == new_sample.shape<EOL>resampled_ann = Annotation(record_name=ann.record_name,<EOL>extension=ann.extension,<EOL>sample=new_sample,<EOL>symbol=ann.symbol,<EOL>subtype=ann.subtype,<EOL>chan=ann.chan,<EOL>num=ann.num,<EOL>aux_note=ann.aux_note,<EOL>fs=fs_target)<EOL>return np.column_stack(lx), resampled_ann<EOL>
Resample multiple channels with their annotations Parameters ---------- xs: numpy array The signal array ann : wfdb Annotation The wfdb annotation object fs : int, or float The original frequency fs_target : int, or float The target frequency resamp_ann_chan : int, optional The signal channel used to compute new annotation indices Returns ------- resampled_xs : numpy array Array of the resampled signal values resampled_ann : wfdb Annotation Annotation containing resampled annotation locations
f10221:m3
def normalize_bound(sig, lb=<NUM_LIT:0>, ub=<NUM_LIT:1>):
mid = ub - (ub - lb) / <NUM_LIT:2><EOL>min_v = np.min(sig)<EOL>max_v = np.max(sig)<EOL>mid_v = max_v - (max_v - min_v) / <NUM_LIT:2><EOL>coef = (ub - lb) / (max_v - min_v)<EOL>return sig * coef - (mid_v * coef) + mid<EOL>
Normalize a signal between the lower and upper bound Parameters ---------- sig : numpy array Original signal to be normalized lb : int, or float Lower bound ub : int, or float Upper bound Returns ------- x_normalized : numpy array Normalized signal
f10221:m4
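A worked sketch of the linear rescaling (note the code divides by max - min, so a constant signal is not handled):
>>> import numpy as np
>>> from wfdb.processing import normalize_bound
>>> normalize_bound(np.array([0., 5., 10.]), lb=0, ub=1)
array([0. , 0.5, 1. ])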
def smooth(sig, window_size):
box = np.ones(window_size)/window_size<EOL>return np.convolve(sig, box, mode='<STR_LIT>')<EOL>
Apply a uniform moving average filter to a signal Parameters ---------- sig : numpy array The signal to smooth. window_size : int The width of the moving average filter. Returns ------- smoothed_sig : numpy array The smoothed signal, the same length as the input.
f10221:m5
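A worked sketch: np.convolve with mode='same' zero-pads, so samples near the edges are attenuated:
>>> import numpy as np
>>> from wfdb.processing import smooth
>>> smooth(np.array([1., 2., 3., 4., 5.]), window_size=3)
array([1., 2., 3., 4., 3.])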
def get_filter_gain(b, a, f_gain, fs):
<EOL>w, h = signal.freqz(b, a)<EOL>w_gain = f_gain * <NUM_LIT:2> * np.pi / fs<EOL>ind = np.where(w >= w_gain)[<NUM_LIT:0>][<NUM_LIT:0>]<EOL>gain = abs(h[ind])<EOL>return gain<EOL>
Given filter coefficients, return the gain at a particular frequency. Parameters ---------- b : list List of linear filter b coefficients a : list List of linear filter a coefficients f_gain : int or float The frequency at which to calculate the gain fs : int or float The sampling frequency of the system Returns ------- gain : float The filter gain at the specified frequency
f10221:m6
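A sketch mirroring how the bandpass stage later in this file uses this helper: build a 5-20 Hz butterworth filter for a 360 Hz signal and query the gain near the passband center (the exact value depends on the filter, but should be close to 1):
>>> from scipy import signal
>>> b, a = signal.butter(2, [2 * 5 / 360, 2 * 20 / 360], 'bandpass')
>>> gain = get_filter_gain(b, a, f_gain=12.5, fs=360)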
def benchmark_mitdb(detector, verbose=False, print_results=False):
record_list = get_record_list('<STR_LIT>')<EOL>n_records = len(record_list)<EOL>args = zip(record_list, n_records * [detector], n_records * [verbose])<EOL>with Pool(cpu_count() - <NUM_LIT:1>) as p:<EOL><INDENT>comparitors = p.starmap(benchmark_mitdb_record, args)<EOL><DEDENT>specificity = np.mean([c.specificity for c in comparitors])<EOL>positive_predictivity = np.mean(<EOL>[c.positive_predictivity for c in comparitors])<EOL>false_positive_rate = np.mean(<EOL>[c.false_positive_rate for c in comparitors])<EOL>comparitors = dict(zip(record_list, comparitors))<EOL>print('<STR_LIT>')<EOL>if print_results:<EOL><INDENT>print('<STR_LIT>'<EOL>% (specificity, positive_predictivity, false_positive_rate))<EOL>for record_name in record_list:<EOL><INDENT>print('<STR_LIT>' % record_name)<EOL>comparitors[record_name].print_summary()<EOL>print('<STR_LIT>')<EOL><DEDENT><DEDENT>return comparitors, specificity, positive_predictivity, false_positive_rate<EOL>
Benchmark a qrs detector against mitdb's records. Parameters ---------- detector : function The detector function. verbose : bool, optional The verbose option of the detector function. print_results : bool, optional Whether to print the overall performance, and the results for each record. Returns ------- comparitors : dictionary Dictionary of Comparitor objects run on the records, keyed on the record names. specificity : float Aggregate specificity. positive_predictivity : float Aggregate positive_predictivity. false_positive_rate : float Aggregate false_positive_rate. Notes ----- TODO: - remove non-qrs detections from reference annotations - allow kwargs Examples -------- >>> import wfdb >>> from wfdb.processing import benchmark_mitdb, xqrs_detect >>> comparitors, spec, pp, fpr = benchmark_mitdb(xqrs_detect)
f10223:m1
def benchmark_mitdb_record(rec, detector, verbose):
sig, fields = rdsamp(rec, pb_dir='<STR_LIT>', channels=[<NUM_LIT:0>])<EOL>ann_ref = rdann(rec, pb_dir='<STR_LIT>', extension='<STR_LIT>')<EOL>qrs_inds = detector(sig=sig[:,<NUM_LIT:0>], fs=fields['<STR_LIT>'], verbose=verbose)<EOL>comparitor = compare_annotations(ref_sample=ann_ref.sample[<NUM_LIT:1>:],<EOL>test_sample=qrs_inds,<EOL>window_width=int(<NUM_LIT:0.1> * fields['<STR_LIT>']))<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>' % rec)<EOL><DEDENT>return comparitor<EOL>
Benchmark a single mitdb record
f10223:m2
def _calc_stats(self):
<EOL>self.matched_ref_inds = np.where(self.matching_sample_nums != -<NUM_LIT:1>)[<NUM_LIT:0>]<EOL>self.unmatched_ref_inds = np.where(self.matching_sample_nums == -<NUM_LIT:1>)[<NUM_LIT:0>]<EOL>self.matched_test_inds = self.matching_sample_nums[<EOL>self.matching_sample_nums != -<NUM_LIT:1>]<EOL>self.unmatched_test_inds = np.setdiff1d(np.array(range(self.n_test)),<EOL>self.matched_test_inds, assume_unique=True)<EOL>self.matched_ref_sample = self.ref_sample[self.matched_ref_inds]<EOL>self.unmatched_ref_sample = self.ref_sample[self.unmatched_ref_inds]<EOL>self.matched_test_sample = self.test_sample[self.matched_test_inds]<EOL>self.unmatched_test_sample = self.test_sample[self.unmatched_test_inds]<EOL>self.tp = len(self.matched_ref_inds)<EOL>self.fp = self.n_test - self.tp<EOL>self.fn = self.n_ref - self.tp<EOL>self.specificity = float(self.tp) / self.n_ref<EOL>self.positive_predictivity = float(self.tp) / self.n_test<EOL>self.false_positive_rate = float(self.fp) / self.n_test<EOL>
Calculate performance statistics after the two sets of annotations are compared. Example: ------------------- ref=500 test=480 { 30 { 470 } 10 } ------------------- tp = 470 fp = 10 fn = 30 specificity = 470 / 500 positive_predictivity = 470 / 480 false_positive_rate = 10 / 480
f10223:c0:m1
def compare(self):
"""<STR_LIT>"""<EOL>test_samp_num = <NUM_LIT:0><EOL>ref_samp_num = <NUM_LIT:0><EOL>while ref_samp_num < self.n_ref and test_samp_num < self.n_test:<EOL><INDENT>closest_samp_num, smallest_samp_diff = (<EOL>self._get_closest_samp_num(ref_samp_num, test_samp_num))<EOL>if ref_samp_num < self.n_ref - <NUM_LIT:1>:<EOL><INDENT>closest_samp_num_next, smallest_samp_diff_next = (<EOL>self._get_closest_samp_num(ref_samp_num + <NUM_LIT:1>, test_samp_num))<EOL><DEDENT>else:<EOL><INDENT>closest_samp_num_next = -<NUM_LIT:1><EOL><DEDENT>if (closest_samp_num == closest_samp_num_next<EOL>and smallest_samp_diff_next < smallest_samp_diff):<EOL><INDENT>if closest_samp_num and (not ref_samp_num or closest_samp_num - <NUM_LIT:1> != self.matching_sample_nums[ref_samp_num - <NUM_LIT:1>]):<EOL><INDENT>closest_samp_num = closest_samp_num - <NUM_LIT:1><EOL>smallest_samp_diff = abs(self.ref_sample[ref_samp_num]<EOL>- self.test_sample[closest_samp_num])<EOL>if smallest_samp_diff < self.window_width:<EOL><INDENT>self.matching_sample_nums[ref_samp_num] = closest_samp_num<EOL><DEDENT>test_samp_num = closest_samp_num + <NUM_LIT:1><EOL><DEDENT><DEDENT>else:<EOL><INDENT>if smallest_samp_diff < self.window_width:<EOL><INDENT>self.matching_sample_nums[ref_samp_num] = closest_samp_num<EOL><DEDENT>test_samp_num = closest_samp_num + <NUM_LIT:1><EOL><DEDENT>ref_samp_num += <NUM_LIT:1><EOL><DEDENT>self._calc_stats()<EOL>
Main comparison function. Match each reference annotation to the closest test annotation within the allowed window, then calculate the performance statistics.
f10223:c0:m2
def print_summary(self):
<EOL>self.tp = len(self.matched_ref_inds)<EOL>self.fp = self.n_test - self.tp<EOL>self.fn = self.n_ref - self.tp<EOL>self.specificity = self.tp / self.n_ref<EOL>self.positive_predictivity = self.tp / self.n_test<EOL>self.false_positive_rate = self.fp / self.n_test<EOL>print('<STR_LIT>'<EOL>% (self.n_ref, self.n_test))<EOL>print('<STR_LIT>' % self.tp)<EOL>print('<STR_LIT>' % self.fp)<EOL>print('<STR_LIT>' % self.fn)<EOL>print('<STR_LIT>'<EOL>% (self.specificity, self.tp, self.n_ref))<EOL>print('<STR_LIT>'<EOL>% (self.positive_predictivity, self.tp, self.n_test))<EOL>print('<STR_LIT>'<EOL>% (self.false_positive_rate, self.fp, self.n_test))<EOL>
Print summary metrics of the annotation comparisons.
f10223:c0:m4
def plot(self, sig_style='<STR_LIT>', title=None, figsize=None,<EOL>return_fig=False):
fig = plt.figure(figsize=figsize)<EOL>ax = fig.add_subplot(<NUM_LIT:1>, <NUM_LIT:1>, <NUM_LIT:1>)<EOL>legend = ['<STR_LIT>',<EOL>'<STR_LIT>' % (self.tp, self.n_ref),<EOL>'<STR_LIT>' % (self.fn, self.n_ref),<EOL>'<STR_LIT>' % (self.tp, self.n_test),<EOL>'<STR_LIT>' % (self.fp, self.n_test)<EOL>]<EOL>if self.signal is not None:<EOL><INDENT>ax.plot(self.signal, sig_style)<EOL>ax.plot(self.matched_ref_sample,<EOL>self.signal[self.matched_ref_sample], '<STR_LIT>')<EOL>ax.plot(self.unmatched_ref_sample,<EOL>self.signal[self.unmatched_ref_sample], '<STR_LIT>',<EOL>fillstyle='<STR_LIT:none>')<EOL>ax.plot(self.matched_test_sample,<EOL>self.signal[self.matched_test_sample], '<STR_LIT>')<EOL>ax.plot(self.unmatched_test_sample,<EOL>self.signal[self.unmatched_test_sample], '<STR_LIT>')<EOL>ax.legend(legend)<EOL><DEDENT>else:<EOL><INDENT>ax.plot(self.matched_ref_sample, np.ones(self.tp), '<STR_LIT>')<EOL>ax.plot(self.unmatched_ref_sample, np.ones(self.fn), '<STR_LIT>',<EOL>fillstyle='<STR_LIT:none>')<EOL>ax.plot(self.matched_test_sample, <NUM_LIT:0.5> * np.ones(self.tp), '<STR_LIT>')<EOL>ax.plot(self.unmatched_test_sample, <NUM_LIT:0.5> * np.ones(self.fp), '<STR_LIT>')<EOL>ax.legend(legend[<NUM_LIT:1>:])<EOL><DEDENT>if title:<EOL><INDENT>ax.set_title(title)<EOL><DEDENT>ax.set_xlabel('<STR_LIT>')<EOL>fig.show()<EOL>if return_fig:<EOL><INDENT>return fig, ax<EOL><DEDENT>
Plot the comparison of two sets of annotations, possibly overlaid on their original signal. Parameters ---------- sig_style : str, optional The matplotlib style of the signal title : str, optional The title of the plot figsize: tuple, optional Tuple pair specifying the width, and height of the figure. It is the'figsize' argument passed into matplotlib.pyplot's `figure` function. return_fig : bool, optional Whether the figure is to be returned as an output argument.
f10223:c0:m5
def xqrs_detect(sig, fs, sampfrom=<NUM_LIT:0>, sampto='<STR_LIT:end>', conf=None,<EOL>learn=True, verbose=True):
xqrs = XQRS(sig=sig, fs=fs, conf=conf)<EOL>xqrs.detect(sampfrom=sampfrom, sampto=sampto, verbose=verbose)<EOL>return xqrs.qrs_inds<EOL>
Run the 'xqrs' qrs detection algorithm on a signal. See the docstring of the XQRS class for algorithm details. Parameters ---------- sig : numpy array The input ecg signal to apply the qrs detection on. fs : int or float The sampling frequency of the input signal. sampfrom : int, optional The starting sample number to run the detection on. sampto : int or 'end', optional The final sample number to run the detection on. Set as 'end' to run on the entire signal. conf : XQRS.Conf object, optional The configuration object specifying signal configuration parameters. See the docstring of the XQRS.Conf class. learn : bool, optional Whether to apply learning on the signal before running the main detection. If learning fails or is not conducted, the default configuration parameters will be used to initialize these variables. verbose : bool, optional Whether to display the stages and outcomes of the detection process. Returns ------- qrs_inds : numpy array The indices of the detected qrs complexes Examples -------- >>> import wfdb >>> from wfdb import processing >>> sig, fields = wfdb.rdsamp('sample-data/100', channels=[0]) >>> qrs_inds = processing.xqrs_detect(sig=sig[:,0], fs=fields['fs'])
f10224:m0
def gqrs_detect(sig=None, fs=None, d_sig=None, adc_gain=None, adc_zero=None,<EOL>threshold=<NUM_LIT:1.0>, hr=<NUM_LIT>, RRdelta=<NUM_LIT>, RRmin=<NUM_LIT>, RRmax=<NUM_LIT>,<EOL>QS=<NUM_LIT>, QT=<NUM_LIT>, RTmin=<NUM_LIT>, RTmax=<NUM_LIT>,<EOL>QRSa=<NUM_LIT>, QRSamin=<NUM_LIT>):
<EOL>if sig is not None:<EOL><INDENT>record = Record(p_signal=sig.reshape([-<NUM_LIT:1>,<NUM_LIT:1>]), fmt=['<STR_LIT>'])<EOL>record.set_d_features(do_adc=True)<EOL>d_sig = record.d_signal[:,<NUM_LIT:0>]<EOL>adc_zero = <NUM_LIT:0><EOL>adc_gain = record.adc_gain[<NUM_LIT:0>]<EOL><DEDENT>conf = GQRS.Conf(fs=fs, adc_gain=adc_gain, hr=hr, RRdelta=RRdelta, RRmin=RRmin,<EOL>RRmax=RRmax, QS=QS, QT=QT, RTmin=RTmin, RTmax=RTmax, QRSa=QRSa,<EOL>QRSamin=QRSamin, thresh=threshold)<EOL>gqrs = GQRS()<EOL>annotations = gqrs.detect(x=d_sig, conf=conf, adc_zero=adc_zero)<EOL>return np.array([a.time for a in annotations])<EOL>
Detect qrs locations in a single channel ecg. Functionally, a direct port of the gqrs algorithm from the original wfdb package. Accepts either a physical signal, or a digital signal with known adc_gain and adc_zero. See the notes below for a summary of the program. This algorithm is not being developed/supported. Parameters ---------- sig : 1d numpy array, optional The input physical signal. The detection algorithm which replicates the original, works using digital samples, and this physical option is provided as a convenient interface. If this is the specified input signal, automatic adc is performed using 24 bit precision, to obtain the `d_sig`, `adc_gain`, and `adc_zero` parameters. There may be minor differences in detection results (ie. an occasional 1 sample difference) between using `sig` and `d_sig`. To replicate the exact output of the original gqrs algorithm, use the `d_sig` argument instead. fs : int, or float The sampling frequency of the signal. d_sig : 1d numpy array, optional The input digital signal. If this is the specified input signal rather than `sig`, the `adc_gain` and `adc_zero` parameters must be specified. adc_gain : int, or float, optional The analogue to digital gain of the signal (the number of adus per physical unit). adc_zero: int, optional The value produced by the ADC given a 0 volt input. threshold : int, or float, optional The relative amplitude detection threshold. Used to initialize the peak and qrs detection threshold. hr : int, or float, optional Typical heart rate, in beats per minute. RRdelta : int or float, optional Typical difference between successive RR intervals in seconds. RRmin : int or float, optional Minimum RR interval ("refractory period"), in seconds. RRmax : int or float, optional Maximum RR interval, in seconds. Thresholds will be adjusted if no peaks are detected within this interval. QS : int or float, optional Typical QRS duration, in seconds. QT : int or float, optional Typical QT interval, in seconds. RTmin : int or float, optional Minimum interval between R and T peaks, in seconds. RTmax : int or float, optional Maximum interval between R and T peaks, in seconds. QRSa : int or float, optional Typical QRS peak-to-peak amplitude, in microvolts. QRSamin : int or float, optional Minimum QRS peak-to-peak amplitude, in microvolts. Returns ------- qrs_locs : numpy array Detected qrs locations Notes ----- This function should not be used for signals with fs <= 50Hz The algorithm theoretically works as follows: - Load in configuration parameters. They are used to set/initialize the: * allowed rr interval limits (fixed) * initial recent rr interval (running) * qrs width, used for detection filter widths (fixed) * allowed rt interval limits (fixed) * initial recent rt interval (running) * initial peak amplitude detection threshold (running) * initial qrs amplitude detection threshold (running) * `Note`: this algorithm does not normalize signal amplitudes, and hence is highly dependent on configuration amplitude parameters. - Apply trapezoid low-pass filtering to the signal - Convolve a QRS matched filter with the filtered signal - Run the learning phase using a calculated signal length: detect qrs and non-qrs peaks as in the main detection phase, without saving the qrs locations. During this phase, running parameters of recent intervals and peak/qrs thresholds are adjusted. - Run the detection::

    if a sample is bigger than its immediate neighbors and larger
    than the peak detection threshold, it is a peak.
        if it is further than RRmin from the previous qrs, and is a
        *primary peak:
            if it is further than 2 standard deviations from the
            previous qrs, do a backsearch for a missed low amplitude
            beat:
                return the primary peak between the current sample
                and the previous qrs if any.
            if it surpasses the qrs threshold, it is a qrs complex:
                save the qrs location.
                update running rr and qrs amplitude parameters.
                look for the qrs complex's t-wave and mark it if
                found.
    else if it is not a peak:
        lower the peak detection threshold if the last peak found
        was more than RRmax ago, and not already at its minimum.

*A peak is secondary if there is a larger peak within its neighborhood (time +- rrmin), or if it has been identified as a T-wave associated with a previous primary peak. A peak is primary if it is largest in its neighborhood, or if the only larger peaks are secondary. The above describes how the algorithm should theoretically work, but there are bugs which make the program contradict certain parts of its supposed logic. A list of issues from the original c code, and hence this python implementation, can be found here: https://github.com/bemoody/wfdb/issues/17 gqrs will not be supported/developed in this library. Examples -------- >>> import numpy as np >>> import wfdb >>> from wfdb import processing >>> # Detect using a physical input signal >>> record = wfdb.rdrecord('sample-data/100', channels=[0]) >>> qrs_locs = processing.gqrs_detect(record.p_signal[:,0], fs=record.fs) >>> # Detect using a digital input signal >>> record_2 = wfdb.rdrecord('sample-data/100', channels=[0], physical=False) >>> qrs_locs_2 = processing.gqrs_detect(d_sig=record_2.d_signal[:,0], fs=record_2.fs, adc_gain=record_2.adc_gain[0], adc_zero=record_2.adc_zero[0])
f10224:m2
def _set_conf(self):
self.rr_init = <NUM_LIT> * self.fs / self.conf.hr_init<EOL>self.rr_max = <NUM_LIT> * self.fs / self.conf.hr_min<EOL>self.rr_min = <NUM_LIT> * self.fs / self.conf.hr_max<EOL>self.qrs_width = int(self.conf.qrs_width * self.fs)<EOL>self.qrs_radius = int(self.conf.qrs_radius * self.fs)<EOL>self.qrs_thr_init = self.conf.qrs_thr_init<EOL>self.qrs_thr_min = self.conf.qrs_thr_min<EOL>self.ref_period = int(self.conf.ref_period * self.fs)<EOL>self.t_inspect_period = int(self.conf.t_inspect_period * self.fs)<EOL>
Set configuration parameters from the Conf object into the detector object. Time values are converted to samples, and amplitude values are in mV.
f10224:c0:m1
def _bandpass(self, fc_low=<NUM_LIT:5>, fc_high=<NUM_LIT:20>):
self.fc_low = fc_low<EOL>self.fc_high = fc_high<EOL>b, a = signal.butter(<NUM_LIT:2>, [float(fc_low) * <NUM_LIT:2> / self.fs,<EOL>float(fc_high) * <NUM_LIT:2> / self.fs], '<STR_LIT>')<EOL>self.sig_f = signal.filtfilt(b, a, self.sig[self.sampfrom:self.sampto],<EOL>axis=<NUM_LIT:0>)<EOL>self.filter_gain = get_filter_gain(b, a, np.mean([fc_low, fc_high]),<EOL>self.fs) * <NUM_LIT:2><EOL>
Apply a bandpass filter onto the signal, and save the filtered signal.
f10224:c0:m2
def _mwi(self):
wavelet_filter = signal.ricker(self.qrs_width, <NUM_LIT:4>)<EOL>self.sig_i = signal.filtfilt(wavelet_filter, [<NUM_LIT:1>], self.sig_f,<EOL>axis=<NUM_LIT:0>) ** <NUM_LIT:2><EOL>self.mwi_gain = get_filter_gain(wavelet_filter, [<NUM_LIT:1>],<EOL>np.mean([self.fc_low, self.fc_high]), self.fs) * <NUM_LIT:2><EOL>self.transform_gain = self.filter_gain * self.mwi_gain<EOL>self.peak_inds_i = find_local_peaks(self.sig_i, radius=self.qrs_radius)<EOL>self.n_peaks_i = len(self.peak_inds_i)<EOL>
Apply moving wave integration (mwi) with a ricker (Mexican hat) wavelet onto the filtered signal, and save the square of the integrated signal. The width of the hat is equal to the qrs width. After integration, find all local peaks in the mwi signal.
f10224:c0:m3
def _learn_init_params(self, n_calib_beats=<NUM_LIT:8>):
if self.verbose:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>last_qrs_ind = -self.rr_max<EOL>qrs_inds = []<EOL>qrs_amps = []<EOL>noise_amps = []<EOL>ricker_wavelet = signal.ricker(self.qrs_radius * <NUM_LIT:2>, <NUM_LIT:4>).reshape(-<NUM_LIT:1>,<NUM_LIT:1>)<EOL>peak_inds_f = find_local_peaks(self.sig_f, self.qrs_radius)<EOL>peak_nums_r = np.where(peak_inds_f > self.qrs_width)[<NUM_LIT:0>]<EOL>peak_nums_l = np.where(peak_inds_f <= self.sig_len - self.qrs_width)[<NUM_LIT:0>]<EOL>if (not peak_inds_f.size or not peak_nums_r.size<EOL>or not peak_nums_l.size):<EOL><INDENT>if self.verbose:<EOL><INDENT>print('<STR_LIT>'<EOL>% n_calib_beats)<EOL><DEDENT>self._set_default_init_params()<EOL>return<EOL><DEDENT>for peak_num in range(peak_nums_r[<NUM_LIT:0>], peak_nums_l[-<NUM_LIT:1>]):<EOL><INDENT>i = peak_inds_f[peak_num]<EOL>sig_segment = normalize((self.sig_f[i - self.qrs_radius:<EOL>i + self.qrs_radius]).reshape(-<NUM_LIT:1>, <NUM_LIT:1>), axis=<NUM_LIT:0>)<EOL>xcorr = np.correlate(sig_segment[:, <NUM_LIT:0>], ricker_wavelet[:,<NUM_LIT:0>])<EOL>if xcorr > <NUM_LIT> and i-last_qrs_ind > self.rr_min:<EOL><INDENT>last_qrs_ind = i<EOL>qrs_inds.append(i)<EOL>qrs_amps.append(self.sig_i[i])<EOL><DEDENT>else:<EOL><INDENT>noise_amps.append(self.sig_i[i])<EOL><DEDENT>if len(qrs_inds) == n_calib_beats:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if len(qrs_inds) == n_calib_beats:<EOL><INDENT>if self.verbose:<EOL><INDENT>print('<STR_LIT>' % n_calib_beats<EOL>+ '<STR_LIT>')<EOL><DEDENT>qrs_amp = np.mean(qrs_amps)<EOL>if noise_amps:<EOL><INDENT>noise_amp = np.mean(noise_amps)<EOL><DEDENT>else:<EOL><INDENT>noise_amp = qrs_amp / <NUM_LIT:10><EOL><DEDENT>rr_intervals = np.diff(qrs_inds)<EOL>rr_intervals = rr_intervals[rr_intervals < self.rr_max]<EOL>if rr_intervals.any():<EOL><INDENT>rr_recent = np.mean(rr_intervals)<EOL><DEDENT>else:<EOL><INDENT>rr_recent = self.rr_init<EOL><DEDENT>last_qrs_ind = min(<NUM_LIT:0>, qrs_inds[<NUM_LIT:0>] - self.rr_min - <NUM_LIT:1>)<EOL>self._set_init_params(qrs_amp_recent=qrs_amp,<EOL>noise_amp_recent=noise_amp,<EOL>rr_recent=rr_recent,<EOL>last_qrs_ind=last_qrs_ind)<EOL>self.learned_init_params = True<EOL><DEDENT>else:<EOL><INDENT>if self.verbose:<EOL><INDENT>print('<STR_LIT>'<EOL>% n_calib_beats)<EOL><DEDENT>self._set_default_init_params()<EOL><DEDENT>
Find a number of consecutive beats and use them to initialize: - recent qrs amplitude - recent noise amplitude - recent rr interval - qrs detection threshold The learning works as follows: - Find all local maxima (largest sample within `qrs_radius` samples) of the filtered signal. - Inspect the local maxima until `n_calib_beats` beats are found: - Calculate the cross-correlation between a ricker wavelet of length `qrs_width`, and the filtered signal segment centered around the local maximum. - If the cross-correlation exceeds 0.6, classify it as a beat. - Use the beats to initialize the previously described parameters. - If the system fails to find enough beats, the default parameters will be used instead. See the docstring of `XQRS._set_default_init_params` for details. Parameters ---------- n_calib_beats : int, optional Number of calibration beats to detect for learning
f10224:c0:m4
def _set_init_params(self, qrs_amp_recent, noise_amp_recent, rr_recent,<EOL>last_qrs_ind):
self.qrs_amp_recent = qrs_amp_recent<EOL>self.noise_amp_recent = noise_amp_recent<EOL>self.qrs_thr = max(<NUM_LIT>*self.qrs_amp_recent<EOL>+ <NUM_LIT>*self.noise_amp_recent,<EOL>self.qrs_thr_min * self.transform_gain)<EOL>self.rr_recent = rr_recent<EOL>self.last_qrs_ind = last_qrs_ind<EOL>self.last_qrs_peak_num = None<EOL>
Set initial online parameters
f10224:c0:m5
def _set_default_init_params(self):
if self.verbose:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>qrs_thr_init = self.qrs_thr_init * self.transform_gain<EOL>qrs_thr_min = self.qrs_thr_min * self.transform_gain<EOL>qrs_amp = <NUM_LIT>/<NUM_LIT> * qrs_thr_init<EOL>noise_amp = qrs_amp / <NUM_LIT:10><EOL>rr_recent = self.rr_init<EOL>last_qrs_ind = <NUM_LIT:0><EOL>self._set_init_params(qrs_amp_recent=qrs_amp,<EOL>noise_amp_recent=noise_amp,<EOL>rr_recent=rr_recent,<EOL>last_qrs_ind=last_qrs_ind)<EOL>self.learned_init_params = False<EOL>
Set initial running parameters using default values. The steady state equation is: `qrs_thr = 0.25*qrs_amp + 0.75*noise_amp` Estimate that qrs amp is 10x noise amp, giving: `qrs_thr = 0.325 * qrs_amp or 13/40 * qrs_amp`
f10224:c0:m6
def _is_qrs(self, peak_num, backsearch=False):
i = self.peak_inds_i[peak_num]<EOL>if backsearch:<EOL><INDENT>qrs_thr = self.qrs_thr / <NUM_LIT:2><EOL><DEDENT>else:<EOL><INDENT>qrs_thr = self.qrs_thr<EOL><DEDENT>if (i-self.last_qrs_ind > self.ref_period<EOL>and self.sig_i[i] > qrs_thr):<EOL><INDENT>if i-self.last_qrs_ind < self.t_inspect_period:<EOL><INDENT>if self._is_twave(peak_num):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL><DEDENT>return False<EOL>
Check whether a peak is a qrs complex. It is classified as qrs if it: - Comes after the refractory period - Passes qrs threshold - Is not a t-wave (check it if the peak is close to the previous qrs). Parameters ---------- peak_num : int The peak number of the mwi signal to be inspected backsearch: bool, optional Whether the peak is being inspected during backsearch
f10224:c0:m7
def _update_qrs(self, peak_num, backsearch=False):
i = self.peak_inds_i[peak_num]<EOL>rr_new = i - self.last_qrs_ind<EOL>if rr_new < self.rr_max:<EOL><INDENT>self.rr_recent = <NUM_LIT>*self.rr_recent + <NUM_LIT>*rr_new<EOL><DEDENT>self.qrs_inds.append(i)<EOL>self.last_qrs_ind = i<EOL>self.last_qrs_peak_num = self.peak_num<EOL>if backsearch:<EOL><INDENT>self.backsearch_qrs_inds.append(i)<EOL>self.qrs_amp_recent = (<NUM_LIT>*self.qrs_amp_recent<EOL>+ <NUM_LIT>*self.sig_i[i])<EOL><DEDENT>else:<EOL><INDENT>self.qrs_amp_recent = (<NUM_LIT>*self.qrs_amp_recent<EOL>+ <NUM_LIT>*self.sig_i[i])<EOL><DEDENT>self.qrs_thr = max((<NUM_LIT>*self.qrs_amp_recent<EOL>+ <NUM_LIT>*self.noise_amp_recent), self.qrs_thr_min)<EOL>return<EOL>
Update live qrs parameters. Adjust the recent rr-intervals and qrs amplitudes, and the qrs threshold. Parameters ---------- peak_num : int The peak number of the mwi signal where the qrs is detected backsearch: bool, optional Whether the qrs was found via backsearch
f10224:c0:m8
def _is_twave(self, peak_num):
i = self.peak_inds_i[peak_num]<EOL>if self.last_qrs_ind - self.qrs_radius < <NUM_LIT:0>:<EOL><INDENT>return False<EOL><DEDENT>sig_segment = normalize((self.sig_f[i - self.qrs_radius:i]<EOL>).reshape(-<NUM_LIT:1>, <NUM_LIT:1>), axis=<NUM_LIT:0>)<EOL>last_qrs_segment = self.sig_f[self.last_qrs_ind - self.qrs_radius:<EOL>self.last_qrs_ind]<EOL>segment_slope = np.diff(sig_segment)<EOL>last_qrs_slope = np.diff(last_qrs_segment)<EOL>if max(segment_slope) < <NUM_LIT:0.5>*max(abs(last_qrs_slope)):<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
Check whether a segment is a t-wave. Compare the maximum gradient of the filtered signal segment with that of the previous qrs segment. Parameters ---------- peak_num : int The peak number of the mwi signal where the qrs is detected
f10224:c0:m9
def _update_noise(self, peak_num):
i = self.peak_inds_i[peak_num]<EOL>self.noise_amp_recent = (<NUM_LIT>*self.noise_amp_recent<EOL>+ <NUM_LIT>*self.sig_i[i])<EOL>return<EOL>
Update live noise parameters
f10224:c0:m10
def _require_backsearch(self):
if self.peak_num == self.n_peaks_i-<NUM_LIT:1>:<EOL><INDENT>return False<EOL><DEDENT>next_peak_ind = self.peak_inds_i[self.peak_num + <NUM_LIT:1>]<EOL>if next_peak_ind-self.last_qrs_ind > self.rr_recent*<NUM_LIT>:<EOL><INDENT>return True<EOL><DEDENT>else:<EOL><INDENT>return False<EOL><DEDENT>
Determine whether a backsearch should be performed on prior peaks
f10224:c0:m11
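A sketch of the backsearch trigger implemented above. The 1.66 multiplier is an assumption (the value commonly used in Pan-Tompkins style detectors), since the literal is elided in the tokenized body:

def require_backsearch(next_peak_ind, last_qrs_ind, rr_recent,
                       rr_missed_factor=1.66):
    # Trigger a backsearch when the gap since the last qrs exceeds the
    # expected rr-interval by the missed-beat factor.
    return next_peak_ind - last_qrs_ind > rr_recent * rr_missed_factor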
def _backsearch(self):
if self.last_qrs_peak_num is not None:<EOL><INDENT>for peak_num in range(self.last_qrs_peak_num + <NUM_LIT:1>, self.peak_num + <NUM_LIT:1>):<EOL><INDENT>if self._is_qrs(peak_num=peak_num, backsearch=True):<EOL><INDENT>self._update_qrs(peak_num=peak_num, backsearch=True)<EOL><DEDENT><DEDENT><DEDENT>
Inspect previous peaks from the last detected qrs peak (if any), using a lower threshold
f10224:c0:m12
def _run_detection(self):
if self.verbose:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>self.qrs_inds = []<EOL>self.backsearch_qrs_inds = []<EOL>for self.peak_num in range(self.n_peaks_i):<EOL><INDENT>if self._is_qrs(self.peak_num):<EOL><INDENT>self._update_qrs(self.peak_num)<EOL><DEDENT>else:<EOL><INDENT>self._update_noise(self.peak_num)<EOL><DEDENT>if self._require_backsearch():<EOL><INDENT>self._backsearch()<EOL><DEDENT><DEDENT>if self.qrs_inds:<EOL><INDENT>self.qrs_inds = np.array(self.qrs_inds) + self.sampfrom<EOL><DEDENT>else:<EOL><INDENT>self.qrs_inds = np.array(self.qrs_inds)<EOL><DEDENT>if self.verbose:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>
Run the qrs detection after all signals and parameters have been configured and set.
f10224:c0:m13
def detect(self, sampfrom=<NUM_LIT:0>, sampto='<STR_LIT:end>', learn=True, verbose=True):
if sampfrom < <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>self.sampfrom = sampfrom<EOL>if sampto == '<STR_LIT:end>':<EOL><INDENT>sampto = self.sig_len<EOL><DEDENT>elif sampto > self.sig_len:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>self.sampto = sampto<EOL>self.verbose = verbose<EOL>if np.max(self.sig) == np.min(self.sig):<EOL><INDENT>self.qrs_inds = np.empty(<NUM_LIT:0>)<EOL>if self.verbose:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>return<EOL><DEDENT>self._set_conf()<EOL>self._bandpass()<EOL>self._mwi()<EOL>if learn:<EOL><INDENT>self._learn_init_params()<EOL><DEDENT>else:<EOL><INDENT>self._set_default_init_params()<EOL><DEDENT>self._run_detection()<EOL>
Detect qrs locations between two samples. Parameters ---------- sampfrom : int, optional The starting sample number to run the detection on. sampto : int, optional The final sample number to run the detection on. Set as 'end' to run on the entire signal. learn : bool, optional Whether to apply learning on the signal before running the main detection. If learning fails or is not conducted, the default configuration parameters will be used to initialize these variables. See the `XQRS._learn_init_params` docstring for details. verbose : bool, optional Whether to display the stages and outcomes of the detection process.
f10224:c0:m14
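A hypothetical usage example, assuming this class is wfdb.processing.XQRS; the record path 'sample-data/100' is illustrative:

import wfdb
from wfdb import processing

# Read one channel of an ECG record and run the detector over it.
sig, fields = wfdb.rdsamp('sample-data/100', channels=[0])
xqrs = processing.XQRS(sig=sig[:, 0], fs=fields['fs'])
xqrs.detect(sampfrom=0, sampto='end', learn=True, verbose=True)
print(xqrs.qrs_inds)  # detected qrs sample indices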
def detect(self, x, conf, adc_zero):
self.c = conf<EOL>self.annotations = []<EOL>self.sample_valid = False<EOL>if len(x) < <NUM_LIT:1>:<EOL><INDENT>return []<EOL><DEDENT>self.x = x<EOL>self.adc_zero = adc_zero<EOL>self.qfv = np.zeros((self.c._BUFLN), dtype="<STR_LIT>")<EOL>self.smv = np.zeros((self.c._BUFLN), dtype="<STR_LIT>")<EOL>self.v1 = <NUM_LIT:0><EOL>t0 = <NUM_LIT:0><EOL>self.tf = len(x) - <NUM_LIT:1><EOL>self.t = <NUM_LIT:0> - self.c.dt4<EOL>self.annot = GQRS.Annotation(<NUM_LIT:0>, "<STR_LIT>", <NUM_LIT:0>, <NUM_LIT:0>)<EOL>first_peak = GQRS.Peak(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>tmp = first_peak<EOL>for _ in range(<NUM_LIT:1>, self.c._NPEAKS):<EOL><INDENT>tmp.next_peak = GQRS.Peak(<NUM_LIT:0>, <NUM_LIT:0>, <NUM_LIT:0>)<EOL>tmp.next_peak.prev_peak = tmp<EOL>tmp = tmp.next_peak<EOL><DEDENT>tmp.next_peak = first_peak<EOL>first_peak.prev_peak = tmp<EOL>self.current_peak = first_peak<EOL>if self.c.spm > self.c._BUFLN:<EOL><INDENT>if self.tf - t0 > self.c._BUFLN:<EOL><INDENT>tf_learn = t0 + self.c._BUFLN - self.c.dt4<EOL><DEDENT>else:<EOL><INDENT>tf_learn = self.tf - self.c.dt4<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if self.tf - t0 > self.c.spm:<EOL><INDENT>tf_learn = t0 + self.c.spm - self.c.dt4<EOL><DEDENT>else:<EOL><INDENT>tf_learn = self.tf - self.c.dt4<EOL><DEDENT><DEDENT>self.countdown = -<NUM_LIT:1><EOL>self.state = "<STR_LIT>"<EOL>self.gqrs(t0, tf_learn)<EOL>self.rewind_gqrs()<EOL>self.state = "<STR_LIT>"<EOL>self.t = t0 - self.c.dt4<EOL>self.gqrs(t0, self.tf)<EOL>return self.annotations<EOL>
Run detection. `x` is the digital signal.
f10224:c1:m1
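A minimal sketch of the circular doubly-linked peak buffer built in the body above: a fixed number of nodes linked into a ring so the detector can cycle through them without further allocation. Names are illustrative:

class Peak:
    def __init__(self, time, amp, ptype):
        self.time, self.amp, self.type = time, amp, ptype
        self.next_peak = None
        self.prev_peak = None

def build_peak_ring(n_peaks):
    # Link n_peaks nodes forward and backward, then close the ring.
    first = Peak(0, 0, 0)
    tmp = first
    for _ in range(1, n_peaks):
        tmp.next_peak = Peak(0, 0, 0)
        tmp.next_peak.prev_peak = tmp
        tmp = tmp.next_peak
    tmp.next_peak = first
    first.prev_peak = tmp
    return first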
def get_config(self):
with open(self.path) as f:<EOL><INDENT>return yaml.safe_load(f)<EOL><DEDENT>
Read the config file :returns: the configuration loaded from the file, as a dict or list
f10227:c0:m1
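A minimal sketch of the loader above; the path 'config.yml' is illustrative. yaml.safe_load returns a dict or list depending on the top-level structure of the file:

import yaml

with open('config.yml') as f:
    config = yaml.safe_load(f)  # dict or list, per the file's top level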
def setup_external_interface(self, *args, **kwargs):
return execute(self._setup_external_interface, *args, **kwargs)<EOL>
Set up host networking :param public_interface(str): the public interface :returns: None
f10228:c0:m1
def _setup_ntp(self):
sudo('<STR_LIT>')<EOL>sudo('<STR_LIT>')<EOL>sudo('<STR_LIT>')<EOL>sudo('<STR_LIT>')<EOL>
Set up the network time protocol (ntp) service
f10228:c0:m2
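A hypothetical expansion of the elided sudo commands above; the package and service names are assumptions reflecting a typical Ubuntu ntp setup, not the verbatim body:

from fabric.api import sudo

def _setup_ntp_example():
    # Install and restart the ntp daemon (assumed typical sequence).
    sudo('apt-get update')
    sudo('apt-get -y install ntp')
    sudo('service ntp restart')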
def setup_ntp(self):
return execute(self._setup_ntp)<EOL>
Set up the ntp service :returns: None
f10228:c0:m3
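A sketch of the public/private wrapper pattern used by setup_ntp and setup_external_interface above: the underscored method does the per-host work, and the public method fans it out across hosts via fabric's execute(). The class and method names are illustrative:

from fabric.api import execute, sudo

class Deploy(object):
    def _do_task(self):
        sudo('true')  # placeholder for the real per-host commands

    def do_task(self):
        # execute() runs the task once per host in the current host list.
        return execute(self._do_task)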
def _set_openstack_repository(self):
if self._release() == '<STR_LIT>':<EOL><INDENT>print(red(env.host_string + '<STR_LIT>'))<EOL>sudo('<STR_LIT>')<EOL>sudo('<STR_LIT>')<EOL>sudo('<STR_LIT>')<EOL><DEDENT>print(red(env.host_string + '<STR_LIT>'))<EOL>with prefix('<STR_LIT>'):<EOL><INDENT>sudo('<STR_LIT>')<EOL><DEDENT>print(red(env.host_string + '<STR_LIT>'))<EOL>with settings(warn_only=True):<EOL><INDENT>reboot(wait=<NUM_LIT>)<EOL><DEDENT>print(red(env.host_string + '<STR_LIT>'))<EOL>with prefix('<STR_LIT>'):<EOL><INDENT>sudo('<STR_LIT>')<EOL><DEDENT>
Install and configure the OpenStack package repository
f10228:c0:m4
def set_openstack_repository(self):
return execute(self._set_openstack_repository)<EOL>
Install the OpenStack repository, needed only on trusty. This method installs cloud-archive:mitaka on trusty; xenial uses the default xenial repo. :returns: None
f10228:c0:m5
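A hypothetical expansion of the trusty branch described in the docstring: enable the mitaka cloud archive and upgrade. The exact commands are assumptions based on the standard Ubuntu Cloud Archive procedure:

from fabric.api import sudo

def enable_mitaka_cloud_archive():
    # add-apt-repository ships with software-properties-common.
    sudo('apt-get -y install software-properties-common')
    sudo('add-apt-repository -y cloud-archive:mitaka')
    sudo('apt-get update && apt-get -y dist-upgrade')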
def create_service_credentials(self, *args, **kwargs):
return execute(self._create_service_credentials, *args, **kwargs)<EOL>
r""" Create the swift service credentials :param os_password: the password of openstack `admin` user :param os_auth_url: keystone endpoint url e.g. `http://CONTROLLER_VIP:35357/v3` :param swift_pass: password of `swift` user :param public_endpoint: public endpoint for swift service e.g. `http://CONTROLLER_VIP:8080/v1/AUTH_%\\(tenant_id\\)s` :param internal_endpoint: internal endpoint for swift service e.g. `http://CONTROLLER_VIP:8080/v1/AUTH_%\\(tenant_id\\)s` :param admin_endpoint: admin endpoint for swift service e.g. `http://CONTROLLER_VIP:8080/v1` :returns: None
f10229:c0:m1
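A hedged sketch of what creating the swift service credentials typically involves; the openstack CLI commands are illustrative of the standard Mitaka install guide, not the verbatim body:

from fabric.api import sudo

def _create_service_credentials_example(swift_pass, public_endpoint,
                                        internal_endpoint, admin_endpoint):
    # Create the swift user, grant it admin on the service project,
    # register the object-store service, and add its three endpoints.
    sudo('openstack user create --domain default --password {} swift'
         .format(swift_pass))
    sudo('openstack role add --project service --user swift admin')
    sudo('openstack service create --name swift '
         '--description "OpenStack Object Storage" object-store')
    for iface, url in [('public', public_endpoint),
                       ('internal', internal_endpoint),
                       ('admin', admin_endpoint)]:
        sudo('openstack endpoint create --region RegionOne '
             'object-store {} {}'.format(iface, url))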