| signature (string, 8–3.44k chars) | body (string, 0–1.41M chars) | docstring (string, 1–122k chars) | id (string, 5–17 chars) |
|---|---|---|---|
def add_elasticache_replication_group(self, replication_group, region):
|
if not self.all_elasticache_replication_groups and replication_group['<STR_LIT>'] != '<STR_LIT>':
    return
dest = replication_group['<STR_LIT>'][0]['<STR_LIT>']['<STR_LIT>']
if not dest:
    return
self.index[dest] = [region, replication_group['<STR_LIT>']]
if self.group_by_instance_id:
    self.inventory[replication_group['<STR_LIT>']] = [dest]
    if self.nested_groups:
        self.push_group(self.inventory, '<STR_LIT>', replication_group['<STR_LIT>'])
if self.group_by_region:
    self.push(self.inventory, region, dest)
    if self.nested_groups:
        self.push_group(self.inventory, '<STR_LIT>', region)
if self.group_by_elasticache_engine:
    self.push(self.inventory, '<STR_LIT>', dest)
    if self.nested_groups:
        self.push_group(self.inventory, '<STR_LIT>', '<STR_LIT>')
self.push(self.inventory, '<STR_LIT>', replication_group['<STR_LIT>'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory['<STR_LIT>']['<STR_LIT>'][dest] = host_info
|
Adds an ElastiCache replication group to the inventory and index
|
f10185:c0:m20
|
def get_route53_records(self):
|
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [zone for zone in all_zones
                 if zone.name[:-1] not in self.route53_excluded_zones]
self.route53_records = {}
for zone in route53_zones:
    rrsets = r53_conn.get_all_rrsets(zone.id)
    for record_set in rrsets:
        record_name = record_set.name
        if record_name.endswith('.'):
            record_name = record_name[:-1]
        for resource in record_set.resource_records:
            self.route53_records.setdefault(resource, set())
            self.route53_records[resource].add(record_name)
|
Get and store the map of resource records to domain names that
point to them.
|
f10185:c0:m21
|
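To make the resulting data shape concrete, here is a standalone sketch of the mapping this method builds; all record names and resource values are hypothetical:

route53_records = {}
# (record name, resource record value) pairs, as they might come from Route53
records = [('web1.example.com', '203.0.113.10'),
           ('www.example.com', 'ec2-host.example.com'),
           ('app.example.com', 'ec2-host.example.com')]
for record_name, resource in records:
    route53_records.setdefault(resource, set()).add(record_name)

print(route53_records['ec2-host.example.com'])
# {'www.example.com', 'app.example.com'}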
def get_instance_route53_names(self, instance):
|
instance_attributes = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']
name_list = set()
for attrib in instance_attributes:
    try:
        value = getattr(instance, attrib)
    except AttributeError:
        continue
    if value in self.route53_records:
        name_list.update(self.route53_records[value])
return list(name_list)
|
Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list.
|
f10185:c0:m22
|
def get_host_info_dict_from_describe_dict(self, describe_dict):
|
host_info = {}
for key in describe_dict:
    value = describe_dict[key]
    key = self.to_safe('<STR_LIT>' + self.uncammelize(key))
    if key == '<STR_LIT>' and value:
        host_info['<STR_LIT>'] = value['<STR_LIT>']
        host_info['<STR_LIT>'] = value['<STR_LIT>']
    if key == '<STR_LIT>' and value:
        host_info['<STR_LIT>'] = value['<STR_LIT>']
        host_info['<STR_LIT>'] = value['<STR_LIT>']
    if key == '<STR_LIT>' and value:
        host_info['<STR_LIT>'] = value[0]['<STR_LIT>']['<STR_LIT>']
        host_info['<STR_LIT>'] = value[0]['<STR_LIT>']['<STR_LIT>']
        replica_count = 0
        for node in value[0]['<STR_LIT>']:
            if node['<STR_LIT>'] == '<STR_LIT>':
                host_info['<STR_LIT>'] = node['<STR_LIT>']['<STR_LIT>']
                host_info['<STR_LIT>'] = node['<STR_LIT>']['<STR_LIT>']
                host_info['<STR_LIT>'] = node['<STR_LIT>']
            elif node['<STR_LIT>'] == '<STR_LIT>':
                host_info['<STR_LIT>' + str(replica_count)] = node['<STR_LIT>']['<STR_LIT>']
                host_info['<STR_LIT>' + str(replica_count)] = node['<STR_LIT>']['<STR_LIT>']
                host_info['<STR_LIT>' + str(replica_count)] = node['<STR_LIT>']
                replica_count += 1
    if key == '<STR_LIT>' and value:
        host_info['<STR_LIT>'] = ','.join([str(i) for i in value])
    elif key == '<STR_LIT>':
        host_info['<STR_LIT>'] = ','.join([str(i) for i in value['<STR_LIT>']])
        host_info['<STR_LIT>'] = value['<STR_LIT>']
        host_info['<STR_LIT>'] = value['<STR_LIT>']
    elif key == '<STR_LIT>':
        if value is not None:
            sg_ids = []
            for sg in value:
                sg_ids.append(sg['<STR_LIT>'])
            host_info['<STR_LIT>'] = ','.join([str(i) for i in sg_ids])
    elif type(value) in [int, bool]:
        host_info[key] = value
    elif isinstance(value, six.string_types):
        host_info[key] = value.strip()
    elif value is None:
        host_info[key] = '<STR_LIT>'
    else:
        pass
return host_info
|
Parses the dictionary returned by the API call into a flat dictionary
of host parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes.
|
f10185:c0:m24
|
def get_host(self, host):
|
if len(self.index) == 0:
    self.load_index_from_cache()
if host not in self.index:
    self.do_api_calls_update_cache()
    if host not in self.index:
        return {}
(region, instance_id) = self.index[host]
instance = self.get_instance(region, instance_id)
return self.get_host_info_dict_from_instance(instance)
|
Get variables about a specific host
|
f10185:c0:m25
|
def push(self, my_dict, key, element):
|
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
    host_list = group_info.setdefault('<STR_LIT>', [])
    host_list.append(element)
else:
    group_info.append(element)
|
Push an element onto an array that may not have been defined in
the dict
|
f10185:c0:m26
|
def push_group(self, my_dict, key, element):
|
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
    parent_group = my_dict[key] = {'<STR_LIT>': parent_group}
child_groups = parent_group.setdefault('<STR_LIT>', [])
if element not in child_groups:
    child_groups.append(element)
|
Push a group as a child of another group.
|
f10185:c0:m27
|
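Because the key literals above are masked, here is a self-contained sketch of how the two helpers interact. The 'hosts' and 'children' keys are assumptions about the masked literals (the usual Ansible inventory layout):

inventory = {}

def push(my_dict, key, element):
    group_info = my_dict.setdefault(key, [])
    if isinstance(group_info, dict):
        group_info.setdefault('hosts', []).append(element)  # assumed key
    else:
        group_info.append(element)

def push_group(my_dict, key, element):
    parent_group = my_dict.setdefault(key, {})
    if not isinstance(parent_group, dict):
        parent_group = my_dict[key] = {'hosts': parent_group}  # assumed key
    child_groups = parent_group.setdefault('children', [])     # assumed key
    if element not in child_groups:
        child_groups.append(element)

push(inventory, 'us-east-1', '10.0.0.1')          # starts as a plain host list
push_group(inventory, 'us-east-1', 'webservers')  # promotes it to a group dict
print(inventory)
# {'us-east-1': {'hosts': ['10.0.0.1'], 'children': ['webservers']}}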
def load_inventory_from_cache(self):
|
with open(self.cache_path_cache, 'r') as cache:
    json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
|
Reads the inventory from the cache file and stores the decoded JSON
object on self.inventory
|
f10185:c0:m28
|
def load_index_from_cache(self):
|
with open(self.cache_path_index, 'r') as cache:
    json_index = cache.read()
self.index = json.loads(json_index)
|
Reads the index from the cache file and sets self.index
|
f10185:c0:m29
|
def write_to_cache(self, data, filename):
|
json_data = json.dumps(data, sort_keys=True, indent=2)
with open(filename, 'w') as cache:
    cache.write(json_data)
|
Writes data in JSON format to a file
|
f10185:c0:m30
|
def to_safe(self, word):
|
regex = "<STR_LIT>"<EOL>if not self.replace_dash_in_groups:<EOL><INDENT>regex += "<STR_LIT>"<EOL><DEDENT>return re.sub(regex + "<STR_LIT:]>", "<STR_LIT:_>", word)<EOL>
|
Converts 'bad' characters in a string to underscores so they can be used as Ansible groups
|
f10185:c0:m32
|
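A runnable sketch with the masked pattern filled in by assumption: a negated character class that keeps word characters and, optionally, dashes:

import re

def to_safe(word, replace_dash_in_groups=True):
    # Assumed character class; the actual pattern literal is masked above.
    regex = r"[^A-Za-z0-9_"
    if not replace_dash_in_groups:
        regex += r"\-"
    return re.sub(regex + "]", "_", word)

print(to_safe("tag_Name=web server#1"))                    # tag_Name_web_server_1
print(to_safe("my-group"))                                 # my_group
print(to_safe("my-group", replace_dash_in_groups=False))   # my-group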
def read(fname):
|
return codecs.open(
    os.path.join(os.path.dirname(__file__), fname),
    encoding='utf-8'
).read()
|
Read a file
|
f10187:m0
|
def __str__(self):
|
return repr(self.error_message)
|
r""" This just returns one of the error messages listed in the checkresponse() function
|
f10192:c0:m1
|
def __init__(self, token):
|
self.base_url = '<STR_LIT>'
self.token = token
self.geo_criteria = ['<STR_LIT>', 'state', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',
                     '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']
|
r""" Instantiates an instance of MesoPy.
Arguments:
----------
token: string, mandatory
Your API token that authenticates you for requests against MesoWest.mes
Returns:
--------
None.
Raises:
-------
None.
|
f10192:c1:m0
|
@staticmethod
def _checkresponse(response):
|
results_error = '<STR_LIT>'
auth_error = '<STR_LIT>' '<STR_LIT>'
rule_error = '<STR_LIT>' '<STR_LIT>'
catch_error = '<STR_LIT>'
if response['<STR_LIT>']['<STR_LIT>'] == 1:
    return response
elif response['<STR_LIT>']['<STR_LIT>'] == 2:
    if response['<STR_LIT>']['<STR_LIT>'] == 0:
        return None
    raise MesoPyError(results_error)
elif response['<STR_LIT>']['<STR_LIT>'] == 200:
    raise MesoPyError(auth_error)
elif response['<STR_LIT>']['<STR_LIT>'] == 400:
    raise MesoPyError(rule_error)
elif response['<STR_LIT>']['<STR_LIT>'] == -1:
    format_error = response['<STR_LIT>']['<STR_LIT>']
    raise MesoPyError(format_error)
else:
    raise MesoPyError(catch_error)
|
r""" Returns the data requested by the other methods assuming the response from the API is ok. If not, provides
error handling for all possible API errors. HTTP errors are handled in the get_response() function.
Arguments:
----------
None.
Returns:
--------
The response from the API as a dictionary if the API code is 2.
Raises:
-------
MesoPyError: Gives different response messages depending on returned code from API. If the response is 2,
resultsError is displayed. For a response of 200, an authError message is shown. A ruleError is displayed
if the code is 400, a formatError for -1, and catchError for any other invalid response.
|
f10192:c1:m1
|
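Since every key literal in the body is masked, here is a trimmed, self-contained sketch of the same dispatch. The envelope keys ('SUMMARY', 'RESPONSE_CODE', 'NUMBER_OF_OBJECTS', 'RESPONSE_MESSAGE') are assumptions about the masked literals, based on the MesoWest response format:

def check(response):
    code = response['SUMMARY']['RESPONSE_CODE']   # assumed key names
    if code == 1:
        return response                           # success
    if code == 2:
        if response['SUMMARY']['NUMBER_OF_OBJECTS'] == 0:
            return None                           # valid query, no results
        raise ValueError('results error')
    if code == 200:
        raise ValueError('authentication error')
    if code == 400:
        raise ValueError('rule violation')
    if code == -1:
        raise ValueError(response['SUMMARY']['RESPONSE_MESSAGE'])
    raise ValueError('unhandled response code')

assert check({'SUMMARY': {'RESPONSE_CODE': 1}}) is not None
assert check({'SUMMARY': {'RESPONSE_CODE': 2, 'NUMBER_OF_OBJECTS': 0}}) is None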
def _get_response(self, endpoint, request_dict):
|
http_error = '<STR_LIT>' '<STR_LIT>'
json_error = '<STR_LIT>'
try:
    qsp = urllib.parse.urlencode(request_dict, doseq=True)
    resp = urllib.request.urlopen(self.base_url + endpoint + '?' + qsp).read()
except (AttributeError, NameError):
    # Python 2 fallback: urllib has no parse/request submodules there.
    # (The original `except AttributeError or NameError:` only caught
    # AttributeError; `or` does not combine exception types.)
    try:
        qsp = urllib.urlencode(request_dict, doseq=True)
        resp = urllib2.urlopen(self.base_url + endpoint + '?' + qsp).read()
    except urllib2.URLError:
        raise MesoPyError(http_error)
except urllib.error.URLError:
    raise MesoPyError(http_error)
try:
    json_data = json.loads(resp.decode('utf-8'))
except ValueError:
    raise MesoPyError(json_error)
return self._checkresponse(json_data)
|
Returns a dictionary of data requested by each function.
Arguments:
----------
endpoint: string, mandatory
Set in all other methods, this is the API endpoint specific to each function.
request_dict: string, mandatory
A dictionary of parameters that are formatted into the API call.
Returns:
--------
response: A dictionary that has been dumped from JSON.
Raises:
-------
MesoPyError: Overrides the exceptions raised by the underlying urllib calls to give more custom error
messages. An http_error is raised if the connection fails or the URL is malformed, and a json_error if
the response body cannot be decoded as JSON.
|
f10192:c1:m2
|
def _check_geo_param(self, arg_list):
|
geo_func = lambda a, b: any(i in b for i in a)
check = geo_func(self.geo_criteria, arg_list)
if check is False:
    raise MesoPyError('<STR_LIT>'
                      '<STR_LIT>')
|
r""" Checks each function call to make sure that the user has provided at least one of the following geographic
parameters: 'stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc'.
Arguments:
----------
arg_list: list, mandatory
A list of kwargs from other functions.
Returns:
--------
None.
Raises:
-------
MesoPyError if no geographic search criteria is provided.
|
f10192:c1:m3
|
def attime(self, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns a dictionary of latest observations at a user specified location for a specified time. Users must
specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below for optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
attime: string, required
Date and time in form of YYYYMMDDhhmm for which returned obs are closest. All times are UTC. e.g.
attime='201504261800'
within: string, required
Can be a single number representing a time period before attime or two comma separated numbers representing
a period before and after the attime e.g. attime='201306011800', within='30' would return the ob closest to
attime within a 30 min period before or after attime.
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of observations around a specific time.
Raises:
-------
None.
|
f10192:c1:m4
|
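For orientation, a minimal usage sketch built from the parameters documented above. The class name Meso and the token value are assumptions; the masked body only shows that the token is attached to kwargs before the request:

m = Meso(token='YOUR_TOKEN')  # hypothetical token; class name assumed
# Observations closest to 2015-04-26 18:00 UTC, within 30 minutes, for one station
obs = m.attime(stid='kslc', attime='201504261800', within='30')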
def precip(self, start, end, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['start'] = start
kwargs['end'] = end
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns precipitation observations at a user specified location for a specified time. Users must specify at
least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: list, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: list, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of precipitation observations.
Raises:
-------
None.
|
f10192:c1:m6
|
def timeseries(self, start, end, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['start'] = start
kwargs['end'] = end
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns a time series of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g., start='201306011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g., end='201306011800'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of time series observations through the get_response() function.
Raises:
-------
None.
|
f10192:c1:m7
|
def climatology(self, startclim, endclim, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['<STR_LIT>'] = startclim
kwargs['<STR_LIT>'] = endclim
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns a climatology of observations at a user specified location for a specified time. Users must specify
at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa',
'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See
below mandatory and optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
startclim: string, mandatory
Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC
e.g. startclim='06011800' Do not specify a year
endclim: string, mandatory
End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC
e.g. endclim='06011800' Do not specify a year
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of climatology observations through the get_response() function.
Raises:
-------
None.
|
f10192:c1:m8
|
def variables(self):
|
return self._get_response('<STR_LIT>', {'<STR_LIT>': self.token})
|
Returns a dictionary of a list of variables that could be obtained from the 'vars' param in other functions.
Some stations may not record all variables listed. Use the metadata() function to return metadata on each
station.
Arguments:
----------
None.
Returns:
--------
Dictionary of variables.
Raises:
-------
None.
|
f10192:c1:m9
|
def climate_stats(self, startclim, endclim, type, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['type'] = type
kwargs['<STR_LIT>'] = startclim
kwargs['<STR_LIT>'] = endclim
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns a dictionary of aggregated yearly climate statistics (count, standard deviation,
average, median, maximum, minimum, min time, and max time depending on user specified type) of a time series
for a specified range of time at user specified location. Users must specify at least one geographic search
parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc')
to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters.
Also see the metadata() function for station IDs.
Arguments:
----------
type: string, mandatory
Describes what statistical values will be returned. Can be one of the following values:
"avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med",
"count", or "all". "All" will return all of the statistics.
startclim: string, mandatory
Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM PARAMETER. Default time is UTC
e.g. startclim=06011800 Do not specify a year.
endclim: string, mandatory
End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM PARAMETER. Default time is UTC
e.g. endclim=06011800 Do not specify a year.
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'.
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of aggregated climatology statistics.
Raises:
-------
None.
|
f10192:c1:m10
|
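Similarly, a hedged sketch of a climate_stats() call using only values shown in the docstring; the class name and token are assumptions, as above:

m = Meso(token='YOUR_TOKEN')  # hypothetical token; class name assumed
# All statistics for June 1, 18:00 UTC across the archive, for station 'wbb'
stats = m.climate_stats(startclim='06011800', endclim='06011800',
                        type='all', stid='wbb')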
def time_stats(self, start, end, type, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['type'] = type
kwargs['start'] = start
kwargs['end'] = end
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns a dictionary of discrete time statistics (count, standard deviation, average, median, maximum,
minimum, min time, and max time depending on user specified type) of a time series for a specified range of time
at user specified location. Users must specify at least one geographic search parameter ('stid', 'state',
'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data.
Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata()
function for station IDs.
Arguments:
----------
type: string, mandatory
Describes what statistical values will be returned. Can be one of the following values:
"avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med",
"count", or "all". "All" will return all of the statistics.
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g. start=201506011800.
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g. end=201506011800.
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
showemptystations: string, optional
Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are
omitted by default.
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: list, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of discrete time statistics.
Raises:
-------
None.
|
f10192:c1:m11
|
def metadata(self, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns the metadata for a station or stations. Users must specify at least one geographic search parameter
('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain
observation data. Other parameters may also be included. See below for optional parameters.
Arguments:
----------
complete: string, optional
A value of 1 or 0. When set to 1, an extended list of metadata attributes for each returned station is
provided. This result is useful for exploring the zones and regions in which a station resides.
e.g. complete='1'
sensorvars: string, optional
A value of 1 or 0. When set to 1, a complete history of sensor variables and period of record is given for
each station. e.g. sensorvars='1'
obrange: string, optional
Filters metadata for stations which were in operation for a specified time period. Users can specify one
date or a date range. Dates are in the format of YYYYmmdd. e.g. obrange='20150101',
obrange='20040101,20060101'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: string, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars.
status: string, optional
A value of either active or inactive returns stations currently set as active or inactive in the archive.
Omitting this param returns all stations. e.g. status='active'
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
A dictionary of metadata.
Raises:
-------
None.
|
f10192:c1:m12
|
def latency(self, start, end, **kwargs):
|
self._check_geo_param(kwargs)
kwargs['start'] = start
kwargs['end'] = end
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns data latency values for a station based on a start and end date/time. Users must specify at least
one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone',
'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and
optional parameters. Also see the metadata() function for station IDs.
Arguments:
----------
start: string, mandatory
Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC
e.g. start='201506011800'
end: string, mandatory
End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC
e.g. end='201506011800'
stats: string, optional
Describes what statistical values will be returned. Can be one of the following values:
"avg"/"average"/"mean", "max"/"maximum", "min"/"minimum", "stdev"/"standarddeviation"/"std", "median"/"med",
"count", or "all". "All" will return all of the statistics. e.g. stats='avg'
obtimezone: string, optional
Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local'
stid: string, optional
Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb'
county: string, optional
County/parish/borough (US/Canada only), full name e.g. county='Larimer'
state: string, optional
US state, 2-letter ID e.g. state='CO'
country: string, optional
Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx'
radius: list, optional
Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20"
bbox: string, optional
Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
cwa: string, optional
NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX'
nwsfirezone: string, optional
NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile
containing the full list of zones. e.g. nwsfirezone='LOX241'
gacc: string, optional
Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs.
subgacc: string, optional
Name of Sub GACC e.g. subgacc='EB07'
vars: string, optional
Single or comma separated list of sensor variables. Will return all stations that match one of provided
variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in
the query. e.g. vars='wind_speed,pressure' Use the variables() function to see a list of sensor vars.
units: string, optional
String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for
FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph,
speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa,
alti|inhg. e.g. units='temp|F,speed|kph,metric'
groupby: string, optional
Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc
e.g. groupby='state'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of latency data.
Raises:
-------
None.
|
f10192:c1:m13
|
def networks(self, **kwargs):
|
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns the metadata associated with the MesoWest network ID(s) entered. Leaving this function blank will
return all networks in MesoWest.
Arguments:
----------
id: string, optional
A single or comma-separated list of MesoNet network categories. e.g. ids='1,2,3'
shortname: string, optional
A single or comma-separated list of abbreviations or short names. e.g. shortname='DUGWAY,RAWS'
sortby: string, optional
Determines how to sort the returned networks. The only valid value is 'alphabet' which orders the results
in alphabetical order. By default, networks are sorted by ID. e.g. sortby='alphabet'
timeformat: string, optional
A python format string for returning customized date-time groups for observation times. Can include
characters. e.g. timeformat='%m/%d/%Y at %H:%M'
Returns:
--------
Dictionary of network descriptions.
Raises:
-------
None.
|
f10192:c1:m14
|
def networktypes(self, **kwargs):
|
kwargs['<STR_LIT>'] = self.token
return self._get_response('<STR_LIT>', kwargs)
|
r""" Returns the network type metadata depending on the ID specified. Can be left blank to return all network
types.
Arguments:
----------
id: string, optional
A single or comma-separated list of MesoNet categories. e.g.: type_ids='1,2,3'
Returns:
--------
Dictionary of network type descriptions.
Raises:
-------
None.
|
f10192:c1:m15
|
def execute(action, io_loop=None):
|
if not io_loop:
    io_loop = IOLoop.current()
output = Future()

def call():
    try:
        result = _execute(_TornadoAction(action, io_loop))
    except Exception:
        output.set_exc_info(sys.exc_info())
    else:
        output.set_result(result)

io_loop.add_callback(greenlet.greenlet(call).switch)
return output
|
Execute the given action and return a Future with the result.
The ``forwards`` and/or ``backwards`` methods for the action may be
synchronous or asynchronous. If asynchronous, that method must return a
Future that will resolve to its result.
See :py:func:`reversible.execute` for more details on the behavior of
``execute``.
:param action:
The action to execute.
:param io_loop:
IOLoop through which asynchronous operations will be executed. If
omitted, the current IOLoop is used.
:returns:
A future containing the result of executing the action.
|
f10197:m1
|
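A minimal usage sketch under stated assumptions: Noop is a hypothetical action, execute is the function above, and Tornado's run_sync drives the IOLoop until the returned future resolves:

from tornado.ioloop import IOLoop

class Noop(object):
    """Hypothetical action: trivially succeeds."""
    def forwards(self):
        return 42
    def backwards(self):
        pass

result = IOLoop.current().run_sync(lambda: execute(Noop()))
print(result)  # 42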
def _map_generator(f, generator):
|
item = next(generator)
while True:
    try:
        result = yield f(item)
    except Exception:
        item = generator.throw(*sys.exc_info())
    else:
        item = generator.send(result)
|
Apply ``f`` to the results of the given bi-directional generator.
Unfortunately, a generator comprehension (``f(x) for x in gen``) does not
work as expected for bi-directional generators: it won't send
exceptions and results back.
This function implements a map function for generators that sends values
and exceptions back and forth as expected.
|
f10198:m0
|
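A standalone sketch of the send/throw forwarding, assuming _map_generator is imported from this module; inner and the lambda are illustrative:

def inner():
    a = yield 1            # receives whatever the driver sends back
    b = yield 2
    print('inner got:', a, b)
    yield 3

mapped = _map_generator(lambda x: x * 10, inner())
print(next(mapped))        # 10: f applied to inner's first yield
print(mapped.send('A'))    # 20: 'A' is forwarded into inner via send()
print(mapped.send('B'))    # prints "inner got: A B", then 30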
def gen(function, io_loop=None):
|
@functools.wraps(function)
def new_function(*args, **kwargs):
    try:
        value = function(*args, **kwargs)
    except _RETURNS as result:
        return SimpleAction(
            lambda ctx: ctx.value,
            lambda _: None,
            result,
        )
    else:
        if isinstance(value, types.GeneratorType):
            return _TornadoGeneratorAction(value, io_loop)
        else:
            return SimpleAction(
                lambda _: value,
                lambda _: None,
                None,
            )
return new_function
|
Allows using a generator to chain together reversible actions.
This function is very similar to :py:func:`reversible.gen` except that it
may be used with actions whose ``forwards`` and/or ``backwards`` methods
are coroutines. Specifically, if either of those methods returns futures
the generated action will stop execution until the result of the future is
available.
.. code-block:: python
@reversible.tornado.gen
@tornado.gen.coroutine
def save_comment(ctx, comment):
ctx['comment_id'] = yield async_http_client.fetch(
# ...
)
raise tornado.gen.Return(ctx['comment_id'])
@save_comment.backwards
def delete_comment(ctx, comment):
# returns a Future
return async_http_client.fetch(...)
@reversible.tornado.gen
def post_comment(post, comment, client):
try:
comment_id = yield save_comment(comment)
except CommentStoreException:
# Exceptions thrown by actions may be caught by the
# action.
yield queue_save_comment_request(comment)
else:
yield update_comment_count(post)
update_cache()
:param function:
The generator function. This generator must yield action objects. The
``forwards`` and/or ``backwards`` methods on the action may be
asynchronous operations returning coroutines.
:param io_loop:
IOLoop used to execute asynchronous operations. Defaults to the
current IOLoop if omitted.
:returns:
An action executable via :py:func:`reversible.tornado.execute` and
yieldable in other instances of :py:func:`reversible.tornado.gen`.
|
f10198:m1
|
def lift(future):
|
return _Lift(future)
|
Returns the result of a Tornado Future inside a generator-based action.
Inside a :py:func:`reversible.tornado.gen` context, the meaning of
``yield`` changes to "execute this possibly asynchronous action and return
the result." However sometimes it is necessary to execute a standard
Tornado coroutine. To make this possible, the ``lift`` method is made
available.
.. code-block:: python
@reversible.tornado.gen
def my_action():
request = yield build_request_action()
try:
response = yield reversible.tornado.lift(
AsyncHTTPClient().fetch(request)
)
except HTTPError:
# ...
raise reversible.tornado.Return(response)
Note that operations executed through lift are assumed to be
non-reversible. If the operations are intended to be reversible, a
reversible action must be constructed.
:param future:
Tornado future whose result is required. When the returned object is
yielded, action execution will stop until the future's result is
available or the future fails. If the future fails, its exception will
be propagated back at the yield point.
:returns:
An action yieldable inside a :py:func:`reversible.tornado.gen`
context.
|
f10198:m2
|
def execute(action):
|
try:
    return action.forwards()
except Exception:
    log.exception('<STR_LIT>', action)
    try:
        action.backwards()
    except Exception:
        log.exception('<STR_LIT>', action)
        raise
    else:
        raise
|
Execute the given action.
An action is any object with a ``forwards()`` and ``backwards()`` method.
.. code-block:: python
class CreateUser(object):
def __init__(self, userinfo):
self.userinfo = userinfo
self.user_id = None
def forwards(self):
self.user_id = UserStore.create(userinfo)
return self.user_id
def backwards(self):
if self.user_id is not None:
# user_id will be None if creation failed
UserStore.delete(self.user_id)
If the ``forwards`` method succeeds, the action is considered successful.
If the method fails, the ``backwards`` method is called to revert any
effect it might have had on the system.
In addition to defining classes, actions may be built using the
:py:func:`reversible.action` decorator. Actions may be composed together
using the :py:func:`reversible.gen` decorator.
:param action:
The action to execute.
:returns:
The value returned by the ``forwards()`` method of the action.
:raises:
The exception raised by the ``forwards()`` method if rollback
succeeded. Otherwise, the exception raised by the ``backwards()``
method is raised.
|
f10200:m0
|
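Tying the docstring example above together, the call site would look like this; CreateUser, UserStore, and userinfo are the docstring's own hypothetical names:

action = CreateUser(userinfo)
user_id = execute(action)  # forwards() ran; backwards() only runs on failure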
def action(forwards=None, context_class=None):
|
context_class = context_class or dict

def decorator(_forwards):
    return ActionBuilder(_forwards, context_class)

if forwards is not None:
    return decorator(forwards)
else:
    return decorator
|
Decorator to build actions.
This decorator can be applied to a function to build actions. The
decorated function becomes the ``forwards`` implementation of the action.
The first argument of the ``forwards`` implementation is a context object
that can be used to share state between the forwards and backwards
implementations. This argument is passed implicitly by ``reversible`` and
callers of the function shouldn't provide it.
.. code-block:: python
@reversible.action
def create_order(context, order_details):
order_id = OrderStore.put(order_details)
context['order_id'] = order_id
return order_id
The ``.backwards`` attribute of the decorated function can itself be used
as a decorator to specify the ``backwards`` implementation of the action.
.. code-block:: python
@create_order.backwards
def delete_order(context, order_details):
if 'order_id' in context:
# order_id will be absent if create_order failed
OrderStore.delete(context['order_id'])
# Note that the context argument was not provided here. It's added
# implicitly by the library.
action = create_order(order_details)
order_id = reversible.execute(action)
Both, the ``forwards`` and ``backwards`` implementations will be called
with the same arguments. Any information that needs to be sent from
``forwards`` to ``backwards`` must be added to the context object.
The context object defaults to a dictionary. An alternative context
constructor may be provided using the ``context_class`` argument. It will
be called with no arguments to construct the context object.
.. code-block:: python
@reversible.action(context_class=UserInfo)
def create_user(user_info, user_details):
user_info.user_id = UserStore.put(user_details)
return user_info
Note that a backwards action is required. Attempts to use the action
without specifying a way to roll back will fail.
:param forwards:
The function will be treated as the ``forwards`` implementation.
:param context_class:
Constructor for context objects. A single action call will have its
own context object and that object will be implicitly passed as the
first argument to both, the ``forwards`` and the ``backwards``
implementations.
:returns:
If ``forwards`` was given, a partially constructed action is returned.
The ``backwards`` method on that object can be used as a decorator to
specify the rollback method for the action. If ``forwards`` was
omitted, a decorator that accepts the ``forwards`` method is returned.
|
f10200:m1
|
def backwards(self, backwards):
|
if self._backwards is not None:
    raise ValueError('<STR_LIT>')
self._backwards = backwards
return backwards
|
Decorator to specify the ``backwards`` action.
|
f10200:c1:m2
|
def gen(function):
|
@functools.wraps(function)
def new_function(*args, **kwargs):
    try:
        value = function(*args, **kwargs)
    except Return as result:
        return SimpleAction(
            lambda ctx: ctx.value,
            lambda _: None,
            result,
        )
    else:
        if isinstance(value, types.GeneratorType):
            return _GeneratorAction(value)
        else:
            return SimpleAction(
                lambda _: value,
                lambda _: None,
                None,
            )
return new_function
|
Allows using a generator to chain together reversible actions.
This decorator may be added to a generator that yields reversible actions
(any object with a ``.forwards()`` and ``.backwards()`` method). These may
be constructed manually or via :py:func:`reversible.action`. The decorated
function, when called, will return another reversible action that runs all
yielded actions and if any of them fails, rolls back all actions that had
been executed *in the reverse order*.
Values can be returned by raising the :py:class:`reversible.Return`
exception, or if using Python 3.3 or newer, by simply using the ``return``
statement.
For example,
.. code-block:: python
@reversible.gen
def submit_order(order):
# CreateOrder is a class that declares a forwards() and
# backwards() method. The forwards() method returns the
# order_id. It is propagated back to the yield point.
order_id = yield CreateOrder(order.cart)
# If get_payment_info throws an exception, the order will
# be deleted and the exception will be re-raised to the
# caller.
payment_info = PaymentStore.get_payment_info(order.user_id)
try:
# charge_order is a function that returns an action.
# It is easy to create such a function by using
# reversible.action as a decorator.
total = yield charge_order(payment_info, order_id)
except InsufficientFundsException:
# Exceptions thrown by a dependency's forwards()
# method are propagated at the yield point. It's
# possible to handle them and prevent rollback for
# everything else.
send_insufficient_funds_email(order_id, order.user_id)
else:
yield update_receipt(order_id, total)
send_receipt(order_id)
# The order ID is the result of this action.
raise reversible.Return(order_id)
order_id = reversible.execute(submit_order(order))
# If another action based on reversible.gen calls
# submit_order, it can simply do:
#
# order_id = yield submit_order(order_details)
When an action fails, its ``backwards`` method and the ``backwards``
methods of all actions executed so far will be called in reverse of the
order in which the ``forwards`` methods were called.
If any of the ``backwards`` methods fail, rollback will be aborted.
:param function:
The generator function. This generator must yield action objects.
:returns:
A function that, when called, produces an action object that executes
actions and functions as yielded by the generator.
|
f10201:m0
|
@classmethod<EOL><INDENT>def tearDownClass(cls):<DEDENT>
|
writefiles = ['<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>','<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>']<EOL>for file in writefiles:<EOL><INDENT>if os.path.isfile(file):<EOL><INDENT>os.remove(file)<EOL><DEDENT><DEDENT>
|
Clean up written files
|
f10205:c0:m15
|
def _rd_segment(file_name, dir_name, pb_dir, fmt, n_sig, sig_len, byte_offset,<EOL>samps_per_frame, skew, sampfrom, sampto, channels,<EOL>smooth_frames, ignore_skew):
|
<EOL>byte_offset = byte_offset[:]<EOL>samps_per_frame = samps_per_frame[:]<EOL>skew = skew[:]<EOL>for i in range(n_sig):<EOL><INDENT>if byte_offset[i] == None:<EOL><INDENT>byte_offset[i] = <NUM_LIT:0><EOL><DEDENT>if samps_per_frame[i] == None:<EOL><INDENT>samps_per_frame[i] = <NUM_LIT:1><EOL><DEDENT>if skew[i] == None:<EOL><INDENT>skew[i] = <NUM_LIT:0><EOL><DEDENT><DEDENT>if ignore_skew:<EOL><INDENT>skew = [<NUM_LIT:0>]*n_sig<EOL><DEDENT>file_name, datchannel = describe_list_indices(file_name)<EOL>w_file_name = [] <EOL>w_fmt = {} <EOL>w_byte_offset = {} <EOL>w_samps_per_frame = {} <EOL>w_skew = {} <EOL>w_channel = {} <EOL>for fn in file_name:<EOL><INDENT>idc = [c for c in datchannel[fn] if c in channels]<EOL>if idc != []:<EOL><INDENT>w_file_name.append(fn)<EOL>w_fmt[fn] = fmt[datchannel[fn][<NUM_LIT:0>]]<EOL>w_byte_offset[fn] = byte_offset[datchannel[fn][<NUM_LIT:0>]]<EOL>w_samps_per_frame[fn] = [samps_per_frame[c] for c in datchannel[fn]]<EOL>w_skew[fn] = [skew[c] for c in datchannel[fn]]<EOL>w_channel[fn] = idc<EOL><DEDENT><DEDENT>r_w_channel = {}<EOL>out_dat_channel = {}<EOL>for fn in w_channel:<EOL><INDENT>r_w_channel[fn] = [c - min(datchannel[fn]) for c in w_channel[fn]]<EOL>out_dat_channel[fn] = [channels.index(c) for c in w_channel[fn]]<EOL><DEDENT>if smooth_frames or sum(samps_per_frame) == n_sig:<EOL><INDENT>max_dtype = _np_dtype(_fmt_res(fmt, max_res=True), discrete=True)<EOL>signals = np.zeros([sampto-sampfrom, len(channels)], dtype=max_dtype)<EOL>for fn in w_file_name:<EOL><INDENT>signals[:, out_dat_channel[fn]] = _rd_dat_signals(fn, dir_name, pb_dir,<EOL>w_fmt[fn], len(datchannel[fn]), sig_len, w_byte_offset[fn],<EOL>w_samps_per_frame[fn], w_skew[fn], sampfrom, sampto,<EOL>smooth_frames)[:, r_w_channel[fn]]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>signals = [None] * len(channels)<EOL>for fn in w_file_name:<EOL><INDENT>datsignals = _rd_dat_signals(fn, dir_name, pb_dir, w_fmt[fn],<EOL>len(datchannel[fn]), sig_len, w_byte_offset[fn],<EOL>w_samps_per_frame[fn], w_skew[fn], sampfrom, sampto,<EOL>smooth_frames)<EOL>for cn in range(len(out_dat_channel[fn])):<EOL><INDENT>signals[out_dat_channel[fn][cn]] = datsignals[r_w_channel[fn][cn]]<EOL><DEDENT><DEDENT><DEDENT>return signals<EOL>
|
Read the digital samples from a single segment record's associated
dat file(s).
Parameters
----------
file_name : list
The names of the dat files to be read.
dir_name : str
The full directory where the dat file(s) are located, if the dat
file(s) are local.
pb_dir : str
The physiobank directory where the dat file(s) are located, if
the dat file(s) are remote.
fmt : list
The formats of the dat files
n_sig : int
The number of signals contained in the dat file
sig_len : int
The signal length (per channel) of the dat file
byte_offset : int
The byte offset of the dat file
samps_per_frame : list
The samples/frame for each signal of the dat file
skew : list
The skew for the signals of the dat file
sampfrom : int
The starting sample number to be read from the signals
sampto : int
The final sample number to be read from the signals
smooth_frames : bool
Whether to smooth channels with multiple samples/frame
ignore_skew : bool, optional
Used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the
output variable (False), or to ignore the skew field and load in
all values contained in the dat files unaligned (True).
Returns
-------
signals : numpy array, or list
The signals read from the dat file(s). A 2d numpy array is
returned if the signals have uniform samples/frame or if
`smooth_frames` is True. Otherwise a list of 1d numpy arrays
is returned.
Notes
-----
'channels', 'sampfrom', 'sampto', 'smooth_frames', and 'ignore_skew'
are user-desired input fields. All other parameters are
specifications of the segment.
|
f10209:m0
|
def _rd_dat_signals(file_name, dir_name, pb_dir, fmt, n_sig, sig_len,<EOL>byte_offset, samps_per_frame, skew, sampfrom, sampto,<EOL>smooth_frames):
|
<EOL>tsamps_per_frame = sum(samps_per_frame)<EOL>read_len = sampto - sampfrom<EOL>(start_byte, n_read_samples, block_floor_samples,<EOL>extra_flat_samples, nan_replace) = _dat_read_params(fmt, sig_len,<EOL>byte_offset, skew,<EOL>tsamps_per_frame,<EOL>sampfrom, sampto)<EOL>total_read_bytes = _required_byte_num('<STR_LIT>', fmt, n_read_samples)<EOL>total_process_samples = n_read_samples + extra_flat_samples<EOL>total_process_bytes = _required_byte_num('<STR_LIT>', fmt,<EOL>total_process_samples)<EOL>if extra_flat_samples:<EOL><INDENT>if fmt in UNALIGNED_FMTS:<EOL><INDENT>n_extra_bytes = total_process_bytes - total_read_bytes<EOL>sig_data = np.concatenate((_rd_dat_file(file_name, dir_name,<EOL>pb_dir, fmt, start_byte,<EOL>n_read_samples),<EOL>np.zeros(n_extra_bytes,<EOL>dtype=np.dtype(DATA_LOAD_TYPES[fmt]))))<EOL><DEDENT>else:<EOL><INDENT>sig_data = np.concatenate((_rd_dat_file(file_name, dir_name,<EOL>pb_dir, fmt, start_byte,<EOL>n_read_samples),<EOL>np.zeros(extra_flat_samples,<EOL>dtype=np.dtype(DATA_LOAD_TYPES[fmt]))))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sig_data = _rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte,<EOL>n_read_samples)<EOL><DEDENT>if fmt in UNALIGNED_FMTS:<EOL><INDENT>sig_data = _blocks_to_samples(sig_data, total_process_samples, fmt)<EOL>if block_floor_samples:<EOL><INDENT>sig_data = sig_data[block_floor_samples:]<EOL><DEDENT><DEDENT>if fmt in OFFSET_FMTS:<EOL><INDENT>if fmt == '<STR_LIT>':<EOL><INDENT>sig_data = (sig_data.astype('<STR_LIT>') - <NUM_LIT>).astype('<STR_LIT>')<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>sig_data = (sig_data.astype('<STR_LIT>') - <NUM_LIT>).astype('<STR_LIT>')<EOL><DEDENT><DEDENT>if tsamps_per_frame == n_sig:<EOL><INDENT>signal = sig_data.reshape(-<NUM_LIT:1>, n_sig)<EOL>signal = _skew_sig(signal, skew, n_sig, read_len, fmt, nan_replace)<EOL><DEDENT>elif smooth_frames:<EOL><INDENT>signal = np.zeros((int(len(sig_data) / tsamps_per_frame) , n_sig),<EOL>dtype=sig_data.dtype)<EOL>for ch in range(n_sig):<EOL><INDENT>if samps_per_frame[ch] == <NUM_LIT:1>:<EOL><INDENT>signal[:, ch] = sig_data[sum(([<NUM_LIT:0>] + samps_per_frame)[:ch + <NUM_LIT:1>])::tsamps_per_frame]<EOL><DEDENT>else:<EOL><INDENT>if ch == <NUM_LIT:0>:<EOL><INDENT>startind = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>startind = np.sum(samps_per_frame[:ch])<EOL><DEDENT>signal[:,ch] = [np.average(sig_data[ind:ind+samps_per_frame[ch]]) for ind in range(startind,len(sig_data),tsamps_per_frame)]<EOL><DEDENT><DEDENT>signal = _skew_sig(signal, skew, n_sig, read_len, fmt, nan_replace)<EOL><DEDENT>else:<EOL><INDENT>signal = []<EOL>for ch in range(n_sig):<EOL><INDENT>ch_indices = np.concatenate([np.array(range(samps_per_frame[ch]))<EOL>+ sum([<NUM_LIT:0>] + samps_per_frame[:ch])<EOL>+ tsamps_per_frame * framenum for framenum in range(int(len(sig_data)/tsamps_per_frame))])<EOL>signal.append(sig_data[ch_indices])<EOL><DEDENT>signal = _skew_sig(signal, skew, n_sig, read_len, fmt, nan_replace, samps_per_frame)<EOL><DEDENT>_check_sig_dims(signal, read_len, n_sig, samps_per_frame)<EOL>return signal<EOL>
|
Read all signals from a WFDB dat file.
Parameters
----------
file_name : str
The name of the dat file
* other params
See docstring for `_rd_segment`.
Returns
-------
signals : numpy array, or list
See docstring for `_rd_segment`.
Notes
-----
See docstring notes for `_rd_segment`.
|
f10209:m1
|
def _dat_read_params(fmt, sig_len, byte_offset, skew, tsamps_per_frame,<EOL>sampfrom, sampto):
|
<EOL>start_flat_sample = sampfrom * tsamps_per_frame<EOL>if (sampto + max(skew)) > sig_len:<EOL><INDENT>end_flat_sample = sig_len * tsamps_per_frame<EOL>extra_flat_samples = (sampto + max(skew) - sig_len) * tsamps_per_frame<EOL><DEDENT>else:<EOL><INDENT>end_flat_sample = (sampto + max(skew)) * tsamps_per_frame<EOL>extra_flat_samples = <NUM_LIT:0><EOL><DEDENT>if fmt == '<STR_LIT>':<EOL><INDENT>block_floor_samples = start_flat_sample % <NUM_LIT:2><EOL>start_flat_sample = start_flat_sample - block_floor_samples<EOL><DEDENT>elif fmt in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>block_floor_samples = start_flat_sample % <NUM_LIT:3><EOL>start_flat_sample = start_flat_sample - block_floor_samples<EOL><DEDENT>else:<EOL><INDENT>block_floor_samples = <NUM_LIT:0><EOL><DEDENT>start_byte = byte_offset + int(start_flat_sample * BYTES_PER_SAMPLE[fmt])<EOL>n_read_samples = end_flat_sample - start_flat_sample<EOL>nan_replace = [max(<NUM_LIT:0>, sampto + s - sig_len) for s in skew]<EOL>return (start_byte, n_read_samples, block_floor_samples,<EOL>extra_flat_samples, nan_replace)<EOL>
|
Calculate the parameters used to read and process a dat file, given
its layout, and the desired sample range.
Parameters
----------
fmt : str
The format of the dat file
sig_len : int
The signal length (per channel) of the dat file
byte_offset : int
The byte offset of the dat file
skew : list
The skew for the signals of the dat file
tsamps_per_frame : int
The total samples/frame for all channels of the dat file
sampfrom : int
The starting sample number to be read from the signals
sampto : int
The final sample number to be read from the signals
Returns
-------
start_byte : int
The starting byte to read the dat file from. Always points to
the start of a byte block for special formats.
n_read_samples : int
The number of flat samples to read from the dat file.
block_floor_samples : int
The extra samples read prior to the first desired sample, for
special formats, in order to ensure entire byte blocks are read.
extra_flat_samples : int
The extra samples desired beyond what is contained in the file.
nan_replace : list
The number of samples to replace with nan at the end of each
signal, due to skew wanting samples beyond the file.
Examples
--------
With sig_len=100, t=4 (total samples/frame), skew=[0, 2, 4, 5]:
sampfrom=0, sampto=100 --> read 100*t flat samples, extra_flat_samples = 5*t, nan_replace = [0, 2, 4, 5]
sampfrom=50, sampto=100 --> read 50*t flat samples, extra_flat_samples = 5*t, nan_replace = [0, 2, 4, 5]
sampfrom=0, sampto=50 --> read 55*t flat samples, extra_flat_samples = 0, nan_replace = [0, 0, 0, 0]
sampfrom=95, sampto=99 --> read 5*t flat samples, extra_flat_samples = 4*t, nan_replace = [0, 1, 3, 4]
|
f10209:m2
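A minimal standalone sketch of the window arithmetic for a byte-aligned format (hypothetical helper name `read_window`; the real function additionally floors the start sample to a byte-block boundary for the special formats):
.. code-block:: python
def read_window(sig_len, skew, t, sampfrom, sampto):
    # Flat-sample read window, mirroring the aligned-format branch.
    start_flat = sampfrom * t
    end_wanted = sampto + max(skew)
    if end_wanted > sig_len:
        end_flat = sig_len * t
        extra_flat = (end_wanted - sig_len) * t
    else:
        end_flat = end_wanted * t
        extra_flat = 0
    nan_replace = [max(0, sampto + s - sig_len) for s in skew]
    return end_flat - start_flat, extra_flat, nan_replace
# Last docstring example: sig_len=100, t=4, skew=[0, 2, 4, 5]
print(read_window(100, [0, 2, 4, 5], 4, 95, 99))  # (20, 16, [0, 1, 3, 4])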
|
def _required_byte_num(mode, fmt, n_samp):
|
if fmt == '<STR_LIT>':<EOL><INDENT>n_bytes = math.ceil(n_samp*<NUM_LIT>)<EOL><DEDENT>elif fmt in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>n_extra = n_samp % <NUM_LIT:3><EOL>if n_extra == <NUM_LIT:2>:<EOL><INDENT>if fmt == '<STR_LIT>':<EOL><INDENT>n_bytes = upround(n_samp * <NUM_LIT:4>/<NUM_LIT:3>, <NUM_LIT:4>)<EOL><DEDENT>else:<EOL><INDENT>if mode == '<STR_LIT>':<EOL><INDENT>n_bytes = math.ceil(n_samp * <NUM_LIT:4>/<NUM_LIT:3>)<EOL><DEDENT>else:<EOL><INDENT>n_bytes = upround(n_samp * <NUM_LIT:4>/<NUM_LIT:3>, <NUM_LIT:4>)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>n_bytes = math.ceil(n_samp * <NUM_LIT:4>/<NUM_LIT:3> )<EOL><DEDENT><DEDENT>else:<EOL><INDENT>n_bytes = n_samp * BYTES_PER_SAMPLE[fmt]<EOL><DEDENT>return int(n_bytes)<EOL>
|
Determine how many signal bytes are needed to read or write a
number of desired samples from a dat file.
Parameters
----------
mode : str
Whether the file is to be read or written: 'read' or 'write'.
fmt : str
The wfdb dat format.
n_samp : int
The number of samples wanted.
Returns
-------
n_bytes : int
The number of bytes required to read or write the file
Notes
-----
Read and write require the same number in most cases. An exception
is fmt 311 for n_extra==2.
|
f10209:m3
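A quick sanity check of the packing ratios, assuming the elided literals are the standard WFDB values (fmt 212 packs two 12-bit samples into three bytes; fmts 310/311 pack three 10-bit samples into four bytes):
.. code-block:: python
import math
n_samp = 5
print(math.ceil(n_samp * 1.5))    # 8 bytes for 5 samples in fmt 212
print(math.ceil(n_samp * 4 / 3))  # 7 bytes for 5 samples in fmt 310/311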
|
def _rd_dat_file(file_name, dir_name, pb_dir, fmt, start_byte, n_samp):
|
<EOL>if fmt == '<STR_LIT>':<EOL><INDENT>byte_count = _required_byte_num('<STR_LIT>', '<STR_LIT>', n_samp)<EOL>element_count = byte_count<EOL><DEDENT>elif fmt in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>byte_count = _required_byte_num('<STR_LIT>', fmt, n_samp)<EOL>element_count = byte_count<EOL><DEDENT>else:<EOL><INDENT>element_count = n_samp<EOL>byte_count = n_samp * BYTES_PER_SAMPLE[fmt]<EOL><DEDENT>if pb_dir is None:<EOL><INDENT>with open(os.path.join(dir_name, file_name), '<STR_LIT:rb>') as fp:<EOL><INDENT>fp.seek(start_byte)<EOL>sig_data = np.fromfile(fp, dtype=np.dtype(DATA_LOAD_TYPES[fmt]),<EOL>count=element_count)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>sig_data = download._stream_dat(file_name, pb_dir, byte_count,<EOL>start_byte,<EOL>np.dtype(DATA_LOAD_TYPES[fmt]))<EOL><DEDENT>return sig_data<EOL>
|
Read data from a dat file, either local or remote, into a 1d numpy
array.
This is the lowest level dat reading function (along with
`_stream_dat` which this function may call), and is called by
`_rd_dat_signals`.
Parameters
----------
start_byte : int
The starting byte number to read from.
n_samp : int
The total number of samples to read. Does NOT need to create
whole blocks for special format. Any number of samples should be
readable.
* other params
See docstring for `_rd_dat_signals`
Returns
-------
sig_data : numpy array
The data read from the dat file. The dtype varies depending on
fmt. Byte aligned fmts are read in their final required format.
Unaligned formats are read as uint8 to be further processed.
Notes
-----
See docstring notes for `_rd_dat_signals`
|
f10209:m4
|
def _blocks_to_samples(sig_data, n_samp, fmt):
|
if fmt == '<STR_LIT>':<EOL><INDENT>if n_samp % <NUM_LIT:2>:<EOL><INDENT>n_samp += <NUM_LIT:1><EOL>added_samps = <NUM_LIT:1><EOL>sig_data = np.append(sig_data, np.zeros(<NUM_LIT:1>, dtype='<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>added_samps = <NUM_LIT:0><EOL><DEDENT>sig_data = sig_data.astype('<STR_LIT>')<EOL>sig = np.zeros(n_samp, dtype='<STR_LIT>')<EOL>sig[<NUM_LIT:0>::<NUM_LIT:2>] = sig_data[<NUM_LIT:0>::<NUM_LIT:3>] + <NUM_LIT> * np.bitwise_and(sig_data[<NUM_LIT:1>::<NUM_LIT:3>], <NUM_LIT>)<EOL>sig[<NUM_LIT:1>::<NUM_LIT:2>] = sig_data[<NUM_LIT:2>::<NUM_LIT:3>] + <NUM_LIT>*np.bitwise_and(sig_data[<NUM_LIT:1>::<NUM_LIT:3>] >> <NUM_LIT:4>, <NUM_LIT>)<EOL>if added_samps:<EOL><INDENT>sig = sig[:-added_samps]<EOL><DEDENT>sig[sig > <NUM_LIT>] -= <NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>if n_samp % <NUM_LIT:3>:<EOL><INDENT>n_samp = upround(n_samp,<NUM_LIT:3>)<EOL>added_samps = n_samp % <NUM_LIT:3><EOL>sig_data = np.append(sig_data, np.zeros(added_samps, dtype='<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>added_samps = <NUM_LIT:0><EOL><DEDENT>sig_data = sig_data.astype('<STR_LIT>')<EOL>sig = np.zeros(n_samp, dtype='<STR_LIT>')<EOL>sig[<NUM_LIT:0>::<NUM_LIT:3>] = (sig_data[<NUM_LIT:0>::<NUM_LIT:4>] >> <NUM_LIT:1>)[<NUM_LIT:0>:len(sig[<NUM_LIT:0>::<NUM_LIT:3>])] + <NUM_LIT> * np.bitwise_and(sig_data[<NUM_LIT:1>::<NUM_LIT:4>], <NUM_LIT>)[<NUM_LIT:0>:len(sig[<NUM_LIT:0>::<NUM_LIT:3>])]<EOL>sig[<NUM_LIT:1>::<NUM_LIT:3>] = (sig_data[<NUM_LIT:2>::<NUM_LIT:4>] >> <NUM_LIT:1>)[<NUM_LIT:0>:len(sig[<NUM_LIT:1>::<NUM_LIT:3>])] + <NUM_LIT> * np.bitwise_and(sig_data[<NUM_LIT:3>::<NUM_LIT:4>], <NUM_LIT>)[<NUM_LIT:0>:len(sig[<NUM_LIT:1>::<NUM_LIT:3>])]<EOL>sig[<NUM_LIT:2>::<NUM_LIT:3>] = np.bitwise_and((sig_data[<NUM_LIT:1>::<NUM_LIT:4>] >> <NUM_LIT:3>), <NUM_LIT>)[<NUM_LIT:0>:len(sig[<NUM_LIT:2>::<NUM_LIT:3>])] + <NUM_LIT:32> * np.bitwise_and(sig_data[<NUM_LIT:3>::<NUM_LIT:4>] >> <NUM_LIT:3>, <NUM_LIT>)[<NUM_LIT:0>:len(sig[<NUM_LIT:2>::<NUM_LIT:3>])]<EOL>if added_samps:<EOL><INDENT>sig = sig[:-added_samps]<EOL><DEDENT>sig[sig > <NUM_LIT>] -= <NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>if n_samp % <NUM_LIT:3>:<EOL><INDENT>n_samp = upround(n_samp,<NUM_LIT:3>)<EOL>added_samps = n_samp % <NUM_LIT:3><EOL>sig_data = np.append(sig_data, np.zeros(added_samps, dtype='<STR_LIT>'))<EOL><DEDENT>else:<EOL><INDENT>added_samps = <NUM_LIT:0><EOL><DEDENT>sig_data = sig_data.astype('<STR_LIT>')<EOL>sig = np.zeros(n_samp, dtype='<STR_LIT>')<EOL>sig[<NUM_LIT:0>::<NUM_LIT:3>] = sig_data[<NUM_LIT:0>::<NUM_LIT:4>][<NUM_LIT:0>:len(sig[<NUM_LIT:0>::<NUM_LIT:3>])] + <NUM_LIT> * np.bitwise_and(sig_data[<NUM_LIT:1>::<NUM_LIT:4>], <NUM_LIT>)[<NUM_LIT:0>:len(sig[<NUM_LIT:0>::<NUM_LIT:3>])]<EOL>sig[<NUM_LIT:1>::<NUM_LIT:3>] = (sig_data[<NUM_LIT:1>::<NUM_LIT:4>] >> <NUM_LIT:2>)[<NUM_LIT:0>:len(sig[<NUM_LIT:1>::<NUM_LIT:3>])] + <NUM_LIT:64> * np.bitwise_and(sig_data[<NUM_LIT:2>::<NUM_LIT:4>], <NUM_LIT>)[<NUM_LIT:0>:len(sig[<NUM_LIT:1>::<NUM_LIT:3>])]<EOL>sig[<NUM_LIT:2>::<NUM_LIT:3>] = (sig_data[<NUM_LIT:2>::<NUM_LIT:4>] >> <NUM_LIT:4>)[<NUM_LIT:0>:len(sig[<NUM_LIT:2>::<NUM_LIT:3>])] + <NUM_LIT:16> * np.bitwise_and(sig_data[<NUM_LIT:3>::<NUM_LIT:4>], <NUM_LIT>)[<NUM_LIT:0>:len(sig[<NUM_LIT:2>::<NUM_LIT:3>])]<EOL>if added_samps:<EOL><INDENT>sig = sig[:-added_samps]<EOL><DEDENT>sig[sig > <NUM_LIT>] -= <NUM_LIT><EOL><DEDENT>return sig<EOL>
|
Convert uint8 blocks into signal samples for unaligned dat formats.
Parameters
----------
sig_data : numpy array
The uint8 data blocks.
n_samp : int
The number of samples contained in the bytes
Returns
-------
signal : numpy array
The numpy array of digital samples
|
f10209:m5
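A hand-unpacked fmt-212 byte triplet illustrates the bit manipulation, assuming the standard layout: byte 1 carries the high nibble of sample 0 in its low nibble and the high nibble of sample 1 in its high nibble, with values above 2047 wrapped to negative two's complement:
.. code-block:: python
import numpy as np
# Samples 100 and -100 as 12-bit two's complement: 0x064 and 0xF9C.
b = np.array([0x64, 0xF0, 0x9C], dtype='uint8').astype('int16')
s0 = b[0] + 256 * (b[1] & 0x0F)         # 0x064 = 100
s1 = b[2] + 256 * ((b[1] >> 4) & 0x0F)  # 0xF9C = 3996
s1 = s1 - 4096 if s1 > 2047 else s1     # apply the sign wrap -> -100
print(s0, s1)                           # 100 -100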
|
def _skew_sig(sig, skew, n_sig, read_len, fmt, nan_replace, samps_per_frame=None):
|
if max(skew)><NUM_LIT:0>:<EOL><INDENT>if isinstance(sig, list):<EOL><INDENT>for ch in range(n_sig):<EOL><INDENT>if skew[ch]><NUM_LIT:0>:<EOL><INDENT>sig[ch][:read_len*samps_per_frame[ch]] = sig[ch][skew[ch]*samps_per_frame[ch]:]<EOL><DEDENT><DEDENT>for ch in range(n_sig):<EOL><INDENT>sig[ch] = sig[ch][:read_len*samps_per_frame[ch]]<EOL><DEDENT>for ch in range(n_sig):<EOL><INDENT>if nan_replace[ch]><NUM_LIT:0>:<EOL><INDENT>sig[ch][-nan_replace[ch]:] = _digi_nan(fmt)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>for ch in range(n_sig):<EOL><INDENT>if skew[ch]><NUM_LIT:0>:<EOL><INDENT>sig[:read_len, ch] = sig[skew[ch]:, ch]<EOL><DEDENT><DEDENT>sig = sig[:read_len, :]<EOL>for ch in range(n_sig):<EOL><INDENT>if nan_replace[ch]><NUM_LIT:0>:<EOL><INDENT>sig[-nan_replace[ch]:, ch] = _digi_nan(fmt)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return sig<EOL>
|
Skew the signal, insert nans and shave off end of array if needed.
Parameters
----------
sig : numpy array
The original signal
skew : list
List of samples to skew for each signal
n_sig : int
The number of signals
Notes
-----
`fmt` is just for the correct nan value.
`samps_per_frame` is only used for skewing expanded signals.
|
f10209:m6
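A one-channel sketch of the uniform (2d) branch: the channel is shifted earlier by its skew and the array is then trimmed to the requested length (the nan back-fill only applies when the skew wants samples beyond the file):
.. code-block:: python
import numpy as np
sig = np.arange(10.0).reshape(-1, 1)  # 10 samples read, read_len = 8
read_len, skew = 8, 2
sig[:read_len, 0] = sig[skew:, 0]     # shift channel 0 earlier by 2
sig = sig[:read_len, :]
print(sig.ravel())                    # [2. 3. 4. 5. 6. 7. 8. 9.]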
|
def _check_sig_dims(sig, read_len, n_sig, samps_per_frame):
|
if isinstance(sig, np.ndarray):<EOL><INDENT>if sig.shape != (read_len, n_sig):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if len(sig) != n_sig:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>for ch in range(n_sig):<EOL><INDENT>if len(sig[ch]) != samps_per_frame[ch] * read_len:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>
|
Integrity check of a signal's shape after reading.
|
f10209:m7
|
def _digi_bounds(fmt):
|
if isinstance(fmt, list):<EOL><INDENT>return [_digi_bounds(f) for f in fmt]<EOL><DEDENT>if fmt == '<STR_LIT>':<EOL><INDENT>return (-<NUM_LIT>, <NUM_LIT>)<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return (-<NUM_LIT>, <NUM_LIT>)<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return (-<NUM_LIT>, <NUM_LIT>)<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return (-<NUM_LIT>, <NUM_LIT>)<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return (-<NUM_LIT>, <NUM_LIT>)<EOL><DEDENT>
|
Return min and max digital values for each format type.
Accepts lists.
Parameters
----------
fmt : str, or list
The wfdb dat format, or a list of them.
|
f10209:m8
|
def _digi_nan(fmt):
|
if isinstance(fmt, list):<EOL><INDENT>return [_digi_nan(f) for f in fmt]<EOL><DEDENT>if fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>if fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>if fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>return -<NUM_LIT><EOL><DEDENT>
|
Return the wfdb digital value used to store nan for the format type.
Parameters
----------
fmt : str, or list
The wfdb dat format, or a list of them.
|
f10209:m9
|
def est_res(signals):
|
res_levels = np.power(<NUM_LIT:2>, np.arange(<NUM_LIT:0>, <NUM_LIT>))<EOL>if isinstance(signals, list):<EOL><INDENT>n_sig = len(signals)<EOL><DEDENT>else:<EOL><INDENT>if signals.ndim ==<NUM_LIT:1>:<EOL><INDENT>n_sig = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>n_sig = signals.shape[<NUM_LIT:1>]<EOL><DEDENT><DEDENT>res = []<EOL>for ch in range(n_sig):<EOL><INDENT>if isinstance(signals, list):<EOL><INDENT>sorted_sig = np.sort(np.unique(signals[ch]))<EOL><DEDENT>else:<EOL><INDENT>if signals.ndim == <NUM_LIT:1>:<EOL><INDENT>sorted_sig = np.sort(np.unique(signals))<EOL><DEDENT>else:<EOL><INDENT>sorted_sig = np.sort(np.unique(signals[:,ch]))<EOL><DEDENT><DEDENT>min_inc = min(np.diff(sorted_sig))<EOL>if min_inc == <NUM_LIT:0>:<EOL><INDENT>res.append(<NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>nlevels = <NUM_LIT:1> + (sorted_sig[-<NUM_LIT:1>]-sorted_sig[<NUM_LIT:0>]) / min_inc<EOL>if nlevels >= res_levels[-<NUM_LIT:1>]:<EOL><INDENT>res.append(<NUM_LIT:32>)<EOL><DEDENT>else:<EOL><INDENT>res.append(np.where(res_levels>=nlevels)[<NUM_LIT:0>][<NUM_LIT:0>])<EOL><DEDENT><DEDENT><DEDENT>return res<EOL>
|
Estimate the resolution of each signal in a multi-channel signal in
bits. Maximum of 32 bits.
Parameters
----------
signals : numpy array, or list
A 2d numpy array representing a uniform multichannel signal, or
a list of 1d numpy arrays representing multiple channels of
signals with different numbers of samples per frame.
Returns
-------
bit_res : list
A list of estimated integer resolutions for each channel
|
f10209:m10
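For example, a channel whose unique values form four evenly spaced levels spans 2**2 quantization levels, so the estimated resolution is 2 bits:
.. code-block:: python
import numpy as np
sig = np.array([0.0, 0.5, 1.0, 1.5, 0.5, 1.0])
levels = np.sort(np.unique(sig))  # [0. , 0.5, 1. , 1.5]
n_levels = 1 + (levels[-1] - levels[0]) / min(np.diff(levels))  # 4.0
print(int(np.where(np.power(2, np.arange(0, 33)) >= n_levels)[0][0]))  # 2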
|
def _wfdb_fmt(bit_res, single_fmt=True):
|
if isinstance(bit_res, list):<EOL><INDENT>if single_fmt:<EOL><INDENT>bit_res = [max(bit_res)] * len(bit_res)<EOL><DEDENT>return [_wfdb_fmt(r) for r in bit_res]<EOL><DEDENT>if bit_res <= <NUM_LIT:8>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif bit_res <= <NUM_LIT:12>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif bit_res <= <NUM_LIT:16>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>elif bit_res <= <NUM_LIT>:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>
|
Return the most suitable wfdb format(s) to use given signal
resolutions.
Parameters
----------
bit_res : int, or list
The resolution of the signal, or a list of resolutions, in bits.
single_fmt : bool, optional
Whether to return the format for the maximum resolution signal.
Returns
-------
fmt : str or list
The most suitable wfdb format(s) used to encode the signal(s).
|
f10209:m11
|
def _fmt_res(fmt, max_res=False):
|
if isinstance(fmt, list):<EOL><INDENT>if max_res:<EOL><INDENT>bit_res = np.max([_fmt_res(f) for f in fmt if f is not None])<EOL><DEDENT>else:<EOL><INDENT>bit_res = [_fmt_res(f) for f in fmt]<EOL><DEDENT>return bit_res<EOL><DEDENT>return BIT_RES[fmt]<EOL>
|
Return the resolution of the WFDB dat format(s). Uses the BIT_RES
dictionary, but accepts lists and other options.
Parameters
----------
fmt : str
The wfdb format. Can be a list of valid fmts. If it is a list,
and `max_res` is True, the list may contain None.
max_res : bool, optional
If given a list of fmts, whether to return the highest
resolution.
Returns
-------
bit_res : int, or list
The resolution(s) of the dat format(s) in bits.
|
f10209:m12
|
def _np_dtype(bit_res, discrete):
|
bit_res = min(bit_res, <NUM_LIT:64>)<EOL>for np_res in [<NUM_LIT:8>, <NUM_LIT:16>, <NUM_LIT:32>, <NUM_LIT:64>]:<EOL><INDENT>if bit_res <= np_res:<EOL><INDENT>break<EOL><DEDENT><DEDENT>if discrete is True:<EOL><INDENT>return '<STR_LIT:int>' + str(np_res)<EOL><DEDENT>else:<EOL><INDENT>return '<STR_LIT:float>' + str(max(np_res, <NUM_LIT:16>))<EOL><DEDENT>
|
Given the bit resolution of a signal, return the minimum numpy dtype
used to store it.
Parameters
----------
bit_res : int
The bit resolution.
discrete : bool
Whether the dtype is to be int or float.
Returns
-------
dtype : str
String numpy dtype used to store the signal of the given
resolution
|
f10209:m13
|
def wr_dat_file(file_name, fmt, d_signal, byte_offset, expanded=False,<EOL>e_d_signal=None, samps_per_frame=None, write_dir='<STR_LIT>'):
|
<EOL>if expanded:<EOL><INDENT>n_sig = len(e_d_signal)<EOL>sig_len = int(len(e_d_signal[<NUM_LIT:0>])/samps_per_frame[<NUM_LIT:0>])<EOL>d_signal = np.zeros((sig_len, sum(samps_per_frame)), dtype = '<STR_LIT>')<EOL>expand_ch = <NUM_LIT:0><EOL>for ch in range(n_sig):<EOL><INDENT>spf = samps_per_frame[ch]<EOL>for framenum in range(spf):<EOL><INDENT>d_signal[:, expand_ch] = e_d_signal[ch][framenum::spf]<EOL>expand_ch = expand_ch + <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>n_sig = d_signal.shape[<NUM_LIT:1>]<EOL>if fmt == '<STR_LIT>':<EOL><INDENT>d_signal = d_signal + <NUM_LIT><EOL>d_signal = d_signal.reshape(-<NUM_LIT:1>)<EOL>b_write = d_signal.astype('<STR_LIT>')<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>d_signal[d_signal<<NUM_LIT:0>] = d_signal[d_signal<<NUM_LIT:0>] + <NUM_LIT><EOL>d_signal = d_signal.reshape(-<NUM_LIT:1>)<EOL>n_samp = len(d_signal)<EOL>processn_samp = n_samp<EOL>if processn_samp % <NUM_LIT:2>:<EOL><INDENT>d_signal = np.concatenate([d_signal, np.array([<NUM_LIT:0>])])<EOL>processn_samp +=<NUM_LIT:1><EOL><DEDENT>b_write = np.zeros([int(<NUM_LIT>*processn_samp)], dtype = '<STR_LIT>')<EOL>b_write[<NUM_LIT:0>::<NUM_LIT:3>] = d_signal[<NUM_LIT:0>::<NUM_LIT:2>] & <NUM_LIT:255><EOL>b_write[<NUM_LIT:1>::<NUM_LIT:3>] = ((d_signal[<NUM_LIT:0>::<NUM_LIT:2>] & <NUM_LIT>) >> <NUM_LIT:8>) + ((d_signal[<NUM_LIT:1>::<NUM_LIT:2>] & <NUM_LIT>) >> <NUM_LIT:4>)<EOL>b_write[<NUM_LIT:2>::<NUM_LIT:3>] = d_signal[<NUM_LIT:1>::<NUM_LIT:2>] & <NUM_LIT:255><EOL>if n_samp % <NUM_LIT:2>:<EOL><INDENT>b_write = b_write[:-<NUM_LIT:1>]<EOL><DEDENT><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>d_signal[d_signal<<NUM_LIT:0>] = d_signal[d_signal<<NUM_LIT:0>] + <NUM_LIT><EOL>b1 = d_signal & [<NUM_LIT:255>]*n_sig<EOL>b2 = ( d_signal & [<NUM_LIT>]*n_sig ) >> <NUM_LIT:8><EOL>b1 = b1.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b2 = b2.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b_write = np.concatenate((b1, b2), axis=<NUM_LIT:1>)<EOL>b_write = b_write.reshape((<NUM_LIT:1>,-<NUM_LIT:1>))[<NUM_LIT:0>]<EOL>b_write = b_write.astype('<STR_LIT>')<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>d_signal[d_signal<<NUM_LIT:0>] = d_signal[d_signal<<NUM_LIT:0>] + <NUM_LIT><EOL>b1 = d_signal & [<NUM_LIT:255>]*n_sig<EOL>b2 = ( d_signal & [<NUM_LIT>]*n_sig ) >> <NUM_LIT:8><EOL>b3 = ( d_signal & [<NUM_LIT>]*n_sig ) >> <NUM_LIT:16><EOL>b1 = b1.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b2 = b2.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b3 = b3.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b_write = np.concatenate((b1, b2, b3), axis=<NUM_LIT:1>)<EOL>b_write = b_write.reshape((<NUM_LIT:1>,-<NUM_LIT:1>))[<NUM_LIT:0>]<EOL>b_write = b_write.astype('<STR_LIT>')<EOL><DEDENT>elif fmt == '<STR_LIT>':<EOL><INDENT>d_signal[d_signal<<NUM_LIT:0>] = d_signal[d_signal<<NUM_LIT:0>] + <NUM_LIT><EOL>b1 = d_signal & [<NUM_LIT:255>]*n_sig<EOL>b2 = ( d_signal & [<NUM_LIT>]*n_sig ) >> <NUM_LIT:8><EOL>b3 = ( d_signal & [<NUM_LIT>]*n_sig ) >> <NUM_LIT:16><EOL>b4 = ( d_signal & [<NUM_LIT>]*n_sig ) >> <NUM_LIT><EOL>b1 = b1.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b2 = b2.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b3 = b3.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b4 = b4.reshape((-<NUM_LIT:1>, <NUM_LIT:1>))<EOL>b_write = np.concatenate((b1, b2, b3, b4), axis=<NUM_LIT:1>)<EOL>b_write = b_write.reshape((<NUM_LIT:1>,-<NUM_LIT:1>))[<NUM_LIT:0>]<EOL>b_write = b_write.astype('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if byte_offset is not None and byte_offset><NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>'+file_name+'<STR_LIT>'+str(byte_offset)+'<STR_LIT>')<EOL>b_write = np.append(np.zeros(byte_offset, dtype = '<STR_LIT>'), b_write)<EOL><DEDENT>with open(os.path.join(write_dir, file_name),'<STR_LIT:wb>') as f:<EOL><INDENT>b_write.tofile(f)<EOL><DEDENT>
|
Write a dat file. All bytes are written one at a time to avoid
endianness issues.
|
f10209:m14
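The byte-interleaving for fmt 16 can be sketched in isolation, assuming the elided masks are 255 and 65280 (little-endian 16-bit samples with negatives wrapped by adding 65536):
.. code-block:: python
import numpy as np
d = np.array([-1, 258])
d[d < 0] += 65536           # -1 -> 65535
b1 = d & 255                # low bytes:  [255, 2]
b2 = (d & 65280) >> 8       # high bytes: [255, 1]
b_write = np.stack([b1, b2], axis=1).reshape(-1).astype('uint8')
print(b_write)              # [255 255   2   1]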
|
def describe_list_indices(full_list):
|
unique_elements = []<EOL>element_indices = {}<EOL>for i in range(len(full_list)):<EOL><INDENT>item = full_list[i]<EOL>if item not in unique_elements:<EOL><INDENT>unique_elements.append(item)<EOL>element_indices[item] = [i]<EOL><DEDENT>else:<EOL><INDENT>element_indices[item].append(i)<EOL><DEDENT><DEDENT>return unique_elements, element_indices<EOL>
|
Parameters
----------
full_list : list
The list of items to order and index.
Returns
-------
unique_elements : list
A list of the unique elements of the list, in the order in which
they first appear.
element_indices : dict
A dictionary of lists for each unique element, giving all the
indices in which they appear in the original list.
|
f10209:m15
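Usage sketch, as applied to the file_name list in `_rd_segment` (assumes the function is in scope):
.. code-block:: python
names = ['a.dat', 'a.dat', 'b.dat', 'a.dat']
unique_elements, element_indices = describe_list_indices(names)
print(unique_elements)   # ['a.dat', 'b.dat']
print(element_indices)   # {'a.dat': [0, 1, 3], 'b.dat': [2]}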
|
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
|
if pb_dir is None:<EOL><INDENT>file_size = os.path.getsize(os.path.join(dir_name, file_name))<EOL><DEDENT>else:<EOL><INDENT>file_size = download._remote_file_size(file_name=file_name,<EOL>pb_dir=pb_dir)<EOL><DEDENT>sig_len = int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig))<EOL>return sig_len<EOL>
|
Infer the length of a signal from a dat file.
Parameters
----------
file_name : str
Name of the dat file
fmt : str
WFDB fmt of the dat file
n_sig : int
Number of signals contained in the dat file
Notes
-----
sig_len * n_sig * bytes_per_sample == file_size
|
f10209:m16
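The inference is plain arithmetic on the identity in the notes; e.g. a two-channel file in a 2-byte/sample format occupying 4000 bytes holds 1000 frames:
.. code-block:: python
file_size, n_sig, bytes_per_sample = 4000, 2, 2
sig_len = file_size // (bytes_per_sample * n_sig)
print(sig_len)   # 1000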
|
def downround(x, base):
|
return base * math.floor(float(x)/base)<EOL>
|
Round <x> down to nearest <base>
|
f10209:m17
|
def upround(x, base):
|
return base * math.ceil(float(x)/base)<EOL>
|
Round <x> up to nearest <base>
|
f10209:m18
|
def check_sig_cohesion(self, write_fields, expanded):
|
<EOL>if expanded:<EOL><INDENT>spf = self.samps_per_frame<EOL>for ch in range(len(spf)):<EOL><INDENT>if spf[ch] is None:<EOL><INDENT>spf[ch] = <NUM_LIT:1><EOL><DEDENT><DEDENT>if self.n_sig != len(self.e_d_signal):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>for ch in range(self.n_sig):<EOL><INDENT>if len(self.e_d_signal[ch]) != spf[ch]*self.sig_len:<EOL><INDENT>raise ValueError('<STR_LIT>'+str(ch)+'<STR_LIT>'+str(ch)+'<STR_LIT>')<EOL><DEDENT><DEDENT>for ch in range(self.n_sig):<EOL><INDENT>fmt = self.fmt[ch]<EOL>dmin, dmax = _digi_bounds(self.fmt[ch])<EOL>chmin = min(self.e_d_signal[ch])<EOL>chmax = max(self.e_d_signal[ch])<EOL>if (chmin < dmin) or (chmax > dmax):<EOL><INDENT>raise IndexError("<STR_LIT>"+str(ch)+"<STR_LIT>"+str(dmin)+"<STR_LIT:U+002CU+0020>"+str(dmax)+"<STR_LIT>"+str(fmt))<EOL><DEDENT><DEDENT>if self.n_sig > <NUM_LIT:0>:<EOL><INDENT>if '<STR_LIT>' in write_fields:<EOL><INDENT>realchecksum = self.calc_checksum(expanded)<EOL>if self.checksum != realchecksum:<EOL><INDENT>print("<STR_LIT>", realchecksum)<EOL>raise ValueError("<STR_LIT>")<EOL><DEDENT><DEDENT>if '<STR_LIT>' in write_fields:<EOL><INDENT>realinit_value = [self.e_d_signal[ch][<NUM_LIT:0>] for ch in range(self.n_sig)]<EOL>if self.init_value != realinit_value:<EOL><INDENT>print("<STR_LIT>", realinit_value)<EOL>raise ValueError("<STR_LIT>")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if (self.sig_len, self.n_sig) != self.d_signal.shape:<EOL><INDENT>print('<STR_LIT>', self.sig_len)<EOL>print('<STR_LIT>', self.n_sig)<EOL>print('<STR_LIT>', self.d_signal.shape)<EOL>raise ValueError('<STR_LIT>')<EOL><DEDENT>for ch in range(self.n_sig):<EOL><INDENT>fmt = self.fmt[ch]<EOL>dmin, dmax = _digi_bounds(self.fmt[ch])<EOL>chmin = min(self.d_signal[:,ch])<EOL>chmax = max(self.d_signal[:,ch])<EOL>if (chmin < dmin) or (chmax > dmax):<EOL><INDENT>raise IndexError("<STR_LIT>"+str(ch)+"<STR_LIT>"+str(dmin)+"<STR_LIT:U+002CU+0020>"+str(dmax)+"<STR_LIT>"+str(fmt))<EOL><DEDENT><DEDENT>if self.n_sig><NUM_LIT:0>:<EOL><INDENT>if '<STR_LIT>' in write_fields:<EOL><INDENT>realchecksum = self.calc_checksum()<EOL>if self.checksum != realchecksum:<EOL><INDENT>print("<STR_LIT>", realchecksum)<EOL>raise ValueError("<STR_LIT>")<EOL><DEDENT><DEDENT>if '<STR_LIT>' in write_fields:<EOL><INDENT>realinit_value = list(self.d_signal[<NUM_LIT:0>,:])<EOL>if self.init_value != realinit_value:<EOL><INDENT>print("<STR_LIT>", realinit_value)<EOL>raise ValueError("<STR_LIT>")<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
|
Check the cohesion of the d_signal/e_d_signal field with the other
fields used to write the record
|
f10209:c0:m1
|
def set_p_features(self, do_dac=False, expanded=False):
|
if expanded:<EOL><INDENT>if do_dac:<EOL><INDENT>self.check_field('<STR_LIT>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.e_p_signal = self.dac(expanded)<EOL><DEDENT>self.check_field('<STR_LIT>', channels = '<STR_LIT:all>')<EOL>self.sig_len = int(len(self.e_p_signal[<NUM_LIT:0>])/self.samps_per_frame[<NUM_LIT:0>])<EOL>self.n_sig = len(self.e_p_signal)<EOL><DEDENT>else:<EOL><INDENT>if do_dac:<EOL><INDENT>self.check_field('<STR_LIT>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.p_signal = self.dac()<EOL><DEDENT>self.check_field('<STR_LIT>')<EOL>self.sig_len = self.p_signal.shape[<NUM_LIT:0>]<EOL>self.n_sig = self.p_signal.shape[<NUM_LIT:1>]<EOL><DEDENT>
|
Use properties of the physical signal field to set the following
features: n_sig, sig_len.
Parameters
----------
do_dac : bool
Whether to use the digital signal field to perform dac
conversion to get the physical signal field beforehand.
expanded : bool
Whether to use the `e_p_signal` or `p_signal` field. If
True, the `samps_per_frame` attribute is also required.
Notes
-----
Regarding dac conversion:
- fmt, gain, and baseline must all be set in order to perform
dac.
- Unlike with adc, there is no way to infer these fields.
- Using the fmt, gain and baseline fields, dac is performed,
and (e_)p_signal is set.
*Developer note: This function will likely be used infrequently.
The set_d_features function seems far more useful.
|
f10209:c0:m2
|
def set_d_features(self, do_adc=False, single_fmt=True, expanded=False):
|
if expanded:<EOL><INDENT>if do_adc:<EOL><INDENT>self.check_field('<STR_LIT>', channels='<STR_LIT:all>')<EOL>if self.fmt is None:<EOL><INDENT>if self.adc_gain is not None or self.baseline is not None:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>res = est_res(self.e_p_signal)<EOL>self.fmt = _wfdb_fmt(res, single_fmt)<EOL><DEDENT>else:<EOL><INDENT>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>if self.adc_gain is None and self.baseline is None:<EOL><INDENT>self.adc_gain, self.baseline = self.calc_adc_params()<EOL><DEDENT>elif (self.adc_gain is None) ^ (self.baseline is None):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.d_signal = self.adc(expanded)<EOL><DEDENT>self.check_field('<STR_LIT>', channels='<STR_LIT:all>')<EOL>self.sig_len = int(len(self.e_d_signal[<NUM_LIT:0>])/self.samps_per_frame[<NUM_LIT:0>])<EOL>self.n_sig = len(self.e_d_signal)<EOL>self.init_value = [sig[<NUM_LIT:0>] for sig in self.e_d_signal]<EOL>self.checksum = self.calc_checksum(expanded)<EOL><DEDENT>else:<EOL><INDENT>if do_adc:<EOL><INDENT>self.check_field('<STR_LIT>')<EOL>if self.fmt is None:<EOL><INDENT>if self.adc_gain is not None or self.baseline is not None:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>res = est_res(self.p_signal)<EOL>self.fmt = _wfdb_fmt(res, single_fmt)<EOL>self.adc_gain, self.baseline = self.calc_adc_params()<EOL><DEDENT>else:<EOL><INDENT>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>if self.adc_gain is None and self.baseline is None:<EOL><INDENT>self.adc_gain, self.baseline = self.calc_adc_params()<EOL><DEDENT>elif (self.adc_gain is None) ^ (self.baseline is None):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.check_field('<STR_LIT>', '<STR_LIT:all>')<EOL>self.d_signal = self.adc()<EOL><DEDENT>self.check_field('<STR_LIT>')<EOL>self.sig_len = self.d_signal.shape[<NUM_LIT:0>]<EOL>self.n_sig = self.d_signal.shape[<NUM_LIT:1>]<EOL>self.init_value = list(self.d_signal[<NUM_LIT:0>,:])<EOL>self.checksum = self.calc_checksum()<EOL><DEDENT>
|
Use properties of the digital signal field to set the following
features: n_sig, sig_len, init_value, checksum, and possibly
*(fmt, adc_gain, baseline).
Parameters
----------
do_adc : bool
Whether to use the physical signal field to perform adc
conversion to get the digital signal field beforehand.
single_fmt : bool
Whether to use a single digital format during adc, if it is
performed.
expanded : bool
Whether to use the `e_d_signal` or `d_signal` field.
Notes
-----
Regarding adc conversion:
- If fmt is unset:
- Neither adc_gain nor baseline may be set. If the digital values
used to store the signal are known, then the file format should
also be known.
- The most appropriate fmt for the signals will be calculated and the
`fmt` attribute will be set. Given that neither `adc_gain` nor
`baseline` is allowed to be set, optimal values for those fields will
then be calculated and set as well.
- If fmt is set:
- If both adc_gain and baseline are unset, optimal values for those
fields will be calculated and the fields will be set.
- If both adc_gain and baseline are set, the function will continue.
- If only one of adc_gain and baseline is set, this function will
raise an error. It makes no sense to know only one of those fields.
- ADC will occur after valid values for fmt, adc_gain, and baseline are
present, using all three fields.
|
f10209:c0:m3
|
def adc(self, expanded=False, inplace=False):
|
<EOL>d_nans = _digi_nan(self.fmt)<EOL>intdtype = '<STR_LIT>'<EOL>if inplace:<EOL><INDENT>if expanded:<EOL><INDENT>for ch in range(self.n_sig):<EOL><INDENT>ch_nanlocs = np.isnan(self.e_p_signal[ch])<EOL>np.multiply(self.e_p_signal[ch], self.adc_gain[ch],<EOL>self.e_p_signal[ch])<EOL>np.add(self.e_p_signal[ch], self.baseline[ch],<EOL>self.e_p_signal[ch])<EOL>self.e_p_signal[ch] = self.e_p_signal[ch].astype(intdtype,<EOL>copy=False)<EOL>self.e_p_signal[ch][ch_nanlocs] = d_nans[ch]<EOL><DEDENT>self.e_d_signal = self.e_p_signal<EOL>self.e_p_signal = None<EOL><DEDENT>else:<EOL><INDENT>nanlocs = np.isnan(self.p_signal)<EOL>np.multiply(self.p_signal, self.adc_gain, self.p_signal)<EOL>np.add(self.p_signal, self.baseline, self.p_signal)<EOL>self.p_signal = self.p_signal.astype(intdtype, copy=False)<EOL>for ch in range(self.p_signal.shape[<NUM_LIT:1>]):<EOL><INDENT>if nanlocs[:, ch].any():<EOL><INDENT>self.p_signal[nanlocs[:, ch], ch] = d_nans[ch]<EOL><DEDENT><DEDENT>self.d_signal = self.p_signal<EOL>self.p_signal = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if expanded:<EOL><INDENT>d_signal = []<EOL>for ch in range(self.n_sig):<EOL><INDENT>ch_nanlocs = np.isnan(self.e_p_signal[ch])<EOL>ch_d_signal = self.e_p_signal[ch].copy()<EOL>np.multiply(ch_d_signal, self.adc_gain[ch], ch_d_signal)<EOL>np.add(ch_d_signal, self.baseline[ch], ch_d_signal)<EOL>ch_d_signal = ch_d_signal.astype(intdtype, copy=False)<EOL>ch_d_signal[ch_nanlocs] = d_nans[ch]<EOL>d_signal.append(ch_d_signal)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>nanlocs = np.isnan(self.p_signal)<EOL>d_signal = self.p_signal.copy()<EOL>np.multiply(d_signal, self.adc_gain, d_signal)<EOL>np.add(d_signal, self.baseline, d_signal)<EOL>d_signal = d_signal.astype(intdtype, copy=False)<EOL>if nanlocs.any():<EOL><INDENT>for ch in range(d_signal.shape[<NUM_LIT:1>]):<EOL><INDENT>if nanlocs[:,ch].any():<EOL><INDENT>d_signal[nanlocs[:,ch],ch] = d_nans[ch]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>return d_signal<EOL><DEDENT>
|
Performs analogue to digital conversion of the physical signal stored
in p_signal if expanded is False, or e_p_signal if expanded is True.
The p_signal/e_p_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the adc will be performed inplace on the variable,
the d_signal/e_d_signal attribute will be set, and the
p_signal/e_p_signal field will be set to None.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_p_signal` attribute (True) or
the `p_signal` attribute (False).
inplace : bool, optional
Whether to automatically set the object's corresponding
digital signal attribute and set the physical
signal attribute to None (True), or to return the converted
signal as a separate variable without changing the original
physical signal attribute (False).
Returns
-------
d_signal : numpy array, optional
The digital conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Examples
--------
>>> import wfdb
>>> record = wfdb.rdsamp('sample-data/100')
>>> d_signal = record.adc()
>>> record.adc(inplace=True)
>>> record.dac(inplace=True)
|
f10209:c0:m4
|
def dac(self, expanded=False, return_res=<NUM_LIT:64>, inplace=False):
|
<EOL>d_nans = _digi_nan(self.fmt)<EOL>if return_res == <NUM_LIT:64>:<EOL><INDENT>floatdtype = '<STR_LIT>'<EOL><DEDENT>elif return_res == <NUM_LIT:32>:<EOL><INDENT>floatdtype = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>floatdtype = '<STR_LIT>'<EOL><DEDENT>if inplace:<EOL><INDENT>if expanded:<EOL><INDENT>for ch in range(self.n_sig):<EOL><INDENT>ch_nanlocs = self.e_d_signal[ch] == d_nans[ch]<EOL>self.e_d_signal[ch] = self.e_d_signal[ch].astype(floatdtype, copy=False)<EOL>np.subtract(self.e_d_signal[ch], self.baseline[ch], self.e_d_signal[ch])<EOL>np.divide(self.e_d_signal[ch], self.adc_gain[ch], self.e_d_signal[ch])<EOL>self.e_d_signal[ch][ch_nanlocs] = np.nan<EOL><DEDENT>self.e_p_signal = self.e_d_signal<EOL>self.e_d_signal = None<EOL><DEDENT>else:<EOL><INDENT>nanlocs = self.d_signal == d_nans<EOL>self.d_signal = self.d_signal.astype(floatdtype, copy=False)<EOL>np.subtract(self.d_signal, self.baseline, self.d_signal)<EOL>np.divide(self.d_signal, self.adc_gain, self.d_signal)<EOL>self.d_signal[nanlocs] = np.nan<EOL>self.p_signal = self.d_signal<EOL>self.d_signal = None<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if expanded:<EOL><INDENT>p_signal = []<EOL>for ch in range(self.n_sig):<EOL><INDENT>ch_nanlocs = self.e_d_signal[ch] == d_nans[ch]<EOL>ch_p_signal = self.e_d_signal[ch].astype(floatdtype, copy=False)<EOL>np.subtract(ch_p_signal, self.baseline[ch], ch_p_signal)<EOL>np.divide(ch_p_signal, self.adc_gain[ch], ch_p_signal)<EOL>ch_p_signal[ch_nanlocs] = np.nan<EOL>p_signal.append(ch_p_signal)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>nanlocs = self.d_signal == d_nans<EOL>p_signal = self.d_signal.astype(floatdtype, copy=False)<EOL>np.subtract(p_signal, self.baseline, p_signal)<EOL>np.divide(p_signal, self.adc_gain, p_signal)<EOL>p_signal[nanlocs] = np.nan<EOL><DEDENT>return p_signal<EOL><DEDENT>
|
Performs the digital to analogue conversion of the signal stored
in `d_signal` if expanded is False, or `e_d_signal` if expanded
is True.
The d_signal/e_d_signal, fmt, gain, and baseline fields must all be
valid.
If inplace is True, the dac will be performed inplace on the
variable, the p_signal/e_p_signal attribute will be set, and the
d_signal/e_d_signal field will be set to None.
Parameters
----------
expanded : bool, optional
Whether to transform the `e_d_signal attribute` (True) or
the `d_signal` attribute (False).
inplace : bool, optional
Whether to automatically set the object's corresponding
physical signal attribute and set the digital signal
attribute to None (True), or to return the converted
signal as a separate variable without changing the original
digital signal attribute (False).
Returns
-------
p_signal : numpy array, optional
The physical conversion of the signal. Either a 2d numpy
array or a list of 1d numpy arrays.
Examples
--------
>>> import wfdb
>>> record = wfdb.rdsamp('sample-data/100', physical=False)
>>> p_signal = record.dac()
>>> record.dac(inplace=True)
>>> record.adc(inplace=True)
|
f10209:c0:m5
|
def calc_adc_params(self):
|
adc_gains = []<EOL>baselines = []<EOL>if np.where(np.isinf(self.p_signal))[<NUM_LIT:0>].size:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>minvals = np.nanmin(self.p_signal, axis=<NUM_LIT:0>)<EOL>maxvals = np.nanmax(self.p_signal, axis=<NUM_LIT:0>)<EOL>for ch in range(np.shape(self.p_signal)[<NUM_LIT:1>]):<EOL><INDENT>dmin, dmax = _digi_bounds(self.fmt[ch])<EOL>dmin = dmin + <NUM_LIT:1><EOL>pmin = minvals[ch]<EOL>pmax = maxvals[ch]<EOL>if np.isnan(pmin):<EOL><INDENT>adc_gain = <NUM_LIT:1><EOL>baseline = <NUM_LIT:1><EOL><DEDENT>elif pmin == pmax:<EOL><INDENT>if pmin == <NUM_LIT:0>:<EOL><INDENT>adc_gain = <NUM_LIT:1><EOL>baseline = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>adc_gain = abs(<NUM_LIT:1> / pmin)<EOL>baseline = <NUM_LIT:0><EOL><DEDENT><DEDENT>else:<EOL><INDENT>adc_gain = (dmax-dmin) / (pmax-pmin)<EOL>baseline = dmin - adc_gain*pmin<EOL>if pmin > <NUM_LIT:0>:<EOL><INDENT>baseline = int(np.ceil(baseline))<EOL><DEDENT>else:<EOL><INDENT>baseline = int(np.floor(baseline))<EOL><DEDENT>if dmin != baseline:<EOL><INDENT>adc_gain = (dmin - baseline) / pmin<EOL><DEDENT><DEDENT>if baseline > MAX_I32:<EOL><INDENT>adc_gain = (MAX_I32 - dmin) / abs(pmin)<EOL>baseline = MAX_I32<EOL><DEDENT>elif baseline < MIN_I32:<EOL><INDENT>adc_gain = (dmax - MIN_I32) / pmax<EOL>baseline = MIN_I32<EOL><DEDENT>adc_gains.append(adc_gain)<EOL>baselines.append(baseline)<EOL><DEDENT>return (adc_gains, baselines)<EOL>
|
Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel.
Notes
-----
This is the mapping equation:
`(digital - baseline) / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648
This function does carefully deal with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values.
|
f10209:c0:m6
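A worked single-channel example of the fitting logic, assuming 16-bit digital bounds of (-32768, 32767) with the minimum value reserved for nan (hence the dmin + 1):
.. code-block:: python
import numpy as np
dmin, dmax = -32768 + 1, 32767
pmin, pmax = -5.0, 5.0
adc_gain = (dmax - dmin) / (pmax - pmin)
baseline = int(np.floor(dmin - adc_gain * pmin))  # floor, since pmin < 0
if dmin != baseline:
    adc_gain = (dmin - baseline) / pmin           # re-fit so pmin maps to dmin
print(adc_gain, baseline)                         # 6553.4 0
# Check the mapping: -5.0 * 6553.4 + 0 == -32767 == dmin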
|
def calc_checksum(self, expanded=False):
|
if expanded:<EOL><INDENT>cs = [int(np.sum(self.e_d_signal[ch]) % <NUM_LIT>) for ch in range(self.n_sig)]<EOL><DEDENT>else:<EOL><INDENT>cs = np.sum(self.d_signal, <NUM_LIT:0>) % <NUM_LIT><EOL>cs = [int(c) for c in cs]<EOL><DEDENT>return cs<EOL>
|
Calculate the checksum(s) of the d_signal (expanded=False)
or e_d_signal field (expanded=True)
|
f10209:c0:m8
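The checksum is simply the per-channel sample sum reduced to 16 bits (the elided modulus is assumed to be 65536):
.. code-block:: python
import numpy as np
d_signal = np.array([[1, 40000],
                     [2, 40000]])
print(np.sum(d_signal, 0) % 65536)   # [    3 14464]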
|
def wr_dat_files(self, expanded=False, write_dir='<STR_LIT>'):
|
<EOL>file_names, dat_channels = describe_list_indices(self.file_name)<EOL>DAT_FMTS = {}<EOL>dat_offsets = {}<EOL>for fn in file_names:<EOL><INDENT>DAT_FMTS[fn] = self.fmt[dat_channels[fn][<NUM_LIT:0>]]<EOL>if self.byte_offset is None:<EOL><INDENT>dat_offsets[fn] = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>dat_offsets[fn] = self.byte_offset[dat_channels[fn][<NUM_LIT:0>]]<EOL><DEDENT><DEDENT>if expanded:<EOL><INDENT>for fn in file_names:<EOL><INDENT>wr_dat_file(fn, DAT_FMTS[fn], None , dat_offsets[fn], True,<EOL>[self.e_d_signal[ch] for ch in dat_channels[fn]],<EOL>self.samps_per_frame, write_dir=write_dir)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>dsig = self.d_signal.copy()<EOL>for fn in file_names:<EOL><INDENT>wr_dat_file(fn, DAT_FMTS[fn],<EOL>dsig[:, dat_channels[fn][<NUM_LIT:0>]:dat_channels[fn][-<NUM_LIT:1>]+<NUM_LIT:1>],<EOL>dat_offsets[fn], write_dir=write_dir)<EOL><DEDENT><DEDENT>
|
Write each of the specified dat files
|
f10209:c0:m9
|
def smooth_frames(self, sigtype='<STR_LIT>'):
|
spf = self.samps_per_frame[:]<EOL>for ch in range(len(spf)):<EOL><INDENT>if spf[ch] is None:<EOL><INDENT>spf[ch] = <NUM_LIT:1><EOL><DEDENT><DEDENT>tspf = sum(spf)<EOL>if sigtype == '<STR_LIT>':<EOL><INDENT>n_sig = len(self.e_p_signal)<EOL>sig_len = int(len(self.e_p_signal[<NUM_LIT:0>])/spf[<NUM_LIT:0>])<EOL>signal = np.zeros((sig_len, n_sig), dtype='<STR_LIT>')<EOL>for ch in range(n_sig):<EOL><INDENT>if spf[ch] == <NUM_LIT:1>:<EOL><INDENT>signal[:, ch] = self.e_p_signal[ch]<EOL><DEDENT>else:<EOL><INDENT>for frame in range(spf[ch]):<EOL><INDENT>signal[:, ch] += self.e_p_signal[ch][frame::spf[ch]]<EOL><DEDENT>signal[:, ch] = signal[:, ch] / spf[ch]<EOL><DEDENT><DEDENT><DEDENT>elif sigtype == '<STR_LIT>':<EOL><INDENT>n_sig = len(self.e_d_signal)<EOL>sig_len = int(len(self.e_d_signal[<NUM_LIT:0>])/spf[<NUM_LIT:0>])<EOL>signal = np.zeros((sig_len, n_sig), dtype='<STR_LIT>')<EOL>for ch in range(n_sig):<EOL><INDENT>if spf[ch] == <NUM_LIT:1>:<EOL><INDENT>signal[:, ch] = self.e_d_signal[ch]<EOL><DEDENT>else:<EOL><INDENT>for frame in range(spf[ch]):<EOL><INDENT>signal[:, ch] += self.e_d_signal[ch][frame::spf[ch]]<EOL><DEDENT>signal[:, ch] = signal[:, ch] / spf[ch]<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>return signal<EOL>
|
Convert expanded signals with different samples/frame into
a uniform numpy array.
Parameters
----------
sigtype : str, optional
Specifies whether to smooth the e_p_signal field ('physical'),
or the e_d_signal field ('digital').
|
f10209:c0:m10
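Smoothing sketch for a single channel with 2 samples/frame: the frame-wise mean reduces it to one value per frame:
.. code-block:: python
import numpy as np
e_sig = np.array([1.0, 3.0, 5.0, 7.0])  # frames (1, 3) and (5, 7)
spf = 2
smoothed = sum(e_sig[f::spf] for f in range(spf)) / spf
print(smoothed)                         # [2. 6.]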
|
def set_db_index_url(db_index_url=PB_INDEX_URL):
|
config.db_index_url = db_index_url<EOL>
|
Set the database index url to a custom value, to stream remote
files from another location.
Parameters
----------
db_index_url : str, optional
The desired new database index url. Leave as default to reset
to the physiobank index url.
|
f10210:m0
|
def _remote_file_size(url=None, file_name=None, pb_dir=None):
|
<EOL>if file_name and pb_dir:<EOL><INDENT>url = posixpath.join(config.db_index_url, pb_dir, file_name)<EOL><DEDENT>response = requests.head(url, headers={'<STR_LIT>': '<STR_LIT>'})<EOL>response.raise_for_status()<EOL>remote_file_size = int(response.headers['<STR_LIT>'])<EOL>return remote_file_size<EOL>
|
Get the remote file size in bytes
Parameters
----------
url : str, optional
The full url of the file. Use this option to explicitly
state the full url.
file_name : str, optional
The base file name. Use this argument along with pb_dir if you
want the full url to be constructed.
pb_dir : str, optional
The Physiobank directory of the file. Use this argument along with file_name if
you want the full url to be constructed.
Returns
-------
remote_file_size : int
Size of the file in bytes
|
f10210:m1
|
def _stream_header(file_name, pb_dir):
|
<EOL>url = posixpath.join(config.db_index_url, pb_dir, file_name)<EOL>response = requests.get(url)<EOL>response.raise_for_status()<EOL>filelines = response.content.decode('<STR_LIT>').splitlines()<EOL>header_lines = []<EOL>comment_lines = []<EOL>for line in filelines:<EOL><INDENT>line = str(line.strip())<EOL>if line.startswith('<STR_LIT:#>'):<EOL><INDENT>comment_lines.append(line)<EOL><DEDENT>elif line:<EOL><INDENT>ci = line.find('<STR_LIT:#>')<EOL>if ci > <NUM_LIT:0>:<EOL><INDENT>header_lines.append(line[:ci])<EOL>comment_lines.append(line[ci:])<EOL><DEDENT>else:<EOL><INDENT>header_lines.append(line)<EOL><DEDENT><DEDENT><DEDENT>return (header_lines, comment_lines)<EOL>
|
Stream the lines of a remote header file.
Parameters
----------
file_name : str
The name of the header file to be read.
pb_dir : str
The Physiobank database directory from which to find the
required header file. eg. For file '100.hea' in
'http://physionet.org/physiobank/database/mitdb', pb_dir='mitdb'.
|
f10210:m2
|
def _stream_dat(file_name, pb_dir, byte_count, start_byte, dtype):
|
<EOL>url = posixpath.join(config.db_index_url, pb_dir, file_name)<EOL>end_byte = start_byte + byte_count - <NUM_LIT:1><EOL>headers = {"<STR_LIT>":"<STR_LIT>" % (start_byte, end_byte),<EOL>'<STR_LIT>': '<STR_LIT:*>'}<EOL>response = requests.get(url, headers=headers, stream=True)<EOL>response.raise_for_status()<EOL>sig_data = np.frombuffer(response.content, dtype=dtype)<EOL>return sig_data<EOL>
|
Stream data from a remote dat file, into a 1d numpy array.
Parameters
----------
file_name : str
The name of the dat file to be read.
pb_dir : str
The physiobank directory where the dat file is located.
byte_count : int
The number of bytes to be read.
start_byte : int
The starting byte number to read from.
dtype : str
The numpy dtype to load the data into.
Returns
-------
sig_data : numpy array
The data read from the dat file.
|
f10210:m3
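HTTP byte ranges are inclusive at both ends, which is why the header spans start_byte through start_byte + byte_count - 1:
.. code-block:: python
start_byte, byte_count = 1024, 512
headers = {'Range': 'bytes=%d-%d' % (start_byte, start_byte + byte_count - 1)}
print(headers['Range'])   # bytes=1024-1535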
|
def _stream_annotation(file_name, pb_dir):
|
<EOL>url = posixpath.join(config.db_index_url, pb_dir, file_name)<EOL>response = requests.get(url)<EOL>response.raise_for_status()<EOL>ann_data = np.frombuffer(response.content, dtype=np.dtype('<STR_LIT>'))<EOL>return ann_data<EOL>
|
Stream an entire remote annotation file from physiobank
Parameters
----------
file_name : str
The name of the annotation file to be read.
pb_dir : str
The physiobank directory where the annotation file is located.
|
f10210:m4
|
def get_dbs():
|
url = posixpath.join(config.db_index_url, '<STR_LIT>')<EOL>response = requests.get(url)<EOL>dbs = response.content.decode('<STR_LIT:ascii>').splitlines()<EOL>dbs = [re.sub('<STR_LIT>', '<STR_LIT:\t>', line).split('<STR_LIT:\t>') for line in dbs]<EOL>return dbs<EOL>
|
Get a list of all the Physiobank databases available.
Examples
--------
>>> dbs = get_dbs()
|
f10210:m5
|
def get_record_list(db_dir, records='<STR_LIT:all>'):
|
<EOL>db_url = posixpath.join(config.db_index_url, db_dir)<EOL>if records == '<STR_LIT:all>':<EOL><INDENT>response = requests.get(posixpath.join(db_url, '<STR_LIT>'))<EOL>if response.status_code == <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>' % db_url)<EOL><DEDENT>record_list = response.content.decode('<STR_LIT:ascii>').splitlines()<EOL><DEDENT>else:<EOL><INDENT>record_list = records<EOL><DEDENT>return record_list<EOL>
|
Get a list of records belonging to a database.
Parameters
----------
db_dir : str
The database directory, usually the same as the database slug.
The location to look for a RECORDS file.
records : list, optional
An option used when this function acts as a helper function.
Leave as default 'all' to get all records.
Examples
--------
>>> wfdb.get_record_list('mitdb')
|
f10210:m6
|
def make_local_dirs(dl_dir, dl_inputs, keep_subdirs):
|
<EOL>if not os.path.isdir(dl_dir):<EOL><INDENT>os.makedirs(dl_dir)<EOL>print('<STR_LIT>' % dl_dir)<EOL><DEDENT>if keep_subdirs:<EOL><INDENT>dl_dirs = set([os.path.join(dl_dir, d[<NUM_LIT:1>]) for d in dl_inputs])<EOL>for d in dl_dirs:<EOL><INDENT>if not os.path.isdir(d):<EOL><INDENT>os.makedirs(d)<EOL><DEDENT><DEDENT><DEDENT>return<EOL>
|
Make any required local directories to prepare for downloading
|
f10210:m8
|
def dl_pb_file(inputs):
|
basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs<EOL>url = posixpath.join(config.db_index_url, db, subdir, basefile)<EOL>remote_file_size = _remote_file_size(url)<EOL>if keep_subdirs:<EOL><INDENT>dldir = os.path.join(dl_dir, subdir)<EOL><DEDENT>else:<EOL><INDENT>dldir = dl_dir<EOL><DEDENT>local_file = os.path.join(dldir, basefile)<EOL>if os.path.isfile(local_file):<EOL><INDENT>if overwrite:<EOL><INDENT>dl_full_file(url, local_file)<EOL><DEDENT>else:<EOL><INDENT>local_file_size = os.path.getsize(local_file)<EOL>if local_file_size < remote_file_size:<EOL><INDENT>print('<STR_LIT>' % local_file)<EOL>headers = {"<STR_LIT>": "<STR_LIT>"+str(local_file_size)+"<STR_LIT:->", '<STR_LIT>': '<STR_LIT:*>'}<EOL>r = requests.get(url, headers=headers, stream=True)<EOL>print('<STR_LIT>', headers)<EOL>print('<STR_LIT>', len(r.content))<EOL>with open(local_file, '<STR_LIT>') as writefile:<EOL><INDENT>writefile.write(r.content)<EOL><DEDENT>print('<STR_LIT>')<EOL><DEDENT>elif local_file_size > remote_file_size:<EOL><INDENT>dl_full_file(url, local_file)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>dl_full_file(url, local_file)<EOL><DEDENT>return<EOL>
|
Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
map, because Python 2 does not have starmap.
|
f10210:m9
|
def dl_full_file(url, save_file_name):
|
response = requests.get(url)<EOL>with open(save_file_name, '<STR_LIT:wb>') as writefile:<EOL><INDENT>writefile.write(response.content)<EOL><DEDENT>return<EOL>
|
Download a file. No checks are performed.
Parameters
----------
url : str
The url of the file to download
save_file_name : str
The name to save the file as
|
f10210:m10
|
def dl_files(db, dl_dir, files, keep_subdirs=True, overwrite=False):
|
<EOL>db_url = posixpath.join(config.db_index_url, db)<EOL>response = requests.get(db_url)<EOL>response.raise_for_status()<EOL>dl_inputs = [(os.path.split(file)[<NUM_LIT:1>], os.path.split(file)[<NUM_LIT:0>], db, dl_dir, keep_subdirs, overwrite) for file in files]<EOL>make_local_dirs(dl_dir, dl_inputs, keep_subdirs)<EOL>print('<STR_LIT>')<EOL>pool = multiprocessing.Pool(processes=<NUM_LIT:2>)<EOL>pool.map(dl_pb_file, dl_inputs)<EOL>print('<STR_LIT>')<EOL>return<EOL>
|
Download specified files from a Physiobank database.
Parameters
----------
db : str
The Physiobank database directory to download. eg. For database:
'http://physionet.org/physiobank/database/mitdb', db='mitdb'.
dl_dir : str
The full local directory path in which to download the files.
files : list
A list of strings specifying the file names to download relative to the
database base directory.
keep_subdirs : bool, optional
Whether to keep the relative subdirectories of downloaded files as they
are organized in Physiobank (True), or to download all files into the
same base directory (False).
overwrite : bool, optional
If True, all files will be redownloaded regardless. If False, existing
files with the same name and relative subdirectory will be checked.
If the local file is the same size as the online file, the download is
skipped. If the local file is larger, it will be deleted and the file
will be redownloaded. If the local file is smaller, the file will be
assumed to be partially downloaded and the remaining bytes will be
downloaded and appended.
Examples
--------
>>> wfdb.dl_files('ahadb', os.getcwd(),
['STAFF-Studies-bibliography-2016.pdf', 'data/001a.hea',
'data/001a.dat'])
|
f10210:m11
|
def _check_item_type(item, field_name, allowed_types, expect_list=False,<EOL>required_channels='<STR_LIT:all>'):
|
if expect_list:<EOL><INDENT>if not isinstance(item, list):<EOL><INDENT>raise TypeError('<STR_LIT>' % field_name)<EOL><DEDENT>if required_channels == '<STR_LIT:all>':<EOL><INDENT>required_channels = list(range(len(item)))<EOL><DEDENT>for ch in range(len(item)):<EOL><INDENT>if ch in required_channels:<EOL><INDENT>allowed_types_ch = allowed_types<EOL><DEDENT>else:<EOL><INDENT>allowed_types_ch = allowed_types + (type(None),)<EOL><DEDENT>if not isinstance(item[ch], allowed_types_ch):<EOL><INDENT>raise TypeError('<STR_LIT>' % (ch, field_name),<EOL>allowed_types_ch)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>if not isinstance(item, allowed_types):<EOL><INDENT>raise TypeError('<STR_LIT>',<EOL>allowed_types)<EOL><DEDENT><DEDENT>
|
Check the item's type against a set of allowed types.
Vary the print message regarding whether the item can be None.
Helper to `BaseRecord.check_field`.
Parameters
----------
item : any
The item to check.
field_name : str
The field name.
allowed_types : iterable
Iterable of types the item is allowed to be.
expect_list : bool, optional
Whether the item is expected to be a list.
required_channels : list, optional
List of integers specifying which channels of the item must be
present. May be set to 'all' to indicate all channels. Only used
if `expect_list` is True, ie. item is a list, and its
subelements are to be checked.
Notes
-----
This is called by `check_field`, which determines whether the item
should be a list or not. This function should generally not be
called by the user directly.
|
f10211:m0
|
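The per-channel None allowance described above follows a small pattern; a standalone sketch with illustrative values:

item = ['MLII', None, 'V5']     # illustrative per-channel field values
allowed_types = (str,)
required_channels = [0, 2]      # channel 1 is not required, so it may be None

for ch, value in enumerate(item):
    allowed = allowed_types if ch in required_channels else allowed_types + (type(None),)
    if not isinstance(value, allowed):
        raise TypeError('channel %d of the field has a disallowed type' % ch)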
def check_np_array(item, field_name, ndim, parent_class, channel_num=None):
|
<EOL>if item.ndim != ndim:<EOL><INDENT>error_msg = '<STR_LIT>' % (field_name, ndim)<EOL>if channel_num is not None:<EOL><INDENT>error_msg = ('<STR_LIT>' % channel_num) + error_msg[<NUM_LIT:1>:]<EOL><DEDENT>raise TypeError(error_msg)<EOL><DEDENT>if not np.issubdtype(item.dtype, parent_class):<EOL><INDENT>error_msg = '<STR_LIT>' % (field_name, parent_class)<EOL>if channel_num is not None:<EOL><INDENT>error_msg = ('<STR_LIT>' % channel_num) + error_msg[<NUM_LIT:1>:]<EOL><DEDENT>raise TypeError(error_msg)<EOL><DEDENT>
|
Check a numpy array's shape and dtype against required
specifications.
Parameters
----------
item : numpy array
The numpy array to check
field_name : str
The name of the field to check
ndim : int
The required number of dimensions
parent_class : type
The parent class of the dtype. ie. np.integer, np.floating.
channel_num : int, optional
If not None, indicates that the item passed in is a subelement
of a list. Indicate this in the error message if triggered.
|
f10211:m1
|
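A minimal sketch of the same shape and dtype validation, using numpy's issubdtype to test against a parent class:

import numpy as np

arr = np.zeros((1000, 2), dtype=np.int16)   # illustrative d_signal-like array
if arr.ndim != 2:
    raise TypeError('field must have 2 dimensions')
if not np.issubdtype(arr.dtype, np.integer):
    raise TypeError('field must have a dtype subclassing np.integer')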
def rdheader(record_name, pb_dir=None, rd_segments=False):
|
dir_name, base_record_name = os.path.split(record_name)<EOL>dir_name = os.path.abspath(dir_name)<EOL>header_lines, comment_lines = _header._read_header_lines(base_record_name,<EOL>dir_name, pb_dir)<EOL>record_fields = _header._parse_record_line(header_lines[<NUM_LIT:0>])<EOL>if record_fields['<STR_LIT>'] is None:<EOL><INDENT>record = Record()<EOL>if len(header_lines)><NUM_LIT:1>:<EOL><INDENT>signal_fields = _header._parse_signal_lines(header_lines[<NUM_LIT:1>:])<EOL>for field in signal_fields:<EOL><INDENT>setattr(record, field, signal_fields[field])<EOL><DEDENT><DEDENT>for field in record_fields:<EOL><INDENT>if field == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>setattr(record, field, record_fields[field])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>record = MultiRecord()<EOL>segment_fields = _header._read_segment_lines(header_lines[<NUM_LIT:1>:])<EOL>for field in segment_fields:<EOL><INDENT>setattr(record, field, segment_fields[field])<EOL><DEDENT>for field in record_fields:<EOL><INDENT>setattr(record, field, record_fields[field])<EOL><DEDENT>if record.seg_len[<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>record.layout = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>record.layout = '<STR_LIT>'<EOL><DEDENT>if rd_segments:<EOL><INDENT>record.segments = []<EOL>for s in record.seg_name:<EOL><INDENT>if s == '<STR_LIT>':<EOL><INDENT>record.segments.append(None)<EOL><DEDENT>else:<EOL><INDENT>record.segments.append(rdheader(os.path.join(dir_name, s),<EOL>pb_dir))<EOL><DEDENT><DEDENT>record.sig_name = record.get_sig_name()<EOL>record.sig_segments = record.get_sig_segments()<EOL><DEDENT><DEDENT>record.comments = [line.strip('<STR_LIT>') for line in comment_lines]<EOL>return record<EOL>
|
Read a WFDB header file and return a `Record` or `MultiRecord`
object with the record descriptors as attributes.
Parameters
----------
record_name : str
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pb_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
pb_dir='mitdb'.
rd_segments : bool, optional
Used when reading multi-segment headers. If True, segment headers will
also be read (into the record object's `segments` field).
Returns
-------
record : Record or MultiRecord
The wfdb Record or MultiRecord object representing the contents
of the header read.
Examples
--------
>>> ecg_record = wfdb.rdheader('sample-data/test01_00s')
|
f10211:m2
|
def rdrecord(record_name, sampfrom=<NUM_LIT:0>, sampto=None, channels=None,<EOL>physical=True, pb_dir=None, m2s=True, smooth_frames=True,<EOL>ignore_skew=False, return_res=<NUM_LIT:64>, force_channels=True,<EOL>channel_names=None, warn_empty=False):
|
dir_name, base_record_name = os.path.split(record_name)<EOL>dir_name = os.path.abspath(dir_name)<EOL>record = rdheader(record_name, pb_dir=pb_dir, rd_segments=False)<EOL>if sampto is None:<EOL><INDENT>if record.sig_len is None:<EOL><INDENT>if record.n_sig == <NUM_LIT:0>:<EOL><INDENT>record.sig_len = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>record.sig_len = _signal._infer_sig_len(<EOL>file_name=record.file_name[<NUM_LIT:0>], fmt=record.fmt[<NUM_LIT:0>],<EOL>n_sig=record.file_name.count(record.file_name[<NUM_LIT:0>]),<EOL>dir_name=dir_name, pb_dir=pb_dir)<EOL><DEDENT><DEDENT>sampto = record.sig_len<EOL><DEDENT>if channel_names is not None:<EOL><INDENT>if isinstance(record, Record):<EOL><INDENT>reference_record = record<EOL><DEDENT>else:<EOL><INDENT>if record.layout == '<STR_LIT>':<EOL><INDENT>first_seg_name = [n for n in record.seg_name if n != '<STR_LIT>'][<NUM_LIT:0>]<EOL>reference_record = rdheader(os.path.join(dir_name,<EOL>record.seg_name[<NUM_LIT:0>]),<EOL>pb_dir=pb_dir)<EOL><DEDENT>else:<EOL><INDENT>reference_record = rdheader(os.path.join(dir_name,<EOL>record.seg_name[<NUM_LIT:0>]),<EOL>pb_dir=pb_dir)<EOL><DEDENT><DEDENT>channels = _get_wanted_channels(wanted_sig_names=channel_names,<EOL>record_sig_names=reference_record.sig_name)<EOL><DEDENT>elif channels is None:<EOL><INDENT>channels = list(range(record.n_sig))<EOL><DEDENT>record.check_read_inputs(sampfrom, sampto, channels, physical,<EOL>smooth_frames, return_res)<EOL>if not len(channels):<EOL><INDENT>old_record = record<EOL>record = Record()<EOL>for attr in _header.RECORD_SPECS.index:<EOL><INDENT>if attr == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>elif attr in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>setattr(record, attr, <NUM_LIT:0>)<EOL><DEDENT>else:<EOL><INDENT>setattr(record, attr, getattr(old_record, attr))<EOL><DEDENT><DEDENT>if warn_empty:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>elif isinstance(record, Record):<EOL><INDENT>if smooth_frames or max([record.samps_per_frame[c] for c in channels]) == <NUM_LIT:1>:<EOL><INDENT>record.d_signal = _signal._rd_segment(record.file_name, dir_name,<EOL>pb_dir, record.fmt,<EOL>record.n_sig, record.sig_len,<EOL>record.byte_offset,<EOL>record.samps_per_frame,<EOL>record.skew, sampfrom, sampto,<EOL>channels, smooth_frames,<EOL>ignore_skew)<EOL>record._arrange_fields(channels=channels, sampfrom=sampfrom,<EOL>expanded=False)<EOL>if physical:<EOL><INDENT>record.dac(expanded=False, return_res=return_res, inplace=True)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>record.e_d_signal = _signal._rd_segment(record.file_name, dir_name,<EOL>pb_dir, record.fmt,<EOL>record.n_sig,<EOL>record.sig_len,<EOL>record.byte_offset,<EOL>record.samps_per_frame,<EOL>record.skew, sampfrom,<EOL>sampto, channels,<EOL>smooth_frames, ignore_skew)<EOL>record._arrange_fields(channels=channels, sampfrom=sampfrom,<EOL>expanded=True)<EOL>if physical:<EOL><INDENT>record.dac(expanded=True, return_res=return_res, inplace=True)<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>record.segments = [None] * record.n_seg<EOL>if record.layout == '<STR_LIT>':<EOL><INDENT>record.segments[<NUM_LIT:0>] = rdheader(os.path.join(dir_name,<EOL>record.seg_name[<NUM_LIT:0>]),<EOL>pb_dir=pb_dir)<EOL><DEDENT>seg_numbers, seg_ranges = record._required_segments(sampfrom, sampto)<EOL>seg_channels = record._required_channels(seg_numbers, channels,<EOL>dir_name, pb_dir)<EOL>for i in range(len(seg_numbers)):<EOL><INDENT>seg_num = seg_numbers[i]<EOL>if record.seg_name[seg_num] == '<STR_LIT>' or len(seg_channels[i]) == 
<NUM_LIT:0>:<EOL><INDENT>record.segments[seg_num] = None<EOL><DEDENT>else:<EOL><INDENT>record.segments[seg_num] = rdrecord(<EOL>os.path.join(dir_name, record.seg_name[seg_num]),<EOL>sampfrom=seg_ranges[i][<NUM_LIT:0>], sampto=seg_ranges[i][<NUM_LIT:1>],<EOL>channels=seg_channels[i], physical=physical, pb_dir=pb_dir)<EOL><DEDENT><DEDENT>record._arrange_fields(seg_numbers=seg_numbers, seg_ranges=seg_ranges,<EOL>channels=channels, sampfrom=sampfrom,<EOL>force_channels=force_channels)<EOL>if m2s:<EOL><INDENT>record = record.multi_to_single(physical=physical,<EOL>return_res=return_res)<EOL><DEDENT><DEDENT>if isinstance(record, Record) and record.n_sig > <NUM_LIT:0>:<EOL><INDENT>record.convert_dtype(physical, return_res, smooth_frames)<EOL><DEDENT>return record<EOL>
|
Read a WFDB record and return the signal and record descriptors as
attributes in a Record or MultiRecord object.
Parameters
----------
record_name : str
The name of the WFDB record to be read, without any file
extensions. If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/BASE_RECORD.
Both relative and absolute paths are accepted. If the `pb_dir`
parameter is set, this parameter should contain just the base
record name, and the files will be searched for remotely.
Otherwise, the data files will be searched for in the local path.
sampfrom : int, optional
The starting sample number to read for all channels.
sampto : int, optional
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list, optional
List of integer indices specifying the channels to be read.
Reads all channels by default.
physical : bool, optional
Specifies whether to return signals in physical units in the
`p_signal` field (True), or digital units in the `d_signal`
field (False).
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
pb_dir='mitdb'.
m2s : bool, optional
Used when reading multi-segment records. Specifies whether to
directly return a wfdb MultiRecord object (False), or to convert
it into and return a wfdb Record object (True).
smooth_frames : bool, optional
Used when reading records with signals having multiple samples
per frame. Specifies whether to smooth the samples in signals
with more than one sample per frame and return an (MxN) uniform
numpy array as the `d_signal` or `p_signal` field (True), or to
return a list of 1d numpy arrays containing every expanded
sample as the `e_d_signal` or `e_p_signal` field (False).
ignore_skew : bool, optional
Used when reading records with at least one skewed signal.
Specifies whether to apply the skew to align the signals in the
output variable (False), or to ignore the skew field and load in
all values contained in the dat files unaligned (True).
return_res : int, optional
The numpy array dtype of the returned signals. Options are: 64,
32, 16, and 8, where the value represents the numpy int or float
dtype. Note that the value cannot be 8 when physical is True
since there is no float8 format.
force_channels : bool, optional
Used when reading multi-segment variable layout records. Whether
to update the layout specification record, and the converted
Record object if `m2s` is True, to match the input `channels`
argument, or to omit channels in which no read segment contains
the signals.
channel_names : list, optional
List of channel names to return. If this parameter is specified,
it takes precedence over `channels`.
warn_empty : bool, optional
Whether to display a warning if the specified channel indices
or names are not contained in the record, and no signal is
returned.
Returns
-------
record : Record or MultiRecord
The wfdb Record or MultiRecord object representing the contents
of the record read.
Notes
-----
If a signal range or channel selection is specified when calling
this function, the resulting attributes of the returned object will
be set to reflect the section of the record that is actually read,
rather than necessarily the entire record. For example, if
`channels=[0, 1, 2]` is specified when reading a 12 channel record,
the 'n_sig' attribute will be 3, not 12.
The `rdsamp` function exists as a simple alternative to `rdrecord`
for the common purpose of extracting the physical signals and a few
important descriptor fields.
Examples
--------
>>> record = wfdb.rdrecord('sample-data/test01_00s', sampfrom=800,
channels=[1, 3])
|
f10211:m3
|
def rdsamp(record_name, sampfrom=<NUM_LIT:0>, sampto=None, channels=None, pb_dir=None,<EOL>channel_names=None, warn_empty=False):
|
record = rdrecord(record_name=record_name, sampfrom=sampfrom,<EOL>sampto=sampto, channels=channels, physical=True,<EOL>pb_dir=pb_dir, m2s=True, channel_names=channel_names,<EOL>warn_empty=warn_empty)<EOL>signals = record.p_signal<EOL>fields = {}<EOL>for field in ['<STR_LIT>','<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>','<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>fields[field] = getattr(record, field)<EOL><DEDENT>return signals, fields<EOL>
|
Read a WFDB record, and return the physical signals and a few important
descriptor fields.
Parameters
----------
record_name : str
The name of the WFDB record to be read (without any file
extensions). If the argument contains any path delimiter
characters, the argument will be interpreted as PATH/baserecord
and the data files will be searched for in the local path.
sampfrom : int, optional
The starting sample number to read for all channels.
sampto : int, optional
The sample number at which to stop reading for all channels.
Reads the entire duration by default.
channels : list, optional
List of integer indices specifying the channels to be read.
Reads all channels by default.
pb_dir : str, optional
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
pb_dir='mitdb'.
channel_names : list, optional
List of channel names to return. If this parameter is specified,
it takes precedence over `channels`.
warn_empty : bool, optional
Whether to display a warning if the specified channel indices
or names are not contained in the record, and no signal is
returned.
Returns
-------
signals : numpy array
A 2d numpy array storing the physical signals from the record.
fields : dict
A dictionary containing several key attributes of the read
record:
- fs: The sampling frequency of the record
- units: The units for each channel
- sig_name: The signal name for each channel
- comments: Any comments written in the header
Notes
-----
If a signal range or channel selection is specified when calling
this function, the resulting attributes of the returned object will
be set to reflect the section of the record that is actually read,
rather than necessarily the entire record. For example, if
`channels=[0, 1, 2]` is specified when reading a 12 channel record,
the 'n_sig' attribute will be 3, not 12.
The `rdrecord` function is the base function upon which this one is
built. It returns all attributes present, along with the signals, as
attributes in a `Record` object. The function, along with the
returned data type, has more options than `rdsamp` for users who
wish to more directly manipulate WFDB content.
Examples
--------
>>> signals, fields = wfdb.rdsamp('sample-data/test01_00s',
sampfrom=800,
channels=[1, 3])
|
f10211:m4
|
def _get_wanted_channels(wanted_sig_names, record_sig_names, pad=False):
|
if pad:<EOL><INDENT>return [record_sig_names.index(s) if s in record_sig_names else None for s in wanted_sig_names]<EOL><DEDENT>else:<EOL><INDENT>return [record_sig_names.index(s) for s in wanted_sig_names if s in record_sig_names]<EOL><DEDENT>
|
Given some wanted signal names, and the signal names contained in a
record, return the indices of the record channels that intersect.
Parameters
----------
wanted_sig_names : list
List of desired signal name strings
record_sig_names : list
List of signal names for a single record
pad : bool, optional
Whether the output list should always have the same number of
elements as the wanted channels. If True, missing signals are
padded with None.
Returns
-------
wanted_channel_inds : list
The indices of the wanted channels contained in the record,
with None entries for missing channels when pad is True.
|
f10211:m5
|
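A worked example of the two behaviors (signal names are illustrative):

record_sig_names = ['MLII', 'V1', 'V5']
wanted_sig_names = ['V5', 'ABP', 'MLII']

# pad=False: only intersecting channels, in wanted order -> [2, 0]
unpadded = [record_sig_names.index(s) for s in wanted_sig_names
            if s in record_sig_names]
# pad=True: one entry per wanted name, None for misses -> [2, None, 0]
padded = [record_sig_names.index(s) if s in record_sig_names else None
          for s in wanted_sig_names]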
def wrsamp(record_name, fs, units, sig_name, p_signal=None, d_signal=None,<EOL>fmt=None, adc_gain=None, baseline=None, comments=None,<EOL>base_time=None, base_date=None, write_dir='<STR_LIT>'):
|
<EOL>if p_signal is not None and d_signal is not None:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>if d_signal is not None:<EOL><INDENT>if fmt is None or adc_gain is None or baseline is None:<EOL><INDENT>raise Exception("<STR_LIT>")<EOL><DEDENT><DEDENT>if p_signal is not None:<EOL><INDENT>record = Record(record_name=record_name, p_signal=p_signal, fs=fs,<EOL>fmt=fmt, units=units, sig_name=sig_name,<EOL>adc_gain=adc_gain, baseline=baseline,<EOL>comments=comments, base_time=base_time,<EOL>base_date=base_date)<EOL>record.set_d_features(do_adc=<NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>record = Record(record_name=record_name, d_signal=d_signal, fs=fs,<EOL>fmt=fmt, units=units, sig_name=sig_name,<EOL>adc_gain=adc_gain, baseline=baseline,<EOL>comments=comments, base_time=base_time,<EOL>base_date=base_date)<EOL>record.set_d_features()<EOL><DEDENT>record.set_defaults()<EOL>record.wrsamp(write_dir=write_dir)<EOL>
|
Write a single segment WFDB record, creating a WFDB header file and any
associated dat files.
Parameters
----------
record_name : str
The string name of the WFDB record to be written (without any file
extensions).
fs : int, or float
The sampling frequency of the record.
units : list
A list of strings giving the units of each signal channel.
sig_name : list
A list of strings giving the signal name of each signal channel.
p_signal : numpy array, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
physical signal values intended to be written. Either p_signal or
d_signal must be set, but not both. If p_signal is set, this method will
use it to perform analogue-digital conversion, writing the resultant
digital values to the dat file(s). If fmt is set, gain and baseline must
be set or unset together. If fmt is unset, gain and baseline must both
be unset.
d_signal : numpy array, optional
An (MxN) 2d numpy array, where M is the signal length. Gives the
digital signal values intended to be directly written to the dat
file(s). The dtype must be an integer type. Either p_signal or d_signal
must be set, but not both. In addition, if d_signal is set, fmt, gain
and baseline must also all be set.
fmt : list, optional
A list of strings giving the WFDB format of each file used to store each
channel. Accepted formats are: '80', '212', '16', '24', and '32'. There are
other WFDB formats as specified by:
https://www.physionet.org/physiotools/wag/signal-5.htm
but this library will not write (though it will read) those file types.
adc_gain : list, optional
A list of numbers specifying the ADC gain.
baseline : list, optional
A list of integers specifying the digital baseline.
comments : list, optional
A list of string comments to be written to the header file.
base_time : str, optional
A string of the record's start time in 24h 'HH:MM:SS(.ms)' format.
base_date : str, optional
A string of the record's start date in 'DD/MM/YYYY' format.
write_dir : str, optional
The directory in which to write the files.
Notes
-----
This is a gateway function, written as a simple method to write WFDB record
files using the most common parameters. Therefore not all WFDB fields can be
set via this function.
For more control over attributes, create a `Record` object, manually set its
attributes, and call its `wrsamp` instance method. If you choose this more
advanced method, see also the `set_defaults`, `set_d_features`, and
`set_p_features` instance methods to help populate attributes.
Examples
--------
>>> # Read part of a record from Physiobank
>>> signals, fields = wfdb.rdsamp('a103l', sampfrom=50000, channels=[0,1],
pb_dir='challenge/2015/training')
>>> # Write a local WFDB record (manually inserting fields)
>>> wfdb.wrsamp('ecgrecord', fs = 250, units=['mV', 'mV'],
sig_name=['I', 'II'], p_signal=signals, fmt=['16', '16'])
|
f10211:m6
|
def is_monotonic(full_list):
|
prev_elements = set({full_list[<NUM_LIT:0>]})<EOL>prev_item = full_list[<NUM_LIT:0>]<EOL>for item in full_list:<EOL><INDENT>if item != prev_item:<EOL><INDENT>if item in prev_elements:<EOL><INDENT>return False<EOL><DEDENT>prev_item = item<EOL>prev_elements.add(item)<EOL><DEDENT><DEDENT>return True<EOL>
|
Determine whether elements in a list are monotonic, ie. whether
unique elements are clustered together.
eg. [5,5,3,4] is, [5,3,5] is not.
|
f10211:m7
|
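A usage sketch of the helper above:

>>> is_monotonic([5, 5, 3, 4])
True
>>> is_monotonic([5, 3, 5])
False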
def dl_database(db_dir, dl_dir, records='<STR_LIT:all>', annotators='<STR_LIT:all>',<EOL>keep_subdirs=True, overwrite=False):
|
<EOL>db_url = posixpath.join(download.config.db_index_url, db_dir)<EOL>r = requests.get(db_url)<EOL>r.raise_for_status()<EOL>recordlist = download.get_record_list(db_dir, records)<EOL>annotators = download.get_annotators(db_dir, annotators)<EOL>allfiles = []<EOL>for rec in recordlist:<EOL><INDENT>if rec.endswith('<STR_LIT>'):<EOL><INDENT>allfiles.append(rec)<EOL><DEDENT>else:<EOL><INDENT>if rec.endswith('<STR_LIT:/>'):<EOL><INDENT>rec = rec + rec[:-<NUM_LIT:1>]<EOL><DEDENT>allfiles.append(rec+'<STR_LIT>')<EOL>dir_name, baserecname = os.path.split(rec)<EOL>record = rdheader(baserecname, pb_dir=posixpath.join(db_dir, dir_name))<EOL>if isinstance(record, Record):<EOL><INDENT>for file in (record.file_name if record.file_name else []):<EOL><INDENT>allfiles.append(posixpath.join(dir_name, file))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for seg in record.seg_name:<EOL><INDENT>if seg == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>allfiles.append(posixpath.join(dir_name, seg+'<STR_LIT>'))<EOL>if seg.endswith('<STR_LIT>'):<EOL><INDENT>continue<EOL><DEDENT>recseg = rdheader(seg, pb_dir=posixpath.join(db_dir, dir_name))<EOL>for file in recseg.file_name:<EOL><INDENT>allfiles.append(posixpath.join(dir_name, file))<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if annotators is not None:<EOL><INDENT>for a in annotators:<EOL><INDENT>annfile = rec+'<STR_LIT:.>'+a<EOL>url = posixpath.join(download.config.db_index_url, db_dir, annfile)<EOL>rh = requests.head(url)<EOL>if rh.status_code != <NUM_LIT>:<EOL><INDENT>allfiles.append(annfile)<EOL><DEDENT><DEDENT><DEDENT><DEDENT>dlinputs = [(os.path.split(file)[<NUM_LIT:1>], os.path.split(file)[<NUM_LIT:0>], db_dir, dl_dir, keep_subdirs, overwrite) for file in allfiles]<EOL>download.make_local_dirs(dl_dir, dlinputs, keep_subdirs)<EOL>print('<STR_LIT>')<EOL>pool = multiprocessing.Pool(processes=<NUM_LIT:2>)<EOL>pool.map(download.dl_pb_file, dlinputs)<EOL>print('<STR_LIT>')<EOL>return<EOL>
|
Download WFDB record (and optionally annotation) files from a
Physiobank database. The database must contain a 'RECORDS' file in
its base directory which lists its WFDB records.
Parameters
----------
db_dir : str
The Physiobank database directory to download. eg. For database:
'http://physionet.org/physiobank/database/mitdb', db_dir='mitdb'.
dl_dir : str
The full local directory path in which to download the files.
records : list, or 'all', optional
A list of strings specifying the WFDB records to download. Leave
as 'all' to download all records listed in the database's
RECORDS file.
eg. records=['test01_00s', 'test02_45s'] for database:
https://physionet.org/physiobank/database/macecgdb/
annotators : list, 'all', or None, optional
A list of strings specifying the WFDB annotation file types to
download along with the record files. Is either None to skip
downloading any annotations, 'all' to download all annotation
types as specified by the ANNOTATORS file, or a list of strings
which each specify an annotation extension.
eg. annotators = ['anI'] for database:
https://physionet.org/physiobank/database/prcp/
keep_subdirs : bool, optional
Whether to keep the relative subdirectories of downloaded files
as they are organized in Physiobank (True), or to download all
files into the same base directory (False).
overwrite : bool, optional
If True, all files will be redownloaded regardless. If False,
existing files with the same name and relative subdirectory will
be checked. If the local file is the same size as the online
file, the download is skipped. If the local file is larger, it
will be deleted and the file will be redownloaded. If the local
file is smaller, the file will be assumed to be partially
downloaded and the remaining bytes will be downloaded and
appended.
Examples
--------
>>> wfdb.dl_database('ahadb', os.getcwd())
|
f10211:m8
|
def check_field(self, field, required_channels='<STR_LIT:all>'):
|
item = getattr(self, field)<EOL>if item is None:<EOL><INDENT>raise Exception('<STR_LIT>' % field)<EOL><DEDENT>expect_list = True if field in LIST_FIELDS else False<EOL>_check_item_type(item, field_name=field,<EOL>allowed_types=ALLOWED_TYPES[field],<EOL>expect_list=expect_list,<EOL>required_channels=required_channels)<EOL>if field in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>check_np_array(item=item, field_name=field, ndim=<NUM_LIT:2>,<EOL>parent_class=(lambda f: np.integer if f == '<STR_LIT>' else np.floating)(field))<EOL><DEDENT>elif field in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>for ch in range(len(item)):<EOL><INDENT>check_np_array(item=item[ch], field_name=field,<EOL>ndim=<NUM_LIT:1>, parent_class=(lambda f: np.integer if f == '<STR_LIT>' else np.floating)(field),<EOL>channel_num=ch)<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>accepted_string = re.match('<STR_LIT>', self.record_name)<EOL>if not accepted_string or accepted_string.string != self.record_name:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if self.n_seg <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if self.n_sig <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if self.fs <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if self.counter_freq <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if self.base_counter <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if self.sig_len < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field in _header.SIGNAL_SPECS.index:<EOL><INDENT>if required_channels == '<STR_LIT:all>':<EOL><INDENT>required_channels = range(len(item))<EOL><DEDENT>for ch in range(len(item)):<EOL><INDENT>if ch not in required_channels:<EOL><INDENT>if item[ch] is None:<EOL><INDENT>continue<EOL><DEDENT><DEDENT>if field == '<STR_LIT>':<EOL><INDENT>accepted_string = re.match('<STR_LIT>', item[ch])<EOL>if not accepted_string or accepted_string.string != item[ch]:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if not is_monotonic(self.file_name):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch] not in _signal.DAT_FMTS:<EOL><INDENT>raise ValueError('<STR_LIT>', _signal.DAT_FMTS)<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch] < <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch] < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch] < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch] <= <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch] < -<NUM_LIT> or item[ch] > <NUM_LIT>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if re.search('<STR_LIT>', item[ch]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch] < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field 
== '<STR_LIT>':<EOL><INDENT>if item[ch] < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if re.search('<STR_LIT>', item[ch]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(set(item)) != len(item):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT>elif field in _header.SEGMENT_SPECS.index:<EOL><INDENT>for ch in range(len(item)):<EOL><INDENT>if field == '<STR_LIT>':<EOL><INDENT>if item[ch] == '<STR_LIT>':<EOL><INDENT>continue<EOL><DEDENT>accepted_string = re.match('<STR_LIT>', item[ch])<EOL>if not accepted_string or accepted_string.string != item[ch]:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>min_len = <NUM_LIT:0> if ch == <NUM_LIT:0> else <NUM_LIT:1><EOL>if item[ch] < min_len:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>if item[ch].startswith('<STR_LIT:#>'):<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>if re.search('<STR_LIT>', item[ch]):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
|
Check whether a single field is valid in its basic form. Does
not check compatibility with other fields.
Parameters
----------
field : str
The field name
required_channels : list, optional
Used for signal specification fields. All channels are
checked for their integrity if present, but channels that do
not lie in this field may be None.
Notes
-----
This function is called from wrheader to check fields before
writing. It is also supposed to be usable at any point to
check a specific field.
|
f10211:c0:m1
|
def check_read_inputs(self, sampfrom, sampto, channels, physical,<EOL>smooth_frames, return_res):
|
<EOL>if not hasattr(sampfrom, '<STR_LIT>'):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if not hasattr(sampto, '<STR_LIT>'):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if not isinstance(channels, list):<EOL><INDENT>raise TypeError('<STR_LIT>')<EOL><DEDENT>if sampfrom < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if sampfrom > self.sig_len:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if sampto < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if sampto > self.sig_len:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if sampto <= sampfrom:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if len(channels):<EOL><INDENT>if min(channels) < <NUM_LIT:0>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if max(channels) > self.n_sig - <NUM_LIT:1>:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>if return_res not in [<NUM_LIT:64>, <NUM_LIT:32>, <NUM_LIT:16>, <NUM_LIT:8>]:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if physical is True and return_res == <NUM_LIT:8>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if isinstance(self, MultiRecord):<EOL><INDENT>if smooth_frames is False:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT>
|
Ensure that input read parameters (from rdsamp) are valid for
the record
|
f10211:c0:m2
|
def _adjust_datetime(self, sampfrom):
|
if sampfrom:<EOL><INDENT>dt_seconds = sampfrom / self.fs<EOL>if self.base_date and self.base_time:<EOL><INDENT>self.base_datetime = datetime.datetime.combine(self.base_date,<EOL>self.base_time)<EOL>self.base_datetime += datetime.timedelta(seconds=dt_seconds)<EOL>self.base_date = self.base_datetime.date()<EOL>self.base_time = self.base_datetime.time()<EOL><DEDENT>elif self.base_time:<EOL><INDENT>tmp_datetime = datetime.datetime.combine(<EOL>datetime.datetime.today().date(), self.base_time)<EOL>self.base_time = (tmp_datetime<EOL>+ datetime.timedelta(seconds=dt_seconds)).time()<EOL><DEDENT><DEDENT>
|
Adjust date and time fields to reflect user input if possible.
Helper function for the `_arrange_fields` of both Record and
MultiRecord objects.
|
f10211:c0:m3
|
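The adjustment above amounts to shifting the record's start timestamp forward by sampfrom / fs seconds; a standalone sketch with illustrative values:

import datetime

fs, sampfrom = 360, 3600                    # illustrative frequency and offset
base_date = datetime.date(2001, 1, 1)
base_time = datetime.time(12, 0, 0)

base_datetime = datetime.datetime.combine(base_date, base_time)
base_datetime += datetime.timedelta(seconds=sampfrom / fs)  # +10 seconds
base_date, base_time = base_datetime.date(), base_datetime.time()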
def wrsamp(self, expanded=False, write_dir='<STR_LIT>'):
|
<EOL>self.wrheader(write_dir=write_dir)<EOL>if self.n_sig > <NUM_LIT:0>:<EOL><INDENT>self.wr_dats(expanded=expanded, write_dir=write_dir)<EOL><DEDENT>
|
Write a wfdb header file and any associated dat files from this
object.
Parameters
----------
expanded : bool, optional
Whether to write the expanded signal (e_d_signal) instead
of the uniform signal (d_signal).
write_dir : str, optional
The directory in which to write the files.
|
f10211:c1:m2
|
def _arrange_fields(self, channels, sampfrom=<NUM_LIT:0>, expanded=False):
|
<EOL>for field in _header.SIGNAL_SPECS.index:<EOL><INDENT>item = getattr(self, field)<EOL>setattr(self, field, [item[c] for c in channels])<EOL><DEDENT>if expanded:<EOL><INDENT>if self.sig_len != int(len(self.e_d_signal[<NUM_LIT:0>]) / self.samps_per_frame[<NUM_LIT:0>]):<EOL><INDENT>self.checksum = self.calc_checksum(expanded)<EOL>self.init_value = [s[<NUM_LIT:0>] for s in self.e_d_signal]<EOL><DEDENT>self.n_sig = len(channels)<EOL>self.sig_len = int(len(self.e_d_signal[<NUM_LIT:0>]) / self.samps_per_frame[<NUM_LIT:0>])<EOL><DEDENT>else:<EOL><INDENT>if self.sig_len != self.d_signal.shape[<NUM_LIT:0>]:<EOL><INDENT>if self.checksum is not None:<EOL><INDENT>self.checksum = self.calc_checksum()<EOL><DEDENT>if self.init_value is not None:<EOL><INDENT>ival = list(self.d_signal[<NUM_LIT:0>, :])<EOL>self.init_value = [int(i) for i in ival]<EOL><DEDENT><DEDENT>self.n_sig = len(channels)<EOL>self.sig_len = self.d_signal.shape[<NUM_LIT:0>]<EOL><DEDENT>self._adjust_datetime(sampfrom=sampfrom)<EOL>
|
Arrange/edit object fields to reflect user channel and/or signal
range input.
Parameters
----------
channels : list
List of channel numbers specified.
sampfrom : int, optional
Starting sample number read.
expanded : bool, optional
Whether the record was read in expanded mode.
|
f10211:c1:m3
|
def wrsamp(self, write_dir='<STR_LIT>'):
|
<EOL>self.wrheader(write_dir=write_dir)<EOL>for seg in self.segments:<EOL><INDENT>seg.wrsamp(write_dir=write_dir)<EOL><DEDENT>
|
Write a multi-segment header, along with headers and dat files
for all segments, from this object.
|
f10211:c2:m1
|
def _check_segment_cohesion(self):
|
if self.n_seg != len(self.segments):<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>totalsig_len = <NUM_LIT:0><EOL>for i in range(self.n_seg):<EOL><INDENT>s = self.segments[i]<EOL>if i == <NUM_LIT:0> and self.seg_len[<NUM_LIT:0>] == <NUM_LIT:0>:<EOL><INDENT>for file_name in s.file_name:<EOL><INDENT>if file_name != '<STR_LIT>':<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT><DEDENT><DEDENT>if s.fs != self.fs:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if s.sig_len != self.seg_len[i]:<EOL><INDENT>raise ValueError('<STR_LIT>'+str(i)+'<STR_LIT>')<EOL><DEDENT>totalsig_len = totalsig_len + getattr(s, '<STR_LIT>')<EOL><DEDENT>
|
Check the cohesion of the segments field with other fields used
to write the record
|
f10211:c2:m2
|
def _required_segments(self, sampfrom, sampto):
|
<EOL>if self.layout == '<STR_LIT>':<EOL><INDENT>startseg = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>startseg = <NUM_LIT:1><EOL><DEDENT>cumsumlengths = list(np.cumsum(self.seg_len[startseg:]))<EOL>seg_numbers = [[sampfrom < cs for cs in cumsumlengths].index(True)]<EOL>if sampto == cumsumlengths[len(cumsumlengths) - <NUM_LIT:1>]:<EOL><INDENT>seg_numbers.append(len(cumsumlengths) - <NUM_LIT:1>)<EOL><DEDENT>else:<EOL><INDENT>seg_numbers.append([sampto <= cs for cs in cumsumlengths].index(True))<EOL><DEDENT>seg_numbers = list(np.add(seg_numbers,startseg))<EOL>if seg_numbers[<NUM_LIT:1>] == seg_numbers[<NUM_LIT:0>]:<EOL><INDENT>seg_numbers = [seg_numbers[<NUM_LIT:0>]]<EOL>segstartsamp = sum(self.seg_len[<NUM_LIT:0>:seg_numbers[<NUM_LIT:0>]])<EOL>readsamps = [[sampfrom-segstartsamp, sampto-segstartsamp]]<EOL><DEDENT>else:<EOL><INDENT>seg_numbers = list(range(seg_numbers[<NUM_LIT:0>], seg_numbers[<NUM_LIT:1>]+<NUM_LIT:1>))<EOL>readsamps = [[<NUM_LIT:0>, self.seg_len[s]] for s in seg_numbers]<EOL>readsamps[<NUM_LIT:0>][<NUM_LIT:0>] = sampfrom - ([<NUM_LIT:0>] + cumsumlengths)[seg_numbers[<NUM_LIT:0>]-startseg]<EOL>readsamps[-<NUM_LIT:1>][<NUM_LIT:1>] = sampto - ([<NUM_LIT:0>] + cumsumlengths)[seg_numbers[-<NUM_LIT:1>]-startseg]<EOL><DEDENT>return (seg_numbers, readsamps)<EOL>
|
Determine the segments and the samples within each segment in a
multi-segment record, that lie within a sample range.
Parameters
----------
sampfrom : int
The starting sample number to read for each channel.
sampto : int
The sample number at which to stop reading for each channel.
|
f10211:c2:m3
|
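A worked example of the cumulative-sum selection above, for a fixed layout record with illustrative segment lengths:

import numpy as np

seg_len = [100, 200, 300]       # illustrative segment lengths
sampfrom, sampto = 150, 450

cumsum = list(np.cumsum(seg_len))                     # [100, 300, 600]
first = [sampfrom < cs for cs in cumsum].index(True)  # segment 1
last = [sampto <= cs for cs in cumsum].index(True)    # segment 2
# Segments 1 and 2 are read: samples 50-200 of segment 1 and
# samples 0-150 of segment 2.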
def _required_channels(self, seg_numbers, channels, dir_name, pb_dir):
|
<EOL>if self.layout == '<STR_LIT>':<EOL><INDENT>required_channels = [channels] * len(seg_numbers)<EOL><DEDENT>else:<EOL><INDENT>required_channels = []<EOL>l_sig_names = self.segments[<NUM_LIT:0>].sig_name<EOL>w_sig_names = [l_sig_names[c] for c in channels]<EOL>for i in range(len(seg_numbers)):<EOL><INDENT>if self.seg_name[seg_numbers[i]] == '<STR_LIT>':<EOL><INDENT>required_channels.append([])<EOL><DEDENT>else:<EOL><INDENT>s_sig_names = rdheader(<EOL>os.path.join(dir_name, self.seg_name[seg_numbers[i]]),<EOL>pb_dir=pb_dir).sig_name<EOL>required_channels.append(_get_wanted_channels(<EOL>w_sig_names, s_sig_names))<EOL><DEDENT><DEDENT><DEDENT>return required_channels<EOL>
|
Get the channel numbers to be read from each specified segment,
given the channel numbers specified for the entire record.
Parameters
----------
seg_numbers : list
List of segment numbers to read.
channels : list
The channel indices to read for the whole record. Same one
specified by user input.
Returns
-------
required_channels : list
List of lists, containing channel indices to read for each
desired segment.
|
f10211:c2:m4
|
def _arrange_fields(self, seg_numbers, seg_ranges, channels,<EOL>sampfrom=<NUM_LIT:0>, force_channels=True):
|
<EOL>for i in range(len(seg_numbers)):<EOL><INDENT>self.seg_len[seg_numbers[i]] = seg_ranges[i][<NUM_LIT:1>] - seg_ranges[i][<NUM_LIT:0>]<EOL><DEDENT>if self.layout == '<STR_LIT>':<EOL><INDENT>self.n_sig = len(channels)<EOL>self.segments = self.segments[seg_numbers[<NUM_LIT:0>]:seg_numbers[-<NUM_LIT:1>]+<NUM_LIT:1>]<EOL>self.seg_name = self.seg_name[seg_numbers[<NUM_LIT:0>]:seg_numbers[-<NUM_LIT:1>]+<NUM_LIT:1>]<EOL>self.seg_len = self.seg_len[seg_numbers[<NUM_LIT:0>]:seg_numbers[-<NUM_LIT:1>]+<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>self.segments = [self.segments[<NUM_LIT:0>]] + self.segments[seg_numbers[<NUM_LIT:0>]:seg_numbers[-<NUM_LIT:1>]+<NUM_LIT:1>]<EOL>self.seg_name = [self.seg_name[<NUM_LIT:0>]] + self.seg_name[seg_numbers[<NUM_LIT:0>]:seg_numbers[-<NUM_LIT:1>]+<NUM_LIT:1>]<EOL>self.seg_len = [self.seg_len[<NUM_LIT:0>]] + self.seg_len[seg_numbers[<NUM_LIT:0>]:seg_numbers[-<NUM_LIT:1>]+<NUM_LIT:1>]<EOL>if not force_channels:<EOL><INDENT>desired_sig_names = [self.segments[<NUM_LIT:0>].sig_name[ch] for ch in channels]<EOL>contained_sig_names = set([name for seg in self.segments[<NUM_LIT:1>:] if seg is not None for name in seg.sig_name])<EOL>sig_name = [name for name in desired_sig_names if name in contained_sig_names]<EOL>channels = [self.segments[<NUM_LIT:0>].sig_name.index(name) for name in sig_name]<EOL><DEDENT>for field in _header.SIGNAL_SPECS.index:<EOL><INDENT>item = getattr(self.segments[<NUM_LIT:0>], field)<EOL>setattr(self.segments[<NUM_LIT:0>], field, [item[c] for c in channels])<EOL><DEDENT>self.segments[<NUM_LIT:0>].n_sig = self.n_sig = len(channels)<EOL>if self.n_sig == <NUM_LIT:0>:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>self.sig_len = sum([sr[<NUM_LIT:1>]-sr[<NUM_LIT:0>] for sr in seg_ranges])<EOL>self.n_seg = len(self.segments)<EOL>self._adjust_datetime(sampfrom=sampfrom)<EOL>
|
Arrange/edit object fields to reflect user channel and/or
signal range inputs. Updates layout specification header if
necessary.
Parameters
----------
seg_numbers : list
List of integer segment numbers read.
seg_ranges: list
List of integer pairs, giving the sample ranges for each
segment number read.
channels : list
List of channel numbers specified
sampfrom : int
Starting sample read.
force_channels : bool, optional
Used when reading multi-segment variable layout records.
Whether to update the layout specification record to match
the input `channels` argument, or to omit channels in which
no read segment contains the signals.
|
f10211:c2:m5
|
def multi_to_single(self, physical, return_res=<NUM_LIT:64>):
|
<EOL>fields = self.__dict__.copy()<EOL>for attr in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>del(fields[attr])<EOL><DEDENT>if self.layout == '<STR_LIT>':<EOL><INDENT>for attr in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>fields[attr] = getattr(self.segments[<NUM_LIT:0>], attr)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>signal_names = self.segments[<NUM_LIT:0>].sig_name<EOL>n_sig = len(signal_names)<EOL>reference_fields = {'<STR_LIT>':n_sig*[None], '<STR_LIT>':n_sig*[None],<EOL>'<STR_LIT>':n_sig*[None],<EOL>'<STR_LIT>':n_sig*[None]}<EOL>mismatched_fields = []<EOL>for seg in self.segments[<NUM_LIT:1>:]:<EOL><INDENT>if seg is None:<EOL><INDENT>continue<EOL><DEDENT>for seg_ch in range(seg.n_sig):<EOL><INDENT>sig_name = seg.sig_name[seg_ch]<EOL>ch = signal_names.index(sig_name)<EOL>for field in reference_fields:<EOL><INDENT>item_ch = getattr(seg, field)[seg_ch]<EOL>if reference_fields[field][ch] is None:<EOL><INDENT>reference_fields[field][ch] = item_ch<EOL><DEDENT>elif reference_fields[field][ch] != item_ch:<EOL><INDENT>if physical:<EOL><INDENT>mismatched_fields.append(field)<EOL><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>for field in set(mismatched_fields):<EOL><INDENT>del(reference_fields[field])<EOL><DEDENT>fields.update(reference_fields)<EOL>fields['<STR_LIT>'] = signal_names<EOL><DEDENT>if physical:<EOL><INDENT>sig_attr = '<STR_LIT>'<EOL>dtype = _signal._np_dtype(return_res, discrete=False)<EOL>nan_vals = np.array([self.n_sig * [np.nan]], dtype=dtype)<EOL><DEDENT>else:<EOL><INDENT>sig_attr = '<STR_LIT>'<EOL>dtype = _signal._np_dtype(return_res, discrete=True)<EOL>nan_vals = np.array([_signal._digi_nan(fields['<STR_LIT>'])], dtype=dtype)<EOL><DEDENT>combined_signal = np.repeat(nan_vals, self.sig_len, axis=<NUM_LIT:0>)<EOL>start_samps = [<NUM_LIT:0>] + list(np.cumsum(self.seg_len)[<NUM_LIT:0>:-<NUM_LIT:1>])<EOL>end_samps = list(np.cumsum(self.seg_len))<EOL>if self.layout == '<STR_LIT>':<EOL><INDENT>for i in range(self.n_seg):<EOL><INDENT>combined_signal[start_samps[i]:end_samps[i], :] = getattr(self.segments[i], sig_attr)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>for i in range(<NUM_LIT:1>, self.n_seg):<EOL><INDENT>seg = self.segments[i]<EOL>if seg is not None:<EOL><INDENT>segment_channels = _get_wanted_channels(fields['<STR_LIT>'],<EOL>seg.sig_name,<EOL>pad=True)<EOL>for ch in range(self.n_sig):<EOL><INDENT>if segment_channels[ch] is not None:<EOL><INDENT>combined_signal[start_samps[i]:end_samps[i], ch] = getattr(seg, sig_attr)[:, segment_channels[ch]]<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>record = Record()<EOL>for field in fields:<EOL><INDENT>setattr(record, field, fields[field])<EOL><DEDENT>setattr(record, sig_attr, combined_signal)<EOL>if physical:<EOL><INDENT>record.set_p_features()<EOL><DEDENT>else:<EOL><INDENT>record.set_d_features()<EOL><DEDENT>return record<EOL>
|
Create a Record object from the MultiRecord object. All signal
segments will be combined into the new object's `p_signal` or
`d_signal` field. For digital format, the signals must have
the same storage format, baseline, and adc_gain in all segments.
Parameters
----------
physical : bool
Whether to convert the physical or digital signal.
return_res : int, optional
The return resolution of the `p_signal` field. Options are:
64, 32, and 16.
Returns
-------
record : wfdb Record
The single segment record created.
|
f10211:c2:m6
|
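The combination step above allocates a NaN-filled array spanning the whole record and pastes each segment's signal into its sample range; a minimal sketch with illustrative shapes:

import numpy as np

n_sig, seg_len = 2, [100, 200, 300]
sig_len = sum(seg_len)
combined = np.full((sig_len, n_sig), np.nan)        # physical default is NaN

start = [0] + list(np.cumsum(seg_len)[:-1])         # [0, 100, 300]
end = list(np.cumsum(seg_len))                      # [100, 300, 600]
segments = [np.ones((l, n_sig)) for l in seg_len]   # stand-ins for p_signal
for i, seg in enumerate(segments):
    combined[start[i]:end[i], :] = seg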
def wfdb_strptime(time_string):
|
n_colons = time_string.count('<STR_LIT::>')<EOL>if n_colons == <NUM_LIT:0>:<EOL><INDENT>time_fmt = '<STR_LIT>'<EOL><DEDENT>elif n_colons == <NUM_LIT:1>:<EOL><INDENT>time_fmt = '<STR_LIT>'<EOL><DEDENT>elif n_colons == <NUM_LIT:2>:<EOL><INDENT>time_fmt = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT:.>' in time_string:<EOL><INDENT>time_fmt += '<STR_LIT>'<EOL><DEDENT>return datetime.datetime.strptime(time_string, time_fmt).time()<EOL>
|
Given a time string in an acceptable wfdb format, return
a datetime.time object.
Valid formats: SS, MM:SS, HH:MM:SS, all with and without microsec.
|
f10212:m0
|
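A usage sketch, assuming the masked format strings follow the documented SS / MM:SS / HH:MM:SS patterns:

>>> wfdb_strptime('12:00:30.0625')
datetime.time(12, 0, 30, 62500)
>>> wfdb_strptime('05:30')
datetime.time(0, 5, 30)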
def _read_header_lines(base_record_name, dir_name, pb_dir):
|
file_name = base_record_name + '<STR_LIT>'<EOL>if pb_dir is None:<EOL><INDENT>with open(os.path.join(dir_name, file_name), '<STR_LIT:r>') as fp:<EOL><INDENT>header_lines = []<EOL>comment_lines = []<EOL>for line in fp:<EOL><INDENT>line = line.strip()<EOL>if line.startswith('<STR_LIT:#>'):<EOL><INDENT>comment_lines.append(line)<EOL><DEDENT>elif line:<EOL><INDENT>ci = line.find('<STR_LIT:#>')<EOL>if ci > <NUM_LIT:0>:<EOL><INDENT>header_lines.append(line[:ci])<EOL>comment_lines.append(line[ci:])<EOL><DEDENT>else:<EOL><INDENT>header_lines.append(line)<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>header_lines, comment_lines = download._stream_header(file_name,<EOL>pb_dir)<EOL><DEDENT>return header_lines, comment_lines<EOL>
|
Read the lines in a local or remote header file.
Parameters
----------
base_record_name : str
The base name of the WFDB record to be read, without any file
extensions.
dir_name : str
The local directory location of the header file. This parameter
is ignored if `pb_dir` is set.
pb_dir : str
Option used to stream data from Physiobank. The Physiobank
database directory from which to find the required record files.
eg. For record '100' in 'http://physionet.org/physiobank/database/mitdb'
pb_dir='mitdb'.
Returns
-------
header_lines : list
List of strings corresponding to the header lines.
comment_lines : list
List of strings corresponding to the comment lines.
|
f10212:m1
|
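A sketch of the line-splitting rule applied above: full-line comments start with '#', and an inline '#' splits a line into a header part and a comment part (the lines shown are illustrative):

raw_lines = [
    '100 2 360 650000',                  # record line
    '# a full-line comment',
    '100.dat 212 200 11 1024 995 0 0 MLII  # an inline comment',
]
header_lines, comment_lines = [], []
for line in raw_lines:
    line = line.strip()
    if line.startswith('#'):
        comment_lines.append(line)
    elif line:
        ci = line.find('#')
        if ci > 0:
            header_lines.append(line[:ci])
            comment_lines.append(line[ci:])
        else:
            header_lines.append(line)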
def _parse_record_line(record_line):
|
<EOL>record_fields = {}<EOL>(record_fields['<STR_LIT>'], record_fields['<STR_LIT>'],<EOL>record_fields['<STR_LIT>'], record_fields['<STR_LIT>'],<EOL>record_fields['<STR_LIT>'], record_fields['<STR_LIT>'],<EOL>record_fields['<STR_LIT>'], record_fields['<STR_LIT>'],<EOL>record_fields['<STR_LIT>']) = re.findall(_rx_record, record_line)[<NUM_LIT:0>]<EOL>for field in RECORD_SPECS.index:<EOL><INDENT>if record_fields[field] == '<STR_LIT>':<EOL><INDENT>record_fields[field] = RECORD_SPECS.loc[field, '<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>if RECORD_SPECS.loc[field, '<STR_LIT>'] == int_types:<EOL><INDENT>record_fields[field] = int(record_fields[field])<EOL><DEDENT>elif RECORD_SPECS.loc[field, '<STR_LIT>'] == float_types:<EOL><INDENT>record_fields[field] = float(record_fields[field])<EOL>if field == '<STR_LIT>':<EOL><INDENT>fs = float(record_fields['<STR_LIT>'])<EOL>if round(fs, <NUM_LIT:8>) == float(int(fs)):<EOL><INDENT>fs = int(fs)<EOL><DEDENT>record_fields['<STR_LIT>'] = fs<EOL><DEDENT><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>record_fields['<STR_LIT>'] = wfdb_strptime(record_fields['<STR_LIT>'])<EOL><DEDENT>elif field == '<STR_LIT>':<EOL><INDENT>record_fields['<STR_LIT>'] = datetime.datetime.strptime(<EOL>record_fields['<STR_LIT>'], '<STR_LIT>').date()<EOL><DEDENT><DEDENT><DEDENT>if record_fields['<STR_LIT>'] and record_fields['<STR_LIT>']:<EOL><INDENT>record_fields['<STR_LIT>'] = datetime.datetime.combine(<EOL>record_fields['<STR_LIT>'], record_fields['<STR_LIT>'])<EOL><DEDENT>return record_fields<EOL>
|
Extract fields from a record line string into a dictionary
|
f10212:m2
|
def _parse_signal_lines(signal_lines):
|
n_sig = len(signal_lines)<EOL>signal_fields = {}<EOL>for field in SIGNAL_SPECS.index:<EOL><INDENT>signal_fields[field] = n_sig * [None]<EOL><DEDENT>for ch in range(n_sig):<EOL><INDENT>(signal_fields['<STR_LIT>'][ch], signal_fields['<STR_LIT>'][ch],<EOL>signal_fields['<STR_LIT>'][ch], signal_fields['<STR_LIT>'][ch],<EOL>signal_fields['<STR_LIT>'][ch], signal_fields['<STR_LIT>'][ch],<EOL>signal_fields['<STR_LIT>'][ch], signal_fields['<STR_LIT>'][ch],<EOL>signal_fields['<STR_LIT>'][ch], signal_fields['<STR_LIT>'][ch],<EOL>signal_fields['<STR_LIT>'][ch], signal_fields['<STR_LIT>'][ch],<EOL>signal_fields['<STR_LIT>'][ch],<EOL>signal_fields['<STR_LIT>'][ch]) = _rx_signal.findall(signal_lines[ch])[<NUM_LIT:0>]<EOL>for field in SIGNAL_SPECS.index:<EOL><INDENT>if signal_fields[field][ch] == '<STR_LIT>':<EOL><INDENT>signal_fields[field][ch] = SIGNAL_SPECS.loc[field, '<STR_LIT>']<EOL>if field == '<STR_LIT>' and signal_fields['<STR_LIT>'][ch] != '<STR_LIT>':<EOL><INDENT>signal_fields['<STR_LIT>'][ch] = int(signal_fields['<STR_LIT>'][ch])<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if SIGNAL_SPECS.loc[field, '<STR_LIT>'] is int_types:<EOL><INDENT>signal_fields[field][ch] = int(signal_fields[field][ch])<EOL><DEDENT>elif SIGNAL_SPECS.loc[field, '<STR_LIT>'] is float_types:<EOL><INDENT>signal_fields[field][ch] = float(signal_fields[field][ch])<EOL>if field == '<STR_LIT>' and signal_fields['<STR_LIT>'][ch] == <NUM_LIT:0>:<EOL><INDENT>signal_fields['<STR_LIT>'][ch] = <NUM_LIT><EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return signal_fields<EOL>
|
Extract fields from a list of signal line strings into a dictionary.
|
f10212:m3
|