| sentence1 (string, 52–3.87M chars) | sentence2 (string, 1–47.2k chars) | label (1 class: entailment) |
|---|---|---|
def get_user_by_username(self, username):
"""
Returns details for the user with the given username.
If there is more than one match, only the first is returned. Use
get_users() for the full result set.
"""
results = self.get_users(filter='username eq "%s"' % (username))
if results['totalResults'] == 0:
logging.warning("Found no matches for given username.")
return
elif results['totalResults'] > 1:
logging.warning("Found %s matches for username %s" %
(results['totalResults'], username))
return results['resources'][0]
|
Returns details for the user with the given username.
If there is more than one match, only the first is returned. Use
get_users() for the full result set.
|
entailment
|
def get_user_by_email(self, email):
"""
Returns details for the user with the given email address.
If there is more than one match, only the first is returned. Use
get_users() for the full result set.
"""
results = self.get_users(filter='email eq "%s"' % (email))
if results['totalResults'] == 0:
logging.warning("Found no matches for given email.")
return
elif results['totalResults'] > 1:
logging.warning("Found %s matches for email %s" %
(results['totalResults'], email))
return results['resources'][0]
|
Returns details for the user with the given email address.
If there is more than one match, only the first is returned. Use
get_users() for the full result set.
|
entailment
|
def get_user(self, id):
"""
Returns details about the user for the given id.
Use get_user_by_email() or get_user_by_username() for help
identifying the id.
"""
self.assert_has_permission('scim.read')
return self._get(self.uri + '/Users/%s' % (id))
|
Returns details about the user for the given id.
Use get_user_by_email() or get_user_by_username() for help
identifying the id.
|
entailment
|
def create(self):
"""
Create an instance of the Time Series Service with the typical
starting settings.
"""
self.service.create()
predix.config.set_env_value(self.use_class, 'ingest_uri',
self.get_ingest_uri())
predix.config.set_env_value(self.use_class, 'ingest_zone_id',
self.get_ingest_zone_id())
predix.config.set_env_value(self.use_class, 'query_uri',
self.get_query_uri())
predix.config.set_env_value(self.use_class, 'query_zone_id',
self.get_query_zone_id())
|
Create an instance of the Time Series Service with the typical
starting settings.
|
entailment
|
def grant_client(self, client_id, read=True, write=True):
"""
Grant the given client id all the scopes and authorities
needed to work with the timeseries service.
"""
scopes = ['openid']
authorities = ['uaa.resource']
if write:
for zone in self.service.settings.data['ingest']['zone-token-scopes']:
scopes.append(zone)
authorities.append(zone)
if read:
for zone in self.service.settings.data['query']['zone-token-scopes']:
scopes.append(zone)
authorities.append(zone)
self.service.uaa.uaac.update_client_grants(client_id, scope=scopes,
authorities=authorities)
return self.service.uaa.uaac.get_client(client_id)
|
Grant the given client id all the scopes and authorities
needed to work with the timeseries service.
|
entailment
|
def get_query_uri(self):
"""
Return the uri used for queries on time series data.
"""
# Query URI has extra path we don't want so strip it off here
query_uri = self.service.settings.data['query']['uri']
query_uri = urlparse(query_uri)
return query_uri.scheme + '://' + query_uri.netloc
|
Return the uri used for queries on time series data.
|
entailment
|
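For reference, the path-stripping that get_query_uri performs can be reproduced with just the standard library; the URI below is a made-up placeholder rather than a real endpoint.

```python
from urllib.parse import urlparse

# Placeholder query URI of the kind stored in the service settings.
full_uri = "https://time-series-store.example.io/v1/datapoints"
parsed = urlparse(full_uri)

# Keep only scheme and host, dropping the trailing path, as get_query_uri does.
base_uri = parsed.scheme + "://" + parsed.netloc
print(base_uri)  # https://time-series-store.example.io
```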
def add_to_manifest(self, manifest):
"""
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: A predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
"""
# Add this service to list of services
manifest.add_service(self.service.name)
# Add environment variables
uri = predix.config.get_env_key(self.use_class, 'ingest_uri')
manifest.add_env_var(uri, self.get_ingest_uri())
zone_id = predix.config.get_env_key(self.use_class, 'ingest_zone_id')
manifest.add_env_var(zone_id, self.get_ingest_zone_id())
uri = predix.config.get_env_key(self.use_class, 'query_uri')
manifest.add_env_var(uri, self.get_query_uri())
zone_id = predix.config.get_env_key(self.use_class, 'query_zone_id')
manifest.add_env_var(zone_id, self.get_query_zone_id())
manifest.write_manifest()
|
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: A predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
|
entailment
|
def find_requirements(path):
"""
This method tries to determine the requirements of a particular project
by inspecting the possible places that they could be defined.
It will attempt, in order:
1) to parse setup.py in the root for an install_requires value
2) to read a requirements.txt file or a requirements.pip in the root
3) to read all .txt files in a folder called 'requirements' in the root
4) to read files matching "*requirements*.txt" and "*reqs*.txt" in the root,
excluding any starting or ending with 'test'
If one of these succeeds, then a list of pkg_resources.Requirement objects
will be returned. If none can be found, then a RequirementsNotFound
will be raised
"""
requirements = []
setup_py = os.path.join(path, 'setup.py')
if os.path.exists(setup_py) and os.path.isfile(setup_py):
try:
requirements = from_setup_py(setup_py)
requirements.sort()
return requirements
except CouldNotParseRequirements:
pass
for reqfile_name in ('requirements.txt', 'requirements.pip'):
reqfile_path = os.path.join(path, reqfile_name)
if os.path.exists(reqfile_path) and os.path.isfile(reqfile_path):
try:
requirements += from_requirements_txt(reqfile_path)
except CouldNotParseRequirements:
pass
requirements_dir = os.path.join(path, 'requirements')
if os.path.exists(requirements_dir) and os.path.isdir(requirements_dir):
from_dir = from_requirements_dir(requirements_dir)
if from_dir is not None:
requirements += from_dir
from_blob = from_requirements_blob(path)
if from_blob is not None:
requirements += from_blob
requirements = list(set(requirements))
if len(requirements) > 0:
requirements.sort()
return requirements
raise RequirementsNotFound
|
This method tries to determine the requirements of a particular project
by inspecting the possible places that they could be defined.
It will attempt, in order:
1) to parse setup.py in the root for an install_requires value
2) to read a requirements.txt file or a requirements.pip in the root
3) to read all .txt files in a folder called 'requirements' in the root
4) to read files matching "*requirements*.txt" and "*reqs*.txt" in the root,
excluding any starting or ending with 'test'
If one of these succeeds, then a list of pkg_resources.Requirement objects
will be returned. If none can be found, then a RequirementsNotFound
will be raised
|
entailment
|
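As a small, self-contained sketch of step 2 above (reading a requirements file into pkg_resources requirement objects), assuming setuptools/pkg_resources is available; the requirements content is invented for illustration.

```python
import pkg_resources

# Invented requirements.txt content for illustration only.
requirements_text = """
requests>=2.0
numpy==1.24.*
# comment lines and blank lines are skipped by the parser
"""

requirements = list(pkg_resources.parse_requirements(requirements_text))
print(sorted(str(req) for req in requirements))  # ['numpy==1.24.*', 'requests>=2.0']
```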
def get_app_guid(self, app_name):
"""
Returns the GUID for the app instance with
the given name.
"""
summary = self.space.get_space_summary()
for app in summary['apps']:
if app['name'] == app_name:
return app['guid']
|
Returns the GUID for the app instance with
the given name.
|
entailment
|
def delete_app(self, app_name):
"""
Delete the given app.
Will fail intentionally if there are any service
bindings. You must delete those first.
"""
if app_name not in self.space.get_apps():
logging.warning("App not found so... succeeded?")
return True
guid = self.get_app_guid(app_name)
self.api.delete("/v2/apps/%s" % (guid))
|
Delete the given app.
Will fail intentionally if there are any service
bindings. You must delete those first.
|
entailment
|
def _get_service_config(self):
"""
Reads in config file of UAA credential information
or generates one as a side-effect if not yet
initialized.
"""
# Should work for windows, osx, and linux environments
if not os.path.exists(self.config_path):
try:
os.makedirs(os.path.dirname(self.config_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
return {}
with open(self.config_path, 'r') as data:
return json.load(data)
|
Reads in config file of UAA credential information
or generates one as a side-effect if not yet
initialized.
|
entailment
|
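On Python 3 the directory-creation branch above can be expressed with os.makedirs(..., exist_ok=True), which covers the EEXIST race the code handles explicitly; a minimal equivalent sketch with a placeholder path:

```python
import json
import os

config_path = os.path.expanduser("~/.example/config.json")  # placeholder path

if not os.path.exists(config_path):
    # exist_ok=True absorbs the case where another process created the directory first
    os.makedirs(os.path.dirname(config_path), exist_ok=True)
    config = {}
else:
    with open(config_path, "r") as data:
        config = json.load(data)
```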
def _write_service_config(self):
"""
Will write the config out to disk.
"""
with open(self.config_path, 'w') as output:
output.write(json.dumps(self.data, sort_keys=True, indent=4))
|
Will write the config out to disk.
|
entailment
|
def create(self, **kwargs):
"""
Create an instance of the Blob Store Service with the typical
starting settings.
"""
self.service.create(**kwargs)
predix.config.set_env_value(self.use_class, 'url',
self.service.settings.data['url'])
predix.config.set_env_value(self.use_class, 'access_key_id',
self.service.settings.data['access_key_id'])
predix.config.set_env_value(self.use_class, 'bucket_name',
self.service.settings.data['bucket_name'])
predix.config.set_env_value(self.use_class, 'host',
self.service.settings.data['host'])
predix.config.set_env_value(self.use_class, 'secret_access_key',
self.service.settings.data['secret_access_key'])
|
Create an instance of the Blob Store Service with the typical
starting settings.
|
entailment
|
def add_to_manifest(self, manifest):
"""
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: A predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
"""
# Add this service to the list of services
manifest.add_service(self.service.name)
# Add environment variables
url = predix.config.get_env_key(self.use_class, 'url')
manifest.add_env_var(url, self.service.settings.data['url'])
akid = predix.config.get_env_key(self.use_class, 'access_key_id')
manifest.add_env_var(akid, self.service.settings.data['access_key_id'])
bucket = predix.config.get_env_key(self.use_class, 'bucket_name')
manifest.add_env_var(bucket, self.service.settings.data['bucket_name'])
host = predix.config.get_env_key(self.use_class, 'host')
manifest.add_env_var(host, self.service.settings.data['host'])
secret_access_key = predix.config.get_env_key(self.use_class, 'secret_access_key')
manifest.add_env_var(secret_access_key, self.service.settings.data['secret_access_key'])
manifest.write_manifest()
|
Add useful details to the manifest about this service
so that it can be used in an application.
:param manifest: A predix.admin.app.Manifest object
instance that manages reading/writing manifest config
for a cloud foundry app.
|
entailment
|
def subscribe(self):
"""
return a generator for all subscribe messages
:return: generator that yields received messages
"""
while self.run_subscribe_generator:
if len(self._rx_messages) != 0:
yield self._rx_messages.pop(0)
return
|
return a generator for all subscribe messages
:return: generator that yields received messages
|
entailment
|
def send_acks(self, message):
"""
send acks to the service
:param message: EventHub_pb2.Message
:return: None
"""
if isinstance(message, EventHub_pb2.Message):
ack = EventHub_pb2.Ack(partition=message.partition, offset=message.offset)
self.grpc_manager.send_message(EventHub_pb2.SubscriptionResponse(ack=ack))
elif isinstance(message, EventHub_pb2.SubscriptionMessage):
acks = []
for m in message.messages:
acks.append(EventHub_pb2.Ack(partition=m.partition, offset=m.offset))
self.grpc_manager.send_message(EventHub_pb2.SubscriptionAcks(ack=acks))
|
send acks to the service
:param message: EventHub_pb2.Message
:return: None
|
entailment
|
def _generate_subscribe_headers(self):
"""
generate the subscribe stub headers based on the supplied config
:return: list of (header, value) tuples
"""
headers = []
headers.append(('predix-zone-id', self.eventhub_client.zone_id))
token = self.eventhub_client.service._get_bearer_token()
headers.append(('subscribername', self._config.subscriber_name))
headers.append(('authorization', token[(token.index(' ') + 1):]))
if not self._config.topics:
headers.append(('topic', self.eventhub_client.zone_id + '_topic'))
else:
for topic in self._config.topics:
headers.append(('topic', topic))
headers.append(('offset-newest', str(self._config.recency == self._config.Recency.NEWEST).lower()))
headers.append(('acks', str(self._config.acks_enabled).lower()))
if self._config.acks_enabled:
headers.append(('max-retries', str(self._config.ack_max_retries)))
headers.append(('retry-interval', str(self._config.ack_retry_interval_seconds) + 's'))
headers.append(('duration-before-retry', str(self._config.ack_duration_before_retry_seconds) + 's'))
if self._config.batching_enabled:
headers.append(('batch-size', str(self._config.batch_size)))
headers.append(('batch-interval', str(self._config.batch_interval_millis) + 'ms'))
return headers
|
generate the subscribe stub headers based on the supplied config
:return: list of (header, value) tuples
|
entailment
|
def _get_assets(self, bbox, size=None, page=None, asset_type=None,
device_type=None, event_type=None, media_type=None):
"""
Returns the raw results of an asset search for a given bounding
box.
"""
uri = self.uri + '/v1/assets/search'
headers = self._get_headers()
params = {
'bbox': bbox,
}
# Query parameters
params['q'] = []
if device_type:
if isinstance(device_type, str):
device_type = [device_type]
for device in device_type:
if device not in self.DEVICE_TYPES:
logging.warning("Invalid device type: %s" % device)
params['q'].append("device-type:%s" % device)
if asset_type:
if isinstance(asset_type, str):
asset_type = [asset_type]
for asset in asset_type:
if asset not in self.ASSET_TYPES:
logging.warning("Invalid asset type: %s" % asset)
params['q'].append("assetType:%s" % asset)
if media_type:
if isinstance(media_type, str):
media_type = [media_type]
for media in media_type:
if media not in self.MEDIA_TYPES:
logging.warning("Invalid media type: %s" % media)
params['q'].append("mediaType:%s" % media)
if event_type:
if isinstance(event_type, str):
event_type = [event_type]
for event in event_type:
if event not in self.EVENT_TYPES:
logging.warning("Invalid event type: %s" % event)
params['q'].append("eventTypes:%s" % event)
# Pagination parameters
if size:
params['size'] = size
if page:
params['page'] = page
return self.service._get(uri, params=params, headers=headers)
|
Returns the raw results of an asset search for a given bounding
box.
|
entailment
|
def get_assets(self, bbox, **kwargs):
"""
Query the assets stored in the intelligent environment for a given
bounding box and query.
Assets can be filtered by type of asset, event, or media available.
- device_type=['DATASIM']
- asset_type=['CAMERA']
- event_type=['PKIN']
- media_type=['IMAGE']
Pagination can be controlled with keyword parameters
- page=2
- size=100
Returns a list of assets stored in a dictionary that describe their:
- asset-id
- device-type
- device-id
- media-type
- coordinates
- event-type
Additionally there are some _links for additional information.
"""
response = self._get_assets(bbox, **kwargs)
# Remove broken HATEOAS _links but identify asset uid first
assets = []
for asset in response['_embedded']['assets']:
asset_url = asset['_links']['self']
uid = asset_url['href'].split('/')[-1]
asset['uid'] = uid
del asset['_links']
assets.append(asset)
return assets
|
Query the assets stored in the intelligent environment for a given
bounding box and query.
Assets can be filtered by type of asset, event, or media available.
- device_type=['DATASIM']
- asset_type=['CAMERA']
- event_type=['PKIN']
- media_type=['IMAGE']
Pagination can be controlled with keyword parameters
- page=2
- size=100
Returns a list of assets stored in a dictionary that describe their:
- asset-id
- device-type
- device-id
- media-type
- coordinates
- event-type
Additionally there are some _links for additional information.
|
entailment
|
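A hypothetical usage sketch for get_assets; `ie` stands in for an instance of the client class these methods belong to, and both the bounding-box string format and the filter values are assumptions made for illustration.

```python
# `ie` is assumed to be an already-authenticated client exposing get_assets().
bbox = "32.715:-117.162,32.708:-117.151"  # assumed "lat:lon,lat:lon" corner format

cameras = ie.get_assets(bbox, asset_type=["CAMERA"], event_type=["PKIN"], size=50)
for asset in cameras:
    # each dict carries the uid extracted from the HATEOAS self link plus the documented fields
    print(asset["uid"], asset.get("device-type"), asset.get("media-type"))
```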
def _get_asset(self, asset_uid):
"""
Returns raw response for a given asset by its unique id.
"""
uri = self.uri + '/v2/assets/' + asset_uid
headers = self._get_headers()
return self.service._get(uri, headers=headers)
|
Returns raw response for a given asset by its unique id.
|
entailment
|
def label(self, input_grid):
"""
Label input grid with hysteresis method.
Args:
input_grid: 2D array of values.
Returns:
Labeled output grid.
"""
unset = 0
high_labels, num_labels = label(input_grid > self.high_thresh)
region_ranking = np.argsort(maximum(input_grid, high_labels, index=np.arange(1, num_labels + 1)))[::-1]
output_grid = np.zeros(input_grid.shape, dtype=int)
stack = []
for rank in region_ranking:
label_num = rank + 1
label_i, label_j = np.where(high_labels == label_num)
for i in range(label_i.size):
if output_grid[label_i[i], label_j[i]] == unset:
stack.append((label_i[i], label_j[i]))
while len(stack) > 0:
index = stack.pop()
output_grid[index] = label_num
for i in range(index[0] - 1, index[0] + 2):
for j in range(index[1] - 1, index[1] + 2):
if 0 <= i < output_grid.shape[0] and 0 <= j < output_grid.shape[1]:
if (input_grid[i, j] > self.low_thresh) and (output_grid[i, j] == unset):
stack.append((i, j))
return output_grid
|
Label input grid with hysteresis method.
Args:
input_grid: 2D array of values.
Returns:
Labeled output grid.
|
entailment
|
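Because the class that owns label() is not shown here, the following standalone sketch reproduces the same hysteresis idea on a toy grid: seed regions where values exceed the high threshold, then grow each seed through 8-connected neighbors above the low threshold (numpy and scipy assumed; the ranking of regions by maximum value is omitted).

```python
import numpy as np
from scipy.ndimage import binary_dilation, label as nd_label

grid = np.array([[0, 1, 5, 1, 0],
                 [0, 2, 9, 2, 0],
                 [0, 1, 4, 1, 0],
                 [0, 0, 0, 0, 0]], dtype=float)
low_thresh, high_thresh = 1.5, 8.0

seeds, num_seeds = nd_label(grid > high_thresh)    # strong cores only
candidate = grid > low_thresh                      # cells a core is allowed to grow into
eight_connected = np.ones((3, 3), dtype=bool)      # matches the i-1..i+1, j-1..j+1 loops above

output = np.zeros(grid.shape, dtype=int)
for region in range(1, num_seeds + 1):
    mask = seeds == region
    while True:                                    # grow until the region stops changing
        grown = binary_dilation(mask, structure=eight_connected) & candidate
        if np.array_equal(grown, mask):
            break
        mask = grown
    output[mask & (output == 0)] = region          # keep earlier regions where they overlap
print(output)
```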
def size_filter(labeled_grid, min_size):
"""
Remove labeled objects that do not meet size threshold criteria.
Args:
labeled_grid: 2D output from label method.
min_size: minimum size of object in pixels.
Returns:
labeled grid with smaller objects removed.
"""
out_grid = np.zeros(labeled_grid.shape, dtype=int)
slices = find_objects(labeled_grid)
j = 1
for i, s in enumerate(slices):
box = labeled_grid[s]
size = np.count_nonzero(box.ravel() == (i + 1))
if size >= min_size and box.shape[0] > 1 and box.shape[1] > 1:
out_grid[np.where(labeled_grid == i + 1)] = j
j += 1
return out_grid
|
Remove labeled objects that do not meet size threshold criteria.
Args:
labeled_grid: 2D output from label method.
min_size: minimum size of object in pixels.
Returns:
labeled grid with smaller objects removed.
|
entailment
|
def roc_curve(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel="Probability of False Detection",
ylabel="Probability of Detection",
title="ROC Curve", ticks=np.arange(0, 1.1, 0.1), dpi=300,
legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5),
label_fontsize=14, title_fontsize=16, tick_fontsize=12):
"""
Plots a set of receiver/relative operating characteristic (ROC) curves from DistributedROC objects.
The ROC curve shows how well a forecast discriminates between two outcomes over a series of thresholds. It
features Probability of Detection (True Positive Rate) on the y-axis and Probability of False Detection
(False Alarm Rate) on the x-axis. This plotting function allows one to customize the colors and markers of the
ROC curves as well as the parameters of the legend and the title.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels (list): Label describing the forecast associated with a DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
legend_params (None, dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): List of lists of DistributedROC objects that were bootstrap resampled for each model.
ci (tuple of 2 floats): Quantiles of the edges of the bootstrap confidence intervals ranging from 0 to 100.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.random_integers(0, 1, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png")
"""
if legend_params is None:
legend_params = dict(loc=4, fontsize=12, framealpha=1, frameon=True)
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(ticks, ticks, "k--")
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
broc_curves = np.dstack([b_roc.roc_curve().values for b_roc in b_set])
pod_range = np.percentile(broc_curves[:,0], ci, axis=1)
pofd_range = np.percentile(broc_curves[:, 1], ci, axis=1)
pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
pofd_poly = np.concatenate((pofd_range[0], pofd_range[1, ::-1]))
pod_poly[np.isnan(pod_poly)] = 0
pofd_poly[np.isnan(pofd_poly)] = 0
plt.fill(pofd_poly, pod_poly, alpha=0.5, color=colors[b])
for r, roc_obj in enumerate(roc_objs):
roc_data = roc_obj.roc_curve()
plt.plot(roc_data["POFD"], roc_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r])
plt.xlabel(xlabel, fontsize=label_fontsize)
plt.ylabel(ylabel, fontsize=label_fontsize)
plt.xticks(ticks, fontsize=tick_fontsize)
plt.yticks(ticks, fontsize=tick_fontsize)
plt.title(title, fontsize=title_fontsize)
plt.legend(**legend_params)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
plt.close()
|
Plots a set of receiver/relative operating characteristic (ROC) curves from DistributedROC objects.
The ROC curve shows how well a forecast discriminates between two outcomes over a series of thresholds. It
features Probability of Detection (True Positive Rate) on the y-axis and Probability of False Detection
(False Alarm Rate) on the x-axis. This plotting function allows one to customize the colors and markers of the
ROC curves as well as the parameters of the legend and the title.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels (list): Label describing the forecast associated with a DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
legend_params (None, dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): List of lists of DistributedROC objects that were bootstrap resampled for each model.
ci (tuple of 2 floats): Quantiles of the edges of the bootstrap confidence intervals ranging from 0 to 100.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.random_integers(0, 1, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> roc_curve([roc], ["Random"], ["orange"], ["o"], "random_roc.png")
|
entailment
|
def performance_diagram(roc_objs, obj_labels, colors, markers, filename, figsize=(8, 8),
xlabel="Success Ratio (1-FAR)",
ylabel="Probability of Detection", ticks=np.arange(0, 1.1, 0.1),
dpi=300, csi_cmap="Blues",
csi_label="Critical Success Index", title="Performance Diagram",
legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5), label_fontsize=14,
title_fontsize=16, tick_fontsize=12):
"""
Draws a performance diagram from a set of DistributedROC objects.
A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the
x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows
the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as
angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear
in the upper right corner. The performance diagram is particularly useful for displaying verification for
severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously
on the same chart.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels (list): Label describing the forecast associated with a DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
csi_cmap (str): Matplotlib colormap used to fill CSI contours.
csi_label (str): Label for CSI colormap.
legend_params (None or dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.random_integers(0, 1, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png")
"""
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
plt.figure(figsize=figsize)
grid_ticks = np.arange(0, 1.01, 0.01)
sr_g, pod_g = np.meshgrid(grid_ticks, grid_ticks)
bias = pod_g / sr_g
csi = 1.0 / (1.0 / sr_g + 1.0 / pod_g - 1.0)
csi_contour = plt.contourf(sr_g, pod_g, csi, np.arange(0.1, 1.1, 0.1), extend="max", cmap=csi_cmap)
b_contour = plt.contour(sr_g, pod_g, bias, [0.5, 1, 1.5, 2, 4], colors="k", linestyles="dashed")
plt.clabel(b_contour, fmt="%1.1f", manual=[(0.2, 0.9), (0.4, 0.9), (0.6, 0.9), (0.7, 0.7)])
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
perf_curves = np.dstack([b_roc.performance_curve().values for b_roc in b_set])
pod_range = np.nanpercentile(perf_curves[:, 0], ci, axis=1)
sr_range = np.nanpercentile(1 - perf_curves[:, 1], ci, axis=1)
pod_poly = np.concatenate((pod_range[1], pod_range[0, ::-1]))
sr_poly = np.concatenate((sr_range[1], sr_range[0, ::-1]))
pod_poly[np.isnan(pod_poly)] = 0
sr_poly[np.isnan(sr_poly)] = 1
plt.fill(sr_poly, pod_poly, alpha=0.5, color=colors[b])
for r, roc_obj in enumerate(roc_objs):
perf_data = roc_obj.performance_curve()
plt.plot(1 - perf_data["FAR"], perf_data["POD"], marker=markers[r], color=colors[r], label=obj_labels[r])
cbar = plt.colorbar(csi_contour)
cbar.set_label(csi_label)
plt.xlabel(xlabel, fontsize=label_fontsize)
plt.ylabel(ylabel, fontsize=label_fontsize)
plt.xticks(ticks, fontsize=tick_fontsize)
plt.yticks(ticks, fontsize=tick_fontsize)
plt.title(title, fontsize=title_fontsize)
plt.legend(**legend_params)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
plt.close()
|
Draws a performance diagram from a set of DistributedROC objects.
A performance diagram is a variation on the ROC curve in which the Probability of False Detection on the
x-axis has been replaced with the Success Ratio (1-False Alarm Ratio or Precision). The diagram also shows
the Critical Success Index (CSI or Threat Score) as a series of curved contours, and the frequency bias as
angled diagonal lines. Points along the 1:1 diagonal are unbiased, and better performing models should appear
in the upper right corner. The performance diagram is particularly useful for displaying verification for
severe weather warnings as it displays all three commonly used statistics (POD, FAR, and CSI) simultaneously
on the same chart.
Args:
roc_objs (list): DistributedROC objects being plotted.
obj_labels (list): Label describing the forecast associated with a DistributedROC object.
colors (list): List of matplotlib-readable colors (names or hex-values) for each curve.
markers (list): Matplotlib marker (e.g. *, o, v, etc.) for each curve.
filename (str): Name of figure file being saved.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): Label for the x-axis.
ylabel (str): Label for the y-axis.
title (str): The title of the figure.
ticks (numpy.ndarray): Values shown on the x and y axes.
dpi (int): Figure resolution in dots per inch.
csi_cmap (str): Matplotlib colormap used to fill CSI contours.
csi_label (str): Label for CSI colormap.
legend_params (None or dict): Keyword arguments for the formatting of the figure legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles.
label_fontsize (int): Font size of the x and y axis labels.
title_fontsize (int): Font size of the title.
tick_fontsize (int): Font size of the x and y tick labels.
Examples:
>>> from hagelslag.evaluation import DistributedROC
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.random_integers(0, 1, 1000)
>>> roc = DistributedROC()
>>> roc.update(forecasts, obs)
>>> performance_diagram([roc], ["Random"], ["orange"], ["o"], "random_performance.png")
|
entailment
|
def reliability_diagram(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Forecast Probability",
ylabel="Observed Relative Frequency", ticks=np.arange(0, 1.05, 0.05), dpi=300, inset_size=1.5,
title="Reliability Diagram", legend_params=None, bootstrap_sets=None, ci=(2.5, 97.5)):
"""
Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their
observed relative frequency.
Args:
rel_objs (list): List of DistributedReliability objects.
obj_labels (list): List of labels describing the forecast model associated with each curve.
colors (list): List of colors for each line
markers (list): List of line markers
filename (str): Where to save the figure.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): X-axis label
ylabel (str): Y-axis label
ticks (array): Tick value labels for the x and y axes.
dpi (int): resolution of the saved figure in dots per inch.
inset_size (float): Size of inset
title (str): Title of figure
legend_params (dict): Keyword arguments for the plot legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles
"""
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
fig, ax = plt.subplots(figsize=figsize)
plt.plot(ticks, ticks, "k--")
inset_hist = inset_axes(ax, width=inset_size, height=inset_size, loc=2)
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
brel_curves = np.dstack([b_rel.reliability_curve().values for b_rel in b_set])
bin_range = np.percentile(brel_curves[:,0], ci, axis=1)
rel_range = np.percentile(brel_curves[:, 3], ci, axis=1)
bin_poly = np.concatenate((bin_range[1], bin_range[0, ::-1]))
rel_poly = np.concatenate((rel_range[1], rel_range[0, ::-1]))
bin_poly[np.isnan(bin_poly)] = 0
rel_poly[np.isnan(rel_poly)] = 0
plt.fill(bin_poly, rel_poly, alpha=0.5, color=colors[b])
for r, rel_obj in enumerate(rel_objs):
rel_curve = rel_obj.reliability_curve()
ax.plot(rel_curve["Bin_Start"], rel_curve["Positive_Relative_Freq"], color=colors[r], marker=markers[r],
label=obj_labels[r])
inset_hist.semilogy(rel_curve["Bin_Start"], rel_curve["Total_Relative_Freq"], color=colors[r],
marker=markers[r])
inset_hist.set_xlabel("Forecast Probability")
inset_hist.set_ylabel("Forecast Relative Frequency")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
ax.legend(**legend_params)
ax.set_title(title)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
plt.close()
|
Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their
observed relative frequency.
Args:
rel_objs (list): List of DistributedReliability objects.
obj_labels (list): List of labels describing the forecast model associated with each curve.
colors (list): List of colors for each line
markers (list): List of line markers
filename (str): Where to save the figure.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): X-axis label
ylabel (str): Y-axis label
ticks (array): Tick value labels for the x and y axes.
dpi (int): resolution of the saved figure in dots per inch.
inset_size (float): Size of inset
title (str): Title of figure
legend_params (dict): Keyword arguments for the plot legend.
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles
|
entailment
|
def attributes_diagram(rel_objs, obj_labels, colors, markers, filename, figsize=(8, 8), xlabel="Forecast Probability",
ylabel="Observed Relative Frequency", ticks=np.arange(0, 1.05, 0.05), dpi=300,
title="Attributes Diagram", legend_params=None, inset_params=None,
inset_position=(0.12, 0.72, 0.25, 0.25), bootstrap_sets=None, ci=(2.5, 97.5)):
"""
Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their
observed relative frequency. Also adds gray areas to show where the climatological probabilities lie and what
areas result in a positive Brier Skill Score.
Args:
rel_objs (list): List of DistributedReliability objects.
obj_labels (list): List of labels describing the forecast model associated with each curve.
colors (list): List of colors for each line
markers (list): List of line markers
filename (str): Where to save the figure.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): X-axis label
ylabel (str): Y-axis label
ticks (array): Tick value labels for the x and y axes.
dpi (int): resolution of the saved figure in dots per inch.
title (str): Title of figure
legend_params (dict): Keyword arguments for the plot legend.
inset_params (dict): Keyword arguments for the inset axis.
inset_position (tuple): Position of the inset axis in normalized axes coordinates (left, bottom, width, height)
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles
"""
if legend_params is None:
legend_params = dict(loc=4, fontsize=10, framealpha=1, frameon=True)
if inset_params is None:
inset_params = dict(width="25%", height="25%", loc=2, axes_kwargs=dict(axisbg='white'))
fig, ax = plt.subplots(figsize=figsize)
plt.plot(ticks, ticks, "k--")
inset_hist = inset_axes(ax, **inset_params)
ip = InsetPosition(ax, inset_position)
inset_hist.set_axes_locator(ip)
climo = rel_objs[0].climatology()
no_skill = 0.5 * ticks + 0.5 * climo
skill_x = [climo, climo, 1, 1, climo, climo, 0, 0, climo]
skill_y = [climo, 1, 1, no_skill[-1], climo, 0, 0, no_skill[0], climo]
f = ax.fill(skill_x, skill_y, "0.8")
f[0].set_zorder(1)
ax.plot(ticks, np.ones(ticks.shape) * climo, "k--")
if bootstrap_sets is not None:
for b, b_set in enumerate(bootstrap_sets):
brel_curves = np.vstack([b_rel.reliability_curve()["Positive_Relative_Freq"].values for b_rel in b_set])
rel_range = np.nanpercentile(brel_curves, ci, axis=0)
# take thresholds from the first bootstrap object; b_rel is scoped to the comprehension above in Python 3
fb = ax.fill_between(b_set[0].thresholds[:-1], rel_range[1], rel_range[0], alpha=0.5, color=colors[b])
fb.set_zorder(2)
for r, rel_obj in enumerate(rel_objs):
rel_curve = rel_obj.reliability_curve()
ax.plot(rel_curve["Bin_Start"], rel_curve["Positive_Relative_Freq"], color=colors[r],
marker=markers[r], label=obj_labels[r])
inset_hist.semilogy(rel_curve["Bin_Start"] * 100, rel_obj.frequencies["Total_Freq"][:-1], color=colors[r],
marker=markers[r])
inset_hist.set_xlabel("Forecast Probability")
inset_hist.set_ylabel("Frequency")
ax.annotate("No Skill", (0.6, no_skill[12]), rotation=22.5)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xticks(ticks)
ax.set_xticklabels((ticks * 100).astype(int))
ax.set_yticks(ticks)
ax.set_yticklabels((ticks * 100).astype(int))
ax.legend(**legend_params)
ax.set_title(title)
plt.savefig(filename, dpi=dpi, bbox_inches="tight")
plt.close()
|
Plot reliability curves against a 1:1 diagonal to determine if probability forecasts are consistent with their
observed relative frequency. Also adds gray areas to show where the climatological probabilities lie and what
areas result in a positive Brier Skill Score.
Args:
rel_objs (list): List of DistributedReliability objects.
obj_labels (list): List of labels describing the forecast model associated with each curve.
colors (list): List of colors for each line
markers (list): List of line markers
filename (str): Where to save the figure.
figsize (tuple): (Width, height) of the figure in inches.
xlabel (str): X-axis label
ylabel (str): Y-axis label
ticks (array): Tick value labels for the x and y axes.
dpi (int): resolution of the saved figure in dots per inch.
title (str): Title of figure
legend_params (dict): Keyword arguments for the plot legend.
inset_params (dict): Keyword arguments for the inset axis.
inset_position (tuple): Position of the inset axis in normalized axes coordinates (left, bottom, width, height)
bootstrap_sets (list): A list of arrays of bootstrapped DistributedROC objects. If not None,
confidence regions will be plotted.
ci (tuple): tuple of bootstrap confidence interval percentiles
|
entailment
|
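Unlike roc_curve and performance_diagram, the reliability and attributes diagrams above carry no doctest. A hypothetical call mirroring the earlier examples is sketched below; it assumes DistributedReliability exposes an update(forecasts, observations) method analogous to DistributedROC's, which is not confirmed by this excerpt.

```python
>>> from hagelslag.evaluation import DistributedReliability
>>> import numpy as np
>>> forecasts = np.random.random(1000)
>>> obs = np.random.randint(0, 2, 1000)
>>> rel = DistributedReliability()  # assumed default thresholds
>>> rel.update(forecasts, obs)      # assumed API, mirroring DistributedROC.update
>>> reliability_diagram([rel], ["Random"], ["orange"], ["o"], "random_reliability.png")
>>> attributes_diagram([rel], ["Random"], ["orange"], ["o"], "random_attributes.png")
```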
def get_string(self, prompt, default_str=None) -> str:
"""Return a string value that the user enters. Raises exception for cancel."""
accept_event = threading.Event()
value_ref = [None]
def perform():
def accepted(text):
value_ref[0] = text
accept_event.set()
def rejected():
accept_event.set()
self.__message_column.remove_all()
pose_get_string_message_box(self.ui, self.__message_column, prompt, str(default_str), accepted, rejected)
#self.__message_column.add(self.__make_cancel_row())
with self.__lock:
self.__q.append(perform)
self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q)
accept_event.wait()
def update_message_column():
self.__message_column.remove_all()
self.__message_column.add(self.__make_cancel_row())
self.document_controller.add_task("ui_" + str(id(self)), update_message_column)
if value_ref[0] is None:
raise Exception("Cancel")
return value_ref[0]
|
Return a string value that the user enters. Raises exception for cancel.
|
entailment
|
def __accept_reject(self, prompt, accepted_text, rejected_text, display_rejected):
"""Return a boolean value for accept/reject."""
accept_event = threading.Event()
result_ref = [False]
def perform():
def accepted():
result_ref[0] = True
accept_event.set()
def rejected():
result_ref[0] = False
accept_event.set()
self.__message_column.remove_all()
pose_confirmation_message_box(self.ui, self.__message_column, prompt, accepted, rejected, accepted_text, rejected_text, display_rejected)
#self.__message_column.add(self.__make_cancel_row())
with self.__lock:
self.__q.append(perform)
self.document_controller.add_task("ui_" + str(id(self)), self.__handle_output_and_q)
accept_event.wait()
def update_message_column():
self.__message_column.remove_all()
self.__message_column.add(self.__make_cancel_row())
self.document_controller.add_task("ui_" + str(id(self)), update_message_column)
return result_ref[0]
|
Return a boolean value for accept/reject.
|
entailment
|
def compute_weight(self, r, ytr=None, mask=None):
"""Returns the weight (w) using OLS of r * w = gp._ytr """
ytr = self._ytr if ytr is None else ytr
mask = self._mask if mask is None else mask
return compute_weight(r, ytr, mask)
|
Returns the weight (w) using OLS of r * w = gp._ytr
|
entailment
|
def isfinite(self):
"Test whether the predicted values are finite"
if self._multiple_outputs:
if self.hy_test is not None:
r = [(hy.isfinite() and (hyt is None or hyt.isfinite()))
for hy, hyt in zip(self.hy, self.hy_test)]
else:
r = [hy.isfinite() for hy in self.hy]
return np.all(r)
return self.hy.isfinite() and (self.hy_test is None or
self.hy_test.isfinite())
|
Test whether the predicted values are finite
|
entailment
|
def value_of_named_argument_in_function(argument_name, function_name, search_str,
resolve_varname=False):
""" Parse an arbitrary block of python code to get the value of a named argument
from inside a function call
"""
try:
search_str = unicode(search_str)
except NameError:
pass
readline = StringIO(search_str).readline
try:
token_generator = tokenize.generate_tokens(readline)
tokens = [SimplifiedToken(toknum, tokval) for toknum, tokval, _, _, _ in token_generator]
except tokenize.TokenError as e:
raise ValueError('search_str is not parse-able python code: ' + str(e))
in_function = False
is_var = False
for i in range(len(tokens)):
if (
not in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == function_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '('
):
in_function = True
continue
elif (
in_function and
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
# value is set to another variable which we are going to attempt to resolve
if resolve_varname and tokens[i+2].typenum == 1:
is_var = True
argument_name = tokens[i+2].value
break
# again, for a very specific usecase -- get the whole value and concatenate it
# this will match something like _version.__version__
j = 3
while True:
if tokens[i+j].value in (',', ')') or tokens[i+j].typenum == 58:
break
j += 1
return ''.join([t.value for t in tokens[i+2:i+j]]).strip()
# this is very dumb logic, and only works if the function argument is set to a variable
# which is set to a string value
if is_var:
for i in range(len(tokens)):
if (
tokens[i].typenum == tokenize.NAME and tokens[i].value == argument_name and
tokens[i+1].typenum == tokenize.OP and tokens[i+1].value == '='
):
return tokens[i+2].value.strip()
return None
|
Parse an arbitrary block of python code to get the value of a named argument
from inside a function call
|
entailment
|
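A hypothetical usage sketch, assuming value_of_named_argument_in_function and its helpers are importable as defined above; the setup.py snippet is invented for illustration, and the raw token text is what comes back, so string quotes are preserved.

```python
setup_py_text = (
    "from setuptools import setup\n"
    "setup(name='demo', version='1.2.3', packages=['demo'])\n"
)

# Returns the raw token(s) assigned to the named argument inside the setup(...) call.
value = value_of_named_argument_in_function('version', 'setup', setup_py_text)
print(value)  # "'1.2.3'" -- quotes included, since raw token text is returned
```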
def regex_in_file(regex, filepath, return_match=False):
""" Search for a regex in a file
If return_match is True, return the found object instead of a boolean
"""
file_content = get_file_content(filepath)
re_method = funcy.re_find if return_match else funcy.re_test
return re_method(regex, file_content)
|
Search for a regex in a file
If return_match is True, return the found object instead of a boolean
|
entailment
|
def regex_in_package_file(regex, filename, package_name, return_match=False):
""" Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
"""
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match)
|
Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
|
entailment
|
def string_is_url(test_str):
""" Test to see if a string is a URL or not, defined in this case as a string for which
urlparse returns a scheme component
>>> string_is_url('somestring')
False
>>> string_is_url('https://some.domain.org/path')
True
"""
parsed = urlparse.urlparse(test_str)
return parsed.scheme is not None and parsed.scheme != ''
|
Test to see if a string is a URL or not, defined in this case as a string for which
urlparse returns a scheme component
>>> string_is_url('somestring')
False
>>> string_is_url('https://some.domain.org/path')
True
|
entailment
|
def item_transaction(self, item) -> Transaction:
"""Begin transaction state for item.
A transaction state exists to prevent writing out to disk, mainly for performance reasons.
All changes to the object are delayed until the transaction state exits.
This method is thread safe.
"""
items = self.__build_transaction_items(item)
transaction = Transaction(self, item, items)
self.__transactions.append(transaction)
return transaction
|
Begin transaction state for item.
A transaction state exists to prevent writing out to disk, mainly for performance reasons.
All changes to the object are delayed until the transaction state exits.
This method is thread safe.
|
entailment
|
def insert_data_item(self, before_index, data_item, auto_display: bool = True) -> None:
"""Insert a new data item into document model.
This method is NOT threadsafe.
"""
assert data_item is not None
assert data_item not in self.data_items
assert before_index <= len(self.data_items) and before_index >= 0
assert data_item.uuid not in self.__uuid_to_data_item
# update the session
data_item.session_id = self.session_id
# insert in internal list
self.__insert_data_item(before_index, data_item, do_write=True)
# automatically add a display
if auto_display:
display_item = DisplayItem.DisplayItem(data_item=data_item)
self.append_display_item(display_item)
|
Insert a new data item into document model.
This method is NOT threadsafe.
|
entailment
|
def remove_data_item(self, data_item: DataItem.DataItem, *, safe: bool=False) -> typing.Optional[typing.Sequence]:
"""Remove data item from document model.
This method is NOT threadsafe.
"""
# remove data item from any computations
return self.__cascade_delete(data_item, safe=safe)
|
Remove data item from document model.
This method is NOT threadsafe.
|
entailment
|
def __cascade_delete_inner(self, master_item, safe: bool=False) -> typing.Optional[typing.Sequence]:
"""Cascade delete an item.
Returns an undelete log that can be used to undo the cascade deletion.
Builds a cascade of items to be deleted and dependencies to be removed when the passed item is deleted. Then
removes computations that are no longer valid. Removing a computation may result in more deletions, so the
process is repeated until nothing more gets removed.
Next remove dependencies.
Next remove individual items (from the most distant from the root item to the root item).
"""
# print(f"cascade {master_item}")
# this horrible little hack ensures that computation changed messages are delayed until the end of the cascade
# delete; otherwise there are cases where dependencies can be reestablished during the changed messages while
# this method is partially finished. ugh. see test_computation_deletes_when_source_cycle_deletes.
if self.__computation_changed_delay_list is None:
computation_changed_delay_list = list()
self.__computation_changed_delay_list = computation_changed_delay_list
else:
computation_changed_delay_list = None
undelete_log = list()
try:
items = list()
dependencies = list()
self.__build_cascade(master_item, items, dependencies)
cascaded = True
while cascaded:
cascaded = False
# adjust computation bookkeeping to remove deleted items, then delete unused computations
items_set = set(items)
for computation in copy.copy(self.computations):
output_deleted = master_item in computation._outputs
computation._inputs -= items_set
computation._outputs -= items_set
if computation not in items and computation != self.__current_computation:
# computations are auto deleted if all inputs are deleted or any output is deleted
if output_deleted or all(input in items for input in computation._inputs):
self.__build_cascade(computation, items, dependencies)
cascaded = True
# print(list(reversed(items)))
# print(list(reversed(dependencies)))
for source, target in reversed(dependencies):
self.__remove_dependency(source, target)
# now delete the actual items
for item in reversed(items):
for computation in self.computations:
new_entries = computation.list_item_removed(item)
undelete_log.extend(new_entries)
container = item.container
if isinstance(item, DataItem.DataItem):
name = "data_items"
elif isinstance(item, DisplayItem.DisplayItem):
name = "display_items"
elif isinstance(item, Graphics.Graphic):
name = "graphics"
elif isinstance(item, DataStructure.DataStructure):
name = "data_structures"
elif isinstance(item, Symbolic.Computation):
name = "computations"
elif isinstance(item, Connection.Connection):
name = "connections"
elif isinstance(item, DisplayItem.DisplayDataChannel):
name = "display_data_channels"
else:
name = None
assert False, "Unable to cascade delete type " + str(type(item))
assert name
# print(container, name, item)
if container is self and name == "data_items":
# call the version of __remove_data_item that doesn't cascade again
index = getattr(container, name).index(item)
item_dict = item.write_to_dict()
# NOTE: __remove_data_item will notify_remove_item
undelete_log.extend(self.__remove_data_item(item, safe=safe))
undelete_log.append({"type": name, "index": index, "properties": item_dict})
elif container is self and name == "display_items":
# call the version of __remove_data_item that doesn't cascade again
index = getattr(container, name).index(item)
item_dict = item.write_to_dict()
# NOTE: __remove_display_item will notify_remove_item
undelete_log.extend(self.__remove_display_item(item, safe=safe))
undelete_log.append({"type": name, "index": index, "properties": item_dict})
elif container:
container_ref = str(container.uuid)
index = getattr(container, name).index(item)
item_dict = item.write_to_dict()
container_properties = container.save_properties() if hasattr(container, "save_properties") else dict()
undelete_log.append({"type": name, "container": container_ref, "index": index, "properties": item_dict, "container_properties": container_properties})
container.remove_item(name, item)
# handle top level 'remove item' notifications for data structures, computations, and display items here
# since they're not handled elsewhere.
if container == self and name in ("data_structures", "computations"):
self.notify_remove_item(name, item, index)
except Exception as e:
import sys, traceback
traceback.print_exc()
traceback.format_exception(*sys.exc_info())
finally:
# check whether this call of __cascade_delete is the top level one that will finish the computation
# changed messages.
if computation_changed_delay_list is not None:
self.__finish_computation_changed()
return undelete_log
|
Cascade delete an item.
Returns an undelete log that can be used to undo the cascade deletion.
Builds a cascade of items to be deleted and dependencies to be removed when the passed item is deleted. Then
removes computations that are no longer valid. Removing a computation may result in more deletions, so the
process is repeated until nothing more gets removed.
Next remove dependencies.
Next remove individual items (from the most distant from the root item to the root item).
|
entailment
|
def get_dependent_items(self, item) -> typing.List:
"""Return the list of data items containing data that directly depends on data in this item."""
with self.__dependency_tree_lock:
return copy.copy(self.__dependency_tree_source_to_target_map.get(weakref.ref(item), list()))
|
Return the list of data items containing data that directly depends on data in this item.
|
entailment
|
def __get_deep_dependent_item_set(self, item, item_set) -> None:
"""Return the list of data items containing data that directly depends on data in this item."""
if not item in item_set:
item_set.add(item)
with self.__dependency_tree_lock:
for dependent in self.get_dependent_items(item):
self.__get_deep_dependent_item_set(dependent, item_set)
|
Add to item_set all items whose data directly or indirectly depends on data in this item.
|
entailment
|
def get_dependent_data_items(self, data_item: DataItem.DataItem) -> typing.List[DataItem.DataItem]:
"""Return the list of data items containing data that directly depends on data in this item."""
with self.__dependency_tree_lock:
return [data_item for data_item in self.__dependency_tree_source_to_target_map.get(weakref.ref(data_item), list()) if isinstance(data_item, DataItem.DataItem)]
|
Return the list of data items containing data that directly depends on data in this item.
|
entailment
|
def transaction_context(self):
"""Return a context object for a document-wide transaction."""
class DocumentModelTransaction:
def __init__(self, document_model):
self.__document_model = document_model
def __enter__(self):
self.__document_model.persistent_object_context.enter_write_delay(self.__document_model)
return self
def __exit__(self, type, value, traceback):
self.__document_model.persistent_object_context.exit_write_delay(self.__document_model)
self.__document_model.persistent_object_context.rewrite_item(self.__document_model)
return DocumentModelTransaction(self)
|
Return a context object for a document-wide transaction.
|
entailment
|
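A usage sketch grounded in the __enter__/__exit__ methods of the returned object; document_model and the bulk update inside the block are placeholder names chosen for illustration.

```python
# Batch many changes so the document is rewritten once on exit rather than per change.
with document_model.transaction_context():
    for data_item in document_model.data_items:   # placeholder iteration
        data_item.session_id = "20240101-000000"  # hypothetical bulk update
```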
def data_item_live(self, data_item):
""" Return a context manager to put the data item in a 'live state'. """
class LiveContextManager:
def __init__(self, manager, object):
self.__manager = manager
self.__object = object
def __enter__(self):
self.__manager.begin_data_item_live(self.__object)
return self
def __exit__(self, type, value, traceback):
self.__manager.end_data_item_live(self.__object)
return LiveContextManager(self, data_item)
|
Return a context manager to put the data item in a 'live state'.
|
entailment
|
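Because the returned LiveContextManager implements __enter__ and __exit__, the live state can be scoped with a with-block; document_model, data_item, and the acquisition call are placeholders for illustration.

```python
with document_model.data_item_live(data_item):
    # data_item and its dependents report as live while acquisition runs
    acquire_and_update(data_item)  # hypothetical acquisition routine
```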
def begin_data_item_live(self, data_item):
"""Begins a live state for the data item.
The live state is propagated to dependent data items.
This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive.
"""
with self.__live_data_items_lock:
old_live_count = self.__live_data_items.get(data_item.uuid, 0)
self.__live_data_items[data_item.uuid] = old_live_count + 1
if old_live_count == 0:
data_item._enter_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.begin_data_item_live(dependent_data_item)
|
Begins a live state for the data item.
The live state is propagated to dependent data items.
This method is thread safe. See slow_test_dependent_data_item_removed_while_live_data_item_becomes_unlive.
|
entailment
|
def end_data_item_live(self, data_item):
"""Ends a live state for the data item.
The live-ness property is propagated to dependent data items, similar to the transactions.
This method is thread safe.
"""
with self.__live_data_items_lock:
live_count = self.__live_data_items.get(data_item.uuid, 0) - 1
assert live_count >= 0
self.__live_data_items[data_item.uuid] = live_count
if live_count == 0:
data_item._exit_live_state()
for dependent_data_item in self.get_dependent_data_items(data_item):
self.end_data_item_live(dependent_data_item)
|
Ends a live state for the data item.
The live-ness property is propagated to dependent data items, similar to the transactions.
This method is thread safe.
|
entailment
|
def __construct_data_item_reference(self, hardware_source: HardwareSource.HardwareSource, data_channel: HardwareSource.DataChannel):
"""Construct a data item reference.
Construct a data item reference and assign a data item to it. Update data item session id and session metadata.
Also connect the data channel processor.
This method is thread safe.
"""
session_id = self.session_id
key = self.make_data_item_reference_key(hardware_source.hardware_source_id, data_channel.channel_id)
data_item_reference = self.get_data_item_reference(key)
with data_item_reference.mutex:
data_item = data_item_reference.data_item
# if we still don't have a data item, create it.
if data_item is None:
data_item = DataItem.DataItem()
data_item.ensure_data_source()
data_item.title = "%s (%s)" % (hardware_source.display_name, data_channel.name) if data_channel.name else hardware_source.display_name
data_item.category = "temporary"
data_item_reference.data_item = data_item
def append_data_item():
self.append_data_item(data_item)
self._update_data_item_reference(key, data_item)
self.__call_soon(append_data_item)
def update_session():
# update the session, but only if necessary (this is an optimization to prevent unnecessary display updates)
if data_item.session_id != session_id:
data_item.session_id = session_id
session_metadata = ApplicationData.get_session_metadata_dict()
if data_item.session_metadata != session_metadata:
data_item.session_metadata = session_metadata
if data_channel.processor:
src_data_channel = hardware_source.data_channels[data_channel.src_channel_index]
src_data_item_reference = self.get_data_item_reference(self.make_data_item_reference_key(hardware_source.hardware_source_id, src_data_channel.channel_id))
data_channel.processor.connect_data_item_reference(src_data_item_reference)
self.__call_soon(update_session)
return data_item_reference
|
Construct a data item reference.
Construct a data item reference and assign a data item to it. Update data item session id and session metadata.
Also connect the data channel processor.
This method is thread safe.
|
entailment
|
def __make_computation(self, processing_id: str, inputs: typing.List[typing.Tuple[DisplayItem.DisplayItem, typing.Optional[Graphics.Graphic]]], region_list_map: typing.Mapping[str, typing.List[Graphics.Graphic]]=None, parameters: typing.Mapping[str, typing.Any]=None) -> DataItem.DataItem:
"""Create a new data item with computation specified by processing_id, inputs, and region_list_map.
The region_list_map associates a list of graphics corresponding to the required regions with a computation source (key).
"""
region_list_map = region_list_map or dict()
parameters = parameters or dict()
processing_descriptions = self._processing_descriptions
processing_description = processing_descriptions[processing_id]
# first process the sources in the description. match them to the inputs (which are data item/crop graphic tuples)
src_dicts = processing_description.get("sources", list())
assert len(inputs) == len(src_dicts)
src_names = list()
src_texts = list()
src_labels = list()
regions = list()
region_map = dict()
for i, (src_dict, input) in enumerate(zip(src_dicts, inputs)):
display_item = input[0]
data_item = display_item.data_items[0] if display_item and len(display_item.data_items) > 0 else None
if not data_item:
return None
# each source can have a list of requirements, check through them
requirements = src_dict.get("requirements", list())
for requirement in requirements:
requirement_type = requirement["type"]
if requirement_type == "dimensionality":
min_dimension = requirement.get("min")
max_dimension = requirement.get("max")
dimensionality = len(data_item.dimensional_shape)
if min_dimension is not None and dimensionality < min_dimension:
return None
if max_dimension is not None and dimensionality > max_dimension:
return None
if requirement_type == "is_sequence":
if not data_item.is_sequence:
return None
src_name = src_dict["name"]
src_label = src_dict["label"]
use_display_data = src_dict.get("use_display_data", True)
xdata_property = "display_xdata" if use_display_data else "xdata"
if src_dict.get("croppable"):
xdata_property = "cropped_" + xdata_property
elif src_dict.get("use_filtered_data", False):
xdata_property = "filtered_" + xdata_property
src_text = "{}.{}".format(src_name, xdata_property)
src_names.append(src_name)
src_texts.append(src_text)
src_labels.append(src_label)
# each source can have a list of regions to be matched to arguments or created on the source
region_dict_list = src_dict.get("regions", list())
src_region_list = region_list_map.get(src_name, list())
assert len(region_dict_list) == len(src_region_list)
for region_dict, region in zip(region_dict_list, src_region_list):
region_params = region_dict.get("params", dict())
region_type = region_dict["type"]
region_name = region_dict["name"]
region_label = region_params.get("label")
if region_type == "point":
if region:
assert isinstance(region, Graphics.PointGraphic)
point_region = region
else:
point_region = Graphics.PointGraphic()
for k, v in region_params.items():
setattr(point_region, k, v)
if display_item:
display_item.add_graphic(point_region)
regions.append((region_name, point_region, region_label))
region_map[region_name] = point_region
elif region_type == "line":
if region:
assert isinstance(region, Graphics.LineProfileGraphic)
line_region = region
else:
line_region = Graphics.LineProfileGraphic()
line_region.start = 0.25, 0.25
line_region.end = 0.75, 0.75
for k, v in region_params.items():
setattr(line_region, k, v)
if display_item:
display_item.add_graphic(line_region)
regions.append((region_name, line_region, region_params.get("label")))
region_map[region_name] = line_region
elif region_type == "rectangle":
if region:
assert isinstance(region, Graphics.RectangleGraphic)
rect_region = region
else:
rect_region = Graphics.RectangleGraphic()
rect_region.center = 0.5, 0.5
rect_region.size = 0.5, 0.5
for k, v in region_params.items():
setattr(rect_region, k, v)
if display_item:
display_item.add_graphic(rect_region)
regions.append((region_name, rect_region, region_params.get("label")))
region_map[region_name] = rect_region
elif region_type == "ellipse":
if region:
assert isinstance(region, Graphics.EllipseGraphic)
ellipse_region = region
else:
                        ellipse_region = Graphics.EllipseGraphic()
ellipse_region.center = 0.5, 0.5
ellipse_region.size = 0.5, 0.5
for k, v in region_params.items():
setattr(ellipse_region, k, v)
if display_item:
display_item.add_graphic(ellipse_region)
regions.append((region_name, ellipse_region, region_params.get("label")))
region_map[region_name] = ellipse_region
elif region_type == "spot":
if region:
assert isinstance(region, Graphics.SpotGraphic)
spot_region = region
else:
spot_region = Graphics.SpotGraphic()
spot_region.center = 0.25, 0.75
spot_region.size = 0.1, 0.1
for k, v in region_params.items():
setattr(spot_region, k, v)
if display_item:
display_item.add_graphic(spot_region)
regions.append((region_name, spot_region, region_params.get("label")))
region_map[region_name] = spot_region
elif region_type == "interval":
if region:
assert isinstance(region, Graphics.IntervalGraphic)
interval_region = region
else:
interval_region = Graphics.IntervalGraphic()
for k, v in region_params.items():
setattr(interval_region, k, v)
if display_item:
display_item.add_graphic(interval_region)
regions.append((region_name, interval_region, region_params.get("label")))
region_map[region_name] = interval_region
elif region_type == "channel":
if region:
assert isinstance(region, Graphics.ChannelGraphic)
channel_region = region
else:
channel_region = Graphics.ChannelGraphic()
for k, v in region_params.items():
setattr(channel_region, k, v)
if display_item:
display_item.add_graphic(channel_region)
regions.append((region_name, channel_region, region_params.get("label")))
region_map[region_name] = channel_region
# now extract the script (full script) or expression (implied imports and return statement)
script = processing_description.get("script")
if not script:
expression = processing_description.get("expression")
if expression:
script = Symbolic.xdata_expression(expression)
assert script
# construct the computation
script = script.format(**dict(zip(src_names, src_texts)))
computation = self.create_computation(script)
computation.label = processing_description["title"]
computation.processing_id = processing_id
# process the data item inputs
for src_dict, src_name, src_label, input in zip(src_dicts, src_names, src_labels, inputs):
in_display_item = input[0]
secondary_specifier = None
if src_dict.get("croppable", False):
secondary_specifier = self.get_object_specifier(input[1])
display_data_channel = in_display_item.display_data_channel
computation.create_object(src_name, self.get_object_specifier(display_data_channel), label=src_label, secondary_specifier=secondary_specifier)
# process the regions
for region_name, region, region_label in regions:
computation.create_object(region_name, self.get_object_specifier(region), label=region_label)
# next process the parameters
for param_dict in processing_description.get("parameters", list()):
parameter_value = parameters.get(param_dict["name"], param_dict["value"])
computation.create_variable(param_dict["name"], param_dict["type"], parameter_value, value_default=param_dict.get("value_default"),
value_min=param_dict.get("value_min"), value_max=param_dict.get("value_max"),
control_type=param_dict.get("control_type"), label=param_dict["label"])
data_item0 = inputs[0][0].data_items[0]
new_data_item = DataItem.new_data_item()
prefix = "{} of ".format(processing_description["title"])
new_data_item.title = prefix + data_item0.title
new_data_item.category = data_item0.category
self.append_data_item(new_data_item)
new_display_item = self.get_display_item_for_data_item(new_data_item)
# next come the output regions that get created on the target itself
new_regions = dict()
for out_region_dict in processing_description.get("out_regions", list()):
region_type = out_region_dict["type"]
region_name = out_region_dict["name"]
region_params = out_region_dict.get("params", dict())
if region_type == "interval":
interval_region = Graphics.IntervalGraphic()
for k, v in region_params.items():
setattr(interval_region, k, v)
new_display_item.add_graphic(interval_region)
new_regions[region_name] = interval_region
# now come the connections between the source and target
for connection_dict in processing_description.get("connections", list()):
connection_type = connection_dict["type"]
connection_src = connection_dict["src"]
connection_src_prop = connection_dict.get("src_prop")
connection_dst = connection_dict["dst"]
connection_dst_prop = connection_dict.get("dst_prop")
if connection_type == "property":
if connection_src == "display_data_channel":
# TODO: how to refer to the data_items? hardcode to data_item0 for now.
display_item0 = self.get_display_item_for_data_item(data_item0)
display_data_channel0 = display_item0.display_data_channel if display_item0 else None
connection = Connection.PropertyConnection(display_data_channel0, connection_src_prop, new_regions[connection_dst], connection_dst_prop, parent=new_data_item)
self.append_connection(connection)
elif connection_type == "interval_list":
connection = Connection.IntervalListConnection(new_display_item, region_map[connection_dst], parent=new_data_item)
self.append_connection(connection)
# save setting the computation until last to work around threaded clone/merge operation bug.
# the bug is that setting the computation triggers the recompute to occur on a thread.
# the recompute clones the data item and runs the operation. meanwhile this thread
# updates the connection. now the recompute finishes and merges back the data item
# which was cloned before the connection was established, effectively reversing the
# update that matched the graphic interval to the slice interval on the display.
# the result is that the slice interval on the display would get set to the default
# value of the graphic interval. so don't actually update the computation until after
# everything is configured. permanent solution would be to improve the clone/merge to
# only update data that had been changed. alternative implementation would only track
# changes to the data item and then apply them again to the original during merge.
self.set_data_item_computation(new_data_item, computation)
return new_data_item
|
Create a new data item with computation specified by processing_id, inputs, and region_list_map.
The region_list_map associates a list of graphics corresponding to the required regions with a computation source (key).
|
entailment
|
def interpolate_colors(array: numpy.ndarray, x: int) -> numpy.ndarray:
"""
Creates a color map for values in array
:param array: color map to interpolate
:param x: number of colors
:return: interpolated color map
"""
out_array = []
for i in range(x):
if i % (x / (len(array) - 1)) == 0:
index = i / (x / (len(array) - 1))
out_array.append(array[int(index)])
else:
start_marker = array[math.floor(i / (x / (len(array) - 1)))]
stop_marker = array[math.ceil(i / (x / (len(array) - 1)))]
interp_amount = i % (x / (len(array) - 1)) / (x / (len(array) - 1))
interp_color = numpy.rint(start_marker + ((stop_marker - start_marker) * interp_amount))
out_array.append(interp_color)
out_array[-1] = array[-1]
return numpy.array(out_array).astype(numpy.uint8)
|
Creates a color map for values in array
:param array: color map to interpolate
:param x: number of colors
:return: interpolated color map
|
entailment
|
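A short usage sketch for interpolate_colors, assuming the function above is in scope; the 4-entry lookup table and the target size of 16 are illustrative values.

import numpy

# Expand a 4-entry RGB lookup table to 16 evenly interpolated entries.
base_colors = numpy.array([[0, 0, 0], [255, 0, 0], [255, 255, 0], [255, 255, 255]])
color_map = interpolate_colors(base_colors, 16)

assert color_map.shape == (16, 3)
assert color_map.dtype == numpy.uint8
# The first and last entries match the ends of the input table.
assert (color_map[0] == base_colors[0]).all()
assert (color_map[-1] == base_colors[-1]).all()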
def star(self, **args):
'''
        Star any gist by providing a gist ID or gist name (for the authenticated user).
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
            raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be starred')
r = requests.put(
'%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
headers=self.gist.header
)
if (r.status_code == 204):
response = {
'id': self.gist_id
}
return response
raise Exception('Gist can\'t be starred')
|
Star any gist by providing a gist ID or gist name (for the authenticated user).
|
entailment
|
def fork(self, **args):
'''
        Fork any gist by providing a gist ID or gist name (for the authenticated user).
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
            raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be forked')
r = requests.post(
'%s'%BASE_URL+'/gists/%s/forks' % self.gist_id,
headers=self.gist.header
)
if (r.status_code == 201):
response = {
'id': self.gist_id,
'description': r.json()['description'],
'public': r.json()['public'],
'comments': r.json()['comments']
}
return response
raise Exception('Gist can\'t be forked')
|
Fork any gist by providing a gist ID or gist name (for the authenticated user).
|
entailment
|
def checkifstar(self, **args):
'''
        Check whether a gist is starred by providing a gist ID or gist name (for the authenticated user).
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
            raise Exception('Either provide authenticated user\'s Unambiguous Gistname or any unique Gistid to be checked for star')
r = requests.get(
'%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
headers=self.gist.header
)
if (r.status_code == 204):
response = {
'starred': 'True',
'id': self.gist_id
}
else:
response = {
'starred': 'False'
}
return response
|
Check whether a gist is starred by providing a gist ID or gist name (for the authenticated user).
|
entailment
|
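The star, fork and check methods above call the GitHub gist endpoints and test the status codes shown. Below is a hedged standalone sketch of the same three calls with requests; the base URL, token header format and gist id are placeholders/assumptions, not values taken from this class.

import requests

GITHUB_API = "https://api.github.com"                      # assumed public API base
headers = {"Authorization": "token <YOUR_TOKEN>"}          # placeholder auth header
gist_id = "<GIST_ID>"                                      # placeholder gist id

# Star: 204 No Content on success (mirrors the check in star()).
starred_ok = requests.put("%s/gists/%s/star" % (GITHUB_API, gist_id),
                          headers=headers).status_code == 204

# Check star: 204 means starred, anything else means not starred (mirrors checkifstar()).
is_starred = requests.get("%s/gists/%s/star" % (GITHUB_API, gist_id),
                          headers=headers).status_code == 204

# Fork: 201 Created on success; the body describes the new fork (mirrors fork()).
fork_response = requests.post("%s/gists/%s/forks" % (GITHUB_API, gist_id),
                              headers=headers)
if fork_response.status_code == 201:
    print(fork_response.json()["description"], fork_response.json()["public"])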
def salvar(self, destino=None, prefix='tmp', suffix='-sat.log'):
"""Salva o arquivo de log decodificado.
:param str destino: (Opcional) Caminho completo para o arquivo onde os
dados dos logs deverão ser salvos. Se não informado, será criado
um arquivo temporário via :func:`tempfile.mkstemp`.
:param str prefix: (Opcional) Prefixo para o nome do arquivo. Se não
informado será usado ``"tmp"``.
:param str suffix: (Opcional) Sufixo para o nome do arquivo. Se não
informado será usado ``"-sat.log"``.
:return: Retorna o caminho completo para o arquivo salvo.
:rtype: str
:raises IOError: Se o destino for informado e o arquivo já existir.
"""
if destino:
if os.path.exists(destino):
raise IOError((errno.EEXIST, 'File exists', destino,))
destino = os.path.abspath(destino)
fd = os.open(destino, os.O_EXCL|os.O_CREAT|os.O_WRONLY)
else:
fd, destino = tempfile.mkstemp(prefix=prefix, suffix=suffix)
os.write(fd, self.conteudo())
os.fsync(fd)
os.close(fd)
return os.path.abspath(destino)
|
Saves the decoded log file.
:param str destino: (Optional) Full path to the file where the log
    data should be saved. If not given, a temporary file will be
    created via :func:`tempfile.mkstemp`.
:param str prefix: (Optional) Prefix for the file name. If not
    given, ``"tmp"`` will be used.
:param str suffix: (Optional) Suffix for the file name. If not
    given, ``"-sat.log"`` will be used.
:return: Returns the full path to the saved file.
:rtype: str
:raises IOError: If a destination is given and the file already exists.
|
entailment
|
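The exclusive-create path above (os.O_EXCL | os.O_CREAT) is what makes the save refuse to overwrite an existing destination. A small standalone check of that behaviour using a throwaway path; the file name and contents are illustrative.

import errno
import os
import tempfile

# Fresh directory so the first exclusive create always succeeds.
destino = os.path.join(tempfile.mkdtemp(), "exemplo-sat.log")

fd = os.open(destino, os.O_EXCL | os.O_CREAT | os.O_WRONLY)
os.write(fd, b"decoded log contents")
os.close(fd)

try:
    # A second exclusive create on the same path fails, mirroring the IOError raised above.
    os.open(destino, os.O_EXCL | os.O_CREAT | os.O_WRONLY)
except OSError as exc:
    assert exc.errno == errno.EEXIST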
def analisar(retorno):
"""Constrói uma :class:`RespostaExtrairLogs` a partir do retorno
informado.
:param unicode retorno: Retorno da função ``ExtrairLogs``.
"""
resposta = analisar_retorno(forcar_unicode(retorno),
funcao='ExtrairLogs',
classe_resposta=RespostaExtrairLogs,
campos=RespostaSAT.CAMPOS + (
('arquivoLog', unicode),
),
campos_alternativos=[
                # if the log extraction fails, the standard set of
                # fields is expected in the return...
RespostaSAT.CAMPOS,
]
)
if resposta.EEEEE not in ('15000',):
raise ExcecaoRespostaSAT(resposta)
return resposta
|
Builds a :class:`RespostaExtrairLogs` from the given return value.
:param unicode retorno: Return value of the ``ExtrairLogs`` function.
|
entailment
|
def load_data_old(self):
"""
Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
from multiple files. Missing files are supported.
"""
units = ""
if len(self.file_objects) == 1 and self.file_objects[0] is not None:
data = self.file_objects[0].variables[self.variable][self.forecast_hours]
if hasattr(self.file_objects[0].variables[self.variable], "units"):
units = self.file_objects[0].variables[self.variable].units
elif len(self.file_objects) > 1:
grid_shape = [len(self.file_objects), 1, 1]
for file_object in self.file_objects:
if file_object is not None:
if self.variable in file_object.variables.keys():
grid_shape = file_object.variables[self.variable].shape
elif self.variable.ljust(6, "_") in file_object.variables.keys():
grid_shape = file_object.variables[self.variable.ljust(6, "_")].shape
else:
print("{0} not found".format(self.variable))
raise KeyError
break
data = np.zeros((len(self.file_objects), grid_shape[1], grid_shape[2]))
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if self.variable in file_object.variables.keys():
var_name = self.variable
elif self.variable.ljust(6, "_") in file_object.variables.keys():
var_name = self.variable.ljust(6, "_")
else:
print("{0} not found".format(self.variable))
raise KeyError
data[f] = file_object.variables[var_name][0]
if units == "" and hasattr(file_object.variables[var_name], "units"):
units = file_object.variables[var_name].units
else:
data = None
return data, units
|
Loads time series of 2D data grids from each opened file. The code
handles loading a full time series from one file or individual time steps
from multiple files. Missing files are supported.
|
entailment
|
def load_data(self):
"""
Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats.
Returns:
Array of data loaded from files in (time, y, x) dimensions, Units
"""
units = ""
if self.file_objects[0] is None:
raise IOError()
var_name, z_index = self.format_var_name(self.variable, list(self.file_objects[0].variables.keys()))
ntimes = 0
if 'time' in self.file_objects[0].variables[var_name].dimensions:
ntimes = len(self.file_objects[0].dimensions['time'])
if ntimes > 1:
if z_index is None:
data = self.file_objects[0].variables[var_name][self.forecast_hours].astype(np.float32)
else:
data = self.file_objects[0].variables[var_name][self.forecast_hours, z_index].astype(np.float32)
else:
y_dim, x_dim = self.file_objects[0].variables[var_name].shape[-2:]
data = np.zeros((len(self.valid_dates), y_dim, x_dim), dtype=np.float32)
for f, file_object in enumerate(self.file_objects):
if file_object is not None:
if z_index is None:
data[f] = file_object.variables[var_name][0]
else:
data[f] = file_object.variables[var_name][0, z_index]
if hasattr(self.file_objects[0].variables[var_name], "units"):
units = self.file_objects[0].variables[var_name].units
return data, units
|
Load data from netCDF file objects or list of netCDF file objects. Handles special variable name formats.
Returns:
Array of data loaded from files in (time, y, x) dimensions, Units
|
entailment
|
def format_var_name(variable, var_list):
"""
Searches var list for variable name, checks other variable name format options.
Args:
variable (str): Variable being loaded
var_list (list): List of variables in file.
Returns:
Name of variable in file containing relevant data, and index of variable z-level if multiple variables
contained in same array in file.
"""
z_index = None
if variable in var_list:
var_name = variable
elif variable.ljust(6, "_") in var_list:
var_name = variable.ljust(6, "_")
elif any([variable in v_sub.split("_") for v_sub in var_list]):
var_name = var_list[[variable in v_sub.split("_") for v_sub in var_list].index(True)]
z_index = var_name.split("_").index(variable)
else:
raise KeyError("{0} not found in {1}".format(variable, var_list))
return var_name, z_index
|
Searches var list for variable name, checks other variable name format options.
Args:
variable (str): Variable being loaded
var_list (list): List of variables in file.
Returns:
Name of variable in file containing relevant data, and index of variable z-level if multiple variables
contained in same array in file.
|
entailment
|
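A short worked example of the three name-resolution rules above, assuming format_var_name is reachable as a plain function (it takes no self); the variable names are made up.

# 1. Exact match: returned unchanged, no z-index.
assert format_var_name("REFL_COM", ["REFL_COM", "U10_V10_WSPD10"]) == ("REFL_COM", None)

# 2. Padded match: short names padded to six characters with underscores.
assert format_var_name("CAPE", ["CAPE__", "REFL_COM"]) == ("CAPE__", None)

# 3. Composite match: "V10" is the second component of "U10_V10_WSPD10",
#    so that array holds it at z-index 1.
assert format_var_name("V10", ["REFL_COM", "U10_V10_WSPD10"]) == ("U10_V10_WSPD10", 1)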
def load_data(self, mode="train", format="csv"):
"""
Load data from flat data files containing total track information and information about each timestep.
The two sets are combined using merge operations on the Track IDs. Additional member information is gathered
from the appropriate member file.
Args:
mode: "train" or "forecast"
format: file format being used. Default is "csv"
"""
if mode in self.data.keys():
            run_dates = pd.date_range(start=self.start_dates[mode],
                                      end=self.end_dates[mode], freq="1D")
run_date_str = [d.strftime("%Y%m%d-%H%M") for d in run_dates.date]
print(run_date_str)
all_total_track_files = sorted(glob(getattr(self, mode + "_data_path") +
"*total_" + self.ensemble_name + "*." + format))
all_step_track_files = sorted(glob(getattr(self, mode + "_data_path") +
"*step_" + self.ensemble_name + "*." + format))
total_track_files = []
for track_file in all_total_track_files:
file_date = track_file.split("_")[-1][:-4]
if file_date in run_date_str:
total_track_files.append(track_file)
step_track_files = []
for step_file in all_step_track_files:
file_date = step_file.split("_")[-1][:-4]
if file_date in run_date_str:
step_track_files.append(step_file)
self.data[mode]["total"] = pd.concat(map(pd.read_csv, total_track_files),
ignore_index=True)
self.data[mode]["total"] = self.data[mode]["total"].fillna(value=0)
self.data[mode]["total"] = self.data[mode]["total"].replace([np.inf, -np.inf], 0)
self.data[mode]["step"] = pd.concat(map(pd.read_csv, step_track_files),
ignore_index=True)
self.data[mode]["step"] = self.data[mode]["step"].fillna(value=0)
self.data[mode]["step"] = self.data[mode]["step"].replace([np.inf, -np.inf], 0)
if mode == "forecast":
self.data[mode]["step"] = self.data[mode]["step"].drop_duplicates("Step_ID")
self.data[mode]["member"] = pd.read_csv(self.member_files[mode])
self.data[mode]["combo"] = pd.merge(self.data[mode]["step"],
self.data[mode]["total"],
on=["Track_ID", "Ensemble_Name", "Ensemble_Member", "Run_Date"])
self.data[mode]["combo"] = pd.merge(self.data[mode]["combo"],
self.data[mode]["member"],
on="Ensemble_Member")
self.data[mode]["total_group"] = pd.merge(self.data[mode]["total"],
self.data[mode]["member"],
on="Ensemble_Member")
|
Load data from flat data files containing total track information and information about each timestep.
The two sets are combined using merge operations on the Track IDs. Additional member information is gathered
from the appropriate member file.
Args:
mode: "train" or "forecast"
format: file format being used. Default is "csv"
|
entailment
|
def calc_copulas(self,
output_file,
model_names=("start-time", "translation-x", "translation-y"),
label_columns=("Start_Time_Error", "Translation_Error_X", "Translation_Error_Y")):
"""
Calculate a copula multivariate normal distribution from the training data for each group of ensemble members.
Distributions are written to a pickle file for later use.
Args:
output_file: Pickle file
model_names: Names of the tracking models
label_columns: Names of the data columns used for labeling
Returns:
"""
if len(self.data['train']) == 0:
self.load_data()
groups = self.data["train"]["member"][self.group_col].unique()
copulas = {}
label_columns = list(label_columns)
for group in groups:
print(group)
group_data = self.data["train"]["total_group"].loc[
self.data["train"]["total_group"][self.group_col] == group]
group_data = group_data.dropna()
group_data.reset_index(drop=True, inplace=True)
copulas[group] = {}
copulas[group]["mean"] = group_data[label_columns].mean(axis=0).values
copulas[group]["cov"] = np.cov(group_data[label_columns].values.T)
copulas[group]["model_names"] = list(model_names)
del group_data
        pickle.dump(copulas, open(output_file, "wb"), pickle.HIGHEST_PROTOCOL)
|
Calculate a copula multivariate normal distribution from the training data for each group of ensemble members.
Distributions are written to a pickle file for later use.
Args:
output_file: Pickle file
model_names: Names of the tracking models
label_columns: Names of the data columns used for labeling
Returns:
|
entailment
|
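calc_copulas stores, per member group, the mean vector and covariance matrix of the chosen error columns. The sketch below is one hypothetical way such a stored distribution could be read back and sampled; the file name is a placeholder and this is not necessarily how the rest of the pipeline consumes it.

import pickle
import numpy as np

with open("copulas.pkl", "rb") as copula_file:       # placeholder path
    copulas = pickle.load(copula_file)

group = list(copulas.keys())[0]                      # e.g. a physics-group label
mean = copulas[group]["mean"]                        # per-column mean errors
cov = copulas[group]["cov"]                          # error covariance matrix

# Draw correlated error perturbations consistent with the training errors.
samples = np.random.multivariate_normal(mean, cov, size=100)
print(copulas[group]["model_names"], samples.shape)  # (100, len(label_columns))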
def fit_condition_models(self, model_names,
model_objs,
input_columns,
output_column="Matched",
output_threshold=0.0):
"""
Fit machine learning models to predict whether or not hail will occur.
Args:
model_names: List of strings with the names for the particular machine learning models
model_objs: scikit-learn style machine learning model objects.
input_columns: list of the names of the columns used as input for the machine learning model
output_column: name of the column used for labeling whether or not the event occurs
output_threshold: splitting threshold to determine if event has occurred. Default 0.0
"""
print("Fitting condition models")
groups = self.data["train"]["member"][self.group_col].unique()
weights = None
for group in groups:
print(group)
group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
if self.sector:
                lon_obj = group_data.loc[:, 'Centroid_Lon']
                lat_obj = group_data.loc[:, 'Centroid_Lat']
left_lon,right_lon = self.grid_dict["sw_lon"],self.grid_dict["ne_lon"]
lower_lat,upper_lat = self.grid_dict["sw_lat"],self.grid_dict["ne_lat"]
weights = np.where((left_lon<=lon_obj)&(right_lon>=lon_obj) &\
(lower_lat<=lat_obj)&(upper_lat>=lat_obj),1,0.3)
output_data = np.where(group_data[output_column] > output_threshold, 1, 0)
print("Ones: ", np.count_nonzero(output_data > 0), "Zeros: ", np.count_nonzero(output_data == 0))
self.condition_models[group] = {}
for m, model_name in enumerate(model_names):
print(model_name)
self.condition_models[group][model_name] = deepcopy(model_objs[m])
try:
self.condition_models[group][model_name].fit(group_data[input_columns],
output_data,sample_weight=weights)
except:
self.condition_models[group][model_name].fit(group_data[input_columns], output_data)
if hasattr(self.condition_models[group][model_name], "best_estimator_"):
print(self.condition_models[group][model_name].best_estimator_)
print(self.condition_models[group][model_name].best_score_)
|
Fit machine learning models to predict whether or not hail will occur.
Args:
model_names: List of strings with the names for the particular machine learning models
model_objs: scikit-learn style machine learning model objects.
input_columns: list of the names of the columns used as input for the machine learning model
output_column: name of the column used for labeling whether or not the event occurs
output_threshold: splitting threshold to determine if event has occurred. Default 0.0
|
entailment
|
def fit_condition_threshold_models(self, model_names, model_objs, input_columns, output_column="Matched",
output_threshold=0.5, num_folds=5, threshold_score="ets"):
"""
        Fit models to predict hail/no-hail and use cross-validation to determine the probability threshold that
maximizes a skill score.
Args:
model_names: List of machine learning model names
model_objs: List of Scikit-learn ML models
input_columns: List of input variables in the training data
output_column: Column used for prediction
output_threshold: Values exceeding this threshold are considered positive events; below are nulls
num_folds: Number of folds in the cross-validation procedure
threshold_score: Score available in ContingencyTable used for determining the best probability threshold
Returns:
None
"""
print("Fitting condition models")
groups = self.data["train"]["member"][self.group_col].unique()
weights=None
for group in groups:
print(group)
group_data = self.data["train"]["combo"].iloc[
np.where(self.data["train"]["combo"][self.group_col] == group)[0]]
if self.sector:
lon_obj = group_data.loc[:,'Centroid_Lon']
lat_obj = group_data.loc[:,'Centroid_Lat']
conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel())
center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"]
distances = np.array([np.sqrt((x-center_lon)**2+\
(y-center_lat)**2) for (x, y) in conus_lat_lon_points])
min_dist, max_minus_min = min(distances),max(distances)-min(distances)
distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances]
weights = np.array(distance_0_1)
output_data = np.where(group_data.loc[:, output_column] > output_threshold, 1, 0)
ones = np.count_nonzero(output_data > 0)
print("Ones: ", ones, "Zeros: ", np.count_nonzero(output_data == 0))
self.condition_models[group] = {}
num_elements = group_data[input_columns].shape[0]
for m, model_name in enumerate(model_names):
print(model_name)
roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.01))
self.condition_models[group][model_name] = deepcopy(model_objs[m])
try:
kf = KFold(n_splits=num_folds)
for train_index, test_index in kf.split(group_data[input_columns].values):
if np.count_nonzero(output_data[train_index]) > 0:
try:
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index],sample_weight=weights[train_index])
except:
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index])
cv_preds = self.condition_models[group][model_name].predict_proba(
group_data.iloc[test_index][input_columns])[:,1]
roc.update(cv_preds, output_data[test_index])
else:
continue
except TypeError:
kf = KFold(num_elements,n_folds=num_folds)
for train_index, test_index in kf:
if np.count_nonzero(output_data[train_index]) > 0:
try:
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index],sample_weight=weights[train_index])
except:
self.condition_models[group][model_name].fit(
group_data.iloc[train_index][input_columns],
output_data[train_index])
cv_preds = self.condition_models[group][model_name].predict_proba(
group_data.iloc[test_index][input_columns])[:, 1]
roc.update(cv_preds, output_data[test_index])
else:
continue
self.condition_models[group][
model_name + "_condition_threshold"], _ = roc.max_threshold_score(threshold_score)
print(model_name + " condition threshold: {0:0.3f}".format(
self.condition_models[group][model_name + "_condition_threshold"]))
self.condition_models[group][model_name].fit(group_data[input_columns], output_data)
|
Fit models to predict hail/no-hail and use cross-validation to determine the probability threshold that
maximizes a skill score.
Args:
model_names: List of machine learning model names
model_objs: List of Scikit-learn ML models
input_columns: List of input variables in the training data
output_column: Column used for prediction
output_threshold: Values exceeding this threshold are considered positive events; below are nulls
num_folds: Number of folds in the cross-validation procedure
threshold_score: Score available in ContingencyTable used for determining the best probability threshold
Returns:
None
|
entailment
|
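The sector branch above converts each object's distance from the projection centre into a training weight that decreases linearly from 1 at the nearest object to 0 at the farthest. A small worked sketch of that normalisation with made-up coordinates (and a non-degenerate distance spread):

import numpy as np

center_lon, center_lat = -97.0, 38.0                  # illustrative projection centre
lons = np.array([-97.0, -95.0, -90.0])
lats = np.array([38.0, 39.0, 42.0])

distances = np.sqrt((lons - center_lon) ** 2 + (lats - center_lat) ** 2)
min_dist = distances.min()
span = distances.max() - min_dist                     # assumed non-zero here
weights = 1.0 - (distances - min_dist) / span

print(weights)   # closest object -> 1.0, farthest -> 0.0, others in between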
def predict_condition_models(self, model_names,
input_columns,
metadata_cols,
data_mode="forecast",
):
"""
        Apply condition models to forecast data.
Args:
model_names: List of names associated with each condition model used for prediction
input_columns: List of columns in data used as input into the model
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Which data subset to pull from for the predictions, "forecast" by default
Returns:
A dictionary of data frames containing probabilities of the event and specified metadata
"""
groups = self.condition_models.keys()
predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols])
for group in groups:
print(group)
print(self.condition_models[group])
g_idxs = self.data[data_mode]["combo"][self.group_col] == group
group_count = np.count_nonzero(g_idxs)
if group_count > 0:
for m, model_name in enumerate(model_names):
mn = model_name.replace(" ", "-")
predictions.loc[g_idxs, mn + "_conditionprob"] = self.condition_models[group][
model_name].predict_proba(
self.data[data_mode]["combo"].loc[g_idxs, input_columns])[:, 1]
predictions.loc[g_idxs,
mn + "_conditionthresh"] = np.where(predictions.loc[g_idxs, mn + "_conditionprob"]
>= self.condition_models[group][
model_name + "_condition_threshold"], 1, 0)
return predictions
|
Apply condition models to forecast data.
Args:
model_names: List of names associated with each condition model used for prediction
input_columns: List of columns in data used as input into the model
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Which data subset to pull from for the predictions, "forecast" by default
Returns:
A dictionary of data frames containing probabilities of the event and specified metadata
|
entailment
|
def fit_size_distribution_models(self, model_names, model_objs, input_columns,
output_columns=None, calibrate=False):
"""
Fits multitask machine learning models to predict the parameters of a size distribution
Args:
model_names: List of machine learning model names
model_objs: scikit-learn style machine learning model objects
input_columns: Training data columns used as input for ML model
output_columns: Training data columns used for prediction
calibrate: Whether or not to fit a log-linear regression to predictions from ML model
"""
if output_columns is None:
output_columns = ["Shape", "Location", "Scale"]
groups = np.unique(self.data["train"]["member"][self.group_col])
weights=None
for group in groups:
group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
group_data = group_data.dropna()
group_data = group_data[group_data[output_columns[-1]] > 0]
if self.sector:
lon_obj = group_data.loc[:,'Centroid_Lon']
lat_obj = group_data.loc[:,'Centroid_Lat']
conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel())
center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"]
distances = np.array([np.sqrt((x-center_lon)**2+\
(y-center_lat)**2) for (x, y) in conus_lat_lon_points])
min_dist, max_minus_min = min(distances),max(distances)-min(distances)
distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances]
weights = np.array(distance_0_1)
self.size_distribution_models[group] = {"multi": {}, "lognorm": {}}
if calibrate:
self.size_distribution_models[group]["calshape"] = {}
self.size_distribution_models[group]["calscale"] = {}
log_labels = np.log(group_data[output_columns].values)
log_means = log_labels.mean(axis=0)
log_sds = log_labels.std(axis=0)
self.size_distribution_models[group]['lognorm']['mean'] = log_means
self.size_distribution_models[group]['lognorm']['sd'] = log_sds
for m, model_name in enumerate(model_names):
print(group, model_name)
self.size_distribution_models[group]["multi"][model_name] = deepcopy(model_objs[m])
try:
self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns],
(log_labels - log_means) / log_sds,
sample_weight=weights)
except:
self.size_distribution_models[group]["multi"][model_name].fit(group_data[input_columns],
(log_labels - log_means) / log_sds)
if calibrate:
training_predictions = self.size_distribution_models[
group]["multi"][model_name].predict(group_data[input_columns])
self.size_distribution_models[group]["calshape"][model_name] = LinearRegression()
self.size_distribution_models[group]["calshape"][model_name].fit(training_predictions[:, 0:1],
(log_labels[:, 0] - log_means[0]) /
log_sds[
0],
sample_weight=weights)
self.size_distribution_models[group]["calscale"][model_name] = LinearRegression()
self.size_distribution_models[group]["calscale"][model_name].fit(training_predictions[:, 1:],
(log_labels[:, 1] - log_means[1]) /
log_sds[
1],
sample_weight=weights)
|
Fits multitask machine learning models to predict the parameters of a size distribution
Args:
model_names: List of machine learning model names
model_objs: scikit-learn style machine learning model objects
input_columns: Training data columns used as input for ML model
output_columns: Training data columns used for prediction
calibrate: Whether or not to fit a log-linear regression to predictions from ML model
|
entailment
|
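The distribution models are trained on standardised log parameters, and the prediction code maps outputs back with exp(pred * sd + mean). A small sketch confirming that the transform round-trips; the parameter values are made up.

import numpy as np

labels = np.array([[1.5, 20.0],
                   [2.0, 35.0],
                   [1.2, 15.0]])        # e.g. distribution shape and scale per storm

log_labels = np.log(labels)
log_means = log_labels.mean(axis=0)
log_sds = log_labels.std(axis=0)

standardized = (log_labels - log_means) / log_sds       # what the models are fit to
recovered = np.exp(standardized * log_sds + log_means)  # what prediction code applies

assert np.allclose(recovered, labels)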
def fit_size_distribution_component_models(self, model_names, model_objs, input_columns, output_columns):
"""
        This calculates two principal components from the shape and scale parameters of the hail size distribution.
Separate machine learning models are fit to predict each component.
Args:
model_names: List of machine learning model names
model_objs: List of machine learning model objects.
input_columns: List of input variables
output_columns: Output columns, should contain Shape and Scale.
Returns:
"""
groups = np.unique(self.data["train"]["member"][self.group_col])
weights=None
for group in groups:
print(group)
group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
group_data = group_data.dropna()
group_data = group_data.loc[group_data[output_columns[-1]] > 0]
if self.sector:
lon_obj = group_data.loc[:,'Centroid_Lon']
lat_obj = group_data.loc[:,'Centroid_Lat']
conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel())
center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"]
distances = np.array([np.sqrt((x-center_lon)**2+\
(y-center_lat)**2) for (x, y) in conus_lat_lon_points])
min_dist, max_minus_min = min(distances),max(distances)-min(distances)
distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances]
weights = np.array(distance_0_1)
self.size_distribution_models[group] = {"lognorm": {}}
self.size_distribution_models[group]["lognorm"]["pca"] = PCA(n_components=len(output_columns))
log_labels = np.log(group_data[output_columns].values)
log_labels[:, np.where(output_columns == "Shape")[0]] *= -1
log_means = log_labels.mean(axis=0)
log_sds = log_labels.std(axis=0)
log_norm_labels = (log_labels - log_means) / log_sds
out_pc_labels = self.size_distribution_models[group]["lognorm"]["pca"].fit_transform(log_norm_labels)
self.size_distribution_models[group]['lognorm']['mean'] = log_means
self.size_distribution_models[group]['lognorm']['sd'] = log_sds
for comp in range(len(output_columns)):
self.size_distribution_models[group]["pc_{0:d}".format(comp)] = dict()
for m, model_name in enumerate(model_names):
print(model_name, comp)
self.size_distribution_models[group][
"pc_{0:d}".format(comp)][model_name] = deepcopy(model_objs[m])
try:
self.size_distribution_models[group][
"pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns],
out_pc_labels[:, comp],
sample_weight=weights)
except:
self.size_distribution_models[group][
"pc_{0:d}".format(comp)][model_name].fit(group_data[input_columns],
out_pc_labels[:, comp])
return
|
This calculates two principal components from the shape and scale parameters of the hail size distribution.
Separate machine learning models are fit to predict each component.
Args:
model_names: List of machine learning model names
model_objs: List of machine learning model objects.
input_columns: List of input variables
output_columns: Output columns, should contain Shape and Scale.
Returns:
|
entailment
|
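For the component models, the standardised log labels (with the shape column negated) are rotated into principal components, one regressor is fit per component, and predictions are rotated back with inverse_transform. A minimal sketch showing that the rotation is lossless when all components are kept; the data is a random stand-in.

import numpy as np
from sklearn.decomposition import PCA

# Stand-in for the standardised log Shape/Scale labels.
log_norm_labels = np.random.RandomState(0).normal(size=(50, 2))

pca = PCA(n_components=2)
components = pca.fit_transform(log_norm_labels)       # targets for the per-component regressors
recovered = pca.inverse_transform(components)         # applied to predictions at forecast time

assert np.allclose(recovered, log_norm_labels)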
def predict_size_distribution_models(self, model_names, input_columns, metadata_cols,
data_mode="forecast", location=6, calibrate=False):
"""
Make predictions using fitted size distribution models.
Args:
model_names: Name of the models for predictions
input_columns: Data columns used for input into ML models
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Set of data used as input for prediction models
location: Value of fixed location parameter
calibrate: Whether or not to apply calibration model
Returns:
Predictions in dictionary of data frames grouped by group type
"""
groups = self.size_distribution_models.keys()
predictions = {}
for group in groups:
group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group]
predictions[group] = group_data[metadata_cols]
if group_data.shape[0] > 0:
log_mean = self.size_distribution_models[group]["lognorm"]["mean"]
log_sd = self.size_distribution_models[group]["lognorm"]["sd"]
for m, model_name in enumerate(model_names):
multi_predictions = self.size_distribution_models[group]["multi"][model_name].predict(
group_data[input_columns])
if calibrate:
multi_predictions[:, 0] = self.size_distribution_models[group]["calshape"][model_name].predict(
multi_predictions[:, 0:1])
multi_predictions[:, 1] = self.size_distribution_models[group]["calscale"][model_name].predict(
multi_predictions[:, 1:])
multi_predictions = np.exp(multi_predictions * log_sd + log_mean)
if multi_predictions.shape[1] == 2:
multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3))
multi_predictions_temp[:, 0] = multi_predictions[:, 0]
multi_predictions_temp[:, 1] = location
multi_predictions_temp[:, 2] = multi_predictions[:, 1]
multi_predictions = multi_predictions_temp
for p, pred_col in enumerate(["shape", "location", "scale"]):
                        predictions[group].loc[:, model_name.replace(" ", "-") + "_" + pred_col] = \
                            multi_predictions[:, p]
return predictions
|
Make predictions using fitted size distribution models.
Args:
model_names: Name of the models for predictions
input_columns: Data columns used for input into ML models
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Set of data used as input for prediction models
location: Value of fixed location parameter
calibrate: Whether or not to apply calibration model
Returns:
Predictions in dictionary of data frames grouped by group type
|
entailment
|
def predict_size_distribution_component_models(self, model_names, input_columns, output_columns, metadata_cols,
data_mode="forecast", location=6):
"""
Make predictions using fitted size distribution models.
Args:
model_names: Name of the models for predictions
input_columns: Data columns used for input into ML models
output_columns: Names of output columns
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Set of data used as input for prediction models
location: Value of fixed location parameter
Returns:
Predictions in dictionary of data frames grouped by group type
"""
groups = self.size_distribution_models.keys()
predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols])
for group in groups:
group_idxs = self.data[data_mode]["combo"][self.group_col] == group
group_count = np.count_nonzero(group_idxs)
print(self.size_distribution_models[group])
if group_count > 0:
log_mean = self.size_distribution_models[group]["lognorm"]["mean"]
log_sd = self.size_distribution_models[group]["lognorm"]["sd"]
for m, model_name in enumerate(model_names):
raw_preds = np.zeros((group_count, len(output_columns)))
for c in range(len(output_columns)):
raw_preds[:, c] = self.size_distribution_models[group][
"pc_{0:d}".format(c)][model_name].predict(self.data[data_mode]["combo"].loc[group_idxs,
input_columns])
log_norm_preds = self.size_distribution_models[group]["lognorm"]["pca"].inverse_transform(raw_preds)
log_norm_preds[:, 0] *= -1
multi_predictions = np.exp(log_norm_preds * log_sd + log_mean)
if multi_predictions.shape[1] == 2:
multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3))
multi_predictions_temp[:, 0] = multi_predictions[:, 0]
multi_predictions_temp[:, 1] = location
multi_predictions_temp[:, 2] = multi_predictions[:, 1]
multi_predictions = multi_predictions_temp
for p, pred_col in enumerate(["shape", "location", "scale"]):
predictions.loc[group_idxs, model_name.replace(" ", "-") + "_" + pred_col] = \
multi_predictions[:, p]
return predictions
|
Make predictions using fitted size distribution models.
Args:
model_names: Name of the models for predictions
input_columns: Data columns used for input into ML models
output_columns: Names of output columns
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Set of data used as input for prediction models
location: Value of fixed location parameter
Returns:
Predictions in dictionary of data frames grouped by group type
|
entailment
|
def fit_size_models(self, model_names,
model_objs,
input_columns,
output_column="Hail_Size",
output_start=5,
output_step=5,
output_stop=100):
"""
Fit size models to produce discrete pdfs of forecast hail sizes.
Args:
model_names: List of model names
model_objs: List of model objects
input_columns: List of input variables
output_column: Output variable name
output_start: Hail size bin start
output_step: hail size bin step
output_stop: hail size bin stop
"""
print("Fitting size models")
groups = self.data["train"]["member"][self.group_col].unique()
output_start = int(output_start)
output_step = int(output_step)
output_stop = int(output_stop)
for group in groups:
group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
group_data.dropna(inplace=True)
group_data = group_data[group_data[output_column] >= output_start]
output_data = group_data[output_column].values.astype(int)
output_data[output_data > output_stop] = output_stop
discrete_data = ((output_data - output_start) // output_step) * output_step + output_start
self.size_models[group] = {}
self.size_models[group]["outputvalues"] = np.arange(output_start, output_stop + output_step, output_step,
dtype=int)
for m, model_name in enumerate(model_names):
print("{0} {1}".format(group, model_name))
self.size_models[group][model_name] = deepcopy(model_objs[m])
self.size_models[group][model_name].fit(group_data[input_columns], discrete_data)
|
Fit size models to produce discrete pdfs of forecast hail sizes.
Args:
model_names: List of model names
model_objs: List of model objects
input_columns: List of input variables
output_column: Output variable name
output_start: Hail size bin start
output_step: hail size bin step
output_stop: hail size bin stop
|
entailment
|
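The size targets above are clipped to [output_start, output_stop] and snapped to the bottom edge of their bin by integer division. A short worked check of that arithmetic for the default 5 mm bins, with illustrative hail sizes:

import numpy as np

output_start, output_step, output_stop = 5, 5, 100
sizes = np.array([5, 7, 37, 99, 140])           # illustrative hail sizes in mm

sizes = sizes.astype(int)
sizes[sizes > output_stop] = output_stop         # 140 -> 100
discrete = ((sizes - output_start) // output_step) * output_step + output_start

print(discrete)            # [  5   5  35  95 100]
assert list(discrete) == [5, 5, 35, 95, 100]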
def predict_size_models(self, model_names,
input_columns,
metadata_cols,
data_mode="forecast"):
"""
Apply size models to forecast data.
Args:
            model_names: List of model names used for prediction
            input_columns: Data columns used as input into the ML models
            metadata_cols: Columns from the input data included with the predictions
            data_mode: Which data subset to use for prediction, "forecast" by default
"""
groups = self.size_models.keys()
predictions = {}
for group in groups:
group_data = self.data[data_mode]["combo"].loc[self.data[data_mode]["combo"][self.group_col] == group]
if group_data.shape[0] > 0:
predictions[group] = {}
output_values = self.size_models[group]["outputvalues"].astype(int)
for m, model_name in enumerate(model_names):
print("{0} {1}".format(group, model_name))
pred_col_names = [model_name.replace(" ", "-") + "_{0:02d}".format(p) for p in output_values]
predictions[group][model_name] = group_data[metadata_cols]
pred_vals = self.size_models[group][model_name].predict_proba(group_data[input_columns])
pred_classes = self.size_models[group][model_name].classes_
pred_pdf = np.zeros((pred_vals.shape[0], output_values.size))
for pcv, pc in enumerate(pred_classes):
idx = np.where(output_values == pc)[0][0]
pred_pdf[:, idx] = pred_vals[:, pcv]
for pcn, pred_col_name in enumerate(pred_col_names):
predictions[group][model_name].loc[:, pred_col_name] = pred_pdf[:, pcn]
return predictions
|
Apply size models to forecast data.
Args:
    model_names: List of model names used for prediction
    input_columns: Data columns used as input into the ML models
    metadata_cols: Columns from the input data included with the predictions
    data_mode: Which data subset to use for prediction, "forecast" by default
|
entailment
|
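predict_proba only returns columns for the classes seen in training, so the loop above scatters those probabilities onto the full bin grid. A tiny sketch of that mapping with made-up classes and probabilities for a single storm:

import numpy as np

output_values = np.arange(5, 105, 5)                 # full set of size bins
pred_classes = np.array([5, 25, 50])                 # classes the model actually saw
pred_vals = np.array([[0.7, 0.2, 0.1]])              # predict_proba output for one storm

pred_pdf = np.zeros((pred_vals.shape[0], output_values.size))
for pcv, pc in enumerate(pred_classes):
    idx = np.where(output_values == pc)[0][0]
    pred_pdf[:, idx] = pred_vals[:, pcv]

assert pred_pdf[0, 0] == 0.7 and pred_pdf[0, 4] == 0.2 and pred_pdf[0, 9] == 0.1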
def fit_track_models(self,
model_names,
model_objs,
input_columns,
output_columns,
output_ranges,
):
"""
Fit machine learning models to predict track error offsets.
        Args:
            model_names: List of model names
            model_objs: List of scikit-learn style model objects
            input_columns: List of input variables
            output_columns: Dictionary mapping each track model type to its output column
            output_ranges: Dictionary mapping each track model type to (min, max, step) bin settings
"""
print("Fitting track models")
groups = self.data["train"]["member"][self.group_col].unique()
for group in groups:
group_data = self.data["train"]["combo"].loc[self.data["train"]["combo"][self.group_col] == group]
group_data = group_data.dropna()
group_data = group_data.loc[group_data["Duration_Step"] == 1]
for model_type, model_dict in self.track_models.items():
model_dict[group] = {}
output_data = group_data[output_columns[model_type]].values.astype(int)
output_data[output_data < output_ranges[model_type][0]] = output_ranges[model_type][0]
output_data[output_data > output_ranges[model_type][1]] = output_ranges[model_type][1]
discrete_data = (output_data - output_ranges[model_type][0]) // output_ranges[model_type][2] * \
output_ranges[model_type][2] + output_ranges[model_type][0]
model_dict[group]["outputvalues"] = np.arange(output_ranges[model_type][0],
output_ranges[model_type][1] +
output_ranges[model_type][2],
output_ranges[model_type][2])
for m, model_name in enumerate(model_names):
print("{0} {1} {2}".format(group, model_type, model_name))
model_dict[group][model_name] = deepcopy(model_objs[m])
model_dict[group][model_name].fit(group_data[input_columns], discrete_data)
|
Fit machine learning models to predict track error offsets.
Args:
    model_names: List of model names
    model_objs: List of scikit-learn style model objects
    input_columns: List of input variables
    output_columns: Dictionary mapping each track model type to its output column
    output_ranges: Dictionary mapping each track model type to (min, max, step) bin settings
|
entailment
|
def save_models(self, model_path):
"""
Save machine learning models to pickle files.
"""
for group, condition_model_set in self.condition_models.items():
for model_name, model_obj in condition_model_set.items():
out_filename = model_path + \
"{0}_{1}_condition.pkl".format(group,
model_name.replace(" ", "-"))
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for group, size_model_set in self.size_models.items():
for model_name, model_obj in size_model_set.items():
out_filename = model_path + \
"{0}_{1}_size.pkl".format(group,
model_name.replace(" ", "-"))
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for group, dist_model_set in self.size_distribution_models.items():
for model_type, model_objs in dist_model_set.items():
for model_name, model_obj in model_objs.items():
out_filename = model_path + \
"{0}_{1}_{2}_sizedist.pkl".format(group,
model_name.replace(" ", "-"),
model_type)
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
for model_type, track_type_models in self.track_models.items():
for group, track_model_set in track_type_models.items():
for model_name, model_obj in track_model_set.items():
out_filename = model_path + \
"{0}_{1}_{2}_track.pkl".format(group,
model_name.replace(" ", "-"),
model_type)
with open(out_filename, "wb") as pickle_file:
pickle.dump(model_obj,
pickle_file,
pickle.HIGHEST_PROTOCOL)
return
|
Save machine learning models to pickle files.
|
entailment
|
def load_models(self, model_path):
"""
Load models from pickle files.
"""
condition_model_files = sorted(glob(model_path + "*_condition.pkl"))
if len(condition_model_files) > 0:
for condition_model_file in condition_model_files:
model_comps = condition_model_file.split("/")[-1][:-4].split("_")
if model_comps[0] not in self.condition_models.keys():
self.condition_models[model_comps[0]] = {}
model_name = model_comps[1].replace("-", " ")
with open(condition_model_file, "rb") as cmf:
if "condition_threshold" in condition_model_file:
self.condition_models[model_comps[0]][model_name + "_condition_threshold"] = pickle.load(cmf)
else:
self.condition_models[model_comps[0]][model_name] = pickle.load(cmf)
size_model_files = sorted(glob(model_path + "*_size.pkl"))
if len(size_model_files) > 0:
for size_model_file in size_model_files:
model_comps = size_model_file.split("/")[-1][:-4].split("_")
if model_comps[0] not in self.size_models.keys():
self.size_models[model_comps[0]] = {}
model_name = model_comps[1].replace("-", " ")
with open(size_model_file, "rb") as smf:
self.size_models[model_comps[0]][model_name] = pickle.load(smf)
size_dist_model_files = sorted(glob(model_path + "*_sizedist.pkl"))
if len(size_dist_model_files) > 0:
for dist_model_file in size_dist_model_files:
model_comps = dist_model_file.split("/")[-1][:-4].split("_")
if model_comps[0] not in self.size_distribution_models.keys():
self.size_distribution_models[model_comps[0]] = {}
if "_".join(model_comps[2:-1]) not in self.size_distribution_models[model_comps[0]].keys():
self.size_distribution_models[model_comps[0]]["_".join(model_comps[2:-1])] = {}
model_name = model_comps[1].replace("-", " ")
with open(dist_model_file, "rb") as dmf:
self.size_distribution_models[model_comps[0]]["_".join(model_comps[2:-1])][
model_name] = pickle.load(dmf)
track_model_files = sorted(glob(model_path + "*_track.pkl"))
if len(track_model_files) > 0:
for track_model_file in track_model_files:
model_comps = track_model_file.split("/")[-1][:-4].split("_")
group = model_comps[0]
model_name = model_comps[1].replace("-", " ")
model_type = model_comps[2]
if model_type not in self.track_models.keys():
self.track_models[model_type] = {}
if group not in self.track_models[model_type].keys():
self.track_models[model_type][group] = {}
with open(track_model_file, "rb") as tmf:
self.track_models[model_type][group][model_name] = pickle.load(tmf)
|
Load models from pickle files.
|
entailment
|
def output_forecasts_json(self, forecasts,
condition_model_names,
size_model_names,
dist_model_names,
track_model_names,
json_data_path,
out_path):
"""
Output forecast values to geoJSON file format.
:param forecasts:
:param condition_model_names:
        :param size_model_names:
        :param dist_model_names:
:param track_model_names:
:param json_data_path:
:param out_path:
:return:
"""
total_tracks = self.data["forecast"]["total"]
for r in np.arange(total_tracks.shape[0]):
track_id = total_tracks.loc[r, "Track_ID"]
print(track_id)
track_num = track_id.split("_")[-1]
ensemble_name = total_tracks.loc[r, "Ensemble_Name"]
member = total_tracks.loc[r, "Ensemble_Member"]
group = self.data["forecast"]["member"].loc[self.data["forecast"]["member"]["Ensemble_Member"] == member,
self.group_col].values[0]
run_date = track_id.split("_")[-4][:8]
step_forecasts = {}
for ml_model in condition_model_names:
step_forecasts["condition_" + ml_model.replace(" ", "-")] = forecasts["condition"][group].loc[
forecasts["condition"][group]["Track_ID"] == track_id, ml_model]
for ml_model in size_model_names:
step_forecasts["size_" + ml_model.replace(" ", "-")] = forecasts["size"][group][ml_model].loc[
forecasts["size"][group][ml_model]["Track_ID"] == track_id]
for ml_model in dist_model_names:
step_forecasts["dist_" + ml_model.replace(" ", "-")] = forecasts["dist"][group][ml_model].loc[
forecasts["dist"][group][ml_model]["Track_ID"] == track_id]
for model_type in forecasts["track"].keys():
for ml_model in track_model_names:
mframe = forecasts["track"][model_type][group][ml_model]
step_forecasts[model_type + "_" + ml_model.replace(" ", "-")] = mframe.loc[
mframe["Track_ID"] == track_id]
json_file_name = "{0}_{1}_{2}_model_track_{3}.json".format(ensemble_name,
run_date,
member,
track_num)
full_json_path = json_data_path + "/".join([run_date, member]) + "/" + json_file_name
            try:
                with open(full_json_path) as json_file_obj:
                    track_obj = json.load(json_file_obj)
            except FileNotFoundError:
                print(full_json_path + " not found")
                continue
for f, feature in enumerate(track_obj['features']):
del feature['properties']['attributes']
for model_name, fdata in step_forecasts.items():
ml_model_name = model_name.split("_")[1]
if "condition" in model_name:
feature['properties'][model_name] = fdata.values[f]
else:
predcols = []
for col in fdata.columns:
if ml_model_name in col:
predcols.append(col)
feature['properties'][model_name] = fdata.loc[:, predcols].values[f].tolist()
full_path = []
for part in [run_date, member]:
full_path.append(part)
if not os.access(out_path + "/".join(full_path), os.R_OK):
try:
os.mkdir(out_path + "/".join(full_path))
except OSError:
print("directory already created")
out_json_filename = out_path + "/".join(full_path) + "/" + json_file_name
with open(out_json_filename, "w") as out_json_obj:
json.dump(track_obj, out_json_obj, indent=1, sort_keys=True)
return
|
Output forecast values to geoJSON file format.
:param forecasts:
:param condition_model_names:
:param size_model_names:
:param dist_model_names:
:param track_model_names:
:param json_data_path:
:param out_path:
:return:
|
entailment
|
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"):
"""
Output hail forecast values to csv files by run date and ensemble member.
Args:
forecasts:
mode:
csv_path:
Returns:
"""
merged_forecasts = pd.merge(forecasts["condition"],
forecasts["dist"],
on=["Step_ID","Track_ID","Ensemble_Member","Forecast_Hour"])
all_members = self.data[mode]["combo"]["Ensemble_Member"]
members = np.unique(all_members)
all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"])
run_dates = pd.DatetimeIndex(np.unique(all_run_dates))
print(run_dates)
for member in members:
for run_date in run_dates:
mem_run_index = (all_run_dates == run_date) & (all_members == member)
member_forecast = merged_forecasts.loc[mem_run_index]
            member_forecast.to_csv(join(csv_path,
                                        "hail_forecasts_{0}_{1}_{2}.csv".format(
                                            self.ensemble_name, member,
                                            run_date.strftime(run_date_format))))
return
|
Output hail forecast values to csv files by run date and ensemble member.
Args:
forecasts:
mode:
csv_path:
run_date_format: strftime format used for the run date in the output file names.
Returns:
|
entailment
|
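A hedged sketch of the merge-then-split pattern used above: join the condition and distribution forecasts on their shared step keys, then select one slice per (member, run date) and build the per-member CSV name. All frame contents are invented, and no file is actually written here.

from os.path import join
import numpy as np
import pandas as pd

keys = ["Step_ID", "Track_ID", "Ensemble_Member", "Forecast_Hour"]
condition = pd.DataFrame({"Step_ID": ["a_00", "b_00"], "Track_ID": ["a", "b"],
                          "Ensemble_Member": ["m1", "m1"], "Forecast_Hour": [18, 19],
                          "condition_prob": [0.8, 0.3]})
dist = pd.DataFrame({"Step_ID": ["a_00", "b_00"], "Track_ID": ["a", "b"],
                     "Ensemble_Member": ["m1", "m1"], "Forecast_Hour": [18, 19],
                     "shape": [2.0, 1.5], "scale": [5.0, 4.0]})
merged = pd.merge(condition, dist, on=keys)

all_run_dates = pd.DatetimeIndex(["2016-05-01", "2016-05-01"])
for member in np.unique(merged["Ensemble_Member"]):
    for run_date in pd.DatetimeIndex(np.unique(all_run_dates)):
        idx = (all_run_dates == run_date) & (merged["Ensemble_Member"] == member).values
        member_forecast = merged.loc[idx]
        out_name = join("csv_out", "hail_forecasts_ENS_{0}_{1}.csv".format(
            member, run_date.strftime("%Y%m%d-%H%M")))
        print(out_name, len(member_forecast))
        # member_forecast.to_csv(out_name) would be called here in the real method.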
def _carregar(self):
"""Carrega (ou recarrega) a biblioteca SAT. Se a convenção de chamada
ainda não tiver sido definida, será determinada pela extensão do
arquivo da biblioteca.
:raises ValueError: Se a convenção de chamada não puder ser determinada
ou se não for um valor válido.
"""
if self._convencao is None:
if self._caminho.endswith(('.DLL', '.dll')):
self._convencao = constantes.WINDOWS_STDCALL
else:
self._convencao = constantes.STANDARD_C
if self._convencao == constantes.STANDARD_C:
loader = ctypes.CDLL
elif self._convencao == constantes.WINDOWS_STDCALL:
loader = ctypes.WinDLL
else:
raise ValueError('Convencao de chamada desconhecida: {!r}'.format(
self._convencao))
self._libsat = loader(self._caminho)
|
Carrega (ou recarrega) a biblioteca SAT. Se a convenção de chamada
ainda não tiver sido definida, será determinada pela extensão do
arquivo da biblioteca.
:raises ValueError: Se a convenção de chamada não puder ser determinada
ou se não for um valor válido.
|
entailment
|
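A small sketch of the convention-detection logic in `_carregar`: a `.dll` path implies the Windows stdcall loader, anything else the standard C loader. The constants stand in for `satcomum.constantes`, and `getattr` is used so the sketch stays importable on non-Windows platforms; no shared library is actually loaded.

import ctypes

WINDOWS_STDCALL = 1  # stand-ins for satcomum.constantes
STANDARD_C = 2

def escolher_loader(caminho, convencao=None):
    """Return the ctypes loader class implied by the library path/convention."""
    if convencao is None:
        convencao = WINDOWS_STDCALL if caminho.lower().endswith('.dll') else STANDARD_C
    if convencao == STANDARD_C:
        return ctypes.CDLL
    if convencao == WINDOWS_STDCALL:
        # ctypes.WinDLL only exists on Windows; fall back to CDLL elsewhere.
        return getattr(ctypes, 'WinDLL', ctypes.CDLL)
    raise ValueError('Convencao de chamada desconhecida: {!r}'.format(convencao))

print(escolher_loader('/opt/sat/libsat.so'))   # <class 'ctypes.CDLL'>
print(escolher_loader('C:\\SAT\\SAT.DLL'))     # WinDLL on Windows, CDLL elsewhere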
def ativar_sat(self, tipo_certificado, cnpj, codigo_uf):
"""Função ``AtivarSAT`` conforme ER SAT, item 6.1.1.
Ativação do equipamento SAT. Dependendo do tipo do certificado, o
procedimento de ativação é complementado enviando-se o certificado
emitido pela ICP-Brasil (:meth:`comunicar_certificado_icpbrasil`).
:param int tipo_certificado: Deverá ser um dos valores
:attr:`satcomum.constantes.CERTIFICADO_ACSAT_SEFAZ`,
:attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL` ou
:attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL_RENOVACAO`, mas
nenhuma validação será realizada antes que a função de ativação
seja efetivamente invocada.
:param str cnpj: Número do CNPJ do estabelecimento contribuinte,
contendo apenas os dígitos. Nenhuma validação do número do CNPJ
será realizada antes que a função de ativação seja efetivamente
invocada.
:param int codigo_uf: Código da unidade federativa onde o equipamento
SAT será ativado (eg. ``35`` para o Estado de São Paulo). Nenhuma
validação do código da UF será realizada antes que a função de
ativação seja efetivamente invocada.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
return self.invocar__AtivarSAT(
self.gerar_numero_sessao(), tipo_certificado,
self._codigo_ativacao, cnpj, codigo_uf)
|
Função ``AtivarSAT`` conforme ER SAT, item 6.1.1.
Ativação do equipamento SAT. Dependendo do tipo do certificado, o
procedimento de ativação é complementado enviando-se o certificado
emitido pela ICP-Brasil (:meth:`comunicar_certificado_icpbrasil`).
:param int tipo_certificado: Deverá ser um dos valores
:attr:`satcomum.constantes.CERTIFICADO_ACSAT_SEFAZ`,
:attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL` ou
:attr:`satcomum.constantes.CERTIFICADO_ICPBRASIL_RENOVACAO`, mas
nenhuma validação será realizada antes que a função de ativação
seja efetivamente invocada.
:param str cnpj: Número do CNPJ do estabelecimento contribuinte,
contendo apenas os dígitos. Nenhuma validação do número do CNPJ
será realizada antes que a função de ativação seja efetivamente
invocada.
:param int codigo_uf: Código da unidade federativa onde o equipamento
SAT será ativado (eg. ``35`` para o Estado de São Paulo). Nenhuma
validação do código da UF será realizada antes que a função de
ativação seja efetivamente invocada.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
|
entailment
|
def comunicar_certificado_icpbrasil(self, certificado):
"""Função ``ComunicarCertificadoICPBRASIL`` conforme ER SAT, item 6.1.2.
Envio do certificado criado pela ICP-Brasil.
:param str certificado: Conteúdo do certificado digital criado pela
autoridade certificadora ICP-Brasil.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
return self.invocar__ComunicarCertificadoICPBRASIL(
self.gerar_numero_sessao(), self._codigo_ativacao, certificado)
|
Função ``ComunicarCertificadoICPBRASIL`` conforme ER SAT, item 6.1.2.
Envio do certificado criado pela ICP-Brasil.
:param str certificado: Conteúdo do certificado digital criado pela
autoridade certificadora ICP-Brasil.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
|
entailment
|
def enviar_dados_venda(self, dados_venda):
"""Função ``EnviarDadosVenda`` conforme ER SAT, item 6.1.3. Envia o
CF-e de venda para o equipamento SAT, que o enviará para autorização
pela SEFAZ.
:param dados_venda: Uma instância de :class:`~satcfe.entidades.CFeVenda`
ou uma string contendo o XML do CF-e de venda.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
cfe_venda = dados_venda \
if isinstance(dados_venda, basestring) \
else dados_venda.documento()
return self.invocar__EnviarDadosVenda(
self.gerar_numero_sessao(), self._codigo_ativacao, cfe_venda)
|
Função ``EnviarDadosVenda`` conforme ER SAT, item 6.1.3. Envia o
CF-e de venda para o equipamento SAT, que o enviará para autorização
pela SEFAZ.
:param dados_venda: Uma instância de :class:`~satcfe.entidades.CFeVenda`
ou uma string contendo o XML do CF-e de venda.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
|
entailment
|
def cancelar_ultima_venda(self, chave_cfe, dados_cancelamento):
"""Função ``CancelarUltimaVenda`` conforme ER SAT, item 6.1.4. Envia o
CF-e de cancelamento para o equipamento SAT, que o enviará para
autorização e cancelamento do CF-e pela SEFAZ.
:param chave_cfe: String contendo a chave do CF-e a ser cancelado,
prefixada com o literal ``CFe``.
:param dados_cancelamento: Uma instância
de :class:`~satcfe.entidades.CFeCancelamento` ou uma string
contendo o XML do CF-e de cancelamento.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
cfe_canc = dados_cancelamento \
if isinstance(dados_cancelamento, basestring) \
else dados_cancelamento.documento()
return self.invocar__CancelarUltimaVenda(
self.gerar_numero_sessao(), self._codigo_ativacao,
chave_cfe, cfe_canc)
|
Função ``CancelarUltimaVenda`` conforme ER SAT, item 6.1.4. Envia o
CF-e de cancelamento para o equipamento SAT, que o enviará para
autorização e cancelamento do CF-e pela SEFAZ.
:param chave_cfe: String contendo a chave do CF-e a ser cancelado,
prefixada com o literal ``CFe``.
:param dados_cancelamento: Uma instância
de :class:`~satcfe.entidades.CFeCancelamento` ou uma string
contendo o XML do CF-e de cancelamento.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
|
entailment
|
def consultar_numero_sessao(self, numero_sessao):
"""Função ``ConsultarNumeroSessao`` conforme ER SAT, item 6.1.8.
Consulta o equipamento SAT por um número de sessão específico.
:param int numero_sessao: Número da sessão que se quer consultar.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
return self.invocar__ConsultarNumeroSessao(self.gerar_numero_sessao(),
self._codigo_ativacao, numero_sessao)
|
Função ``ConsultarNumeroSessao`` conforme ER SAT, item 6.1.8.
Consulta o equipamento SAT por um número de sessão específico.
:param int numero_sessao: Número da sessão que se quer consultar.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
|
entailment
|
def configurar_interface_de_rede(self, configuracao):
"""Função ``ConfigurarInterfaceDeRede`` conforme ER SAT, item 6.1.9.
    Configuração da interface de comunicação do equipamento SAT.
:param configuracao: Instância de :class:`~satcfe.rede.ConfiguracaoRede`
ou uma string contendo o XML com as configurações de rede.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
conf_xml = configuracao \
if isinstance(configuracao, basestring) \
else configuracao.documento()
return self.invocar__ConfigurarInterfaceDeRede(
self.gerar_numero_sessao(), self._codigo_ativacao, conf_xml)
|
Função ``ConfigurarInterfaceDeRede`` conforme ER SAT, item 6.1.9.
Configuração da interface de comunicação do equipamento SAT.
:param configuracao: Instância de :class:`~satcfe.rede.ConfiguracaoRede`
ou uma string contendo o XML com as configurações de rede.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
|
entailment
|
def associar_assinatura(self, sequencia_cnpj, assinatura_ac):
"""Função ``AssociarAssinatura`` conforme ER SAT, item 6.1.10.
Associação da assinatura do aplicativo comercial.
:param sequencia_cnpj: Sequência string de 28 dígitos composta do CNPJ
do desenvolvedor da AC e do CNPJ do estabelecimento comercial
contribuinte, conforme ER SAT, item 2.3.1.
:param assinatura_ac: Sequência string contendo a assinatura digital do
parâmetro ``sequencia_cnpj`` codificada em base64.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
"""
return self.invocar__AssociarAssinatura(
self.gerar_numero_sessao(), self._codigo_ativacao,
sequencia_cnpj, assinatura_ac)
|
Função ``AssociarAssinatura`` conforme ER SAT, item 6.1.10.
Associação da assinatura do aplicativo comercial.
:param sequencia_cnpj: Sequência string de 28 dígitos composta do CNPJ
do desenvolvedor da AC e do CNPJ do estabelecimento comercial
contribuinte, conforme ER SAT, item 2.3.1.
:param assinatura_ac: Sequência string contendo a assinatura digital do
parâmetro ``sequencia_cnpj`` codificada em base64.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
|
entailment
|
def trocar_codigo_de_ativacao(self, novo_codigo_ativacao,
opcao=constantes.CODIGO_ATIVACAO_REGULAR,
codigo_emergencia=None):
"""Função ``TrocarCodigoDeAtivacao`` conforme ER SAT, item 6.1.15.
Troca do código de ativação do equipamento SAT.
:param str novo_codigo_ativacao: O novo código de ativação escolhido
pelo contribuinte.
:param int opcao: Indica se deverá ser utilizado o código de ativação
atualmente configurado, que é um código de ativação regular,
definido pelo contribuinte, ou se deverá ser usado um código de
emergência. Deverá ser o valor de uma das constantes
:attr:`satcomum.constantes.CODIGO_ATIVACAO_REGULAR` (padrão) ou
:attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA`.
Nenhuma validação será realizada antes que a função seja
efetivamente invocada. Entretanto, se opção de código de ativação
indicada for ``CODIGO_ATIVACAO_EMERGENCIA``, então o argumento que
informa o ``codigo_emergencia`` será checado e deverá avaliar como
verdadeiro.
:param str codigo_emergencia: O código de ativação de emergência, que
é definido pelo fabricante do equipamento SAT. Este código deverá
ser usado quando o usuário perder o código de ativação regular, e
precisar definir um novo código de ativação. Note que, o argumento
``opcao`` deverá ser informado com o valor
:attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA` para que
este código de emergência seja considerado.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
:raises ValueError: Se o novo código de ativação avaliar como falso
(possuir uma string nula por exemplo) ou se o código de emergencia
avaliar como falso quando a opção for pelo código de ativação de
emergência.
.. warning::
Os argumentos da função ``TrocarCodigoDeAtivacao`` requerem que o
novo código de ativação seja especificado duas vezes (dois
argumentos com o mesmo conteúdo, como confirmação). Este método irá
simplesmente informar duas vezes o argumento
``novo_codigo_ativacao`` na função SAT, mantendo a confirmação do
código de ativação fora do escopo desta API.
"""
if not novo_codigo_ativacao:
raise ValueError('Novo codigo de ativacao invalido: {!r}'.format(
novo_codigo_ativacao))
codigo_ativacao = self._codigo_ativacao
if opcao == constantes.CODIGO_ATIVACAO_EMERGENCIA:
if codigo_emergencia:
codigo_ativacao = codigo_emergencia
else:
raise ValueError('Codigo de ativacao de emergencia invalido: '
'{!r} (opcao={!r})'.format(codigo_emergencia, opcao))
return self.invocar__TrocarCodigoDeAtivacao(
self.gerar_numero_sessao(), codigo_ativacao, opcao,
novo_codigo_ativacao, novo_codigo_ativacao)
|
Função ``TrocarCodigoDeAtivacao`` conforme ER SAT, item 6.1.15.
Troca do código de ativação do equipamento SAT.
:param str novo_codigo_ativacao: O novo código de ativação escolhido
pelo contribuinte.
:param int opcao: Indica se deverá ser utilizado o código de ativação
atualmente configurado, que é um código de ativação regular,
definido pelo contribuinte, ou se deverá ser usado um código de
emergência. Deverá ser o valor de uma das constantes
:attr:`satcomum.constantes.CODIGO_ATIVACAO_REGULAR` (padrão) ou
:attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA`.
Nenhuma validação será realizada antes que a função seja
efetivamente invocada. Entretanto, se opção de código de ativação
indicada for ``CODIGO_ATIVACAO_EMERGENCIA``, então o argumento que
informa o ``codigo_emergencia`` será checado e deverá avaliar como
verdadeiro.
:param str codigo_emergencia: O código de ativação de emergência, que
é definido pelo fabricante do equipamento SAT. Este código deverá
ser usado quando o usuário perder o código de ativação regular, e
precisar definir um novo código de ativação. Note que, o argumento
``opcao`` deverá ser informado com o valor
:attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA` para que
este código de emergência seja considerado.
:return: Retorna *verbatim* a resposta da função SAT.
:rtype: string
:raises ValueError: Se o novo código de ativação avaliar como falso
(possuir uma string nula por exemplo) ou se o código de emergencia
avaliar como falso quando a opção for pelo código de ativação de
emergência.
.. warning::
Os argumentos da função ``TrocarCodigoDeAtivacao`` requerem que o
novo código de ativação seja especificado duas vezes (dois
argumentos com o mesmo conteúdo, como confirmação). Este método irá
simplesmente informar duas vezes o argumento
``novo_codigo_ativacao`` na função SAT, mantendo a confirmação do
código de ativação fora do escopo desta API.
|
entailment
|
def load_forecasts(self):
"""
Loads the forecast files and gathers the forecast information into pandas DataFrames.
"""
forecast_path = self.forecast_json_path + "/{0}/{1}/".format(self.run_date.strftime("%Y%m%d"),
self.ensemble_member)
forecast_files = sorted(glob(forecast_path + "*.json"))
for forecast_file in forecast_files:
file_obj = open(forecast_file)
json_obj = json.load(file_obj)
file_obj.close()
track_id = json_obj['properties']["id"]
obs_track_id = json_obj['properties']["obs_track_id"]
forecast_hours = json_obj['properties']['times']
duration = json_obj['properties']['duration']
for f, feature in enumerate(json_obj['features']):
area = np.sum(feature["properties"]["masks"])
step_id = track_id + "_{0:02d}".format(f)
for model_type in self.model_types:
for model_name in self.model_names[model_type]:
prediction = feature['properties'][model_type + "_" + model_name.replace(" ", "-")]
if model_type == "condition":
prediction = [prediction]
row = [track_id, obs_track_id, self.ensemble_name, self.ensemble_member, forecast_hours[f],
f + 1, duration, area] + prediction
self.forecasts[model_type][model_name].loc[step_id] = row
|
Loads the forecast files and gathers the forecast information into pandas DataFrames.
|
entailment
|
def load_obs(self):
"""
Loads the track total and step files and merges the information into a single data frame.
"""
track_total_file = self.track_data_csv_path + \
"track_total_{0}_{1}_{2}.csv".format(self.ensemble_name,
self.ensemble_member,
self.run_date.strftime("%Y%m%d"))
track_step_file = self.track_data_csv_path + \
"track_step_{0}_{1}_{2}.csv".format(self.ensemble_name,
self.ensemble_member,
self.run_date.strftime("%Y%m%d"))
track_total_cols = ["Track_ID", "Translation_Error_X", "Translation_Error_Y", "Start_Time_Error"]
track_step_cols = ["Step_ID", "Track_ID", "Hail_Size", "Shape", "Location", "Scale"]
track_total_data = pd.read_csv(track_total_file, usecols=track_total_cols)
track_step_data = pd.read_csv(track_step_file, usecols=track_step_cols)
obs_data = pd.merge(track_step_data, track_total_data, on="Track_ID", how="left")
self.obs = obs_data
|
Loads the track total and step files and merges the information into a single data frame.
|
entailment
|
def merge_obs(self):
"""
Match forecasts and observations.
"""
for model_type in self.model_types:
self.matched_forecasts[model_type] = {}
for model_name in self.model_names[model_type]:
self.matched_forecasts[model_type][model_name] = pd.merge(self.forecasts[model_type][model_name],
self.obs, right_on="Step_ID", how="left",
left_index=True)
|
Match forecasts and observations.
|
entailment
|
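A toy illustration of the join above: the forecast frame is indexed by step ID while the observation frame carries a `Step_ID` column, so `left_index=True, right_on="Step_ID"` lines them up. Values are invented.

import pandas as pd

forecasts = pd.DataFrame({"prob": [0.9, 0.2]},
                         index=["trackA_00", "trackA_01"])  # index plays the Step_ID role
obs = pd.DataFrame({"Step_ID": ["trackA_00", "trackA_01"],
                    "Hail_Size": [32.0, 0.0]})

matched = pd.merge(forecasts, obs, right_on="Step_ID", how="left", left_index=True)
print(matched)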
def crps(self, model_type, model_name, condition_model_name, condition_threshold, query=None):
"""
Calculates the cumulative ranked probability score (CRPS) on the forecast data.
Args:
model_type: model type being evaluated.
model_name: machine learning model being evaluated.
condition_model_name: Name of the hail/no-hail model being evaluated
condition_threshold: Threshold for using hail size CDF
query: pandas query string to filter the forecasts based on the metadata
Returns:
a DistributedCRPS object
"""
def gamma_cdf(x, a, loc, b):
if a == 0 or b == 0:
cdf = np.ones(x.shape)
else:
cdf = gamma.cdf(x, a, loc, b)
return cdf
crps_obj = DistributedCRPS(self.dist_thresholds)
if query is not None:
sub_forecasts = self.matched_forecasts[model_type][model_name].query(query)
sub_forecasts = sub_forecasts.reset_index(drop=True)
condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
condition_forecasts = condition_forecasts.reset_index(drop=True)
else:
sub_forecasts = self.matched_forecasts[model_type][model_name]
condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
if sub_forecasts.shape[0] > 0:
if model_type == "dist":
forecast_cdfs = np.zeros((sub_forecasts.shape[0], self.dist_thresholds.size))
for f in range(sub_forecasts.shape[0]):
condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
if condition_prob >= condition_threshold:
f_params = [0, 0, 0]
else:
f_params = sub_forecasts[self.forecast_bins[model_type]].values[f]
forecast_cdfs[f] = gamma_cdf(self.dist_thresholds, f_params[0], f_params[1], f_params[2])
obs_cdfs = np.array([gamma_cdf(self.dist_thresholds, *params)
for params in sub_forecasts[self.type_cols[model_type]].values])
crps_obj.update(forecast_cdfs, obs_cdfs)
else:
crps_obj.update(sub_forecasts[self.forecast_bins[model_type].astype(str)].values,
sub_forecasts[self.type_cols[model_type]].values)
return crps_obj
|
Calculates the cumulative ranked probability score (CRPS) on the forecast data.
Args:
model_type: model type being evaluated.
model_name: machine learning model being evaluated.
condition_model_name: Name of the hail/no-hail model being evaluated
condition_threshold: Threshold for using hail size CDF
query: pandas query string to filter the forecasts based on the metadata
Returns:
a DistributedCRPS object
|
entailment
|
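A short, self-contained sketch of the distribution branch: evaluate forecast and observed gamma CDFs on the shared size thresholds, using the same degenerate all-ones CDF when the fitted parameters are zero (the no-hail case handled by the nested `gamma_cdf` above). The parameter values are made up, and the `DistributedCRPS` accumulator is replaced by a plain mean squared CDF difference.

import numpy as np
from scipy.stats import gamma

def gamma_cdf(x, a, loc, scale):
    if a == 0 or scale == 0:
        return np.ones(x.shape)   # degenerate "no hail" distribution
    return gamma.cdf(x, a, loc, scale)

dist_thresholds = np.arange(5, 105, 5, dtype=float)   # mm, illustrative
forecast_params = (2.0, 0.0, 8.0)                     # shape, loc, scale (made up)
obs_params = (2.5, 0.0, 7.0)

forecast_cdf = gamma_cdf(dist_thresholds, *forecast_params)
obs_cdf = gamma_cdf(dist_thresholds, *obs_params)
# Pointwise squared CDF difference is the quantity a CRPS accumulator averages.
print(np.mean((forecast_cdf - obs_cdf) ** 2))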
def roc(self, model_type, model_name, intensity_threshold, prob_thresholds, query=None):
"""
Calculates a ROC curve at a specified intensity threshold.
Args:
model_type: type of model being evaluated (e.g. size).
model_name: machine learning model being evaluated
intensity_threshold: forecast bin used as the split point for evaluation
prob_thresholds: Array of probability thresholds being evaluated.
query: str to filter forecasts based on values of forecasts, obs, and metadata.
Returns:
A DistributedROC object
"""
roc_obj = DistributedROC(prob_thresholds, 0.5)
if query is not None:
sub_forecasts = self.matched_forecasts[model_type][model_name].query(query)
sub_forecasts = sub_forecasts.reset_index(drop=True)
else:
sub_forecasts = self.matched_forecasts[model_type][model_name]
obs_values = np.zeros(sub_forecasts.shape[0])
if sub_forecasts.shape[0] > 0:
if model_type == "dist":
forecast_values = np.array([gamma_sf(intensity_threshold, *params)
for params in sub_forecasts[self.forecast_bins[model_type]].values])
obs_probs = np.array([gamma_sf(intensity_threshold, *params)
for params in sub_forecasts[self.type_cols[model_type]].values])
obs_values[obs_probs >= 0.01] = 1
elif len(self.forecast_bins[model_type]) > 1:
fbin = np.argmin(np.abs(self.forecast_bins[model_type] - intensity_threshold))
forecast_values = 1 - sub_forecasts[self.forecast_bins[model_type].astype(str)].values.cumsum(axis=1)[:,
fbin]
obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1
else:
forecast_values = sub_forecasts[self.forecast_bins[model_type].astype(str)[0]].values
obs_values[sub_forecasts[self.type_cols[model_type]].values >= intensity_threshold] = 1
roc_obj.update(forecast_values, obs_values)
return roc_obj
|
Calculates a ROC curve at a specified intensity threshold.
Args:
model_type: type of model being evaluated (e.g. size).
model_name: machine learning model being evaluated
intensity_threshold: forecast bin used as the split point for evaluation
prob_thresholds: Array of probability thresholds being evaluated.
query: str to filter forecasts based on values of forecasts, obs, and metadata.
Returns:
A DistributedROC object
|
entailment
|
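For the `dist` branch above, forecast probabilities come from the gamma survival function at the intensity threshold, and an observed object counts as an event when its fitted distribution places at least 1% of its mass above that threshold. The `gamma_sf` helper in the source presumably wraps `scipy.stats.gamma.sf`, which is used directly in this toy version; parameters are invented and `DistributedROC` is not reproduced.

import numpy as np
from scipy.stats import gamma

intensity_threshold = 25.0                      # mm
forecast_params = np.array([[2.0, 0.0, 8.0],    # shape, loc, scale per object (made up)
                            [1.5, 0.0, 3.0]])
obs_params = np.array([[2.2, 0.0, 9.0],
                       [0.5, 0.0, 1.0]])

forecast_values = np.array([gamma.sf(intensity_threshold, *p) for p in forecast_params])
obs_probs = np.array([gamma.sf(intensity_threshold, *p) for p in obs_params])
obs_values = (obs_probs >= 0.01).astype(int)
print(forecast_values, obs_values)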
def sample_forecast_max_hail(self, dist_model_name, condition_model_name,
num_samples, condition_threshold=0.5, query=None):
"""
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.
Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals
num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum
value within each area sample is used.
Args:
dist_model_name: Name of the distribution machine learning model being evaluated
condition_model_name: Name of the hail/no-hail model being evaluated
num_samples: Number of maximum hail samples to draw
condition_threshold: Threshold for drawing hail samples
query: A str that selects a subset of the data for evaluation
Returns:
A numpy array containing maximum hail samples for each forecast object.
"""
if query is not None:
dist_forecasts = self.matched_forecasts["dist"][dist_model_name].query(query)
dist_forecasts = dist_forecasts.reset_index(drop=True)
condition_forecasts = self.matched_forecasts["condition"][condition_model_name].query(query)
condition_forecasts = condition_forecasts.reset_index(drop=True)
else:
dist_forecasts = self.matched_forecasts["dist"][dist_model_name]
condition_forecasts = self.matched_forecasts["condition"][condition_model_name]
max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
areas = dist_forecasts["Area"].values
for f in np.arange(dist_forecasts.shape[0]):
condition_prob = condition_forecasts.loc[f, self.forecast_bins["condition"][0]]
if condition_prob >= condition_threshold:
max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[f, self.forecast_bins["dist"]].values,
size=(num_samples, areas[f])).max(axis=1))
return max_hail_samples
|
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes.
Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals
num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum
value within each area sample is used.
Args:
dist_model_name: Name of the distribution machine learning model being evaluated
condition_model_name: Name of the hail/no-hail model being evaluated
num_samples: Number of maximum hail samples to draw
condition_threshold: Threshold for drawing hail samples
query: A str that selects a subset of the data for evaluation
Returns:
A numpy array containing maximum hail samples for each forecast object.
|
entailment
|
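A compact sketch of the sampling step: when an object's condition probability clears the threshold, draw `num_samples x area` sizes from its fitted gamma distribution and keep the maximum of each realization. The parameters, area, and probabilities are invented.

import numpy as np
from scipy.stats import gamma

num_samples = 1000
area = 40                                  # grid cells in the hail object
shape_p, loc_p, scale_p = 2.0, 0.0, 8.0    # fitted gamma parameters (made up)
condition_prob, condition_threshold = 0.8, 0.5

max_hail_samples = np.zeros(num_samples)
if condition_prob >= condition_threshold:
    draws = gamma.rvs(shape_p, loc_p, scale_p, size=(num_samples, area))
    max_hail_samples = np.sort(draws.max(axis=1))

print(max_hail_samples[[0, num_samples // 2, -1]])  # smallest, median, largest maxima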
def get_params(self):
"""Get signature and params
"""
params = {
'key': self.get_app_key(),
'uid': self.user_id,
'widget': self.widget_code
}
products_number = len(self.products)
if self.get_api_type() == self.API_GOODS:
if isinstance(self.products, list):
if products_number == 1:
product = self.products[0]
if isinstance(product, Product):
post_trial_product = None
if isinstance(product.get_trial_product(), Product):
post_trial_product = product
product = product.get_trial_product()
params['amount'] = product.get_amount()
params['currencyCode'] = product.get_currency_code()
params['ag_name'] = product.get_name()
params['ag_external_id'] = product.get_id()
params['ag_type'] = product.get_type()
if product.get_type() == Product.TYPE_SUBSCRIPTION:
params['ag_period_length'] = product.get_period_length()
params['ag_period_type'] = product.get_period_type()
if product.is_recurring():
params['ag_recurring'] = 1 if product.is_recurring() else 0
if post_trial_product:
params['ag_trial'] = 1
params['ag_post_trial_external_id'] = post_trial_product.get_id()
params['ag_post_trial_period_length'] = post_trial_product.get_period_length()
params['ag_post_trial_period_type'] = post_trial_product.get_period_type()
params['ag_post_trial_name'] = post_trial_product.get_name()
params['post_trial_amount'] = post_trial_product.get_amount()
params['post_trial_currencyCode'] = post_trial_product.get_currency_code()
else:
self.append_to_errors('Not a Product instance')
else:
self.append_to_errors('Only 1 product is allowed')
elif self.get_api_type() == self.API_CART:
index = 0
for product in self.products:
params['external_ids[' + str(index) + ']'] = product.get_id()
if product.get_amount() > 0:
params['prices[' + str(index) + ']'] = product.get_amount()
if product.get_currency_code() != '' and product.get_currency_code() is not None:
params['currencies[' + str(index) + ']'] = product.get_currency_code()
index += 1
params['sign_version'] = signature_version = str(self.get_default_widget_signature())
if not self.is_empty(self.extra_params, 'sign_version'):
signature_version = params['sign_version'] = str(self.extra_params['sign_version'])
params = self.array_merge(params, self.extra_params)
params['sign'] = self.calculate_signature(params, self.get_secret_key(), int(signature_version))
return params
|
Get signature and params
|
entailment
|
def hms(segundos): # TODO: mover para util.py
"""
Retorna o número de horas, minutos e segundos a partir do total de
segundos informado.
.. sourcecode:: python
>>> hms(1)
(0, 0, 1)
>>> hms(60)
(0, 1, 0)
>>> hms(3600)
(1, 0, 0)
>>> hms(3601)
(1, 0, 1)
>>> hms(3661)
(1, 1, 1)
:param int segundos: O número total de segundos.
    :returns: Uma tupla contendo três elementos representando, respectivamente,
o número de horas, minutos e segundos calculados a partir do total de
segundos.
:rtype: tuple
"""
    h = segundos // 3600
    m = (segundos - (3600 * h)) // 60
    s = segundos - (3600 * h) - (m * 60)
    return (h, m, s)
|
Retorna o número de horas, minutos e segundos a partir do total de
segundos informado.
.. sourcecode:: python
>>> hms(1)
(0, 0, 1)
>>> hms(60)
(0, 1, 0)
>>> hms(3600)
(1, 0, 0)
>>> hms(3601)
(1, 0, 1)
>>> hms(3661)
(1, 1, 1)
:param int segundos: O número total de segundos.
:returns: Uma tupla contendo três elementos representando, respectivamente,
o número de horas, minutos e segundos calculados a partir do total de
segundos.
:rtype: tuple
|
entailment
|
def hms_humanizado(segundos): # TODO: mover para util.py
"""
Retorna um texto legível que descreve o total de horas, minutos e segundos
calculados a partir do total de segundos informados.
.. sourcecode:: python
>>> hms_humanizado(0)
'zero segundos'
>>> hms_humanizado(1)
'1 segundo'
>>> hms_humanizado(2)
'2 segundos'
>>> hms_humanizado(3600)
'1 hora'
>>> hms_humanizado(3602)
'1 hora e 2 segundos'
>>> hms_humanizado(3721)
'1 hora, 2 minutos e 1 segundo'
:rtype: str
"""
p = lambda n, s, p: p if n > 1 else s
h, m, s = hms(segundos)
tokens = [
'' if h == 0 else '{:d} {}'.format(h, p(h, 'hora', 'horas')),
'' if m == 0 else '{:d} {}'.format(m, p(m, 'minuto', 'minutos')),
'' if s == 0 else '{:d} {}'.format(s, p(s, 'segundo', 'segundos'))]
tokens = [token for token in tokens if token]
if len(tokens) == 1:
return tokens[0]
if len(tokens) > 1:
return '{} e {}'.format(', '.join(tokens[:-1]), tokens[-1])
return 'zero segundos'
|
Retorna um texto legível que descreve o total de horas, minutos e segundos
calculados a partir do total de segundos informados.
.. sourcecode:: python
>>> hms_humanizado(0)
'zero segundos'
>>> hms_humanizado(1)
'1 segundo'
>>> hms_humanizado(2)
'2 segundos'
>>> hms_humanizado(3600)
'1 hora'
>>> hms_humanizado(3602)
'1 hora e 2 segundos'
>>> hms_humanizado(3721)
'1 hora, 2 minutos e 1 segundo'
:rtype: str
|
entailment
|
def format_grib_name(self, selected_variable):
"""
Assigns name to grib2 message number with name 'unknown'. Names based on NOAA grib2 abbreviations.
Args:
selected_variable(str): name of selected variable for loading
Names:
3: LCDC: Low Cloud Cover
4: MCDC: Medium Cloud Cover
5: HCDC: High Cloud Cover
197: RETOP: Echo Top
198: MAXREF: Hourly Maximum of Simulated Reflectivity at 1 km AGL
199: MXUPHL: Hourly Maximum of Updraft Helicity over Layer 2km to 5 km AGL, and 0km to 3km AGL
examples:' MXUPHL_5000' or 'MXUPHL_3000'
200: MNUPHL: Hourly Minimum of Updraft Helicity at same levels of MXUPHL
examples:' MNUPHL_5000' or 'MNUPHL_3000'
220: MAXUVV: Hourly Maximum of Upward Vertical Velocity in the lowest 400hPa
221: MAXDVV: Hourly Maximum of Downward Vertical Velocity in the lowest 400hPa
222: MAXUW: U Component of Hourly Maximum 10m Wind Speed
223: MAXVW: V Component of Hourly Maximum 10m Wind Speed
Returns:
        Given an unknown string name of a variable, returns the grib2 message Id
        and units of the variable, based on the self.unknown_names and
        self.unknown_units dictionaries above. Allows access to the data values
        of an unknown variable name, given the Id.
"""
names = self.unknown_names
units = self.unknown_units
for key, value in names.items():
if selected_variable == value:
Id = key
u = units[key]
return Id, u
|
Assigns name to grib2 message number with name 'unknown'. Names based on NOAA grib2 abbreviations.
Args:
selected_variable(str): name of selected variable for loading
Names:
3: LCDC: Low Cloud Cover
4: MCDC: Medium Cloud Cover
5: HCDC: High Cloud Cover
197: RETOP: Echo Top
198: MAXREF: Hourly Maximum of Simulated Reflectivity at 1 km AGL
199: MXUPHL: Hourly Maximum of Updraft Helicity over Layer 2km to 5 km AGL, and 0km to 3km AGL
examples:' MXUPHL_5000' or 'MXUPHL_3000'
200: MNUPHL: Hourly Minimum of Updraft Helicity at same levels of MXUPHL
examples:' MNUPHL_5000' or 'MNUPHL_3000'
220: MAXUVV: Hourly Maximum of Upward Vertical Velocity in the lowest 400hPa
221: MAXDVV: Hourly Maximum of Downward Vertical Velocity in the lowest 400hPa
222: MAXUW: U Component of Hourly Maximum 10m Wind Speed
223: MAXVW: V Component of Hourly Maximum 10m Wind Speed
Returns:
Given an unknown string name of a variable, returns the grib2 message Id
and units of the variable, based on the self.unknown_names and
self.unknown_units dictionaries above. Allows access to the data values
of an unknown variable name, given the Id.
|
entailment
|
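The lookup above is a reverse search of an id-to-name dictionary. A small stand-alone illustration with a subset of the table from the docstring (the real `unknown_names`/`unknown_units` dicts live on the object, and the units shown here are only illustrative); unlike the original, this version raises instead of silently returning None when no match is found.

unknown_names = {197: "RETOP", 198: "MAXREF", 199: "MXUPHL"}
unknown_units = {197: "m", 198: "dBZ", 199: "m**2 s**-2"}

def format_grib_name(selected_variable):
    for key, value in unknown_names.items():
        if selected_variable == value:
            return key, unknown_units[key]
    raise KeyError("No grib2 id known for {0!r}".format(selected_variable))

print(format_grib_name("MAXREF"))  # (198, 'dBZ')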
def load_data(self):
"""
Loads data from grib2 file objects or list of grib2 file objects. Handles specific grib2 variable names
and grib2 message numbers.
Returns:
Array of data loaded from files in (time, y, x) dimensions, Units
"""
file_objects = self.file_objects
var = self.variable
valid_date = self.valid_dates
data = self.data
unknown_names = self.unknown_names
unknown_units = self.unknown_units
member = self.member
lat = self.lat
lon = self.lon
if self.sector_ind_path:
inds_file = pd.read_csv(self.sector_ind_path+'sector_data_indices.csv')
inds = inds_file.loc[:,'indices']
out_x = self.mapping_data["x"]
if not file_objects:
print()
print("No {0} model runs on {1}".format(member,self.run_date))
print()
units = None
return self.data, units
for f, file in enumerate(file_objects):
grib = pygrib.open(file)
if type(var) is int:
data_values = grib[var].values
#lat, lon = grib[var].latlons()
#proj = Proj(grib[var].projparams)
if grib[var].units == 'unknown':
Id = grib[var].parameterNumber
units = self.unknown_units[Id]
else:
units = grib[var].units
elif type(var) is str:
if '_' in var:
variable = var.split('_')[0]
level = int(var.split('_')[1])
if variable in unknown_names.values():
Id, units = self.format_grib_name(variable)
data_values = grib.select(parameterNumber=Id, level=level)[0].values
#lat, lon = grib.select(parameterNumber=Id, level=level)[0].latlons()
#proj = Proj(grib.select(parameterNumber=Id, level=level)[0].projparams)
else:
data_values = grib.select(name=variable, level=level)[0].values
units = grib.select(name=variable, level=level)[0].units
#lat, lon = grib.select(name=variable, level=level)[0].latlons()
#proj = Proj(grib.select(name=variable, level=level)[0].projparams)
else:
if var in unknown_names.values():
Id, units = self.format_grib_name(var)
data_values = grib.select(parameterNumber=Id)[0].values
#lat, lon = grib.select(parameterNumber=Id)[0].latlons()
#proj = Proj(grib.select(parameterNumber=Id)[0].projparams)
elif len(grib.select(name=var)) > 1:
raise NameError("Multiple '{0}' records found. Rename with level:'{0}_level'".format(var))
else:
data_values = grib.select(name=var)[0].values
units = grib.select(name=var)[0].units
#lat, lon = grib.select(name=var)[0].latlons()
#proj = Proj(grib.select(name=var)[0].projparams)
        if data is None:
            data = np.empty((len(valid_date), out_x.shape[0], out_x.shape[1]), dtype=float)
        if self.sector_ind_path:
            data[f] = data_values[:].flatten()[inds].reshape(out_x.shape)
        else:
            data[f] = data_values[:]
        grib.close()
return data, units
|
Loads data from grib2 file objects or list of grib2 file objects. Handles specific grib2 variable names
and grib2 message numbers.
Returns:
Array of data loaded from files in (time, y, x) dimensions, Units
|
entailment
|
def load_forecasts(self):
"""
Load the forecast files into memory.
"""
run_date_str = self.run_date.strftime("%Y%m%d")
for model_name in self.model_names:
self.raw_forecasts[model_name] = {}
forecast_file = self.forecast_path + run_date_str + "/" + \
model_name.replace(" ", "-") + "_hailprobs_{0}_{1}.nc".format(self.ensemble_member, run_date_str)
forecast_obj = Dataset(forecast_file)
forecast_hours = forecast_obj.variables["forecast_hour"][:]
valid_hour_indices = np.where((self.start_hour <= forecast_hours) & (forecast_hours <= self.end_hour))[0]
for size_threshold in self.size_thresholds:
self.raw_forecasts[model_name][size_threshold] = \
forecast_obj.variables["prob_hail_{0:02d}_mm".format(size_threshold)][valid_hour_indices]
forecast_obj.close()
|
Load the forecast files into memory.
|
entailment
|
def get_window_forecasts(self):
"""
Aggregate the forecasts within the specified time windows.
"""
for model_name in self.model_names:
self.window_forecasts[model_name] = {}
for size_threshold in self.size_thresholds:
self.window_forecasts[model_name][size_threshold] = \
np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0)
for sl in self.hour_windows])
|
Aggregate the forecasts within the specified time windows.
|
entailment
|
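The aggregation above reduces an (hour, y, x) probability array over each slice of forecast hours. A toy version with made-up shapes; the real class keeps the slices in `self.hour_windows` and the probabilities in `self.raw_forecasts`.

import numpy as np

raw = np.random.RandomState(0).rand(24, 3, 3)      # (forecast hour, y, x), toy grid
hour_windows = [slice(0, 6), slice(6, 12), slice(12, 18), slice(18, 24)]

window_forecasts = np.array([raw[sl].sum(axis=0) for sl in hour_windows])
print(window_forecasts.shape)  # (4, 3, 3): one aggregated field per window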
def load_obs(self, mask_threshold=0.5):
"""
Loads observations and masking grid (if needed).
:param mask_threshold: Values greater than the threshold are kept, others are masked.
:return:
"""
start_date = self.run_date + timedelta(hours=self.start_hour)
end_date = self.run_date + timedelta(hours=self.end_hour)
mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)
mrms_grid.load_data()
if len(mrms_grid.data) > 0:
self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)
self.window_obs[self.mrms_variable] = np.array([self.raw_obs[self.mrms_variable][sl].max(axis=0)
for sl in self.hour_windows])
if self.obs_mask:
mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)
mask_grid.load_data()
self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)
self.window_obs[self.mask_variable] = np.array([self.raw_obs[self.mask_variable][sl].max(axis=0)
for sl in self.hour_windows])
|
Loads observations and masking grid (if needed).
:param mask_threshold: Values greater than the threshold are kept, others are masked.
:return:
|
entailment
|
def dilate_obs(self, dilation_radius):
"""
Use a dilation filter to grow positive observation areas by a specified number of grid points
:param dilation_radius: Number of times to dilate the grid.
:return:
"""
for s in self.size_thresholds:
self.dilated_obs[s] = np.zeros(self.window_obs[self.mrms_variable].shape)
for t in range(self.dilated_obs[s].shape[0]):
self.dilated_obs[s][t][binary_dilation(self.window_obs[self.mrms_variable][t] >= s, iterations=dilation_radius)] = 1
|
Use a dilation filter to grow positive observation areas by a specified number of grid points
:param dilation_radius: Number of times to dilate the grid.
:return:
|
entailment
|
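A minimal illustration of the dilation step: grow the exceedance mask outward by `dilation_radius` applications of binary dilation so near-misses on the grid still count as observed events. Field, threshold, and radius are toy values.

import numpy as np
from scipy.ndimage import binary_dilation

window_obs = np.zeros((7, 7))
window_obs[3, 3] = 30.0          # one observed 30 mm pixel
size_threshold = 25
dilation_radius = 2

dilated = np.zeros_like(window_obs)
dilated[binary_dilation(window_obs >= size_threshold, iterations=dilation_radius)] = 1
print(int(dilated.sum()))        # 13 pixels after two dilation passes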
def roc_curves(self, prob_thresholds):
"""
Generate ROC Curve objects for each machine learning model, size threshold, and time window.
:param prob_thresholds: Probability thresholds for the ROC Curve
:return: a dictionary of DistributedROC objects.
"""
all_roc_curves = {}
for model_name in self.model_names:
all_roc_curves[model_name] = {}
for size_threshold in self.size_thresholds:
all_roc_curves[model_name][size_threshold] = {}
for h, hour_window in enumerate(self.hour_windows):
hour_range = (hour_window.start, hour_window.stop)
all_roc_curves[model_name][size_threshold][hour_range] = \
DistributedROC(prob_thresholds, 1)
if self.obs_mask:
all_roc_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h][
self.window_obs[self.mask_variable][h] > 0],
self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
)
else:
all_roc_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h],
self.dilated_obs[size_threshold][h]
)
return all_roc_curves
|
Generate ROC Curve objects for each machine learning model, size threshold, and time window.
:param prob_thresholds: Probability thresholds for the ROC Curve
:return: a dictionary of DistributedROC objects.
|
entailment
|
def reliability_curves(self, prob_thresholds):
"""
Output reliability curves for each machine learning model, size threshold, and time window.
:param prob_thresholds:
:return:
"""
all_rel_curves = {}
for model_name in self.model_names:
all_rel_curves[model_name] = {}
for size_threshold in self.size_thresholds:
all_rel_curves[model_name][size_threshold] = {}
for h, hour_window in enumerate(self.hour_windows):
hour_range = (hour_window.start, hour_window.stop)
all_rel_curves[model_name][size_threshold][hour_range] = \
DistributedReliability(prob_thresholds, 1)
if self.obs_mask:
all_rel_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h][
self.window_obs[self.mask_variable][h] > 0],
self.dilated_obs[size_threshold][h][self.window_obs[self.mask_variable][h] > 0]
)
else:
all_rel_curves[model_name][size_threshold][hour_range].update(
self.window_forecasts[model_name][size_threshold][h],
self.dilated_obs[size_threshold][h]
)
return all_rel_curves
|
Output reliability curves for each machine learning model, size threshold, and time window.
:param prob_thresholds:
:return:
|
entailment
|
def load_map_coordinates(map_file):
"""
Loads map coordinates from netCDF or pickle file created by util.makeMapGrids.
Args:
map_file: Filename for the file containing coordinate information.
Returns:
Latitude and longitude grids as numpy arrays.
"""
if map_file[-4:] == ".pkl":
        with open(map_file, "rb") as map_file_obj:
            map_data = pickle.load(map_file_obj)
        lon = map_data['lon']
        lat = map_data['lat']
else:
map_data = Dataset(map_file)
if "lon" in map_data.variables.keys():
lon = map_data.variables['lon'][:]
lat = map_data.variables['lat'][:]
else:
lon = map_data.variables["XLONG"][0]
lat = map_data.variables["XLAT"][0]
return lon, lat
|
Loads map coordinates from netCDF or pickle file created by util.makeMapGrids.
Args:
map_file: Filename for the file containing coordinate information.
Returns:
Latitude and longitude grids as numpy arrays.
|
entailment
|
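A self-contained demonstration of the pickle branch above: write a small coordinate dictionary to a temporary file, then read it back the way `load_map_coordinates` does. The coordinate values are invented; the netCDF branch is analogous but needs a real file, and note that pickle files must be opened in binary mode on Python 3.

import os
import pickle
import tempfile
import numpy as np

lon_grid, lat_grid = np.meshgrid(np.linspace(-105, -95, 5), np.linspace(35, 40, 4))
map_file = os.path.join(tempfile.gettempdir(), "demo_map.pkl")
with open(map_file, "wb") as out_obj:
    pickle.dump({"lon": lon_grid, "lat": lat_grid}, out_obj)

with open(map_file, "rb") as in_obj:     # binary mode, as pickle requires
    map_data = pickle.load(in_obj)
print(map_data["lon"].shape, map_data["lat"].shape)  # (4, 5) (4, 5)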
def interpolate_mrms_day(start_date, variable, interp_type, mrms_path, map_filename, out_path):
"""
For a given day, this module interpolates hourly MRMS data to a specified latitude and
longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.
Args:
start_date (datetime.datetime): Date of data being interpolated
variable (str): MRMS variable
interp_type (str): Whether to use maximum neighbor or spline
mrms_path (str): Path to top-level directory of MRMS GRIB2 files
map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files containing latitude
and longitude variables
out_path (str): Path to location where interpolated netCDF4 files are saved.
"""
try:
print(start_date, variable)
end_date = start_date + timedelta(hours=23)
mrms = MRMSGrid(start_date, end_date, variable, mrms_path)
if mrms.data is not None:
if map_filename[-3:] == "map":
mapping_data = make_proj_grids(*read_arps_map_file(map_filename))
mrms.interpolate_to_netcdf(mapping_data['lon'], mapping_data['lat'], out_path, interp_type=interp_type)
elif map_filename[-3:] == "txt":
mapping_data = make_proj_grids(*read_ncar_map_file(map_filename))
mrms.interpolate_to_netcdf(mapping_data["lon"], mapping_data["lat"], out_path, interp_type=interp_type)
else:
lon, lat = load_map_coordinates(map_filename)
mrms.interpolate_to_netcdf(lon, lat, out_path, interp_type=interp_type)
except Exception as e:
# This exception catches any errors when run in multiprocessing, prints the stack trace,
# and ends the process. Otherwise the process will stall.
print(traceback.format_exc())
raise e
|
For a given day, this module interpolates hourly MRMS data to a specified latitude and
longitude grid, and saves the interpolated grids to CF-compliant netCDF4 files.
Args:
start_date (datetime.datetime): Date of data being interpolated
variable (str): MRMS variable
interp_type (str): Whether to use maximum neighbor or spline
mrms_path (str): Path to top-level directory of MRMS GRIB2 files
map_filename (str): Name of the map filename. Supports ARPS map file format and netCDF files containing latitude
and longitude variables
out_path (str): Path to location where interpolated netCDF4 files are saved.
|
entailment
|
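The try/except in `interpolate_mrms_day` exists because an exception raised inside a multiprocessing worker otherwise surfaces late (or not at all) and can stall the pool. A minimal sketch of that pattern with a dummy worker; the real call would pass the run date, variable, interpolation type, and paths.

import traceback
from multiprocessing import Pool

def worker(day):
    try:
        if day == 2:
            raise ValueError("bad input for day {0}".format(day))
        return day * 10
    except Exception as e:
        # Print the full stack trace from inside the worker, then re-raise so the
        # parent process still sees the failure instead of a silently stalled task.
        print(traceback.format_exc())
        raise e

if __name__ == "__main__":
    pool = Pool(2)
    results = [pool.apply_async(worker, (d,)) for d in range(4)]
    pool.close()
    pool.join()
    for res in results:
        try:
            print(res.get())
        except ValueError as err:
            print("worker failed:", err)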