| sentence1 (string, 52 to 3.87M chars) | sentence2 (string, 1 to 47.2k chars) | label (1 class) |
|---|---|---|
def __clear_covers(self):
"""Clear all covered matrix cells"""
for i in range(self.n):
self.row_covered[i] = False
self.col_covered[i] = False
|
Clear all covered matrix cells
|
entailment
|
def __erase_primes(self):
"""Erase all prime markings"""
for i in range(self.n):
for j in range(self.n):
if self.marked[i][j] == 2:
self.marked[i][j] = 0
|
Erase all prime markings
|
entailment
|
def update(self, a, b, c, d):
"""
Update contingency table with new values without creating a new object.
"""
self.table.ravel()[:] = [a, b, c, d]
self.N = self.table.sum()
|
Update contingency table with new values without creating a new object.
|
entailment
|
def bias(self):
"""
Frequency Bias.
Formula: (a+b)/(a+c)"""
return (self.table[0, 0] + self.table[0, 1]) / (self.table[0, 0] + self.table[1, 0])
|
Frequency Bias.
Formula: (a+b)/(a+c)
|
entailment
|
def csi(self):
"""Gilbert's Score or Threat Score or Critical Success Index a/(a+b+c)"""
return self.table[0, 0] / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0])
|
Gilbert's Score or Threat Score or Critical Success Index a/(a+b+c)
|
entailment
|
def ets(self):
"""Equitable Threat Score, Gilbert Skill Score, v, (a - R)/(a + b + c - R), R=(a+b)(a+c)/N"""
r = (self.table[0, 0] + self.table[0, 1]) * (self.table[0, 0] + self.table[1, 0]) / self.N
return (self.table[0, 0] - r) / (self.table[0, 0] + self.table[0, 1] + self.table[1, 0] - r)
|
Equitable Threat Score, Gilbert Skill Score, v, (a - R)/(a + b + c - R), R=(a+b)(a+c)/N
|
entailment
|
def hss(self):
"""Doolittle (Heidke) Skill Score. 2(ad-bc)/((a+b)(b+d) + (a+c)(c+d))"""
return 2 * (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / (
(self.table[0, 0] + self.table[0, 1]) * (self.table[0, 1] + self.table[1, 1]) +
(self.table[0, 0] + self.table[1, 0]) * (self.table[1, 0] + self.table[1, 1]))
|
Doolittle (Heidke) Skill Score. 2(ad-bc)/((a+b)(b+d) + (a+c)(c+d))
|
entailment
|
def pss(self):
"""Peirce (Hansen-Kuipers, True) Skill Score (ad - bc)/((a+c)(b+d))"""
return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \
((self.table[0, 0] + self.table[1, 0]) * (self.table[0, 1] + self.table[1, 1]))
|
Peirce (Hansen-Kuipers, True) Skill Score (ad - bc)/((a+c)(b+d))
|
entailment
|
def css(self):
"""Clayton Skill Score (ad - bc)/((a+b)(c+d))"""
return (self.table[0, 0] * self.table[1, 1] - self.table[0, 1] * self.table[1, 0]) / \
((self.table[0, 0] + self.table[0, 1]) * (self.table[1, 0] + self.table[1, 1]))
|
Clayton Skill Score (ad - bc)/((a+b)(c+d))
|
entailment
|
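The six scores above all read off the same 2x2 contingency table. A minimal standalone sketch (not the library's class; the [[a, b], [c, d]] layout and the sample counts are illustrative) showing the formulas side by side:

import numpy as np

# Contingency table laid out as [[a, b], [c, d]]
# = [[hits, false alarms], [misses, correct negatives]],
# matching the indexing used by the methods above.
table = np.array([[50.0, 10.0],
                  [5.0, 100.0]])
a, b, c, d = table.ravel()
n = table.sum()

bias = (a + b) / (a + c)                       # frequency bias
csi = a / (a + b + c)                          # critical success index
r = (a + b) * (a + c) / n                      # hits expected by chance
ets = (a - r) / (a + b + c - r)                # equitable threat score
hss = 2 * (a * d - b * c) / ((a + b) * (b + d) + (a + c) * (c + d))
pss = (a * d - b * c) / ((a + c) * (b + d))    # Peirce skill score
css = (a * d - b * c) / ((a + b) * (c + d))    # Clayton skill score
print(bias, csi, ets, hss, pss, css)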
def load_tree_object(filename):
"""
Load scikit-learn decision tree ensemble object from file.
Parameters
----------
filename : str
Name of the pickle file containing the tree object.
Returns
-------
tree ensemble object
"""
    # Pickled objects must be read in binary mode.
    with open(filename, "rb") as file_obj:
        tree_ensemble_obj = pickle.load(file_obj)
return tree_ensemble_obj
|
Load scikit-learn decision tree ensemble object from file.
Parameters
----------
filename : str
Name of the pickle file containing the tree object.
Returns
-------
tree ensemble object
|
entailment
|
def output_tree_ensemble(tree_ensemble_obj, output_filename, attribute_names=None):
"""
Write each decision tree in an ensemble to a file.
Parameters
----------
tree_ensemble_obj : sklearn.ensemble object
Random Forest or Gradient Boosted Regression object
output_filename : str
File where trees are written
attribute_names : list
List of attribute names to be used in place of indices if available.
"""
for t, tree in enumerate(tree_ensemble_obj.estimators_):
print("Writing Tree {0:d}".format(t))
        out_file = open(output_filename + ".{0:d}.tree".format(t), "w")
#out_file.write("Tree {0:d}\n".format(t))
tree_str = print_tree_recursive(tree.tree_, 0, attribute_names)
out_file.write(tree_str)
#out_file.write("\n")
out_file.close()
return
|
Write each decision tree in an ensemble to a file.
Parameters
----------
tree_ensemble_obj : sklearn.ensemble object
Random Forest or Gradient Boosted Regression object
output_filename : str
File where trees are written
attribute_names : list
List of attribute names to be used in place of indices if available.
|
entailment
|
def print_tree_recursive(tree_obj, node_index, attribute_names=None):
"""
Recursively writes a string representation of a decision tree object.
Parameters
----------
tree_obj : sklearn.tree._tree.Tree object
A base decision tree object
node_index : int
Index of the node being printed
attribute_names : list
List of attribute names
Returns
-------
tree_str : str
String representation of decision tree in the same format as the parf library.
"""
tree_str = ""
if node_index == 0:
tree_str += "{0:d}\n".format(tree_obj.node_count)
if tree_obj.feature[node_index] >= 0:
if attribute_names is None:
attr_val = "{0:d}".format(tree_obj.feature[node_index])
else:
attr_val = attribute_names[tree_obj.feature[node_index]]
tree_str += "b {0:d} {1} {2:0.4f} {3:d} {4:1.5e}\n".format(node_index,
attr_val,
tree_obj.weighted_n_node_samples[node_index],
tree_obj.n_node_samples[node_index],
tree_obj.threshold[node_index])
else:
if tree_obj.max_n_classes > 1:
leaf_value = "{0:d}".format(tree_obj.value[node_index].argmax())
else:
leaf_value = "{0}".format(tree_obj.value[node_index][0][0])
tree_str += "l {0:d} {1} {2:0.4f} {3:d}\n".format(node_index,
leaf_value,
tree_obj.weighted_n_node_samples[node_index],
tree_obj.n_node_samples[node_index])
if tree_obj.children_left[node_index] > 0:
tree_str += print_tree_recursive(tree_obj, tree_obj.children_left[node_index], attribute_names)
if tree_obj.children_right[node_index] > 0:
tree_str += print_tree_recursive(tree_obj, tree_obj.children_right[node_index], attribute_names)
return tree_str
|
Recursively writes a string representation of a decision tree object.
Parameters
----------
tree_obj : sklearn.tree._tree.Tree object
A base decision tree object
node_index : int
Index of the node being printed
attribute_names : list
List of attribute names
Returns
-------
tree_str : str
String representation of decision tree in the same format as the parf library.
|
entailment
|
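A hypothetical end-to-end use of the three helpers above (assuming they are importable alongside scikit-learn): fit a small forest, pickle it in binary mode, reload it with load_tree_object, and dump every tree with output_tree_ensemble. The file names and feature names here are illustrative.

import pickle
import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.random((200, 4))
y = (X[:, 0] > 0.5).astype(int)
forest = RandomForestClassifier(n_estimators=3, max_depth=3, random_state=0).fit(X, y)

# Pickle in binary mode so load_tree_object can read it back.
with open("forest.pkl", "wb") as pkl_file:
    pickle.dump(forest, pkl_file)

tree_ensemble = load_tree_object("forest.pkl")
output_tree_ensemble(tree_ensemble, "forest_out",
                     attribute_names=["f0", "f1", "f2", "f3"])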
def set_classifier_mask(self, v, base_mask=True):
"""Computes the mask used to create the training and validation set"""
base = self._base
v = tonparray(v)
a = np.unique(v)
if a[0] != -1 or a[1] != 1:
raise RuntimeError("The labels must be -1 and 1 (%s)" % a)
mask = np.zeros_like(v)
cnt = min([(v == x).sum() for x in a]) * base._tr_fraction
cnt = int(round(cnt))
for i in a:
index = np.where((v == i) & base_mask)[0]
np.random.shuffle(index)
mask[index[:cnt]] = True
base._mask = SparseArray.fromlist(mask)
return SparseArray.fromlist(v)
|
Computes the mask used to create the training and validation set
|
entailment
|
def set_regression_mask(self, v):
"""Computes the mask used to create the training and validation set"""
base = self._base
index = np.arange(v.size())
np.random.shuffle(index)
ones = np.ones(v.size())
ones[index[int(base._tr_fraction * v.size()):]] = 0
base._mask = SparseArray.fromlist(ones)
|
Computes the mask used to create the training and validation set
|
entailment
|
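Both mask builders above draw a training subset (class-balanced in the classifier case) and leave the remainder for validation. A minimal numpy-only sketch of the stratified classifier case, without the SparseArray wrapper (function name and defaults are illustrative):

import numpy as np

def stratified_training_mask(labels, tr_fraction=0.8, rng=np.random):
    # Boolean mask selecting the same number of training samples from each
    # class (labels must be -1 and 1), as in set_classifier_mask above.
    labels = np.asarray(labels)
    classes = np.unique(labels)
    if classes[0] != -1 or classes[1] != 1:
        raise RuntimeError("The labels must be -1 and 1 (%s)" % classes)
    mask = np.zeros(labels.shape, dtype=bool)
    cnt = int(round(min((labels == k).sum() for k in classes) * tr_fraction))
    for k in classes:
        index = np.where(labels == k)[0]
        rng.shuffle(index)
        mask[index[:cnt]] = True
    return mask

mask = stratified_training_mask(np.array([-1, -1, -1, 1, 1, 1, 1]))
print(mask.sum(), "training samples selected")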
def fitness(self, v):
"Fitness function in the training set"
base = self._base
if base._classifier:
if base._multiple_outputs:
hy = SparseArray.argmax(v.hy)
fit_func = base._fitness_function
if fit_func == 'macro-F1' or fit_func == 'a_F1':
f1_score = self.score
mf1, mf1_v = f1_score.a_F1(base._y_klass, hy, base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'DotF1' or fit_func == 'g_F1':
f1_score = self.score
mf1, mf1_v = f1_score.g_F1(base._y_klass, hy, base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'DotRecallDotPrecision' or fit_func == 'g_g_recall_precision':
f1_score = self.score
mf1, mf1_v = f1_score.g_g_recall_precision(base._y_klass, hy,
base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'BER' or fit_func == 'a_recall':
f1_score = self.score
mf1, mf1_v = f1_score.a_recall(base._y_klass, hy, base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'DotRecall' or fit_func == 'g_recall':
f1_score = self.score
mf1, mf1_v = f1_score.g_recall(base._y_klass, hy,
base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'macro-Precision' or fit_func == 'a_precision':
f1_score = self.score
mf1, mf1_v = f1_score.a_precision(base._y_klass, hy,
base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'DotPrecision' or fit_func == 'g_precision':
f1_score = self.score
mf1, mf1_v = f1_score.g_precision(base._y_klass, hy,
base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'accDotMacroF1':
f1_score = self.score
mf1, mf1_v = f1_score.accDotMacroF1(base._y_klass, hy,
base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'macro-RecallF1':
f1_score = self.score
mf1, mf1_v = f1_score.macroRecallF1(base._y_klass, hy,
base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'F1':
f1_score = self.score
f1_index = self._base._F1_index
index = self.min_class if f1_index < 0 else f1_index
mf1, mf1_v = f1_score.F1(index, base._y_klass,
hy, base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'RecallDotPrecision' or fit_func == 'g_recall_precision':
f1_score = self.score
mf1, mf1_v = f1_score.g_recall_precision(self.min_class,
base._y_klass,
hy, base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
elif fit_func == 'ER' or fit_func == 'accuracy':
f1_score = self.score
mf1, mf1_v = f1_score.accuracy(base._y_klass,
hy, base._mask_ts.index)
v._error = mf1_v - 1
v.fitness = mf1 - 1
else:
raise RuntimeError('Unknown fitness function %s' % base._fitness_function)
else:
v.fitness = -base._ytr.SSE(v.hy * base._mask)
else:
if base._multiple_outputs:
_ = np.mean([a.SAE(b.mul(c)) for a, b, c in zip(base._ytr, v.hy, base._mask)])
v.fitness = - _
else:
v.fitness = -base._ytr.SAE(v.hy * base._mask)
|
Fitness function in the training set
|
entailment
|
def fitness_vs(self, v):
"""Fitness function in the validation set
In classification it uses BER and RSE in regression"""
base = self._base
if base._classifier:
if base._multiple_outputs:
v.fitness_vs = v._error
# if base._fitness_function == 'macro-F1':
# v.fitness_vs = v._error
# elif base._fitness_function == 'BER':
# v.fitness_vs = v._error
# elif base._fitness_function == 'macro-Precision':
# v.fitness_vs = v._error
# elif base._fitness_function == 'accDotMacroF1':
# v.fitness_vs = v._error
# elif base._fitness_function == 'macro-RecallF1':
# v.fitness_vs = v._error
# elif base._fitness_function == 'F1':
# v.fitness_vs = v._error
# else:
# v.fitness_vs = - v._error.dot(base._mask_vs) / base._mask_vs.sum()
else:
v.fitness_vs = -((base.y - v.hy.sign()).sign().fabs() *
base._mask_vs).sum()
else:
mask = base._mask
y = base.y
hy = v.hy
if not isinstance(mask, list):
mask = [mask]
y = [y]
hy = [hy]
fit = []
for _mask, _y, _hy in zip(mask, y, hy):
m = (_mask + -1).fabs()
x = _y * m
y = _hy * m
a = (x - y).sq().sum()
b = (x + -x.sum() / x.size()).sq().sum()
fit.append(-a / b)
v.fitness_vs = np.mean(fit)
|
Fitness function in the validation set.
Uses BER in classification and RSE in regression
|
entailment
|
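In the regression branch of fitness_vs above, (mask + -1).fabs() flips the training mask into a validation mask, and the fitness is the negative relative squared error on those held-out points. A plain numpy sketch of the same quantity (names and sample values are illustrative):

import numpy as np

def validation_rse_fitness(y, hy, training_mask):
    # Validation points are the complement of the training mask.
    vs = ~np.asarray(training_mask, dtype=bool)
    y_vs = np.asarray(y, dtype=float) * vs
    hy_vs = np.asarray(hy, dtype=float) * vs
    sse = ((y_vs - hy_vs) ** 2).sum()
    # Note: as in the code above, the mean is taken over the full vector length.
    sst = ((y_vs - y_vs.sum() / y_vs.size) ** 2).sum()
    return -sse / sst

print(validation_rse_fitness([1.0, 2.0, 3.0, 4.0],
                             [1.1, 2.2, 2.8, 3.5],
                             [True, True, False, False]))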
def set_fitness(self, v):
"""Set the fitness to a new node.
Returns false in case fitness is not finite"""
base = self._base
self.fitness(v)
if not np.isfinite(v.fitness):
self.del_error(v)
return False
if base._tr_fraction < 1:
self.fitness_vs(v)
if not np.isfinite(v.fitness_vs):
self.del_error(v)
return False
self.del_error(v)
return True
|
Set the fitness to a new node.
Returns false in case fitness is not finite
|
entailment
|
def analisar(retorno):
"""Builds a :class:`RespostaCancelarUltimaVenda` from the given
return value.
:param unicode retorno: Return value of the ``CancelarUltimaVenda`` function.
"""
resposta = analisar_retorno(forcar_unicode(retorno),
funcao='EnviarDadosVenda',
classe_resposta=RespostaCancelarUltimaVenda,
campos=(
('numeroSessao', int),
('EEEEE', unicode),
('CCCC', unicode),
('mensagem', unicode),
('cod', unicode),
('mensagemSEFAZ', unicode),
('arquivoCFeBase64', unicode),
('timeStamp', as_datetime),
('chaveConsulta', unicode),
('valorTotalCFe', Decimal),
('CPFCNPJValue', unicode),
('assinaturaQRCODE', unicode),
),
campos_alternativos=[
# if the sale fails, only the first six fields
# specified in the ER should be returned...
(
('numeroSessao', int),
('EEEEE', unicode),
('CCCC', unicode),
('mensagem', unicode),
('cod', unicode),
('mensagemSEFAZ', unicode),
),
# just in case, fall back to the default set of fields
# when nothing else matches...
RespostaSAT.CAMPOS,
]
)
if resposta.EEEEE not in ('07000',):
raise ExcecaoRespostaSAT(resposta)
return resposta
|
Builds a :class:`RespostaCancelarUltimaVenda` from the given
return value.
:param unicode retorno: Return value of the ``CancelarUltimaVenda`` function.
|
entailment
|
def convert_data_element_to_data_and_metadata_1(data_element) -> DataAndMetadata.DataAndMetadata:
"""Convert a data element to xdata. No data copying occurs.
The data element can have the following keys:
data (required)
is_sequence, collection_dimension_count, datum_dimension_count (optional description of the data)
spatial_calibrations (optional list of spatial calibration dicts, scale, offset, units)
intensity_calibration (optional intensity calibration dict, scale, offset, units)
metadata (optional)
properties (get stored into metadata.hardware_source)
one of either timestamp or datetime_modified
if datetime_modified (dst, tz) it is converted and used as timestamp
then timezone gets stored into metadata.description.timezone.
"""
# data. takes ownership.
data = data_element["data"]
dimensional_shape = Image.dimensional_shape_from_data(data)
is_sequence = data_element.get("is_sequence", False)
dimension_count = len(Image.dimensional_shape_from_data(data))
adjusted_dimension_count = dimension_count - (1 if is_sequence else 0)
collection_dimension_count = data_element.get("collection_dimension_count", 2 if adjusted_dimension_count in (3, 4) else 0)
datum_dimension_count = data_element.get("datum_dimension_count", adjusted_dimension_count - collection_dimension_count)
data_descriptor = DataAndMetadata.DataDescriptor(is_sequence, collection_dimension_count, datum_dimension_count)
# dimensional calibrations
dimensional_calibrations = None
if "spatial_calibrations" in data_element:
dimensional_calibrations_list = data_element.get("spatial_calibrations")
if len(dimensional_calibrations_list) == len(dimensional_shape):
dimensional_calibrations = list()
for dimension_calibration in dimensional_calibrations_list:
offset = float(dimension_calibration.get("offset", 0.0))
scale = float(dimension_calibration.get("scale", 1.0))
units = dimension_calibration.get("units", "")
units = str(units) if units is not None else str()
if scale != 0.0:
dimensional_calibrations.append(Calibration.Calibration(offset, scale, units))
else:
dimensional_calibrations.append(Calibration.Calibration())
# intensity calibration
intensity_calibration = None
if "intensity_calibration" in data_element:
intensity_calibration_dict = data_element.get("intensity_calibration")
offset = float(intensity_calibration_dict.get("offset", 0.0))
scale = float(intensity_calibration_dict.get("scale", 1.0))
units = intensity_calibration_dict.get("units", "")
units = str(units) if units is not None else str()
if scale != 0.0:
intensity_calibration = Calibration.Calibration(offset, scale, units)
# properties (general tags)
metadata = dict()
if "metadata" in data_element:
metadata.update(Utility.clean_dict(data_element.get("metadata")))
if "properties" in data_element and data_element["properties"]:
hardware_source_metadata = metadata.setdefault("hardware_source", dict())
hardware_source_metadata.update(Utility.clean_dict(data_element.get("properties")))
# dates are _local_ time and must use this specific ISO 8601 format. 2013-11-17T08:43:21.389391
# time zones are offsets (east of UTC) in the following format "+HHMM" or "-HHMM"
# daylight savings times are time offset (east of UTC) in format "+MM" or "-MM"
# timezone is for conversion and is the Olson timezone string.
# datetime.datetime.strptime(datetime.datetime.isoformat(datetime.datetime.now()), "%Y-%m-%dT%H:%M:%S.%f" )
# datetime_modified, datetime_modified_tz, datetime_modified_dst, datetime_modified_tzname is the time at which this image was modified.
# datetime_original, datetime_original_tz, datetime_original_dst, datetime_original_tzname is the time at which this image was created.
timestamp = data_element.get("timestamp", datetime.datetime.utcnow())
datetime_item = data_element.get("datetime_modified", Utility.get_datetime_item_from_utc_datetime(timestamp))
local_datetime = Utility.get_datetime_from_datetime_item(datetime_item)
dst_value = datetime_item.get("dst", "+00")
tz_value = datetime_item.get("tz", "+0000")
timezone = datetime_item.get("timezone")
time_zone = { "dst": dst_value, "tz": tz_value}
if timezone is not None:
time_zone["timezone"] = timezone
# note: dst is informational only; tz already include dst
tz_adjust = (int(tz_value[1:3]) * 60 + int(tz_value[3:5])) * (-1 if tz_value[0] == '-' else 1)
utc_datetime = local_datetime - datetime.timedelta(minutes=tz_adjust) # tz_adjust already contains dst_adjust
timestamp = utc_datetime
return DataAndMetadata.new_data_and_metadata(data,
intensity_calibration=intensity_calibration,
dimensional_calibrations=dimensional_calibrations,
metadata=metadata,
timestamp=timestamp,
data_descriptor=data_descriptor,
timezone=timezone,
timezone_offset=tz_value)
|
Convert a data element to xdata. No data copying occurs.
The data element can have the following keys:
data (required)
is_sequence, collection_dimension_count, datum_dimension_count (optional description of the data)
spatial_calibrations (optional list of spatial calibration dicts, scale, offset, units)
intensity_calibration (optional intensity calibration dict, scale, offset, units)
metadata (optional)
properties (get stored into metadata.hardware_source)
one of either timestamp or datetime_modified
if datetime_modified (dst, tz) it is converted and used as timestamp
then timezone gets stored into metadata.description.timezone.
|
entailment
|
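The timestamp handling at the end of the converter above reduces to subtracting the "+HHMM"/"-HHMM" east-of-UTC offset from the local time. A standard-library-only sketch of that adjustment (the sample date is illustrative):

import datetime

def local_to_utc(local_datetime, tz_value="+0000"):
    # Convert a local timestamp to UTC using an east-of-UTC offset string
    # formatted "+HHMM"/"-HHMM", as in the converter above.
    minutes = int(tz_value[1:3]) * 60 + int(tz_value[3:5])
    tz_adjust = minutes * (-1 if tz_value[0] == "-" else 1)
    return local_datetime - datetime.timedelta(minutes=tz_adjust)

print(local_to_utc(datetime.datetime(2013, 11, 17, 8, 43, 21), "-0500"))
# -> 2013-11-17 13:43:21 (UTC)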
def output_sector_csv(self,csv_path,file_dict_key,out_path):
"""
Segment forecast tracks to only output data contained within a
region in the CONUS, as defined by the mapfile.
Args:
csv_path(str): Path to the full CONUS csv file.
file_dict_key(str): Dictionary key for the csv files,
currently either 'track_step' or 'track_total'
out_path (str): Path to output new segmented csv files.
Returns:
Segmented forecast tracks in a csv file.
"""
csv_file = csv_path + "{0}_{1}_{2}_{3}.csv".format(
file_dict_key,
self.ensemble_name,
self.member,
self.run_date.strftime(self.date_format))
if exists(csv_file):
csv_data = pd.read_csv(csv_file)
if self.inds is None:
lon_obj = csv_data.loc[:,"Centroid_Lon"]
lat_obj = csv_data.loc[:,"Centroid_Lat"]
self.inds = np.where((self.ne_lat>=lat_obj)&(self.sw_lat<=lat_obj)\
&(self.ne_lon>=lon_obj)&(self.sw_lon<=lon_obj))[0]
if np.shape(self.inds)[0] > 0:
csv_data = csv_data.reindex(np.array(self.inds))
sector_csv_filename = out_path + "{0}_{1}_{2}_{3}.csv".format(
file_dict_key,
self.ensemble_name,
self.member,
self.run_date.strftime(self.date_format))
print("Output sector csv file " + sector_csv_filename)
csv_data.to_csv(sector_csv_filename,
na_rep="nan",
float_format="%0.5f",
index=False)
os.chmod(sector_csv_filename, 0o666)
else:
print('No {0} {1} sector data found'.format(self.member,
self.run_date.strftime("%Y%m%d")))
else:
print('No {0} {1} csv file found'.format(self.member,
self.run_date.strftime("%Y%m%d")))
return
|
Segment forecast tracks to only output data contained within a
region in the CONUS, as defined by the mapfile.
Args:
csv_path(str): Path to the full CONUS csv file.
file_dict_key(str): Dictionary key for the csv files,
currently either 'track_step' or 'track_total'
out_path (str): Path to output new segmented csv files.
Returns:
Segmented forecast tracks in a csv file.
|
entailment
|
def output_sector_netcdf(self,netcdf_path,out_path,patch_radius,config):
"""
Segment patches of forecast tracks to only output data contained within a
region in the CONUS, as defined by the mapfile.
Args:
netcdf_path (str): Path to the full CONUS netcdf patch file.
out_path (str): Path to output new segmented netcdf files.
patch_radius (int): Size of the patch radius.
config (dict): Dictionary containing information about data and
ML variables
Returns:
Segmented patch netcdf files.
"""
nc_data = self.load_netcdf_data(netcdf_path,patch_radius)
if nc_data is not None:
out_filename = out_path + "{0}_{1}_{2}_model_patches.nc".format(
self.ensemble_name,
self.run_date.strftime(self.date_format),
self.member)
out_file = Dataset(out_filename, "w")
out_file.createDimension("p", np.shape(nc_data.variables['p'])[0])
out_file.createDimension("row", np.shape(nc_data.variables['row'])[0])
out_file.createDimension("col", np.shape(nc_data.variables['col'])[0])
out_file.createVariable("p", "i4", ("p",))
out_file.createVariable("row", "i4", ("row",))
out_file.createVariable("col", "i4", ("col",))
out_file.variables["p"][:] = nc_data.variables['p'][:]
out_file.variables["row"][:] = nc_data.variables['row'][:]
out_file.variables["col"][:] = nc_data.variables['col'][:]
out_file.Conventions = "CF-1.6"
out_file.title = "{0} Storm Patches for run {1} member {2}".format(self.ensemble_name,
self.run_date.strftime(self.date_format),
self.member)
out_file.object_variable = config.watershed_variable
meta_variables = ["lon", "lat", "i", "j", "x", "y", "masks"]
meta_units = ["degrees_east", "degrees_north", "", "", "m", "m", ""]
center_vars = ["time", "centroid_lon", "centroid_lat", "centroid_i", "centroid_j", "track_id", "track_step"]
center_units = ["hours since {0}".format(self.run_date.strftime("%Y-%m-%d %H:%M:%S")),
"degrees_east",
"degrees_north",
"",
"",
"",
""]
label_columns = ["Matched", "Max_Hail_Size", "Num_Matches", "Shape", "Location", "Scale"]
for m, meta_variable in enumerate(meta_variables):
if meta_variable in ["i", "j", "masks"]:
dtype = "i4"
else:
dtype = "f4"
m_var = out_file.createVariable(meta_variable, dtype, ("p", "row", "col"), complevel=1, zlib=True)
m_var.long_name = meta_variable
m_var.units = meta_units[m]
for c, center_var in enumerate(center_vars):
if center_var in ["time", "track_id", "track_step"]:
dtype = "i4"
else:
dtype = "f4"
c_var = out_file.createVariable(center_var, dtype, ("p",), zlib=True, complevel=1)
c_var.long_name = center_var
c_var.units =center_units[c]
for storm_variable in config.storm_variables:
s_var = out_file.createVariable(storm_variable + "_curr", "f4", ("p", "row", "col"), complevel=1, zlib=True)
s_var.long_name = storm_variable
s_var.units = ""
for potential_variable in config.potential_variables:
p_var = out_file.createVariable(potential_variable + "_prev", "f4", ("p", "row", "col"),
complevel=1, zlib=True)
p_var.long_name = potential_variable
p_var.units = ""
if config.train:
for label_column in label_columns:
if label_column in ["Matched", "Num_Matches"]:
dtype = "i4"
else:
dtype = "f4"
l_var = out_file.createVariable(label_column, dtype, ("p",), zlib=True, complevel=1)
l_var.long_name = label_column
l_var.units = ""
out_file.variables["time"][:] = nc_data.variables['time'][:]
for c_var in ["lon", "lat"]:
out_file.variables["centroid_" + c_var][:] = nc_data.variables['centroid_' + c_var][:]
for c_var in ["i", "j"]:
out_file.variables["centroid_" + c_var][:] = nc_data.variables["centroid_" + c_var][:]
out_file.variables["track_id"][:] = nc_data.variables['track_id'][:]
out_file.variables["track_step"][:] = nc_data.variables['track_step'][:]
for meta_var in meta_variables:
if meta_var in ["lon", "lat"]:
out_file.variables[meta_var][:] = nc_data.variables[meta_var][:]
else:
out_file.variables[meta_var][:] = nc_data.variables[meta_var][:]
for storm_variable in config.storm_variables:
out_file.variables[storm_variable + "_curr"][:] = nc_data.variables[storm_variable + '_curr'][:]
for p_variable in config.potential_variables:
out_file.variables[p_variable + "_prev"][:] = nc_data.variables[p_variable + '_prev'][:]
if config.train:
for label_column in label_columns:
try:
out_file.variables[label_column][:] = nc_data.variables[label_column][:]
except:
out_file.variables[label_column][:] = 0
out_file.close()
print("Output sector nc file " + out_filename)
else:
print('No {0} {1} netcdf file/sector data found'.format(self.member,
self.run_date.strftime("%Y%m%d")))
return
|
Segment patches of forecast tracks to only output data contained within a
region in the CONUS, as defined by the mapfile.
Args:
netcdf_path (str): Path to the full CONUS netcdf patch file.
out_path (str): Path to output new segmented netcdf files.
patch_radius (int): Size of the patch radius.
config (dict): Dictionary containing information about data and
ML variables
Returns:
Segmented patch netcdf files.
|
entailment
|
def clean_dict(d0, clean_item_fn=None):
"""
Return a json-clean dict. Will log info message for failures.
"""
clean_item_fn = clean_item_fn if clean_item_fn else clean_item
d = dict()
for key in d0:
cleaned_item = clean_item_fn(d0[key])
if cleaned_item is not None:
d[key] = cleaned_item
return d
|
Return a json-clean dict. Will log info message for failures.
|
entailment
|
def clean_list(l0, clean_item_fn=None):
"""
Return a json-clean list. Will log info message for failures.
"""
clean_item_fn = clean_item_fn if clean_item_fn else clean_item
l = list()
for index, item in enumerate(l0):
cleaned_item = clean_item_fn(item)
l.append(cleaned_item)
return l
|
Return a json-clean list. Will log info message for failures.
|
entailment
|
def clean_tuple(t0, clean_item_fn=None):
"""
Return a json-clean tuple. Will log info message for failures.
"""
clean_item_fn = clean_item_fn if clean_item_fn else clean_item
l = list()
for index, item in enumerate(t0):
cleaned_item = clean_item_fn(item)
l.append(cleaned_item)
return tuple(l)
|
Return a json-clean tuple. Will log info message for failures.
|
entailment
|
def clean_item(i):
"""
Return a json-clean item or None. Will log info message for failure.
"""
itype = type(i)
if itype == dict:
return clean_dict(i)
elif itype == list:
return clean_list(i)
elif itype == tuple:
return clean_tuple(i)
elif itype == numpy.float32:
return float(i)
elif itype == numpy.float64:
return float(i)
elif itype == numpy.int16:
return int(i)
elif itype == numpy.uint16:
return int(i)
elif itype == numpy.int32:
return int(i)
elif itype == numpy.uint32:
return int(i)
elif itype == numpy.int64:
return int(i)
elif itype == numpy.uint64:
return int(i)
elif itype == float:
return i
elif itype == str:
return i
elif itype == int:
return i
elif itype == bool:
return i
elif itype == type(None):
return i
logging.info("[1] Unable to handle type %s", itype)
import traceback
traceback.print_stack()
return None
|
Return a json-clean item or None. Will log info message for failure.
|
entailment
|
def clean_item_no_list(i):
"""
Return a json-clean item or None. Will log info message for failure.
"""
itype = type(i)
if itype == dict:
return clean_dict(i, clean_item_no_list)
elif itype == list:
return clean_tuple(i, clean_item_no_list)
elif itype == tuple:
return clean_tuple(i, clean_item_no_list)
elif itype == numpy.float32:
return float(i)
elif itype == numpy.float64:
return float(i)
elif itype == numpy.int16:
return int(i)
elif itype == numpy.uint16:
return int(i)
elif itype == numpy.int32:
return int(i)
elif itype == numpy.uint32:
return int(i)
elif itype == float:
return i
elif itype == str:
return i
elif itype == int:
return i
elif itype == bool:
return i
elif itype == type(None):
return i
logging.info("[2] Unable to handle type %s", itype)
return None
|
Return a json-clean item or None. Will log info message for failure.
|
entailment
|
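A hypothetical call showing what the clean_* helpers above produce for a dict that mixes numpy scalars, containers, and an unsupported value (assuming the functions above and numpy are importable; the keys and values are made up):

import numpy

raw = {
    "scale": numpy.float32(0.5),
    "shape": (numpy.int32(512), numpy.int32(512)),
    "tags": ["dark", numpy.uint16(3)],
    "handler": object(),   # unsupported type: logged and dropped by clean_dict
}
print(clean_dict(raw))
# expected: {'scale': 0.5, 'shape': (512, 512), 'tags': ['dark', 3]}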
def sample_stack_all(count=10, interval=0.1):
"""Sample the stack in a thread and print it at regular intervals."""
def print_stack_all(l, ll):
l1 = list()
l1.append("*** STACKTRACE - START ***")
code = []
for threadId, stack in sys._current_frames().items():
sub_code = []
sub_code.append("# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
sub_code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
sub_code.append(" %s" % (line.strip()))
if not "in select" in sub_code[-2] and \
not "in wait" in sub_code[-2] and \
not "in print_stack_all" in sub_code[-2] and \
not "in sample_stack_all" in sub_code[-2] and \
not "in checkcache" in sub_code[-2] and \
not "do_sleep" in sub_code[-2] and \
not "sleep" in sub_code[-1] and \
not any(["in do_sample" in s for s in sub_code]):
code.extend(sub_code)
for line in code:
l1.append(line)
l1.append("*** STACKTRACE - END ***")
with l:
ll.extend(l1)
def do_sample():
l = threading.RLock()
ll = list()
for i in range(count):
print_stack_all(l, ll)
time.sleep(interval)
with l:
print("\n".join(ll))
threading.Thread(target=do_sample).start()
|
Sample the stack in a thread and print it at regular intervals.
|
entailment
|
def decision_function(self, X):
"Decision function i.e. the raw data of the prediction"
self._X = Model.convert_features(X)
self._eval()
return self._ind[0].hy
|
Decision function i.e. the raw data of the prediction
|
entailment
|
def _eval(self):
"Evaluates a individual using recursion and self._pos as pointer"
pos = self._pos
self._pos += 1
node = self._ind[pos]
if isinstance(node, Function):
args = [self._eval() for x in range(node.nargs)]
node.eval(args)
for x in args:
x.hy = None
x.hy_test = None
else:
node.eval(self._X)
return node
|
Evaluates an individual using recursion and self._pos as pointer
|
entailment
|
def create_random_ind_full(self, depth=0):
"Random individual using full method"
lst = []
self._create_random_ind_full(depth=depth, output=lst)
return lst
|
Random individual using full method
|
entailment
|
def grow_use_function(self, depth=0):
"Select either function or terminal in grow method"
if depth == 0:
return False
if depth == self._depth:
return True
return np.random.random() < 0.5
|
Select either function or terminal in grow method
|
entailment
|
def create_random_ind_grow(self, depth=0):
"Random individual using grow method"
lst = []
self._depth = depth
self._create_random_ind_grow(depth=depth, output=lst)
return lst
|
Random individual using grow method
|
entailment
|
def create_population(self, popsize=1000, min_depth=2,
max_depth=4,
X=None):
"Creates random population using ramped half-and-half method"
import itertools
args = [x for x in itertools.product(range(min_depth,
max_depth+1),
[True, False])]
index = 0
output = []
while len(output) < popsize:
depth, full = args[index]
index += 1
if index >= len(args):
index = 0
if full:
ind = self.create_random_ind_full(depth=depth)
else:
ind = self.create_random_ind_grow(depth=depth)
flag = True
if X is not None:
x = Individual(ind)
x.decision_function(X)
flag = x.individual[0].isfinite()
l_vars = (flag, len(output), full, depth, len(ind))
l_str = " flag: %s len(output): %s full: %s depth: %s len(ind): %s"
self._logger.debug(l_str % l_vars)
if flag:
output.append(ind)
return output
|
Creates random population using ramped half-and-half method
|
entailment
|
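create_population above cycles through every (depth, full/grow) combination in turn, which is the ramped half-and-half schedule. A standalone sketch of just that scheduling logic (assuming every generated individual is accepted; the function name is illustrative):

import itertools

def ramped_schedule(popsize, min_depth=2, max_depth=4):
    # Cycle through (depth, full) pairs the way create_population does:
    # every depth in [min_depth, max_depth] alternates between the
    # full and grow initialisation methods.
    pairs = list(itertools.product(range(min_depth, max_depth + 1), [True, False]))
    return [pairs[i % len(pairs)] for i in range(popsize)]

print(ramped_schedule(8))
# -> [(2, True), (2, False), (3, True), (3, False), (4, True), (4, False), (2, True), (2, False)]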
def decision_function(self, X, **kwargs):
"Decision function i.e. the raw data of the prediction"
if X is None:
return self._hy_test
X = self.convert_features(X)
if len(X) < self.nvar:
_ = 'Number of variables differ, trained with %s given %s' % (self.nvar, len(X))
raise RuntimeError(_)
hist = self._hist
for node in hist:
if node.height:
node.eval(hist)
else:
node.eval(X)
node.normalize()
r = node.hy
for i in hist[:-1]:
i.hy = None
i.hy_test = None
gc.collect()
return r
|
Decision function i.e. the raw data of the prediction
|
entailment
|
def fitness_vs(self):
"Median Fitness in the validation set"
l = [x.fitness_vs for x in self.models]
return np.median(l)
|
Median Fitness in the validation set
|
entailment
|
def graphviz(self, directory, **kwargs):
"Directory to store the graphviz models"
import os
if not os.path.isdir(directory):
os.mkdir(directory)
output = os.path.join(directory, 'evodag-%s')
for k, m in enumerate(self.models):
m.graphviz(output % k, **kwargs)
|
Directory to store the graphviz models
|
entailment
|
def load_data(self, num_samples=1000, percentiles=None):
"""
Args:
num_samples: Number of random samples at each grid point
percentiles: Which percentiles to extract from the random samples
Returns:
"""
self.percentiles = percentiles
self.num_samples = num_samples
if self.model_name.lower() in ["wrf"]:
mo = ModelOutput(self.ensemble_name, self.member, self.run_date, self.variable,
self.start_date, self.end_date, self.path, self.map_file, self.single_step)
mo.load_data()
self.data = mo.data[:]
if mo.units == "m":
self.data *= 1000
self.units = "mm"
else:
self.units = mo.units
else:
if self.track_forecasts is None:
self.load_track_data()
self.units = "mm"
self.data = np.zeros((self.forecast_hours.size,
self.mapping_data["lon"].shape[0],
self.mapping_data["lon"].shape[1]), dtype=np.float32)
if self.percentiles is not None:
self.percentile_data = np.zeros([len(self.percentiles)] + list(self.data.shape))
full_condition_name = "condition_" + self.condition_model_name.replace(" ", "-")
dist_model_name = "dist" + "_" + self.model_name.replace(" ", "-")
for track_forecast in self.track_forecasts:
times = track_forecast["properties"]["times"]
for s, step in enumerate(track_forecast["features"]):
forecast_params = step["properties"][dist_model_name]
if self.condition_model_name is not None:
condition = step["properties"][full_condition_name]
else:
condition = None
forecast_time = self.run_date + timedelta(hours=times[s])
if forecast_time in self.times:
t = np.where(self.times == forecast_time)[0][0]
mask = np.array(step["properties"]["masks"], dtype=int).ravel()
rankings = np.argsort(np.array(step["properties"]["timesteps"]).ravel()[mask==1])
i = np.array(step["properties"]["i"], dtype=int).ravel()[mask == 1][rankings]
j = np.array(step["properties"]["j"], dtype=int).ravel()[mask == 1][rankings]
if rankings.size > 0 and forecast_params[0] > 0.1 and 1 < forecast_params[2] < 100:
raw_samples = np.sort(gamma.rvs(forecast_params[0], loc=forecast_params[1],
scale=forecast_params[2],
size=(num_samples, rankings.size)),
axis=1)
if self.percentiles is None:
samples = raw_samples.mean(axis=0)
if condition >= self.condition_threshold:
self.data[t, i, j] = samples
else:
for p, percentile in enumerate(self.percentiles):
if percentile != "mean":
if condition >= self.condition_threshold:
self.percentile_data[p, t, i, j] = np.percentile(raw_samples, percentile,
axis=0)
else:
if condition >= self.condition_threshold:
self.percentile_data[p, t, i, j] = np.mean(raw_samples, axis=0)
samples = raw_samples.mean(axis=0)
if condition >= self.condition_threshold:
self.data[t, i, j] = samples
|
Args:
num_samples: Number of random samples at each grid point
percentiles: Which percentiles to extract from the random samples
Returns:
|
entailment
|
def neighborhood_probability(self, threshold, radius):
"""
Calculate a probability based on the number of grid points in an area that exceed a threshold.
Args:
threshold:
radius:
Returns:
"""
weights = disk(radius, dtype=np.uint8)
thresh_data = np.zeros(self.data.shape[1:], dtype=np.uint8)
neighbor_prob = np.zeros(self.data.shape, dtype=np.float32)
for t in np.arange(self.data.shape[0]):
thresh_data[self.data[t] >= threshold] = 1
maximized = fftconvolve(thresh_data, weights, mode="same")
maximized[maximized > 1] = 1
maximized[maximized < 1] = 0
neighbor_prob[t] = fftconvolve(maximized, weights, mode="same")
thresh_data[:] = 0
neighbor_prob[neighbor_prob < 1] = 0
neighbor_prob /= weights.sum()
return neighbor_prob
|
Calculate a probability based on the number of grid points in an area that exceed a threshold.
Args:
threshold:
radius:
Returns:
|
entailment
|
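The neighborhood probability above is two disk convolutions: one to spread each exceedance over its radius, and one to count how much of each point's neighborhood is covered. A self-contained 2-D sketch of the same steps (assuming scipy and scikit-image are installed; the field and threshold are made up):

import numpy as np
from scipy.signal import fftconvolve
from skimage.morphology import disk

def neighborhood_probability_2d(field, threshold, radius):
    weights = disk(radius).astype(float)
    thresh = (field >= threshold).astype(float)
    # Spread every exceedance over the disk, then clip back to a binary field.
    maximized = fftconvolve(thresh, weights, mode="same")
    maximized[maximized > 1] = 1
    maximized[maximized < 1] = 0
    # Count coverage of each point's neighborhood and normalise by the disk area.
    prob = fftconvolve(maximized, weights, mode="same")
    prob[prob < 1] = 0
    return prob / weights.sum()

field = np.random.random((60, 60)) * 30.0
print(neighborhood_probability_2d(field, threshold=27.0, radius=3).max())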
def encode_grib2_percentile(self):
"""
Encodes member percentile data to GRIB2 format.
Returns:
Series of GRIB2 messages
"""
lscale = 1e6
grib_id_start = [7, 0, 14, 14, 2]
gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype=np.int32)
lon_0 = self.proj_dict["lon_0"]
sw_lon = self.grid_dict["sw_lon"]
if lon_0 < 0:
lon_0 += 360
if sw_lon < 0:
sw_lon += 360
gdtmp1 = [1, 0, self.proj_dict['a'], 0, float(self.proj_dict['a']), 0, float(self.proj_dict['b']),
self.data.shape[-1], self.data.shape[-2], self.grid_dict["sw_lat"] * lscale,
sw_lon * lscale, 0, self.proj_dict["lat_0"] * lscale,
lon_0 * lscale,
self.grid_dict["dx"] * 1e3, self.grid_dict["dy"] * 1e3, 0b00000000, 0b01000000,
self.proj_dict["lat_1"] * lscale,
self.proj_dict["lat_2"] * lscale, -90 * lscale, 0]
pdtmp1 = np.array([1, # parameter category Moisture
31, # parameter number Hail
4, # Type of generating process Ensemble Forecast
0, # Background generating process identifier
31, # Generating process or model from NCEP
0, # Hours after reference time data cutoff
0, # Minutes after reference time data cutoff
1, # Forecast time units Hours
0, # Forecast time
1, # Type of first fixed surface Ground
1, # Scale value of first fixed surface
0, # Value of first fixed surface
1, # Type of second fixed surface
1, # Scale value of 2nd fixed surface
0, # Value of 2nd fixed surface
0, # Derived forecast type
self.num_samples # Number of ensemble members
], dtype=np.int32)
grib_objects = pd.Series(index=self.times, data=[None] * self.times.size, dtype=object)
drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)
for t, time in enumerate(self.times):
time_list = list(self.run_date.utctimetuple()[0:6])
if grib_objects[time] is None:
grib_objects[time] = Grib2Encode(0, np.array(grib_id_start + time_list + [2, 1], dtype=np.int32))
grib_objects[time].addgrid(gdsinfo, gdtmp1)
pdtmp1[8] = (time.to_pydatetime() - self.run_date).total_seconds() / 3600.0
data = self.percentile_data[:, t] / 1000.0
masked_data = np.ma.array(data, mask=data <= 0)
for p, percentile in enumerate(self.percentiles):
print("GRIB {3} Percentile {0}. Max: {1} Min: {2}".format(percentile,
masked_data[p].max(),
masked_data[p].min(),
time))
if percentile in range(1, 100):
pdtmp1[-2] = percentile
grib_objects[time].addfield(6, pdtmp1[:-1], 0, drtmp1, masked_data[p])
else:
pdtmp1[-2] = 0
grib_objects[time].addfield(2, pdtmp1, 0, drtmp1, masked_data[p])
return grib_objects
|
Encodes member percentile data to GRIB2 format.
Returns:
Series of GRIB2 messages
|
entailment
|
def encode_grib2_data(self):
"""
Encodes member percentile data to GRIB2 format.
Returns:
Series of GRIB2 messages
"""
lscale = 1e6
grib_id_start = [7, 0, 14, 14, 2]
gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype=np.int32)
lon_0 = self.proj_dict["lon_0"]
sw_lon = self.grid_dict["sw_lon"]
if lon_0 < 0:
lon_0 += 360
if sw_lon < 0:
sw_lon += 360
gdtmp1 = [1, 0, self.proj_dict['a'], 0, float(self.proj_dict['a']), 0, float(self.proj_dict['b']),
self.data.shape[-1], self.data.shape[-2], self.grid_dict["sw_lat"] * lscale,
sw_lon * lscale, 0, self.proj_dict["lat_0"] * lscale,
lon_0 * lscale,
self.grid_dict["dx"] * 1e3, self.grid_dict["dy"] * 1e3, 0b00000000, 0b01000000,
self.proj_dict["lat_1"] * lscale,
self.proj_dict["lat_2"] * lscale, -90 * lscale, 0]
pdtmp1 = np.array([1, # parameter category Moisture
31, # parameter number Hail
4, # Type of generating process Ensemble Forecast
0, # Background generating process identifier
31, # Generating process or model from NCEP
0, # Hours after reference time data cutoff
0, # Minutes after reference time data cutoff
1, # Forecast time units Hours
0, # Forecast time
1, # Type of first fixed surface Ground
1, # Scale value of first fixed surface
0, # Value of first fixed surface
1, # Type of second fixed surface
1, # Scale value of 2nd fixed surface
0, # Value of 2nd fixed surface
0, # Derived forecast type
1 # Number of ensemble members
], dtype=np.int32)
grib_objects = pd.Series(index=self.times, data=[None] * self.times.size, dtype=object)
drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)
for t, time in enumerate(self.times):
time_list = list(self.run_date.utctimetuple()[0:6])
if grib_objects[time] is None:
grib_objects[time] = Grib2Encode(0, np.array(grib_id_start + time_list + [2, 1], dtype=np.int32))
grib_objects[time].addgrid(gdsinfo, gdtmp1)
pdtmp1[8] = (time.to_pydatetime() - self.run_date).total_seconds() / 3600.0
data = self.data[t] / 1000.0
data[np.isnan(data)] = 0
masked_data = np.ma.array(data, mask=data<=0)
pdtmp1[-2] = 0
grib_objects[time].addfield(1, pdtmp1, 0, drtmp1, masked_data)
return grib_objects
|
Encodes member percentile data to GRIB2 format.
Returns:
Series of GRIB2 messages
|
entailment
|
def load_data(self):
"""
Loads data from each ensemble member.
"""
for m, member in enumerate(self.members):
mo = ModelOutput(self.ensemble_name, member, self.run_date, self.variable,
self.start_date, self.end_date, self.path, self.map_file, self.single_step)
mo.load_data()
if self.data is None:
self.data = np.zeros((len(self.members), mo.data.shape[0], mo.data.shape[1], mo.data.shape[2]),
dtype=np.float32)
if mo.units == "m":
self.data[m] = mo.data * 1000
self.units = "mm"
else:
self.data[m] = mo.data
if self.units == "":
self.units = mo.units
del mo.data
del mo
|
Loads data from each ensemble member.
|
entailment
|
def point_consensus(self, consensus_type):
"""
Calculate grid-point statistics across ensemble members.
Args:
consensus_type: mean, std, median, max, or percentile_nn
Returns:
EnsembleConsensus containing point statistic
"""
if "mean" in consensus_type:
consensus_data = np.mean(self.data, axis=0)
elif "std" in consensus_type:
consensus_data = np.std(self.data, axis=0)
elif "median" in consensus_type:
consensus_data = np.median(self.data, axis=0)
elif "max" in consensus_type:
consensus_data = np.max(self.data, axis=0)
elif "percentile" in consensus_type:
percentile = int(consensus_type.split("_")[1])
consensus_data = np.percentile(self.data, percentile, axis=0)
else:
consensus_data = np.zeros(self.data.shape[1:])
consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name,
self.run_date, self.variable, self.start_date, self.end_date, self.units)
return consensus
|
Calculate grid-point statistics across ensemble members.
Args:
consensus_type: mean, std, median, max, or percentile_nn
Returns:
EnsembleConsensus containing point statistic
|
entailment
|
def point_probability(self, threshold):
"""
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at
that point.
Args:
threshold: If >= threshold assigns a 1 to member, otherwise 0.
Returns:
EnsembleConsensus
"""
point_prob = np.zeros(self.data.shape[1:])
for t in range(self.data.shape[1]):
point_prob[t] = np.where(self.data[:, t] >= threshold, 1.0, 0.0).mean(axis=0)
return EnsembleConsensus(point_prob, "point_probability", self.ensemble_name,
self.run_date, self.variable + "_{0:0.2f}_{1}".format(threshold,
self.units.replace(" ", "_")),
self.start_date, self.end_date, "")
|
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at
that point.
Args:
threshold: If >= threshold assigns a 1 to member, otherwise 0.
Returns:
EnsembleConsensus
|
entailment
|
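For a single forecast hour, point_probability above is just the member-wise exceedance fraction at each grid point. A tiny numpy illustration (values are made up):

import numpy as np

# 3 members on a 2 x 2 grid for one forecast hour (illustrative values, mm).
member_forecasts = np.array([[[10.0, 30.0], [ 5.0, 40.0]],
                             [[20.0, 35.0], [ 0.0, 10.0]],
                             [[ 0.0, 50.0], [25.0, 45.0]]])
point_prob = np.where(member_forecasts >= 25.0, 1.0, 0.0).mean(axis=0)
print(point_prob)
# approximately [[0.    1.   ]
#                [0.333 0.667]]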
def neighborhood_probability(self, threshold, radius, sigmas=None):
"""
Hourly probability of exceeding a threshold based on model values within a specified radius of a point.
Args:
threshold (float): probability of exceeding this threshold
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects containing neighborhood probabilities for each forecast hour.
"""
if sigmas is None:
sigmas = [0]
weights = disk(radius)
filtered_prob = []
for sigma in sigmas:
filtered_prob.append(EnsembleConsensus(np.zeros(self.data.shape[1:], dtype=np.float32),
"neighbor_prob_r_{0:d}_s_{1:d}".format(radius, sigma),
self.ensemble_name,
self.run_date, self.variable + "_{0:0.2f}".format(threshold),
self.start_date, self.end_date, ""))
thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
neighbor_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
for t in range(self.data.shape[1]):
for m in range(self.data.shape[0]):
thresh_data[self.data[m, t] >= threshold] = 1
maximized = fftconvolve(thresh_data, weights, mode="same")
maximized[maximized > 1] = 1
maximized[maximized < 1] = 0
neighbor_prob += fftconvolve(maximized, weights, mode="same")
neighbor_prob[neighbor_prob < 1] = 0
thresh_data[:] = 0
neighbor_prob /= (self.data.shape[0] * float(weights.sum()))
for s, sigma in enumerate(sigmas):
if sigma > 0:
filtered_prob[s].data[t] = gaussian_filter(neighbor_prob, sigma=sigma)
else:
filtered_prob[s].data[t] = neighbor_prob
neighbor_prob[:] = 0
return filtered_prob
|
Hourly probability of exceeding a threshold based on model values within a specified radius of a point.
Args:
threshold (float): probability of exceeding this threshold
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects containing neighborhood probabilities for each forecast hour.
|
entailment
|
def period_max_neighborhood_probability(self, threshold, radius, sigmas=None):
"""
Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded.
Args:
threshold (float): splitting threshold for probability calculations
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects
"""
if sigmas is None:
sigmas = [0]
weights = disk(radius)
neighborhood_prob = np.zeros(self.data.shape[2:], dtype=np.float32)
thresh_data = np.zeros(self.data.shape[2:], dtype=np.uint8)
for m in range(self.data.shape[0]):
thresh_data[self.data[m].max(axis=0) >= threshold] = 1
maximized = fftconvolve(thresh_data, weights, mode="same")
maximized[maximized > 1] = 1
neighborhood_prob += fftconvolve(maximized, weights, mode="same")
neighborhood_prob[neighborhood_prob < 1] = 0
neighborhood_prob /= (self.data.shape[0] * float(weights.sum()))
consensus_probs = []
for sigma in sigmas:
if sigma > 0:
filtered_prob = gaussian_filter(neighborhood_prob, sigma=sigma)
else:
filtered_prob = neighborhood_prob
ec = EnsembleConsensus(filtered_prob,
"neighbor_prob_{0:02d}-hour_r_{1:d}_s_{2:d}".format(self.data.shape[1],
radius, sigma),
self.ensemble_name,
self.run_date, self.variable + "_{0:0.2f}".format(float(threshold)),
self.start_date, self.end_date, "")
consensus_probs.append(ec)
return consensus_probs
|
Calculates the neighborhood probability of exceeding a threshold at any time over the period loaded.
Args:
threshold (float): splitting threshold for probability calculations
radius (int): distance from point in number of grid points to include in neighborhood calculation.
sigmas (array of ints): Radii for Gaussian filter used to smooth neighborhood probabilities.
Returns:
list of EnsembleConsensus objects
|
entailment
|
def load_data(self, grid_method="gamma", num_samples=1000, condition_threshold=0.5, zero_inflate=False,
percentile=None):
"""
Reads the track forecasts and converts them to grid point values based on random sampling.
Args:
grid_method: "gamma" by default
num_samples: Number of samples drawn from predicted pdf
condition_threshold: Objects are not written to the grid if condition model probability is below this
threshold.
zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
percentile from 0 to 100.
Returns:
0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1.
"""
self.percentile = percentile
if self.track_forecasts == {}:
self.load_track_forecasts()
if self.track_forecasts == {}:
return -1
if self.data is None:
self.data = np.zeros((len(self.members), self.times.size, self.grid_shape[0], self.grid_shape[1]),
dtype=np.float32)
else:
self.data[:] = 0
if grid_method in ["mean", "median", "samples"]:
for m, member in enumerate(self.members):
print("Sampling " + member)
for track_forecast in self.track_forecasts[member]:
times = track_forecast["properties"]["times"]
for s, step in enumerate(track_forecast["features"]):
forecast_pdf = np.array(step['properties'][self.variable + "_" +
self.ensemble_name.replace(" ", "-")])
forecast_time = self.run_date + timedelta(hours=times[s])
t = np.where(self.times == forecast_time)[0][0]
mask = np.array(step['properties']["masks"], dtype=int)
i = np.array(step['properties']["i"], dtype=int)
i = i[mask == 1]
j = np.array(step['properties']["j"], dtype=int)
j = j[mask == 1]
if grid_method == "samples":
intensities = np.array(step["properties"]["timesteps"], dtype=float)[mask == 1]
rankings = np.argsort(intensities)
samples = np.random.choice(self.forecast_bins, size=intensities.size, replace=True,
p=forecast_pdf)
self.data[m, t, i[rankings], j[rankings]] = samples
else:
if grid_method == "mean":
forecast_value = np.sum(forecast_pdf * self.forecast_bins)
elif grid_method == "median":
forecast_cdf = np.cumsum(forecast_pdf)
forecast_value = self.forecast_bins[np.argmin(np.abs(forecast_cdf - 0.5))]
else:
forecast_value = 0
self.data[m, t, i, j] = forecast_value
if grid_method in ["gamma"]:
full_condition_name = "condition_" + self.condition_model_name.replace(" ", "-")
dist_model_name = self.variable + "_" + self.ensemble_name.replace(" ", "-")
for m, member in enumerate(self.members):
for track_forecast in self.track_forecasts[member]:
times = track_forecast["properties"]["times"]
for s, step in enumerate(track_forecast["features"]):
forecast_params = step["properties"][dist_model_name]
if self.condition_model_name is not None:
condition = step["properties"][full_condition_name]
else:
condition = None
forecast_time = self.run_date + timedelta(hours=times[s])
if forecast_time in self.times:
t = np.where(self.times == forecast_time)[0][0]
mask = np.array(step["properties"]["masks"], dtype=int)
rankings = np.argsort(step["properties"]["timesteps"])[mask == 1]
i = np.array(step["properties"]["i"], dtype=int)[mask == 1][rankings]
j = np.array(step["properties"]["j"], dtype=int)[mask == 1][rankings]
if rankings.size > 0:
raw_samples = np.sort(gamma.rvs(forecast_params[0], loc=forecast_params[1],
scale=forecast_params[2],
size=(num_samples, rankings.size)),
axis=1)
if zero_inflate:
raw_samples *= bernoulli.rvs(condition,
size=(num_samples, rankings.size))
if percentile is None:
samples = raw_samples.mean(axis=0)
else:
samples = np.percentile(raw_samples, percentile, axis=0)
if condition is None or condition >= condition_threshold:
self.data[m, t, i, j] = samples
return 0
|
Reads the track forecasts and converts them to grid point values based on random sampling.
Args:
grid_method: "gamma" by default
num_samples: Number of samples drawn from predicted pdf
condition_threshold: Objects are not written to the grid if condition model probability is below this
threshold.
zero_inflate: Whether to sample zeros from a Bernoulli sampler based on the condition model probability
percentile: If None, outputs the mean of the samples at each grid point, otherwise outputs the specified
percentile from 0 to 100.
Returns:
0 if tracks are successfully sampled on to grid. If no tracks are found, returns -1.
|
entailment
|
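The "gamma" grid method above boils down to drawing gamma samples for each object pixel, optionally zero-inflating them with a Bernoulli draw on the condition probability, and reducing to a mean or percentile. A standalone scipy.stats sketch with made-up distribution parameters:

import numpy as np
from scipy.stats import bernoulli, gamma

shape_p, loc_p, scale_p = 2.0, 0.0, 12.0   # illustrative (shape, loc, scale)
condition = 0.7                            # illustrative condition probability
num_samples, num_pixels = 1000, 25

raw_samples = np.sort(gamma.rvs(shape_p, loc=loc_p, scale=scale_p,
                                size=(num_samples, num_pixels)), axis=1)
# zero_inflate=True: keep a sample with probability `condition`, else zero it.
raw_samples = raw_samples * bernoulli.rvs(condition, size=(num_samples, num_pixels))
mean_field = raw_samples.mean(axis=0)               # percentile=None case
p90_field = np.percentile(raw_samples, 90, axis=0)  # percentile=90 case
print(mean_field.shape, p90_field.shape)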
def write_grib2(self, path):
"""
Writes data to grib2 file. Currently, grib codes are set by hand to hail.
Args:
path: Path to directory containing grib2 files.
Returns:
"""
if self.percentile is None:
var_type = "mean"
else:
var_type = "p{0:02d}".format(self.percentile)
lscale = 1e6
grib_id_start = [7, 0, 14, 14, 2]
gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype=np.int32)
lon_0 = self.proj_dict["lon_0"]
sw_lon = self.grid_dict["sw_lon"]
if lon_0 < 0:
lon_0 += 360
if sw_lon < 0:
sw_lon += 360
gdtmp1 = np.array([7, 1, self.proj_dict['a'], 1, self.proj_dict['a'], 1, self.proj_dict['b'],
self.data.shape[-2], self.data.shape[-1], self.grid_dict["sw_lat"] * lscale,
sw_lon * lscale, 0, self.proj_dict["lat_0"] * lscale,
lon_0 * lscale,
self.grid_dict["dx"] * 1e3, self.grid_dict["dy"] * 1e3, 0,
self.proj_dict["lat_1"] * lscale,
self.proj_dict["lat_2"] * lscale, 0, 0], dtype=np.int32)
pdtmp1 = np.array([1, 31, 2, 0, 116, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 192, 0, self.data.shape[0]], dtype=np.int32)
for m, member in enumerate(self.members):
pdtmp1[-2] = m
for t, time in enumerate(self.times):
time_list = list(time.utctimetuple()[0:6])
grbe = Grib2Encode(0, np.array(grib_id_start + time_list + [2, 1], dtype=np.int32))
grbe.addgrid(gdsinfo, gdtmp1)
pdtmp1[8] = (time.to_pydatetime() - self.run_date).total_seconds() / 3600.0
drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)
data = self.data[m, t].astype(np.float32) / 1000.0
masked_data = np.ma.array(data, mask=data <= 0)
grbe.addfield(1, pdtmp1, 0, drtmp1, masked_data)
grbe.end()
filename = path + "{0}_{1}_mlhail_{2}_{3}.grib2".format(self.ensemble_name.replace(" ", "-"), member,
var_type,
time.to_datetime().strftime("%Y%m%d%H%M"))
print("Writing to " + filename)
grib_file = open(filename, "wb")
grib_file.write(grbe.msg)
grib_file.close()
return
|
Writes data to grib2 file. Currently, grib codes are set by hand to hail.
Args:
path: Path to directory containing grib2 files.
Returns:
|
entailment
|
def init_file(self, filename, time_units="seconds since 1970-01-01T00:00"):
"""
Initializes netCDF file for writing
Args:
filename: Name of the netCDF file
time_units: Units for the time variable in format "<time> since <date string>"
Returns:
Dataset object
"""
if os.access(filename, os.R_OK):
out_data = Dataset(filename, "r+")
else:
out_data = Dataset(filename, "w")
if len(self.data.shape) == 2:
for d, dim in enumerate(["y", "x"]):
out_data.createDimension(dim, self.data.shape[d])
else:
for d, dim in enumerate(["y", "x"]):
out_data.createDimension(dim, self.data.shape[d+1])
out_data.createDimension("time", len(self.times))
time_var = out_data.createVariable("time", "i8", ("time",))
time_var[:] = date2num(self.times.to_pydatetime(), time_units)
time_var.units = time_units
out_data.Conventions = "CF-1.6"
return out_data
|
Initializes netCDF file for writing
Args:
filename: Name of the netCDF file
time_units: Units for the time variable in format "<time> since <date string>"
Returns:
Dataset object
|
entailment
|
def write_to_file(self, out_data):
"""
Outputs data to a netCDF file. If the file does not exist, it will be created. Otherwise, additional variables
are appended to the current file
Args:
out_data: Full-path and name of output netCDF file
"""
full_var_name = self.consensus_type + "_" + self.variable
if "-hour" in self.consensus_type:
if full_var_name not in out_data.variables.keys():
var = out_data.createVariable(full_var_name, "f4", ("y", "x"), zlib=True,
least_significant_digit=3, shuffle=True)
else:
var = out_data.variables[full_var_name]
var.coordinates = "y x"
else:
if full_var_name not in out_data.variables.keys():
var = out_data.createVariable(full_var_name, "f4", ("time", "y", "x"), zlib=True,
least_significant_digit=3, shuffle=True)
else:
var = out_data.variables[full_var_name]
var.coordinates = "time y x"
var[:] = self.data
var.units = self.units
var.long_name = self.consensus_type + "_" + self.variable
return
|
Outputs data to a netCDF file. If the file does not exist, it will be created. Otherwise, additional variables
are appended to the current file
Args:
out_data: Full-path and name of output netCDF file
|
entailment
|
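A hypothetical sketch of how init_file and write_to_file above are intended to be chained: open (or reopen) the output file once, append each consensus variable, and close it at the end. Here `consensus_products` is an assumed list of the EnsembleConsensus objects described above.

# `consensus_products` and the file name are illustrative.
out_data = consensus_products[0].init_file("ensemble_consensus.nc")
for product in consensus_products:
    product.write_to_file(out_data)
out_data.close()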
def restore(self, workspace_uuid):
"""
Restore the workspace to the given workspace_uuid.
If workspace_uuid is None then create a new workspace and use it.
"""
workspace = next((workspace for workspace in self.document_model.workspaces if workspace.uuid == workspace_uuid), None)
if workspace is None:
workspace = self.new_workspace()
self._change_workspace(workspace)
|
Restore the workspace to the given workspace_uuid.
If workspace_uuid is None then create a new workspace and use it.
|
entailment
|
def new_workspace(self, name=None, layout=None, workspace_id=None, index=None) -> WorkspaceLayout.WorkspaceLayout:
""" Create a new workspace, insert into document_model, and return it. """
workspace = WorkspaceLayout.WorkspaceLayout()
self.document_model.insert_workspace(index if index is not None else len(self.document_model.workspaces), workspace)
d = create_image_desc()
d["selected"] = True
workspace.layout = layout if layout is not None else d
workspace.name = name if name is not None else _("Workspace")
if workspace_id:
workspace.workspace_id = workspace_id
return workspace
|
Create a new workspace, insert into document_model, and return it.
|
entailment
|
def ensure_workspace(self, name, layout, workspace_id):
"""Looks for a workspace with workspace_id.
If none is found, create a new one, add it, and change to it.
"""
workspace = next((workspace for workspace in self.document_model.workspaces if workspace.workspace_id == workspace_id), None)
if not workspace:
workspace = self.new_workspace(name=name, layout=layout, workspace_id=workspace_id)
self._change_workspace(workspace)
|
Looks for a workspace with workspace_id.
If none is found, create a new one, add it, and change to it.
|
entailment
|
def create_workspace(self) -> None:
""" Pose a dialog to name and create a workspace. """
def create_clicked(text):
if text:
command = Workspace.CreateWorkspaceCommand(self, text)
command.perform()
self.document_controller.push_undo_command(command)
self.pose_get_string_message_box(caption=_("Enter a name for the workspace"), text=_("Workspace"),
accepted_fn=create_clicked, accepted_text=_("Create"),
message_box_id="create_workspace")
|
Pose a dialog to name and create a workspace.
|
entailment
|
def rename_workspace(self) -> None:
""" Pose a dialog to rename the workspace. """
def rename_clicked(text):
if len(text) > 0:
command = Workspace.RenameWorkspaceCommand(self, text)
command.perform()
self.document_controller.push_undo_command(command)
self.pose_get_string_message_box(caption=_("Enter new name for workspace"), text=self.__workspace.name,
accepted_fn=rename_clicked, accepted_text=_("Rename"),
message_box_id="rename_workspace")
|
Pose a dialog to rename the workspace.
|
entailment
|
def remove_workspace(self):
""" Pose a dialog to confirm removal then remove workspace. """
def confirm_clicked():
if len(self.document_model.workspaces) > 1:
command = Workspace.RemoveWorkspaceCommand(self)
command.perform()
self.document_controller.push_undo_command(command)
caption = _("Remove workspace named '{0}'?").format(self.__workspace.name)
self.pose_confirmation_message_box(caption, confirm_clicked, accepted_text=_("Remove Workspace"),
message_box_id="remove_workspace")
|
Pose a dialog to confirm removal then remove workspace.
|
entailment
|
def clone_workspace(self) -> None:
""" Pose a dialog to name and clone a workspace. """
def clone_clicked(text):
if text:
command = Workspace.CloneWorkspaceCommand(self, text)
command.perform()
self.document_controller.push_undo_command(command)
self.pose_get_string_message_box(caption=_("Enter a name for the workspace"), text=self.__workspace.name,
accepted_fn=clone_clicked, accepted_text=_("Clone"),
message_box_id="clone_workspace")
|
Pose a dialog to name and clone a workspace.
|
entailment
|
def __replace_displayed_display_item(self, display_panel, display_item, d=None) -> Undo.UndoableCommand:
""" Used in drag/drop support. """
self.document_controller.replaced_display_panel_content = display_panel.save_contents()
command = DisplayPanel.ReplaceDisplayPanelCommand(self)
if display_item:
display_panel.set_display_panel_display_item(display_item, detect_controller=True)
elif d is not None:
display_panel.change_display_panel_content(d)
display_panel.request_focus()
self.__sync_layout()
return command
|
Used in drag/drop support.
|
entailment
|
def bootstrap(score_objs, n_boot=1000):
"""
Given a set of DistributedROC or DistributedReliability objects, this function performs a
bootstrap resampling of the objects and returns n_boot aggregations of them.
Args:
score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method
n_boot (int): Number of bootstrap samples
Returns:
An array of DistributedROC or DistributedReliability
"""
all_samples = np.random.choice(score_objs, size=(n_boot, len(score_objs)), replace=True)
return all_samples.sum(axis=1)
|
Given a set of DistributedROC or DistributedReliability objects, this function performs a
bootstrap resampling of the objects and returns n_boot aggregations of them.
Args:
score_objs: A list of DistributedROC or DistributedReliability objects. Objects must have an __add__ method
n_boot (int): Number of bootstrap samples
Returns:
An array of DistributedROC or DistributedReliability
|
entailment
|
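A minimal usage sketch of bootstrap, assuming `roc_objs` is a list of DistributedROC objects built elsewhere and that the aggregated objects expose the auc method shown later in this module.

import numpy as np

# Hypothetical sketch: summarize the bootstrap sampling distribution of the AUC.
boot_aggregates = bootstrap(roc_objs, n_boot=1000)
boot_aucs = np.array([agg.auc() for agg in boot_aggregates])
print(np.percentile(boot_aucs, [2.5, 97.5]))  # rough 95% confidence interval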
def update(self, forecasts, observations):
"""
Update the ROC curve with a set of forecasts and observations
Args:
forecasts: 1D array of forecast values
observations: 1D array of observation values.
"""
for t, threshold in enumerate(self.thresholds):
tp = np.count_nonzero((forecasts >= threshold) & (observations >= self.obs_threshold))
fp = np.count_nonzero((forecasts >= threshold) &
(observations < self.obs_threshold))
fn = np.count_nonzero((forecasts < threshold) &
(observations >= self.obs_threshold))
tn = np.count_nonzero((forecasts < threshold) &
(observations < self.obs_threshold))
self.contingency_tables.iloc[t] += [tp, fp, fn, tn]
|
Update the ROC curve with a set of forecasts and observations
Args:
forecasts: 1D array of forecast values
observations: 1D array of observation values.
|
entailment
|
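A hedged sketch of driving update: the constructor arguments are inferred from the thresholds and obs_threshold attributes used above and may not match the real signature; the forecast and observation arrays are synthetic.

import numpy as np

# Hypothetical sketch; the DistributedROC constructor call is an assumption.
roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.1), obs_threshold=25.0)
forecasts = np.random.random(1000)               # forecast probabilities in [0, 1)
observations = np.random.gamma(2.0, 15.0, 1000)  # e.g., synthetic hail sizes in mm
roc.update(forecasts, observations)              # accumulates one contingency table per threshold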
def merge(self, other_roc):
"""
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object.
"""
if other_roc.thresholds.size == self.thresholds.size and np.all(other_roc.thresholds == self.thresholds):
self.contingency_tables += other_roc.contingency_tables
else:
print("Input table thresholds do not match.")
|
Ingest the values of another DistributedROC object into this one and update the statistics inplace.
Args:
other_roc: another DistributedROC object.
|
entailment
|
def roc_curve(self):
"""
Generate a ROC curve from the contingency table by calculating the probability of detection (TP/(TP+FN)) and the
probability of false detection (FP/(FP+TN)).
Returns:
A pandas.DataFrame containing the POD, POFD, and the corresponding probability thresholds.
"""
pod = self.contingency_tables["TP"].astype(float) / (self.contingency_tables["TP"] +
self.contingency_tables["FN"])
pofd = self.contingency_tables["FP"].astype(float) / (self.contingency_tables["FP"] +
self.contingency_tables["TN"])
return pd.DataFrame({"POD": pod, "POFD": pofd, "Thresholds": self.thresholds},
columns=["POD", "POFD", "Thresholds"])
|
Generate a ROC curve from the contingency table by calculating the probability of detection (TP/(TP+FN)) and the
probability of false detection (FP/(FP+TN)).
Returns:
A pandas.DataFrame containing the POD, POFD, and the corresponding probability thresholds.
|
entailment
|
def performance_curve(self):
"""
Calculate the Probability of Detection and False Alarm Ratio in order to output a performance diagram.
Returns:
pandas.DataFrame containing POD, FAR, and probability thresholds.
"""
pod = self.contingency_tables["TP"] / (self.contingency_tables["TP"] + self.contingency_tables["FN"])
far = self.contingency_tables["FP"] / (self.contingency_tables["FP"] + self.contingency_tables["TP"])
far[(self.contingency_tables["FP"] + self.contingency_tables["TP"]) == 0] = np.nan
return pd.DataFrame({"POD": pod, "FAR": far, "Thresholds": self.thresholds},
columns=["POD", "FAR", "Thresholds"])
|
Calculate the Probability of Detection and False Alarm Ratio in order to output a performance diagram.
Returns:
pandas.DataFrame containing POD, FAR, and probability thresholds.
|
entailment
|
def auc(self):
"""
Calculate the Area Under the ROC Curve (AUC).
"""
roc_curve = self.roc_curve()
return np.abs(np.trapz(roc_curve['POD'], x=roc_curve['POFD']))
|
Calculate the Area Under the ROC Curve (AUC).
|
entailment
|
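Continuing the hypothetical `roc` object from the earlier sketch, roc_curve and auc combine as follows:

curve = roc.roc_curve()                  # DataFrame with POD, POFD, Thresholds columns
print(curve[["Thresholds", "POD", "POFD"]])
print("AUC:", roc.auc())                 # trapezoidal area under the POD-vs-POFD curve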
def max_csi(self):
"""
Calculate the maximum Critical Success Index across all probability thresholds
Returns:
The maximum CSI as a float
"""
csi = self.contingency_tables["TP"] / (self.contingency_tables["TP"] + self.contingency_tables["FN"] +
self.contingency_tables["FP"])
return csi.max()
|
Calculate the maximum Critical Success Index across all probability thresholds
Returns:
The maximum CSI as a float
|
entailment
|
def get_contingency_tables(self):
"""
Create an Array of ContingencyTable objects for each probability threshold.
Returns:
Array of ContingencyTable objects
"""
return np.array([ContingencyTable(*ct) for ct in self.contingency_tables.values])
|
Create an Array of ContingencyTable objects for each probability threshold.
Returns:
Array of ContingencyTable objects
|
entailment
|
def from_str(self, in_str):
"""
Read the DistributedROC string and parse the contingency table values from it.
Args:
in_str (str): The string output from the __str__ method
"""
parts = in_str.split(";")
for part in parts:
var_name, value = part.split(":")
if var_name == "Obs_Threshold":
self.obs_threshold = float(value)
elif var_name == "Thresholds":
self.thresholds = np.array(value.split(), dtype=float)
self.contingency_tables = pd.DataFrame(columns=self.contingency_tables.columns,
data=np.zeros((self.thresholds.size,
self.contingency_tables.columns.size)))
elif var_name in self.contingency_tables.columns:
self.contingency_tables[var_name] = np.array(value.split(), dtype=int)
|
Read the DistributedROC string and parse the contingency table values from it.
Args:
in_str (str): The string output from the __str__ method
|
entailment
|
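The parser above implies a simple semicolon- and colon-delimited format, which is convenient for shipping scores between distributed workers. A hedged round-trip sketch: the string literal mirrors what from_str expects, and a no-argument constructor with pre-initialized (empty) contingency tables is assumed.

# Hypothetical sketch of the serialized form that from_str() can parse.
serialized = "Obs_Threshold:25.0;Thresholds:0.0 0.5 1.0;TP:3 2 1;FP:1 1 0;FN:0 1 2;TN:6 6 7"
roc = DistributedROC()    # assumption: default constructor sets up empty tables
roc.from_str(serialized)
print(roc.contingency_tables)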
def update(self, forecasts, observations):
"""
Update the statistics with a set of forecasts and observations.
Args:
forecasts (numpy.ndarray): Array of forecast probability values
observations (numpy.ndarray): Array of observation values
"""
for t, threshold in enumerate(self.thresholds[:-1]):
self.frequencies.loc[t, "Positive_Freq"] += np.count_nonzero((threshold <= forecasts) &
(forecasts < self.thresholds[t+1]) &
(observations >= self.obs_threshold))
self.frequencies.loc[t, "Total_Freq"] += np.count_nonzero((threshold <= forecasts) &
(forecasts < self.thresholds[t+1]))
|
Update the statistics with a set of forecasts and observations.
Args:
forecasts (numpy.ndarray): Array of forecast probability values
observations (numpy.ndarray): Array of observation values
|
entailment
|
def merge(self, other_rel):
"""
Ingest another DistributedReliability and add its contents to the current object.
Args:
other_rel: another DistributedReliability object.
"""
if other_rel.thresholds.size == self.thresholds.size and np.all(other_rel.thresholds == self.thresholds):
self.frequencies += other_rel.frequencies
else:
print("Input table thresholds do not match.")
|
Ingest another DistributedReliability and add its contents to the current object.
Args:
other_rel: another DistributedReliability object.
|
entailment
|
def reliability_curve(self):
"""
Calculates the reliability diagram statistics. The key columns are Bin_Start and Positive_Relative_Freq
Returns:
pandas.DataFrame
"""
total = self.frequencies["Total_Freq"].sum()
curve = pd.DataFrame(columns=["Bin_Start", "Bin_End", "Bin_Center",
"Positive_Relative_Freq", "Total_Relative_Freq"])
curve["Bin_Start"] = self.thresholds[:-1]
curve["Bin_End"] = self.thresholds[1:]
curve["Bin_Center"] = 0.5 * (self.thresholds[:-1] + self.thresholds[1:])
curve["Positive_Relative_Freq"] = self.frequencies["Positive_Freq"] / self.frequencies["Total_Freq"]
curve["Total_Relative_Freq"] = self.frequencies["Total_Freq"] / total
return curve
|
Calculates the reliability diagram statistics. The key columns are Bin_Start and Positive_Relative_Freq
Returns:
pandas.DataFrame
|
entailment
|
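A short sketch of consuming the reliability curve, e.g. for a reliability diagram; `rel` is a hypothetical DistributedReliability instance and the matplotlib plotting is illustrative, not part of this module.

import matplotlib.pyplot as plt

curve = rel.reliability_curve()
plt.plot(curve["Bin_Center"], curve["Positive_Relative_Freq"], marker="o", label="forecast")
plt.plot([0, 1], [0, 1], "k--", label="perfect reliability")
plt.xlabel("Forecast probability (bin center)")
plt.ylabel("Observed relative frequency")
plt.legend()
plt.show()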
def brier_score_components(self):
"""
Calculate the components of the Brier score decomposition: reliability, resolution, and uncertainty.
"""
rel_curve = self.reliability_curve()
total = self.frequencies["Total_Freq"].sum()
climo_freq = float(self.frequencies["Positive_Freq"].sum()) / self.frequencies["Total_Freq"].sum()
reliability = np.sum(self.frequencies["Total_Freq"] * (rel_curve["Bin_Start"] -
rel_curve["Positive_Relative_Freq"]) ** 2) / total
resolution = np.sum(self.frequencies["Total_Freq"] * (rel_curve["Positive_Relative_Freq"] - climo_freq) ** 2) \
/ total
uncertainty = climo_freq * (1 - climo_freq)
return reliability, resolution, uncertainty
|
Calculate the components of the Brier score decomposition: reliability, resolution, and uncertainty.
|
entailment
|
def brier_score(self):
"""
Calculate the Brier Score
"""
reliability, resolution, uncertainty = self.brier_score_components()
return reliability - resolution + uncertainty
|
Calculate the Brier Score
|
entailment
|
def brier_skill_score(self):
"""
Calculate the Brier Skill Score
"""
reliability, resolution, uncertainty = self.brier_score_components()
return (resolution - reliability) / uncertainty
|
Calculate the Brier Skill Score
|
entailment
|
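The two methods above encode the standard decomposition BS = reliability - resolution + uncertainty and BSS = (resolution - reliability) / uncertainty. A tiny numeric sketch with made-up component values:

reliability, resolution, uncertainty = 0.01, 0.05, 0.20       # made-up values
brier_score = reliability - resolution + uncertainty          # 0.16
brier_skill_score = (resolution - reliability) / uncertainty  # 0.20
print(brier_score, brier_skill_score)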
def update(self, forecasts, observations):
"""
Update the statistics with forecasts and observations.
Args:
forecasts: Discrete cumulative distribution functions (CDFs) of the forecasts, with shape (number of forecasts, number of thresholds).
observations: 1D array of observation values, or pre-computed observation CDFs with the same shape as the forecasts.
"""
if len(observations.shape) == 1:
obs_cdfs = np.zeros((observations.size, self.thresholds.size))
for o, observation in enumerate(observations):
obs_cdfs[o, self.thresholds >= observation] = 1
else:
obs_cdfs = observations
self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
self.errors["O"] += np.sum(obs_cdfs, axis=0)
self.num_forecasts += forecasts.shape[0]
|
Update the statistics with forecasts and observations.
Args:
forecasts: Discrete cumulative distribution functions (CDFs) of the forecasts, with shape (number of forecasts, number of thresholds).
observations: 1D array of observation values, or pre-computed observation CDFs with the same shape as the forecasts.
|
entailment
|
def crps(self):
"""
Calculates the continuous ranked probability score.
"""
return np.sum(self.errors["F_2"].values - self.errors["F_O"].values * 2.0 + self.errors["O_2"].values) / \
(self.thresholds.size * self.num_forecasts)
|
Calculates the continuous ranked probability score.
|
entailment
|
def crps_climo(self):
"""
Calculate the climatological CRPS.
"""
o_bar = self.errors["O"].values / float(self.num_forecasts)
crps_c = np.sum(self.num_forecasts * (o_bar ** 2) - o_bar * self.errors["O"].values * 2.0 +
self.errors["O_2"].values) / float(self.thresholds.size * self.num_forecasts)
return crps_c
|
Calculate the climatological CRPS.
|
entailment
|
def crpss(self):
"""
Calculate the continuous ranked probability skill score from existing data.
"""
crps_f = self.crps()
crps_c = self.crps_climo()
return 1.0 - float(crps_f) / float(crps_c)
|
Calculate the continuous ranked probability skill score from existing data.
|
entailment
|
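crps, crps_climo, and crpss relate as a skill score against the climatological reference. A small numeric sketch with made-up values:

crps_forecast = 0.08      # made-up CRPS of the forecast system
crps_climatology = 0.10   # made-up CRPS of the climatological reference
crpss = 1.0 - crps_forecast / crps_climatology   # 0.2, i.e. ~20% improvement over climatology
print(crpss)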
def checar(cliente_sat):
"""
Sequentially checks the registered alerts (see :func:`registrar`) against the
data from the query of the SAT device's operational status. This method then
results in a list of the active alerts.
:param cliente_sat: An instance of
    :class:`satcfe.clientelocal.ClienteSATLocal` or
    :class:`satcfe.clientesathub.ClienteSATHub` on which the method for
    querying the operational status of the SAT device will be invoked.
:rtype: list
"""
resposta = cliente_sat.consultar_status_operacional()
alertas = []
for classe_alerta in AlertaOperacao.alertas_registrados:
alerta = classe_alerta(resposta)
if alerta.checar():
alertas.append(alerta)
return alertas
|
Sequentially checks the registered alerts (see :func:`registrar`) against the
data from the query of the SAT device's operational status. This method then
results in a list of the active alerts.
:param cliente_sat: An instance of
    :class:`satcfe.clientelocal.ClienteSATLocal` or
    :class:`satcfe.clientesathub.ClienteSATHub` on which the method for
    querying the operational status of the SAT device will be invoked.
:rtype: list
|
entailment
|
def has_metadata_value(metadata_source, key: str) -> bool:
"""Return whether the metadata value for the given key exists.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
"""
desc = session_key_map.get(key)
if desc is not None:
d = getattr(metadata_source, "session_metadata", dict())
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None:
return desc['path'][-1] in d
desc = key_map.get(key)
if desc is not None:
d = getattr(metadata_source, "metadata", dict())
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None:
return desc['path'][-1] in d
    return False
|
Return whether the metadata value for the given key exists.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
|
entailment
|
def get_metadata_value(metadata_source, key: str) -> typing.Any:
"""Get the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
"""
desc = session_key_map.get(key)
if desc is not None:
v = getattr(metadata_source, "session_metadata", dict())
for k in desc['path']:
v = v.get(k) if v is not None else None
return v
desc = key_map.get(key)
if desc is not None:
v = getattr(metadata_source, "metadata", dict())
for k in desc['path']:
v = v.get(k) if v is not None else None
return v
raise KeyError()
|
Get the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
|
entailment
|
def set_metadata_value(metadata_source, key: str, value: typing.Any) -> None:
"""Set the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
"""
desc = session_key_map.get(key)
if desc is not None:
d0 = getattr(metadata_source, "session_metadata", dict())
d = d0
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None:
d[desc['path'][-1]] = value
metadata_source.session_metadata = d0
return
desc = key_map.get(key)
if desc is not None:
d0 = getattr(metadata_source, "metadata", dict())
d = d0
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None:
d[desc['path'][-1]] = value
metadata_source.metadata = d0
return
raise KeyError()
|
Set the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<group>.<attribute>' format followed
by the predefined keys. e.g. 'session.instrument' or 'camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
|
entailment
|
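A hedged usage sketch of the metadata accessors defined in this module: `data_item` is any hypothetical object exposing metadata/session_metadata attributes, and the key string is one of the examples mentioned in these docstrings (whether it is actually registered in the key maps is an assumption).

# Hypothetical sketch; data_item and the key are placeholders.
set_metadata_value(data_item, "stem.session.instrument", "MyMicroscope")
if has_metadata_value(data_item, "stem.session.instrument"):
    print(get_metadata_value(data_item, "stem.session.instrument"))
delete_metadata_value(data_item, "stem.session.instrument")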
def delete_metadata_value(metadata_source, key: str) -> None:
"""Delete the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<dotted>.<group>.<attribute>' format followed
by the predefined keys. e.g. 'stem.session.instrument' or 'stm.camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
"""
desc = session_key_map.get(key)
if desc is not None:
d0 = getattr(metadata_source, "session_metadata", dict())
d = d0
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None and desc['path'][-1] in d:
d.pop(desc['path'][-1], None)
metadata_source.session_metadata = d0
return
desc = key_map.get(key)
if desc is not None:
d0 = getattr(metadata_source, "metadata", dict())
d = d0
for k in desc['path'][:-1]:
d = d.setdefault(k, dict()) if d is not None else None
if d is not None and desc['path'][-1] in d:
d.pop(desc['path'][-1], None)
metadata_source.metadata = d0
return
|
Delete the metadata value for the given key.
There are a set of predefined keys that, when used, will be type checked and be interoperable with other
applications. Please consult reference documentation for valid keys.
If using a custom key, we recommend structuring your keys in the '<dotted>.<group>.<attribute>' format followed
by the predefined keys. e.g. 'stem.session.instrument' or 'stm.camera.binning'.
Also note that some predefined keys map to the metadata ``dict`` but others do not. For this reason, prefer
using the ``metadata_value`` methods over directly accessing ``metadata``.
|
entailment
|
def calculate_y_ticks(self, plot_height):
"""Calculate the y-axis items dependent on the plot height."""
calibrated_data_min = self.calibrated_data_min
calibrated_data_max = self.calibrated_data_max
calibrated_data_range = calibrated_data_max - calibrated_data_min
ticker = self.y_ticker
y_ticks = list()
for tick_value, tick_label in zip(ticker.values, ticker.labels):
if calibrated_data_range != 0.0:
y_tick = plot_height - plot_height * (tick_value - calibrated_data_min) / calibrated_data_range
else:
y_tick = plot_height - plot_height * 0.5
if y_tick >= 0 and y_tick <= plot_height:
y_ticks.append((y_tick, tick_label))
return y_ticks
|
Calculate the y-axis items dependent on the plot height.
|
entailment
|
def calculate_x_ticks(self, plot_width):
"""Calculate the x-axis items dependent on the plot width."""
x_calibration = self.x_calibration
uncalibrated_data_left = self.__uncalibrated_left_channel
uncalibrated_data_right = self.__uncalibrated_right_channel
calibrated_data_left = x_calibration.convert_to_calibrated_value(uncalibrated_data_left) if x_calibration is not None else uncalibrated_data_left
calibrated_data_right = x_calibration.convert_to_calibrated_value(uncalibrated_data_right) if x_calibration is not None else uncalibrated_data_right
calibrated_data_left, calibrated_data_right = min(calibrated_data_left, calibrated_data_right), max(calibrated_data_left, calibrated_data_right)
graph_left, graph_right, tick_values, division, precision = Geometry.make_pretty_range(calibrated_data_left, calibrated_data_right)
drawn_data_width = self.drawn_right_channel - self.drawn_left_channel
x_ticks = list()
if drawn_data_width > 0.0:
for tick_value in tick_values:
label = nice_label(tick_value, precision)
data_tick = x_calibration.convert_from_calibrated_value(tick_value) if x_calibration else tick_value
x_tick = plot_width * (data_tick - self.drawn_left_channel) / drawn_data_width
if x_tick >= 0 and x_tick <= plot_width:
x_ticks.append((x_tick, label))
return x_ticks
|
Calculate the x-axis items dependent on the plot width.
|
entailment
|
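The core of calculate_x_ticks is a linear map from a tick's channel position to a pixel position; a tiny numeric sketch with made-up values:

# Made-up example: plot 600 px wide, drawn channels 100..900, tick at channel 300.
plot_width = 600
drawn_left_channel, drawn_right_channel = 100, 900
data_tick = 300
x_tick = plot_width * (data_tick - drawn_left_channel) / (drawn_right_channel - drawn_left_channel)
print(x_tick)  # 150.0, i.e. a quarter of the way across the plot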
def size_to_content(self):
""" Size the canvas item to the proper height. """
new_sizing = self.copy_sizing()
new_sizing.minimum_height = 0
new_sizing.maximum_height = 0
axes = self.__axes
if axes and axes.is_valid:
if axes.x_calibration and axes.x_calibration.units:
new_sizing.minimum_height = self.font_size + 4
new_sizing.maximum_height = self.font_size + 4
self.update_sizing(new_sizing)
|
Size the canvas item to the proper height.
|
entailment
|
def size_to_content(self, get_font_metrics_fn):
""" Size the canvas item to the proper width, the maximum of any label. """
new_sizing = self.copy_sizing()
new_sizing.minimum_width = 0
new_sizing.maximum_width = 0
axes = self.__axes
if axes and axes.is_valid:
# calculate the width based on the label lengths
font = "{0:d}px".format(self.font_size)
max_width = 0
y_range = axes.calibrated_data_max - axes.calibrated_data_min
label = axes.y_ticker.value_label(axes.calibrated_data_max + y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
label = axes.y_ticker.value_label(axes.calibrated_data_min - y_range * 5)
max_width = max(max_width, get_font_metrics_fn(font, label).width)
new_sizing.minimum_width = max_width
new_sizing.maximum_width = max_width
self.update_sizing(new_sizing)
|
Size the canvas item to the proper width, the maximum of any label.
|
entailment
|
def size_to_content(self):
""" Size the canvas item to the proper width. """
new_sizing = self.copy_sizing()
new_sizing.minimum_width = 0
new_sizing.maximum_width = 0
axes = self.__axes
if axes and axes.is_valid:
if axes.y_calibration and axes.y_calibration.units:
new_sizing.minimum_width = self.font_size + 4
new_sizing.maximum_width = self.font_size + 4
self.update_sizing(new_sizing)
|
Size the canvas item to the proper width.
|
entailment
|
def get_snippet_content(snippet_name, **format_kwargs):
""" Load the content from a snippet file which exists in SNIPPETS_ROOT """
filename = snippet_name + '.snippet'
snippet_file = os.path.join(SNIPPETS_ROOT, filename)
if not os.path.isfile(snippet_file):
raise ValueError('could not find snippet with name ' + filename)
ret = helpers.get_file_content(snippet_file)
if format_kwargs:
ret = ret.format(**format_kwargs)
return ret
|
Load the content from a snippet file which exists in SNIPPETS_ROOT
|
entailment
|
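A hedged usage sketch: the snippet name and format field are hypothetical and assume a file named dockerfile_base.snippet exists under SNIPPETS_ROOT containing a `{base_image}` placeholder.

# Hypothetical: SNIPPETS_ROOT/dockerfile_base.snippet might contain "FROM {base_image}".
content = get_snippet_content("dockerfile_base", base_image="python:3.10-slim")
print(content)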
def update_display_properties(self, display_calibration_info, display_properties: typing.Mapping, display_layers: typing.Sequence[typing.Mapping]) -> None:
"""Update the display values. Called from display panel.
This method saves the display values and data and triggers an update. It should be as fast as possible.
As a layer, this canvas item will respond to the update by calling prepare_render on the layer's rendering
thread. Prepare render will call prepare_display which will construct new axes and update all of the constituent
canvas items such as the axes labels and the graph layers. Each will trigger its own update if its inputs have
changed.
The inefficiencies in this process are that the layer must re-render on each call to this function. There is
also a cost within the constituent canvas items to check whether the axes or their data has changed.
When the display is associated with a single data item, the data will be
"""
# may be called from thread; prevent a race condition with closing.
with self.__closing_lock:
if self.__closed:
return
displayed_dimensional_scales = display_calibration_info.displayed_dimensional_scales
displayed_dimensional_calibrations = display_calibration_info.displayed_dimensional_calibrations
self.__data_scale = displayed_dimensional_scales[-1] if len(displayed_dimensional_scales) > 0 else 1
self.__displayed_dimensional_calibration = displayed_dimensional_calibrations[-1] if len(displayed_dimensional_calibrations) > 0 else Calibration.Calibration(scale=displayed_dimensional_scales[-1])
self.__intensity_calibration = display_calibration_info.displayed_intensity_calibration
self.__calibration_style = display_calibration_info.calibration_style
self.__y_min = display_properties.get("y_min")
self.__y_max = display_properties.get("y_max")
self.__y_style = display_properties.get("y_style", "linear")
self.__left_channel = display_properties.get("left_channel")
self.__right_channel = display_properties.get("right_channel")
self.__legend_position = display_properties.get("legend_position")
self.__display_layers = display_layers
if self.__display_values_list and len(self.__display_values_list) > 0:
self.__xdata_list = [display_values.display_data_and_metadata if display_values else None for display_values in self.__display_values_list]
xdata0 = self.__xdata_list[0]
if xdata0:
self.__update_frame(xdata0.metadata)
else:
self.__xdata_list = list()
# update the cursor info
self.__update_cursor_info()
# mark for update. prepare display will mark children for update if necessary.
self.update()
|
Update the display values. Called from display panel.
This method saves the display values and data and triggers an update. It should be as fast as possible.
As a layer, this canvas item will respond to the update by calling prepare_render on the layer's rendering
thread. Prepare render will call prepare_display which will construct new axes and update all of the constituent
canvas items such as the axes labels and the graph layers. Each will trigger its own update if its inputs have
changed.
The inefficiencies in this process are that the layer must re-render on each call to this function. There is
also a cost within the constituent canvas items to check whether the axes or their data has changed.
When the display is associated with a single data item, the data will be
|
entailment
|
def __view_to_intervals(self, data_and_metadata: DataAndMetadata.DataAndMetadata, intervals: typing.List[typing.Tuple[float, float]]) -> None:
"""Change the view to encompass the channels and data represented by the given intervals."""
left = None
right = None
for interval in intervals:
left = min(left, interval[0]) if left is not None else interval[0]
right = max(right, interval[1]) if right is not None else interval[1]
left = left if left is not None else 0.0
right = right if right is not None else 1.0
left_channel = int(max(0.0, left) * data_and_metadata.data_shape[-1])
right_channel = int(min(1.0, right) * data_and_metadata.data_shape[-1])
data_min = numpy.amin(data_and_metadata.data[..., left_channel:right_channel])
data_max = numpy.amax(data_and_metadata.data[..., left_channel:right_channel])
if data_min > 0 and data_max > 0:
y_min = 0.0
y_max = data_max * 1.2
elif data_min < 0 and data_max < 0:
y_min = data_min * 1.2
y_max = 0.0
else:
y_min = data_min * 1.2
y_max = data_max * 1.2
extra = (right - left) * 0.5
display_left_channel = int(max(0.0, left - extra) * data_and_metadata.data_shape[-1])
display_right_channel = int(min(1.0, right + extra) * data_and_metadata.data_shape[-1])
# command = self.delegate.create_change_display_command()
self.delegate.update_display_properties({"left_channel": display_left_channel, "right_channel": display_right_channel, "y_min": y_min, "y_max": y_max})
|
Change the view to encompass the channels and data represented by the given intervals.
|
entailment
|
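The channel and y-range arithmetic in the method above can be illustrated with a small standalone sketch (numbers are made up; the half-interval padding and the 20% y padding mirror the code):

# Made-up example: a 1000-channel spectrum with a selected interval of [0.2, 0.4].
data_length = 1000
left, right = 0.2, 0.4
extra = (right - left) * 0.5                                         # pad the view by half the interval width
display_left_channel = int(max(0.0, left - extra) * data_length)     # 100
display_right_channel = int(min(1.0, right + extra) * data_length)   # 500
data_min, data_max = 5.0, 40.0       # extrema of the data inside the selected channels
y_min, y_max = 0.0, data_max * 1.2   # all-positive data: clamp the bottom to zero, pad the top by 20%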
def __view_to_selected_graphics(self, data_and_metadata: DataAndMetadata.DataAndMetadata) -> None:
"""Change the view to encompass the selected graphic intervals."""
all_graphics = self.__graphics
graphics = [graphic for graphic_index, graphic in enumerate(all_graphics) if self.__graphic_selection.contains(graphic_index)]
intervals = list()
for graphic in graphics:
if isinstance(graphic, Graphics.IntervalGraphic):
intervals.append(graphic.interval)
self.__view_to_intervals(data_and_metadata, intervals)
|
Change the view to encompass the selected graphic intervals.
|
entailment
|
def prepare_display(self):
"""Prepare the display.
This method gets called by the canvas layout/draw engine after being triggered by a call to `update`.
When data or display parameters change, the internal state of the line plot gets updated. This method takes
that internal state and updates the child canvas items.
This method is always run on a thread and should be fast but doesn't need to be instant.
"""
displayed_dimensional_calibration = self.__displayed_dimensional_calibration
intensity_calibration = self.__intensity_calibration
calibration_style = self.__calibration_style
y_min = self.__y_min
y_max = self.__y_max
y_style = self.__y_style
left_channel = self.__left_channel
right_channel = self.__right_channel
scalar_xdata_list = None
def calculate_scalar_xdata(xdata_list):
scalar_xdata_list = list()
for xdata in xdata_list:
if xdata:
scalar_data = Image.scalar_from_array(xdata.data)
scalar_data = Image.convert_to_grayscale(scalar_data)
scalar_intensity_calibration = calibration_style.get_intensity_calibration(xdata)
scalar_dimensional_calibrations = calibration_style.get_dimensional_calibrations(xdata.dimensional_shape, xdata.dimensional_calibrations)
if displayed_dimensional_calibration.units == scalar_dimensional_calibrations[-1].units and intensity_calibration.units == scalar_intensity_calibration.units:
# the data needs to have an intensity scale matching intensity_calibration. convert the data to use the common scale.
scale = scalar_intensity_calibration.scale / intensity_calibration.scale
offset = (scalar_intensity_calibration.offset - intensity_calibration.offset) / intensity_calibration.scale
scalar_data = scalar_data * scale + offset
scalar_xdata_list.append(DataAndMetadata.new_data_and_metadata(scalar_data, scalar_intensity_calibration, scalar_dimensional_calibrations))
else:
scalar_xdata_list.append(None)
return scalar_xdata_list
data_scale = self.__data_scale
xdata_list = self.__xdata_list
if data_scale is not None:
# update the line graph data
left_channel = left_channel if left_channel is not None else 0
right_channel = right_channel if right_channel is not None else data_scale
left_channel, right_channel = min(left_channel, right_channel), max(left_channel, right_channel)
scalar_data_list = None
if y_min is None or y_max is None and len(xdata_list) > 0:
scalar_xdata_list = calculate_scalar_xdata(xdata_list)
scalar_data_list = [xdata.data if xdata else None for xdata in scalar_xdata_list]
calibrated_data_min, calibrated_data_max, y_ticker = LineGraphCanvasItem.calculate_y_axis(scalar_data_list, y_min, y_max, intensity_calibration, y_style)
axes = LineGraphCanvasItem.LineGraphAxes(data_scale, calibrated_data_min, calibrated_data_max, left_channel, right_channel, displayed_dimensional_calibration, intensity_calibration, y_style, y_ticker)
if scalar_xdata_list is None:
if len(xdata_list) > 0:
scalar_xdata_list = calculate_scalar_xdata(xdata_list)
else:
scalar_xdata_list = list()
if self.__display_frame_rate_id:
Utility.fps_tick("prepare_"+self.__display_frame_rate_id)
colors = ('#1E90FF', "#F00", "#0F0", "#00F", "#FF0", "#0FF", "#F0F", "#888", "#800", "#080", "#008", "#CCC", "#880", "#088", "#808", "#964B00")
display_layers = self.__display_layers
if len(display_layers) == 0:
index = 0
for scalar_index, scalar_xdata in enumerate(scalar_xdata_list):
if scalar_xdata and scalar_xdata.is_data_1d:
if index < 16:
display_layers.append({"fill_color": colors[index] if index == 0 else None, "stroke_color": colors[index] if index > 0 else None, "data_index": scalar_index})
index += 1
if scalar_xdata and scalar_xdata.is_data_2d:
for row in range(min(scalar_xdata.data_shape[-1], 16)):
if index < 16:
display_layers.append({"fill_color": colors[index] if index == 0 else None, "stroke_color": colors[index] if index > 0 else None, "data_index": scalar_index, "data_row": row})
index += 1
display_layer_count = len(display_layers)
self.___has_valid_drawn_graph_data = False
for index, display_layer in enumerate(display_layers):
if index < 16:
fill_color = display_layer.get("fill_color")
stroke_color = display_layer.get("stroke_color")
data_index = display_layer.get("data_index", 0)
data_row = display_layer.get("data_row", 0)
if 0 <= data_index < len(scalar_xdata_list):
scalar_xdata = scalar_xdata_list[data_index]
if scalar_xdata:
data_row = max(0, min(scalar_xdata.dimensional_shape[0] - 1, data_row))
intensity_calibration = scalar_xdata.intensity_calibration
displayed_dimensional_calibration = scalar_xdata.dimensional_calibrations[-1]
if scalar_xdata.is_data_2d:
scalar_data = scalar_xdata.data[data_row:data_row + 1, :].reshape((scalar_xdata.dimensional_shape[-1],))
scalar_xdata = DataAndMetadata.new_data_and_metadata(scalar_data, intensity_calibration, [displayed_dimensional_calibration])
line_graph_canvas_item = self.__line_graph_stack.canvas_items[display_layer_count - (index + 1)]
line_graph_canvas_item.set_fill_color(fill_color)
line_graph_canvas_item.set_stroke_color(stroke_color)
line_graph_canvas_item.set_axes(axes)
line_graph_canvas_item.set_uncalibrated_xdata(scalar_xdata)
self.___has_valid_drawn_graph_data = scalar_xdata is not None
for index in range(len(display_layers), 16):
line_graph_canvas_item = self.__line_graph_stack.canvas_items[index]
line_graph_canvas_item.set_axes(None)
line_graph_canvas_item.set_uncalibrated_xdata(None)
legend_position = self.__legend_position
LegendEntry = collections.namedtuple("LegendEntry", ["label", "fill_color", "stroke_color"])
legend_entries = list()
for index, display_layer in enumerate(self.__display_layers):
data_index = display_layer.get("data_index", None)
data_row = display_layer.get("data_row", None)
label = display_layer.get("label", str())
if not label:
if data_index is not None and data_row is not None:
label = "Data {}:{}".format(data_index, data_row)
elif data_index is not None:
label = "Data {}".format(data_index)
else:
label = "Unknown"
fill_color = display_layer.get("fill_color")
stroke_color = display_layer.get("stroke_color")
legend_entries.append(LegendEntry(label, fill_color, stroke_color))
self.__update_canvas_items(axes, legend_position, legend_entries)
else:
for line_graph_canvas_item in self.__line_graph_stack.canvas_items:
line_graph_canvas_item.set_axes(None)
line_graph_canvas_item.set_uncalibrated_xdata(None)
self.__line_graph_xdata_list = list()
self.__update_canvas_items(LineGraphCanvasItem.LineGraphAxes(), None, None)
|
Prepare the display.
This method gets called by the canvas layout/draw engine after being triggered by a call to `update`.
When data or display parameters change, the internal state of the line plot gets updated. This method takes
that internal state and updates the child canvas items.
This method is always run on a thread and should be fast but doesn't need to be instant.
|
entailment
|
def __update_cursor_info(self):
""" Map the mouse to the 1-d position within the line graph. """
if not self.delegate: # allow display to work without delegate
return
if self.__mouse_in and self.__last_mouse:
pos_1d = None
axes = self.__axes
line_graph_canvas_item = self.line_graph_canvas_item
if axes and axes.is_valid and line_graph_canvas_item:
mouse = self.map_to_canvas_item(self.__last_mouse, line_graph_canvas_item)
plot_rect = line_graph_canvas_item.canvas_bounds
if plot_rect.contains_point(mouse):
mouse = mouse - plot_rect.origin
x = float(mouse.x) / plot_rect.width
px = axes.drawn_left_channel + x * (axes.drawn_right_channel - axes.drawn_left_channel)
pos_1d = px,
self.delegate.cursor_changed(pos_1d)
|
Map the mouse to the 1-d position within the line graph.
|
entailment
|
def find_model_patch_tracks(self):
"""
Identify storms in gridded model output and extract uniform sized patches around the storm centers of mass.
Returns:
"""
self.model_grid.load_data()
tracked_model_objects = []
model_objects = []
if self.model_grid.data is None:
print("No model output found")
return tracked_model_objects
min_orig = self.model_ew.min_thresh
max_orig = self.model_ew.max_thresh
data_increment_orig = self.model_ew.data_increment
self.model_ew.min_thresh = 0
self.model_ew.data_increment = 1
self.model_ew.max_thresh = 100
for h, hour in enumerate(self.hours):
# Identify storms at each time step and apply size filter
print("Finding {0} objects for run {1} Hour: {2:02d}".format(self.ensemble_member,
self.run_date.strftime("%Y%m%d%H"), hour))
if self.mask is not None:
model_data = self.model_grid.data[h] * self.mask
else:
model_data = self.model_grid.data[h]
model_data[:self.patch_radius] = 0
model_data[-self.patch_radius:] = 0
model_data[:, :self.patch_radius] = 0
model_data[:, -self.patch_radius:] = 0
scaled_data = np.array(rescale_data(model_data, min_orig, max_orig))
hour_labels = label_storm_objects(scaled_data, "ew",
self.model_ew.min_thresh, self.model_ew.max_thresh,
min_area=self.size_filter, max_area=self.model_ew.max_size,
max_range=self.model_ew.delta, increment=self.model_ew.data_increment,
gaussian_sd=self.gaussian_window)
model_objects.extend(extract_storm_patches(hour_labels, model_data, self.model_grid.x,
self.model_grid.y, [hour],
dx=self.model_grid.dx,
patch_radius=self.patch_radius))
for model_obj in model_objects[-1]:
dims = model_obj.timesteps[-1].shape
if h > 0:
model_obj.estimate_motion(hour, self.model_grid.data[h-1], dims[1], dims[0])
del scaled_data
del model_data
del hour_labels
tracked_model_objects.extend(track_storms(model_objects, self.hours,
self.object_matcher.cost_function_components,
self.object_matcher.max_values,
self.object_matcher.weights))
self.model_ew.min_thresh = min_orig
self.model_ew.max_thresh = max_orig
self.model_ew.data_increment = data_increment_orig
return tracked_model_objects
|
Identify storms in gridded model output and extract uniform sized patches around the storm centers of mass.
Returns:
|
entailment
|
def find_model_tracks(self):
"""
Identify storms at each model time step and link them together with object matching.
Returns:
List of STObjects containing model track information.
"""
self.model_grid.load_data()
model_objects = []
tracked_model_objects = []
if self.model_grid.data is None:
print("No model output found")
return tracked_model_objects
for h, hour in enumerate(self.hours):
# Identify storms at each time step and apply size filter
print("Finding {0} objects for run {1} Hour: {2:02d}".format(self.ensemble_member,
self.run_date.strftime("%Y%m%d%H"), hour))
if self.mask is not None:
model_data = self.model_grid.data[h] * self.mask
else:
model_data = self.model_grid.data[h]
# remember orig values
min_orig = self.model_ew.min_thresh
max_orig = self.model_ew.max_thresh
data_increment_orig = self.model_ew.data_increment
# scale to int 0-100.
scaled_data = np.array(rescale_data( self.model_grid.data[h], min_orig, max_orig))
self.model_ew.min_thresh = 0
self.model_ew.data_increment = 1
self.model_ew.max_thresh = 100
hour_labels = self.model_ew.label(gaussian_filter(scaled_data, self.gaussian_window))
hour_labels[model_data < self.model_ew.min_thresh] = 0
hour_labels = self.model_ew.size_filter(hour_labels, self.size_filter)
# Return to orig values
self.model_ew.min_thresh = min_orig
self.model_ew.max_thresh = max_orig
self.model_ew.data_increment = data_increment_orig
obj_slices = find_objects(hour_labels)
num_slices = len(obj_slices)
model_objects.append([])
if num_slices > 0:
for s, sl in enumerate(obj_slices):
model_objects[-1].append(STObject(self.model_grid.data[h][sl],
np.where(hour_labels[sl] == s + 1, 1, 0),
self.model_grid.x[sl],
self.model_grid.y[sl],
self.model_grid.i[sl],
self.model_grid.j[sl],
hour,
hour,
dx=self.model_grid.dx))
if h > 0:
dims = model_objects[-1][-1].timesteps[0].shape
model_objects[-1][-1].estimate_motion(hour, self.model_grid.data[h-1], dims[1], dims[0])
del hour_labels
del scaled_data
del model_data
for h, hour in enumerate(self.hours):
past_time_objs = []
for obj in tracked_model_objects:
# Potential trackable objects are identified
if obj.end_time == hour - 1:
past_time_objs.append(obj)
# If no objects existed in the last time step, then consider objects in current time step all new
if len(past_time_objs) == 0:
tracked_model_objects.extend(model_objects[h])
# Match from previous time step with current time step
elif len(past_time_objs) > 0 and len(model_objects[h]) > 0:
assignments = self.object_matcher.match_objects(past_time_objs, model_objects[h], hour - 1, hour)
unpaired = list(range(len(model_objects[h])))
for pair in assignments:
past_time_objs[pair[0]].extend(model_objects[h][pair[1]])
unpaired.remove(pair[1])
if len(unpaired) > 0:
for up in unpaired:
tracked_model_objects.append(model_objects[h][up])
print("Tracked Model Objects: {0:03d} Hour: {1:02d}".format(len(tracked_model_objects), hour))
return tracked_model_objects
|
Identify storms at each model time step and link them together with object matching.
Returns:
List of STObjects containing model track information.
|
entailment
|
def find_mrms_tracks(self):
"""
Identify objects from MRMS timesteps and link them together with object matching.
Returns:
List of STObjects containing MESH track information.
"""
obs_objects = []
tracked_obs_objects = []
if self.mrms_ew is not None:
self.mrms_grid.load_data()
if len(self.mrms_grid.data) != len(self.hours):
print('Less than 24 hours of observation data found')
return tracked_obs_objects
for h, hour in enumerate(self.hours):
mrms_data = np.zeros(self.mrms_grid.data[h].shape)
mrms_data[:] = np.array(self.mrms_grid.data[h])
mrms_data[mrms_data < 0] = 0
hour_labels = self.mrms_ew.size_filter(self.mrms_ew.label(gaussian_filter(mrms_data,
self.gaussian_window)),
self.size_filter)
hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0
obj_slices = find_objects(hour_labels)
num_slices = len(obj_slices)
obs_objects.append([])
if num_slices > 0:
for sl in obj_slices:
obs_objects[-1].append(STObject(mrms_data[sl],
np.where(hour_labels[sl] > 0, 1, 0),
self.model_grid.x[sl],
self.model_grid.y[sl],
self.model_grid.i[sl],
self.model_grid.j[sl],
hour,
hour,
dx=self.model_grid.dx))
if h > 0:
dims = obs_objects[-1][-1].timesteps[0].shape
obs_objects[-1][-1].estimate_motion(hour, self.mrms_grid.data[h-1], dims[1], dims[0])
for h, hour in enumerate(self.hours):
past_time_objs = []
for obj in tracked_obs_objects:
if obj.end_time == hour - 1:
past_time_objs.append(obj)
if len(past_time_objs) == 0:
tracked_obs_objects.extend(obs_objects[h])
elif len(past_time_objs) > 0 and len(obs_objects[h]) > 0:
assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h], hour - 1, hour)
unpaired = list(range(len(obs_objects[h])))
for pair in assignments:
past_time_objs[pair[0]].extend(obs_objects[h][pair[1]])
unpaired.remove(pair[1])
if len(unpaired) > 0:
for up in unpaired:
tracked_obs_objects.append(obs_objects[h][up])
print("Tracked Obs Objects: {0:03d} Hour: {1:02d}".format(len(tracked_obs_objects), hour))
return tracked_obs_objects
|
Identify objects from MRMS timesteps and link them together with object matching.
Returns:
List of STObjects containing MESH track information.
|
entailment
|
def match_tracks(self, model_tracks, obs_tracks, unique_matches=True, closest_matches=False):
"""
Match forecast and observed tracks.
Args:
model_tracks: List of model track STObjects.
obs_tracks: List of observed track STObjects.
unique_matches: If True, pair each track with at most one other track via the track matcher;
    otherwise use neighbor matching, which allows multiple matches per track.
closest_matches: Passed to the track matcher when unique_matches is True.
Returns:
    Track pairings produced by the track matcher.
"""
if unique_matches:
pairings = self.track_matcher.match_tracks(model_tracks, obs_tracks, closest_matches=closest_matches)
else:
pairings = self.track_matcher.neighbor_matches(model_tracks, obs_tracks)
return pairings
|
Match forecast and observed tracks.
Args:
model_tracks:
obs_tracks:
unique_matches:
closest_matches:
Returns:
|
entailment
|
def extract_model_attributes(self, tracked_model_objects, storm_variables, potential_variables,
tendency_variables=None, future_variables=None):
"""
Extract model attribute data for each model track. Storm variables are those that describe the model storm
directly, such as radar reflectivity or updraft helicity. Potential variables describe the surrounding
environmental conditions of the storm, and should be extracted from the timestep before the storm arrives to
reduce the chance of the storm contaminating the environmental values. Examples of potential variables include
CAPE, shear, temperature, and dewpoint. Future variables are fields that occur in the hour after the extracted
field.
Args:
tracked_model_objects: List of STObjects describing each forecasted storm
storm_variables: List of storm variable names
potential_variables: List of potential variable names.
tendency_variables: List of tendency variable names
future_variables: List of future variable names
"""
if tendency_variables is None:
tendency_variables = []
if future_variables is None:
future_variables = []
model_grids = {}
for l_var in ["lon", "lat"]:
for model_obj in tracked_model_objects:
model_obj.extract_attribute_array(getattr(self.model_grid, l_var), l_var)
for storm_var in storm_variables:
print("Storm {0} {1} {2}".format(storm_var,self.ensemble_member, self.run_date.strftime("%Y%m%d")))
model_grids[storm_var] = ModelOutput(self.ensemble_name, self.ensemble_member,
self.run_date, storm_var, self.start_date - timedelta(hours=1),
self.end_date + timedelta(hours=1),
self.model_path,self.model_map_file,
self.sector_ind_path,self.single_step)
model_grids[storm_var].load_data()
for model_obj in tracked_model_objects:
model_obj.extract_attribute_grid(model_grids[storm_var])
if storm_var not in potential_variables + tendency_variables + future_variables:
del model_grids[storm_var]
for potential_var in potential_variables:
print("Potential {0} {1} {2}".format(potential_var,self.ensemble_member, self.run_date.strftime("%Y%m%d")))
if potential_var not in model_grids.keys():
model_grids[potential_var] = ModelOutput(self.ensemble_name, self.ensemble_member,
self.run_date, potential_var,
self.start_date - timedelta(hours=1),
self.end_date + timedelta(hours=1),
self.model_path, self.model_map_file,
self.sector_ind_path,self.single_step)
model_grids[potential_var].load_data()
for model_obj in tracked_model_objects:
model_obj.extract_attribute_grid(model_grids[potential_var], potential=True)
if potential_var not in tendency_variables + future_variables:
del model_grids[potential_var]
for future_var in future_variables:
print("Future {0} {1} {2}".format(future_var, self.ensemble_member, self.run_date.strftime("%Y%m%d")))
if future_var not in model_grids.keys():
model_grids[future_var] = ModelOutput(self.ensemble_name, self.ensemble_member,
self.run_date, future_var,
self.start_date - timedelta(hours=1),
self.end_date + timedelta(hours=1),
self.model_path, self.model_map_file,
self.sector_ind_path,self.single_step)
model_grids[future_var].load_data()
for model_obj in tracked_model_objects:
model_obj.extract_attribute_grid(model_grids[future_var], future=True)
if future_var not in tendency_variables:
del model_grids[future_var]
for tendency_var in tendency_variables:
print("Tendency {0} {1} {2}".format(tendency_var, self.ensemble_member, self.run_date.strftime("%Y%m%d")))
if tendency_var not in model_grids.keys():
model_grids[tendency_var] = ModelOutput(self.ensemble_name, self.ensemble_member,
self.run_date, tendency_var,
self.start_date - timedelta(hours=1),
self.end_date,
self.model_path, self.model_map_file,
self.sector_ind_path,self.single_step)
for model_obj in tracked_model_objects:
model_obj.extract_tendency_grid(model_grids[tendency_var])
del model_grids[tendency_var]
|
Extract model attribute data for each model track. Storm variables are those that describe the model storm
directly, such as radar reflectivity or updraft helicity. Potential variables describe the surrounding
environmental conditions of the storm, and should be extracted from the timestep before the storm arrives to
reduce the chance of the storm contaminating the environmental values. Examples of potential variables include
CAPE, shear, temperature, and dewpoint. Future variables are fields that occur in the hour after the extracted
field.
Args:
tracked_model_objects: List of STObjects describing each forecasted storm
storm_variables: List of storm variable names
potential_variables: List of potential variable names.
tendency_variables: List of tendency variable names
future_variables: List of future variable names
|
entailment
|
def match_hail_sizes(model_tracks, obs_tracks, track_pairings):
"""
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm
track timestep. If the duration of the forecast and observed tracks differ, then interpolation is used for the
intermediate timesteps.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed STObjects
track_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks
"""
unpaired = list(range(len(model_tracks)))
for p, pair in enumerate(track_pairings):
model_track = model_tracks[pair[0]]
unpaired.remove(pair[0])
obs_track = obs_tracks[pair[1]]
obs_hail_sizes = np.array([step[obs_track.masks[t] == 1].max()
for t, step in enumerate(obs_track.timesteps)])
if obs_track.times.size > 1 and model_track.times.size > 1:
normalized_obs_times = 1.0 / (obs_track.times.max() - obs_track.times.min())\
* (obs_track.times - obs_track.times.min())
normalized_model_times = 1.0 / (model_track.times.max() - model_track.times.min())\
* (model_track.times - model_track.times.min())
hail_interp = interp1d(normalized_obs_times, obs_hail_sizes, kind="nearest",
bounds_error=False, fill_value=0)
model_track.observations = hail_interp(normalized_model_times)
elif obs_track.times.size == 1:
model_track.observations = np.ones(model_track.times.shape) * obs_hail_sizes[0]
elif model_track.times.size == 1:
model_track.observations = np.array([obs_hail_sizes.max()])
print(pair[0], "obs", obs_hail_sizes)
print(pair[0], "model", model_track.observations)
for u in unpaired:
model_tracks[u].observations = np.zeros(model_tracks[u].times.shape)
|
Given forecast and observed track pairings, maximum hail sizes are associated with each paired forecast storm
track timestep. If the duration of the forecast and observed tracks differ, then interpolation is used for the
intermediate timesteps.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed STObjects
track_pairings: list of tuples containing the indices of the paired (forecast, observed) tracks
|
entailment
|
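The interpolation step in match_hail_sizes normalizes both tracks' times to [0, 1] and resamples the observed hail sizes with nearest-neighbor interpolation. A standalone sketch of just that step, with made-up values:

import numpy as np
from scipy.interpolate import interp1d

# Made-up example: a 3-step observed track matched to a 4-step forecast track.
obs_times = np.array([18, 19, 20])
obs_hail_sizes = np.array([22.0, 35.0, 28.0])   # mm, max hail per observed step
model_times = np.array([18, 19, 20, 21])
norm_obs = (obs_times - obs_times.min()) / (obs_times.max() - obs_times.min())
norm_model = (model_times - model_times.min()) / (model_times.max() - model_times.min())
hail_interp = interp1d(norm_obs, obs_hail_sizes, kind="nearest", bounds_error=False, fill_value=0)
print(hail_interp(norm_model))                  # [22. 35. 35. 28.]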
def match_hail_size_step_distributions(self, model_tracks, obs_tracks, track_pairings):
"""
Given a matching set of observed tracks for each model track, fit a gamma distribution to the
matched observed hail sizes at each forecast track timestep and store the results in each
model track's observations table.
Args:
    model_tracks: List of model track STObjects.
    obs_tracks: List of observed track STObjects.
    track_pairings: pandas DataFrame with "Matched" and "Pairings" entries for each forecast track timestep.
Returns:
"""
label_columns = ["Matched", "Max_Hail_Size", "Num_Matches", "Shape", "Location", "Scale"]
s = 0
for m, model_track in enumerate(model_tracks):
model_track.observations = pd.DataFrame(index=model_track.times, columns=label_columns, dtype=np.float64)
model_track.observations.loc[:, :] = 0
model_track.observations["Matched"] = model_track.observations["Matched"].astype(np.int32)
for t, time in enumerate(model_track.times):
model_track.observations.loc[time, "Matched"] = track_pairings.loc[s, "Matched"]
if model_track.observations.loc[time, "Matched"] > 0:
all_hail_sizes = []
step_pairs = track_pairings.loc[s, "Pairings"]
for step_pair in step_pairs:
obs_step = obs_tracks[step_pair[0]].timesteps[step_pair[1]].ravel()
obs_mask = obs_tracks[step_pair[0]].masks[step_pair[1]].ravel()
all_hail_sizes.append(obs_step[(obs_mask == 1) & (obs_step >= self.mrms_ew.min_thresh)])
combined_hail_sizes = np.concatenate(all_hail_sizes)
min_hail = combined_hail_sizes.min() - 0.1
model_track.observations.loc[time, "Max_Hail_Size"] = combined_hail_sizes.max()
model_track.observations.loc[time, "Num_Matches"] = step_pairs.shape[0]
model_track.observations.loc[time, ["Shape", "Location", "Scale"]] = gamma.fit(combined_hail_sizes,
floc=min_hail)
s += 1
|
Given a matching set of observed tracks for each model track, fit a gamma distribution to the
matched observed hail sizes at each forecast track timestep and store the results in each
model track's observations table.
Args:
    model_tracks: List of model track STObjects.
    obs_tracks: List of observed track STObjects.
    track_pairings: pandas DataFrame with "Matched" and "Pairings" entries for each forecast track timestep.
Returns:
|
entailment
|
def calc_track_errors(model_tracks, obs_tracks, track_pairings):
"""
Calculates spatial and temporal translation errors between matched
forecast and observed tracks.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed track STObjects
track_pairings: List of tuples pairing forecast and observed tracks.
Returns:
pandas DataFrame containing different track errors
"""
columns = ['obs_track_id',
'translation_error_x',
'translation_error_y',
'start_time_difference',
'end_time_difference',
]
track_errors = pd.DataFrame(index=list(range(len(model_tracks))),
columns=columns)
for p, pair in enumerate(track_pairings):
model_track = model_tracks[pair[0]]
if type(pair[1]) in [int, np.int64]:
obs_track = obs_tracks[pair[1]]
else:
obs_track = obs_tracks[pair[1][0]]
model_com = model_track.center_of_mass(model_track.start_time)
obs_com = obs_track.center_of_mass(obs_track.start_time)
track_errors.loc[pair[0], 'obs_track_id'] = pair[1] if type(pair[1]) in [int, np.int64] else pair[1][0]
track_errors.loc[pair[0], 'translation_error_x'] = model_com[0] - obs_com[0]
track_errors.loc[pair[0], 'translation_error_y'] = model_com[1] - obs_com[1]
track_errors.loc[pair[0], 'start_time_difference'] = model_track.start_time - obs_track.start_time
track_errors.loc[pair[0], 'end_time_difference'] = model_track.end_time - obs_track.end_time
return track_errors
|
Calculates spatial and temporal translation errors between matched
forecast and observed tracks.
Args:
model_tracks: List of model track STObjects
obs_tracks: List of observed track STObjects
track_pairings: List of tuples pairing forecast and observed tracks.
Returns:
pandas DataFrame containing different track errors
|
entailment
|