column                      type           range
repository_name             stringlengths  7–55
func_path_in_repository     stringlengths  4–223
func_name                   stringlengths  1–134
whole_func_string           stringlengths  75–104k
language                    stringclasses  1 value
func_code_string            stringlengths  75–104k
func_code_tokens            listlengths    19–28.4k
func_documentation_string   stringlengths  1–46.9k
func_documentation_tokens   listlengths    1–1.97k
split_name                  stringclasses  1 value
func_code_url               stringlengths  87–315
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.load_data
def load_data(self):
    """
    Loads data from MRMS GRIB2 files and handles compression duties if files are compressed.
    """
    data = []
    loaded_dates = []
    loaded_indices = []
    for t, timestamp in enumerate(self.all_dates):
        date_str = timestamp.date().strftime("%Y%m%d")
        full_path = self.path_start + date_str + "/"
        if self.variable in os.listdir(full_path):
            full_path += self.variable + "/"
            data_files = sorted(os.listdir(full_path))
            file_dates = pd.to_datetime([d.split("_")[-1][0:13] for d in data_files])
            if timestamp in file_dates:
                data_file = data_files[np.where(timestamp == file_dates)[0][0]]
                print(full_path + data_file)
                if data_file[-2:] == "gz":
                    subprocess.call(["gunzip", full_path + data_file])
                    file_obj = Nio.open_file(full_path + data_file[:-3])
                else:
                    file_obj = Nio.open_file(full_path + data_file)
                var_name = sorted(file_obj.variables.keys())[0]
                data.append(file_obj.variables[var_name][:])
                if self.lon is None:
                    self.lon = file_obj.variables["lon_0"][:]
                    # Translates longitude values from 0:360 to -180:180
                    if np.count_nonzero(self.lon > 180) > 0:
                        self.lon -= 360
                    self.lat = file_obj.variables["lat_0"][:]
                file_obj.close()
                if data_file[-2:] == "gz":
                    subprocess.call(["gzip", full_path + data_file[:-3]])
                else:
                    subprocess.call(["gzip", full_path + data_file])
                loaded_dates.append(timestamp)
                loaded_indices.append(t)
    if len(loaded_dates) > 0:
        self.loaded_dates = pd.DatetimeIndex(loaded_dates)
        self.data = np.ones((self.all_dates.shape[0], data[0].shape[0], data[0].shape[1])) * -9999
        self.data[loaded_indices] = np.array(data)
python
Loads data from MRMS GRIB2 files and handles compression duties if files are compressed.
[ "Loads", "data", "from", "MRMS", "GRIB2", "files", "and", "handles", "compression", "duties", "if", "files", "are", "compressed", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L134-L174
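For context, a minimal sketch of the decompress–read–recompress pattern load_data uses, with Python's gzip module standing in for the external gunzip/gzip calls (an assumption; the original shells out via subprocess and reads GRIB2 with PyNIO):

import gzip
import shutil
from pathlib import Path

def read_compressed(path):
    """Decompress a .gz file next to the original, hand the plain file to a
    reader, then remove it, mirroring load_data's gunzip/read/gzip flow."""
    path = Path(path)
    if path.suffix == ".gz":
        plain = path.with_suffix("")          # strip the trailing .gz
        with gzip.open(path, "rb") as src, open(plain, "wb") as dst:
            shutil.copyfileobj(src, dst)      # the "gunzip" step
        try:
            data = plain.read_bytes()         # placeholder for Nio.open_file
        finally:
            plain.unlink()                    # the "re-gzip" step; original stays intact
        return data
    return path.read_bytes()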
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.interpolate_grid
def interpolate_grid(self, in_lon, in_lat):
    """
    Interpolates MRMS data to a different grid using cubic bivariate splines
    """
    out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
    for d in range(self.data.shape[0]):
        print("Loading ", d, self.variable, self.start_date)
        if self.data[d].max() > -999:
            step = self.data[d]
            step[step < 0] = 0
            if self.lat[-1] < self.lat[0]:
                spline = RectBivariateSpline(self.lat[::-1], self.lon, step[::-1], kx=3, ky=3)
            else:
                spline = RectBivariateSpline(self.lat, self.lon, step, kx=3, ky=3)
            print("Evaluating", d, self.variable, self.start_date)
            flat_data = spline.ev(in_lat.ravel(), in_lon.ravel())
            out_data[d] = flat_data.reshape(in_lon.shape)
            del spline
        else:
            print(d, " is missing")
            out_data[d] = -9999
    return out_data
python
Interpolates MRMS data to a different grid using cubic bivariate splines
[ "Interpolates", "MRMS", "data", "to", "a", "different", "grid", "using", "cubic", "bivariate", "splines" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L176-L197
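A self-contained sketch of the spline step above, using toy data rather than MRMS grids; the [::-1] flip in interpolate_grid exists because RectBivariateSpline requires strictly increasing axes while MRMS latitudes are stored north-to-south:

import numpy as np
from scipy.interpolate import RectBivariateSpline

# Source grid: regular 1D latitude/longitude axes with a 2D field on them.
lat = np.linspace(20, 50, 31)
lon = np.linspace(230, 300, 71)
field = np.random.rand(lat.size, lon.size)

# Target grid: arbitrary 2D lat/lon arrays (e.g., a projected model grid).
in_lat, in_lon = np.meshgrid(np.linspace(25, 45, 50),
                             np.linspace(240, 290, 60), indexing="ij")

spline = RectBivariateSpline(lat, lon, field, kx=3, ky=3)
out = spline.ev(in_lat.ravel(), in_lon.ravel()).reshape(in_lat.shape)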
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.max_neighbor
def max_neighbor(self, in_lon, in_lat, radius=0.05):
    """
    Finds the largest value within a given radius of a point on the interpolated grid.

    Args:
        in_lon: 2D array of longitude values
        in_lat: 2D array of latitude values
        radius: radius of influence for largest neighbor search in degrees

    Returns:
        Array of interpolated data
    """
    out_data = np.zeros((self.data.shape[0], in_lon.shape[0], in_lon.shape[1]))
    in_tree = cKDTree(np.vstack((in_lat.ravel(), in_lon.ravel())).T)
    out_indices = np.indices(out_data.shape[1:])
    out_rows = out_indices[0].ravel()
    out_cols = out_indices[1].ravel()
    for d in range(self.data.shape[0]):
        nz_points = np.where(self.data[d] > 0)
        if len(nz_points[0]) > 0:
            nz_vals = self.data[d][nz_points]
            nz_rank = np.argsort(nz_vals)
            original_points = cKDTree(np.vstack((self.lat[nz_points[0][nz_rank]],
                                                 self.lon[nz_points[1][nz_rank]])).T)
            all_neighbors = original_points.query_ball_tree(in_tree, radius, p=2, eps=0)
            for n, neighbors in enumerate(all_neighbors):
                if len(neighbors) > 0:
                    out_data[d, out_rows[neighbors], out_cols[neighbors]] = nz_vals[nz_rank][n]
    return out_data
python
Finds the largest value within a given radius of a point on the interpolated grid. Args: in_lon: 2D array of longitude values in_lat: 2D array of latitude values radius: radius of influence for largest neighbor search in degrees Returns: Array of interpolated data
[ "Finds", "the", "largest", "value", "within", "a", "given", "radius", "of", "a", "point", "on", "the", "interpolated", "grid", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L199-L226
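The neighbor assignment hinges on scipy's query_ball_tree; a toy sketch with hypothetical points and the same degree-space radius. Because max_neighbor visits source points in ascending value order, later (larger) values overwrite earlier ones, leaving the maximum in each output cell:

import numpy as np
from scipy.spatial import cKDTree

source = cKDTree(np.array([[35.0, 262.0], [35.1, 262.1]]))    # nonzero obs points
target = cKDTree(np.array([[35.0, 262.03], [40.0, 270.0]]))   # output grid points

# For each source point, indices of target points within 0.05 degrees.
neighbors = source.query_ball_tree(target, r=0.05, p=2, eps=0)
print(neighbors)  # [[0], []] -- only the first source point has a close target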
djgagne/hagelslag
hagelslag/util/convert_mrms_grids.py
MRMSGrid.interpolate_to_netcdf
def interpolate_to_netcdf(self, in_lon, in_lat, out_path, date_unit="seconds since 1970-01-01T00:00",
                          interp_type="spline"):
    """
    Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create
    separate directories for each variable if they are not already available.
    """
    if interp_type == "spline":
        out_data = self.interpolate_grid(in_lon, in_lat)
    else:
        out_data = self.max_neighbor(in_lon, in_lat)
    if not os.access(out_path + self.variable, os.R_OK):
        try:
            os.mkdir(out_path + self.variable)
        except OSError:
            print(out_path + self.variable + " already created")
    out_file = out_path + self.variable + "/" + "{0}_{1}_{2}.nc".format(self.variable,
                                                                        self.start_date.strftime("%Y%m%d-%H:%M"),
                                                                        self.end_date.strftime("%Y%m%d-%H:%M"))
    out_obj = Dataset(out_file, "w")
    out_obj.createDimension("time", out_data.shape[0])
    out_obj.createDimension("y", out_data.shape[1])
    out_obj.createDimension("x", out_data.shape[2])
    data_var = out_obj.createVariable(self.variable, "f4", ("time", "y", "x"), zlib=True,
                                      fill_value=-9999.0,
                                      least_significant_digit=3)
    data_var[:] = out_data
    data_var.long_name = self.variable
    data_var.coordinates = "latitude longitude"
    if "MESH" in self.variable or "QPE" in self.variable:
        data_var.units = "mm"
    elif "Reflectivity" in self.variable:
        data_var.units = "dBZ"
    elif "Rotation" in self.variable:
        data_var.units = "s-1"
    else:
        data_var.units = ""
    out_lon = out_obj.createVariable("longitude", "f4", ("y", "x"), zlib=True)
    out_lon[:] = in_lon
    out_lon.units = "degrees_east"
    out_lat = out_obj.createVariable("latitude", "f4", ("y", "x"), zlib=True)
    out_lat[:] = in_lat
    out_lat.units = "degrees_north"
    dates = out_obj.createVariable("time", "i8", ("time",), zlib=True)
    dates[:] = np.round(date2num(self.all_dates.to_pydatetime(), date_unit)).astype(np.int64)
    dates.long_name = "Valid date"
    dates.units = date_unit
    out_obj.Conventions = "CF-1.6"
    out_obj.close()
    return
python
Calls the interpolation function and then saves the MRMS data to a netCDF file. It will also create separate directories for each variable if they are not already available.
[ "Calls", "the", "interpolation", "function", "and", "then", "saves", "the", "MRMS", "data", "to", "a", "netCDF", "file", ".", "It", "will", "also", "create", "separate", "directories", "for", "each", "variable", "if", "they", "are", "not", "already", "available", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/util/convert_mrms_grids.py#L228-L276
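A trimmed sketch of the netCDF writing pattern above, using the netCDF4 package the function relies on; the dimensions and variable name here are hypothetical:

import numpy as np
from netCDF4 import Dataset

out = Dataset("example.nc", "w")
out.createDimension("time", 4)
out.createDimension("y", 10)
out.createDimension("x", 12)
# least_significant_digit trades precision for compression: values are
# stored accurately to about 3 decimal places.
var = out.createVariable("MESH", "f4", ("time", "y", "x"),
                         zlib=True, fill_value=-9999.0,
                         least_significant_digit=3)
var[:] = np.zeros((4, 10, 12), dtype=np.float32)
var.units = "mm"
out.Conventions = "CF-1.6"
out.close()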
nion-software/nionswift
nion/swift/model/HardwareSource.py
get_data_generator_by_id
def get_data_generator_by_id(hardware_source_id, sync=True):
    """ Return a generator for data.

    :param bool sync: whether to wait for current frame to finish then collect next frame

    NOTE: a new ndarray is created for each call.
    """
    hardware_source = HardwareSourceManager().get_hardware_source_for_hardware_source_id(hardware_source_id)

    def get_last_data():
        return hardware_source.get_next_xdatas_to_finish()[0].data.copy()

    yield get_last_data
python
Return a generator for data. :param bool sync: whether to wait for current frame to finish then collect next frame NOTE: a new ndarray is created for each call.
[ "Return", "a", "generator", "for", "data", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1053-L1064
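A usage sketch for the yield-a-callable pattern above: one next() retrieves the grabber, and each grabber call copies a fresh frame. The hardware source id is hypothetical, and note that the documented sync parameter is accepted but unused in the body shown:

data_generator = get_data_generator_by_id("my_camera")  # hypothetical id
get_last_data = next(data_generator)
frame_a = get_last_data()
frame_b = get_last_data()  # independent ndarray copies of successive frames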
nion-software/nionswift
nion/swift/model/HardwareSource.py
parse_hardware_aliases_config_file
def parse_hardware_aliases_config_file(config_path):
    """
    Parse config file for aliases and automatically register them.

    Returns True if alias file was found and parsed (successfully or unsuccessfully).

    Returns False if alias file was not found.

    Config file is a standard .ini file with a section
    """
    if os.path.exists(config_path):
        logging.info("Parsing alias file {:s}".format(config_path))
        try:
            config = configparser.ConfigParser()
            config.read(config_path)
            for section in config.sections():
                device = config.get(section, "device")
                hardware_alias = config.get(section, "hardware_alias")
                display_name = config.get(section, "display_name")
                try:
                    logging.info("Adding alias {:s} for device {:s}, display name: {:s} ".format(hardware_alias, device, display_name))
                    HardwareSourceManager().make_instrument_alias(device, hardware_alias, _(display_name))
                except Exception as e:
                    logging.info("Error creating hardware alias {:s} for device {:s} ".format(hardware_alias, device))
                    logging.info(traceback.format_exc())
        except Exception as e:
            logging.info("Error reading alias file from: " + config_path)
            logging.info(traceback.format_exc())
        return True
    return False
python
Parse config file for aliases and automatically register them. Returns True if alias file was found and parsed (successfully or unsuccessfully). Returns False if alias file was not found. Config file is a standard .ini file with a section
[ "Parse", "config", "file", "for", "aliases", "and", "automatically", "register", "them", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1072-L1101
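The expected file layout is a standard .ini with one section per alias; the section name and values below are hypothetical, but the key names are taken from the config.get calls above:

import configparser

ALIAS_INI = """
[camera_alias]
device = superscan
hardware_alias = scan
display_name = Scan Controller
"""

config = configparser.ConfigParser()
config.read_string(ALIAS_INI)
for section in config.sections():
    # Same three keys the parser reads for each section.
    print(config.get(section, "device"),
          config.get(section, "hardware_alias"),
          config.get(section, "display_name"))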
nion-software/nionswift
nion/swift/model/HardwareSource.py
HardwareSourceManager.make_instrument_alias
def make_instrument_alias(self, instrument_id, alias_instrument_id, display_name):
    """ Configure an alias.

    Callers can use the alias to refer to the instrument or hardware source.
    The alias should be lowercase, no spaces. The display name may be used to display alias to
    the user. Neither the original instrument or hardware source id and the alias id should
    ever be visible to end users.

    :param str instrument_id: the hardware source id (lowercase, no spaces)
    :param str alias_instrument_id: the alias of the hardware source id (lowercase, no spaces)
    :param str display_name: the display name for the alias
    """
    self.__aliases[alias_instrument_id] = (instrument_id, display_name)
    for f in self.aliases_updated:
        f()
python
Configure an alias. Callers can use the alias to refer to the instrument or hardware source. The alias should be lowercase, no spaces. The display name may be used to display alias to the user. Neither the original instrument or hardware source id and the alias id should ever be visible to end users. :param str instrument_id: the hardware source id (lowercase, no spaces) :param str alias_instrument_id: the alias of the hardware source id (lowercase, no spaces) :param str display_name: the display name for the alias
[ "Configure", "an", "alias", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L168-L182
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannel.update
def update(self, data_and_metadata: DataAndMetadata.DataAndMetadata, state: str, sub_area, view_id) -> None:
    """Called from hardware source when new data arrives."""
    self.__state = state
    self.__sub_area = sub_area
    hardware_source_id = self.__hardware_source.hardware_source_id
    channel_index = self.index
    channel_id = self.channel_id
    channel_name = self.name
    metadata = copy.deepcopy(data_and_metadata.metadata)
    hardware_source_metadata = dict()
    hardware_source_metadata["hardware_source_id"] = hardware_source_id
    hardware_source_metadata["channel_index"] = channel_index
    if channel_id is not None:
        hardware_source_metadata["reference_key"] = "_".join([hardware_source_id, channel_id])
        hardware_source_metadata["channel_id"] = channel_id
    else:
        hardware_source_metadata["reference_key"] = hardware_source_id
    if channel_name is not None:
        hardware_source_metadata["channel_name"] = channel_name
    if view_id:
        hardware_source_metadata["view_id"] = view_id
    metadata.setdefault("hardware_source", dict()).update(hardware_source_metadata)
    data = data_and_metadata.data
    master_data = self.__data_and_metadata.data if self.__data_and_metadata else None
    data_matches = master_data is not None and data.shape == master_data.shape and data.dtype == master_data.dtype
    if data_matches and sub_area is not None:
        top = sub_area[0][0]
        bottom = sub_area[0][0] + sub_area[1][0]
        left = sub_area[0][1]
        right = sub_area[0][1] + sub_area[1][1]
        if top > 0 or left > 0 or bottom < data.shape[0] or right < data.shape[1]:
            master_data = numpy.copy(master_data)
            master_data[top:bottom, left:right] = data[top:bottom, left:right]
        else:
            master_data = numpy.copy(data)
    else:
        master_data = data  # numpy.copy(data). assume data does not need a copy.
    data_descriptor = data_and_metadata.data_descriptor
    intensity_calibration = data_and_metadata.intensity_calibration if data_and_metadata else None
    dimensional_calibrations = data_and_metadata.dimensional_calibrations if data_and_metadata else None
    timestamp = data_and_metadata.timestamp
    new_extended_data = DataAndMetadata.new_data_and_metadata(master_data, intensity_calibration=intensity_calibration,
                                                              dimensional_calibrations=dimensional_calibrations,
                                                              metadata=metadata, timestamp=timestamp,
                                                              data_descriptor=data_descriptor)
    self.__data_and_metadata = new_extended_data
    self.data_channel_updated_event.fire(new_extended_data)
    self.is_dirty = True
python
Called from hardware source when new data arrives.
[ "Called", "from", "hardware", "source", "when", "new", "data", "arrives", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L487-L536
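The sub_area argument is read as ((top, left), (height, width)), and only that window is patched into the existing master frame; a small numpy sketch of the partial-update logic, under that assumption:

import numpy as np

master = np.zeros((8, 8))
incoming = np.ones((8, 8))
sub_area = ((2, 3), (4, 2))  # (top, left), (height, width)

top, left = sub_area[0]
bottom, right = top + sub_area[1][0], left + sub_area[1][1]

# Copy-on-write of just the sub-area window, as update() does when the
# window does not cover the whole frame.
patched = master.copy()
patched[top:bottom, left:right] = incoming[top:bottom, left:right]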
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannel.start
def start(self):
    """Called from hardware source when data starts streaming."""
    old_start_count = self.__start_count
    self.__start_count += 1
    if old_start_count == 0:
        self.data_channel_start_event.fire()
python
Called from hardware source when data starts streaming.
[ "Called", "from", "hardware", "source", "when", "data", "starts", "streaming", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L538-L543
nion-software/nionswift
nion/swift/model/HardwareSource.py
SumProcessor.connect_data_item_reference
def connect_data_item_reference(self, data_item_reference):
    """Connect to the data item reference, creating a crop graphic if necessary.

    If the data item reference does not yet have an associated data item, add a listener and
    wait for the data item to be set, then connect.
    """
    display_item = data_item_reference.display_item
    data_item = display_item.data_item if display_item else None
    if data_item and display_item:
        self.__connect_display(display_item)
    else:
        def data_item_reference_changed():
            self.__data_item_reference_changed_event_listener.close()
            self.connect_data_item_reference(data_item_reference)  # ugh. recursive mess.
        self.__data_item_reference_changed_event_listener = data_item_reference.data_item_reference_changed_event.listen(data_item_reference_changed)
python
Connect to the data item reference, creating a crop graphic if necessary. If the data item reference does not yet have an associated data item, add a listener and wait for the data item to be set, then connect.
[ "Connect", "to", "the", "data", "item", "reference", "creating", "a", "crop", "graphic", "if", "necessary", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1001-L1015
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.grab_earliest
def grab_earliest(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    """Grab the earliest data from the buffer, blocking until one is available."""
    timeout = timeout if timeout is not None else 10.0
    with self.__buffer_lock:
        if len(self.__buffer) == 0:
            done_event = threading.Event()
            self.__done_events.append(done_event)
            self.__buffer_lock.release()
            done = done_event.wait(timeout)
            self.__buffer_lock.acquire()
            if not done:
                raise Exception("Could not grab latest.")
        return self.__buffer.pop(0)
python
Grab the earliest data from the buffer, blocking until one is available.
[ "Grab", "the", "earliest", "data", "from", "the", "buffer", "blocking", "until", "one", "is", "available", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1194-L1206
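The blocking wait releases the buffer lock around Event.wait so a producer thread can append while the consumer sleeps; a minimal, self-contained sketch of the same release-wait-reacquire pattern, with plain lists and hypothetical names standing in for the buffer class:

import threading, time

buffer, lock, waiters = [], threading.Lock(), []

def grab_earliest(timeout=10.0):
    with lock:
        if not buffer:
            done = threading.Event()
            waiters.append(done)
            lock.release()      # let the producer in while we wait
            ok = done.wait(timeout)
            lock.acquire()      # re-acquire before the with-block exits
            if not ok:
                raise TimeoutError("no data arrived")
        return buffer.pop(0)

def produce():
    time.sleep(0.1)
    with lock:
        buffer.append("frame-0")
        for done in waiters:
            done.set()

threading.Thread(target=produce).start()
print(grab_earliest())  # -> frame-0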
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.grab_next
def grab_next(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    """Grab the next data to finish from the buffer, blocking until one is available."""
    with self.__buffer_lock:
        self.__buffer = list()
    return self.grab_latest(timeout)
python
Grab the next data to finish from the buffer, blocking until one is available.
[ "Grab", "the", "next", "data", "to", "finish", "from", "the", "buffer", "blocking", "until", "one", "is", "available", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1208-L1212
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.grab_following
def grab_following(self, timeout: float=None) -> typing.List[DataAndMetadata.DataAndMetadata]:
    """Grab the next data to start from the buffer, blocking until one is available."""
    self.grab_next(timeout)
    return self.grab_next(timeout)
python
Grab the next data to start from the buffer, blocking until one is available.
[ "Grab", "the", "next", "data", "to", "start", "from", "the", "buffer", "blocking", "until", "one", "is", "available", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1214-L1217
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.pause
def pause(self) -> None:
    """Pause recording. Thread safe and UI safe."""
    with self.__state_lock:
        if self.__state == DataChannelBuffer.State.started:
            self.__state = DataChannelBuffer.State.paused
python
Pause recording. Thread safe and UI safe.
[ "Pause", "recording", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1226-L1232
nion-software/nionswift
nion/swift/model/HardwareSource.py
DataChannelBuffer.resume
def resume(self) -> None:
    """Resume recording after pause. Thread safe and UI safe."""
    with self.__state_lock:
        if self.__state == DataChannelBuffer.State.paused:
            self.__state = DataChannelBuffer.State.started
python
Resume recording after pause. Thread safe and UI safe.
[ "Resume", "recording", "after", "pause", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1234-L1240
nvictus/priority-queue-dictionary
pqdict/__init__.py
nlargest
def nlargest(n, mapping):
    """
    Takes a mapping and returns the n keys associated with the largest values
    in descending order. If the mapping has fewer than n items, all its keys
    are returned.

    Equivalent to:
        ``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))``

    Returns
    -------
    list of up to n keys from the mapping

    """
    try:
        it = mapping.iteritems()
    except AttributeError:
        it = iter(mapping.items())
    pq = minpq()
    try:
        for i in range(n):
            pq.additem(*next(it))
    except StopIteration:
        pass
    try:
        while it:
            pq.pushpopitem(*next(it))
    except StopIteration:
        pass
    out = list(pq.popkeys())
    out.reverse()
    return out
python
Takes a mapping and returns the n keys associated with the largest values in descending order. If the mapping has fewer than n items, all its keys are returned. Equivalent to: ``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))`` Returns ------- list of up to n keys from the mapping
[ "Takes", "a", "mapping", "and", "returns", "the", "n", "keys", "associated", "with", "the", "largest", "values", "in", "descending", "order", ".", "If", "the", "mapping", "has", "fewer", "than", "n", "items", "all", "its", "keys", "are", "returned", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L512-L543
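A hedged usage example, assuming the pqdict package is installed and exports nlargest at the top level as this file suggests:

from pqdict import nlargest  # assumed import path

scores = {"ann": 92, "bob": 85, "cara": 97, "dev": 78}
print(nlargest(2, scores))  # ['cara', 'ann'] -- keys of the two largest values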
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.fromkeys
def fromkeys(cls, iterable, value, **kwargs):
    """
    Return a new pqdict mapping keys from an iterable to the same value.

    """
    return cls(((k, value) for k in iterable), **kwargs)
python
Return a new pqdict mapping keys from an iterable to the same value.
[ "Return", "a", "new", "pqict", "mapping", "keys", "from", "an", "iterable", "to", "the", "same", "value", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L121-L126
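A usage sketch: unlike dict.fromkeys, value is required here rather than defaulting to None. The import path and the top() accessor are assumptions about the pqdict package:

from pqdict import pqdict  # assumed import path

pq = pqdict.fromkeys(["a", "b", "c"], 0)  # every key starts at priority 0
pq["b"] = -1                              # reprioritize; the heap updates in place
print(pq.top())                           # 'b' in a min-priority queue (assumed API)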
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.copy
def copy(self):
    """
    Return a shallow copy of a pqdict.

    """
    return self.__class__(self, key=self._keyfn, precedes=self._precedes)
python
Return a shallow copy of a pqdict.
[ "Return", "a", "shallow", "copy", "of", "a", "pqdict", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L201-L206
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.pop
def pop(self, key=__marker, default=__marker):
    """
    If ``key`` is in the pqdict, remove it and return its priority value,
    else return ``default``. If ``default`` is not provided and ``key`` is
    not in the pqdict, raise a ``KeyError``.

    If ``key`` is not provided, remove the top item and return its key, or
    raise ``KeyError`` if the pqdict is empty.

    """
    heap = self._heap
    position = self._position
    # pq semantics: remove and return top *key* (value is discarded)
    if key is self.__marker:
        if not heap:
            raise KeyError('pqdict is empty')
        key = heap[0].key
        del self[key]
        return key
    # dict semantics: remove and return *value* mapped from key
    try:
        pos = position.pop(key)  # raises KeyError
    except KeyError:
        if default is self.__marker:
            raise
        return default
    else:
        node_to_delete = heap[pos]
        end = heap.pop()
        if end is not node_to_delete:
            heap[pos] = end
            position[end.key] = pos
            self._reheapify(pos)
        value = node_to_delete.value
        del node_to_delete
        return value
python
If ``key`` is in the pqdict, remove it and return its priority value, else return ``default``. If ``default`` is not provided and ``key`` is not in the pqdict, raise a ``KeyError``. If ``key`` is not provided, remove the top item and return its key, or raise ``KeyError`` if the pqdict is empty.
[ "If", "key", "is", "in", "the", "pqdict", "remove", "it", "and", "return", "its", "priority", "value", "else", "return", "default", ".", "If", "default", "is", "not", "provided", "and", "key", "is", "not", "in", "the", "pqdict", "raise", "a", "KeyError", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L208-L243
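A short sketch of `pop`'s dual semantics as documented above, assuming a min-first pqdict built from a plain dict.

from pqdict import pqdict

pq = pqdict({'a': 3, 'b': 1, 'c': 2})

value = pq.pop('c')            # dict semantics: returns the priority value, 2
top_key = pq.pop()             # pq semantics: returns the top *key*, 'b'
missing = pq.pop('zzz', None)  # a missing key falls back to the default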
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.popitem
def popitem(self):
    """
    Remove and return the item with highest priority. Raises ``KeyError``
    if pqdict is empty.

    """
    heap = self._heap
    position = self._position
    try:
        end = heap.pop(-1)
    except IndexError:
        raise KeyError('pqdict is empty')
    if heap:
        node = heap[0]
        heap[0] = end
        position[end.key] = 0
        self._sink(0)
    else:
        node = end
    del position[node.key]
    return node.key, node.value
python
def popitem(self):
    """
    Remove and return the item with highest priority. Raises ``KeyError``
    if pqdict is empty.

    """
    heap = self._heap
    position = self._position
    try:
        end = heap.pop(-1)
    except IndexError:
        raise KeyError('pqdict is empty')
    if heap:
        node = heap[0]
        heap[0] = end
        position[end.key] = 0
        self._sink(0)
    else:
        node = end
    del position[node.key]
    return node.key, node.value
[ "def", "popitem", "(", "self", ")", ":", "heap", "=", "self", ".", "_heap", "position", "=", "self", ".", "_position", "try", ":", "end", "=", "heap", ".", "pop", "(", "-", "1", ")", "except", "IndexError", ":", "raise", "KeyError", "(", "'pqdict is empty'", ")", "if", "heap", ":", "node", "=", "heap", "[", "0", "]", "heap", "[", "0", "]", "=", "end", "position", "[", "end", ".", "key", "]", "=", "0", "self", ".", "_sink", "(", "0", ")", "else", ":", "node", "=", "end", "del", "position", "[", "node", ".", "key", "]", "return", "node", ".", "key", ",", "node", ".", "value" ]
Remove and return the item with highest priority. Raises ``KeyError`` if pqdict is empty.
[ "Remove", "and", "return", "the", "item", "with", "highest", "priority", ".", "Raises", "KeyError", "if", "pqdict", "is", "empty", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L260-L282
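The usual consumption pattern implied by `popitem`: a sketch that drains items in priority order, assuming a pqdict truth-tests as empty/non-empty like an ordinary mapping.

from pqdict import pqdict

pq = pqdict({'low': 1, 'mid': 5, 'high': 9})
while pq:
    key, value = pq.popitem()   # removes the current top item
    print(key, value)           # low 1, then mid 5, then high 9 (min-first)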
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.topitem
def topitem(self):
    """
    Return the item with highest priority. Raises ``KeyError`` if pqdict
    is empty.

    """
    try:
        node = self._heap[0]
    except IndexError:
        raise KeyError('pqdict is empty')
    return node.key, node.value
python
def topitem(self):
    """
    Return the item with highest priority. Raises ``KeyError`` if pqdict
    is empty.

    """
    try:
        node = self._heap[0]
    except IndexError:
        raise KeyError('pqdict is empty')
    return node.key, node.value
[ "def", "topitem", "(", "self", ")", ":", "try", ":", "node", "=", "self", ".", "_heap", "[", "0", "]", "except", "IndexError", ":", "raise", "KeyError", "(", "'pqdict is empty'", ")", "return", "node", ".", "key", ",", "node", ".", "value" ]
Return the item with highest priority. Raises ``KeyError`` if pqdict is empty.
[ "Return", "the", "item", "with", "highest", "priority", ".", "Raises", "KeyError", "if", "pqdict", "is", "empty", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L284-L294
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.additem
def additem(self, key, value):
    """
    Add a new item. Raises ``KeyError`` if key is already in the pqdict.

    """
    if key in self._position:
        raise KeyError('%s is already in the queue' % repr(key))
    self[key] = value
python
def additem(self, key, value):
    """
    Add a new item. Raises ``KeyError`` if key is already in the pqdict.

    """
    if key in self._position:
        raise KeyError('%s is already in the queue' % repr(key))
    self[key] = value
[ "def", "additem", "(", "self", ",", "key", ",", "value", ")", ":", "if", "key", "in", "self", ".", "_position", ":", "raise", "KeyError", "(", "'%s is already in the queue'", "%", "repr", "(", "key", ")", ")", "self", "[", "key", "]", "=", "value" ]
Add a new item. Raises ``KeyError`` if key is already in the pqdict.
[ "Add", "a", "new", "item", ".", "Raises", "KeyError", "if", "key", "is", "already", "in", "the", "pqdict", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L296-L303
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.pushpopitem
def pushpopitem(self, key, value, node_factory=_Node):
    """
    Equivalent to inserting a new item followed by removing the top
    priority item, but faster. Raises ``KeyError`` if the new key is
    already in the pqdict.

    """
    heap = self._heap
    position = self._position
    precedes = self._precedes
    prio = self._keyfn(value) if self._keyfn else value
    node = node_factory(key, value, prio)
    if key in self:
        raise KeyError('%s is already in the queue' % repr(key))
    if heap and precedes(heap[0].prio, node.prio):
        node, heap[0] = heap[0], node
        position[key] = 0
        del position[node.key]
        self._sink(0)
    return node.key, node.value
python
def pushpopitem(self, key, value, node_factory=_Node):
    """
    Equivalent to inserting a new item followed by removing the top
    priority item, but faster. Raises ``KeyError`` if the new key is
    already in the pqdict.

    """
    heap = self._heap
    position = self._position
    precedes = self._precedes
    prio = self._keyfn(value) if self._keyfn else value
    node = node_factory(key, value, prio)
    if key in self:
        raise KeyError('%s is already in the queue' % repr(key))
    if heap and precedes(heap[0].prio, node.prio):
        node, heap[0] = heap[0], node
        position[key] = 0
        del position[node.key]
        self._sink(0)
    return node.key, node.value
[ "def", "pushpopitem", "(", "self", ",", "key", ",", "value", ",", "node_factory", "=", "_Node", ")", ":", "heap", "=", "self", ".", "_heap", "position", "=", "self", ".", "_position", "precedes", "=", "self", ".", "_precedes", "prio", "=", "self", ".", "_keyfn", "(", "value", ")", "if", "self", ".", "_keyfn", "else", "value", "node", "=", "node_factory", "(", "key", ",", "value", ",", "prio", ")", "if", "key", "in", "self", ":", "raise", "KeyError", "(", "'%s is already in the queue'", "%", "repr", "(", "key", ")", ")", "if", "heap", "and", "precedes", "(", "heap", "[", "0", "]", ".", "prio", ",", "node", ".", "prio", ")", ":", "node", ",", "heap", "[", "0", "]", "=", "heap", "[", "0", "]", ",", "node", "position", "[", "key", "]", "=", "0", "del", "position", "[", "node", ".", "key", "]", "self", ".", "_sink", "(", "0", ")", "return", "node", ".", "key", ",", "node", ".", "value" ]
Equivalent to inserting a new item followed by removing the top priority item, but faster. Raises ``KeyError`` if the new key is already in the pqdict.
[ "Equivalent", "to", "inserting", "a", "new", "item", "followed", "by", "removing", "the", "top", "priority", "item", "but", "faster", ".", "Raises", "KeyError", "if", "the", "new", "key", "is", "already", "in", "the", "pqdict", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L305-L324
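A sketch of the shortcut `pushpopitem` provides, assuming min-first ordering. With a top priority of 4, a candidate carrying priority 2 would itself be the top item, so per the code above it is returned immediately and never inserted.

from pqdict import pqdict

pq = pqdict({'a': 4, 'b': 7})
key, value = pq.pushpopitem('c', 2)   # one heap operation, not two
assert (key, value) == ('c', 2) and 'c' not in pq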
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.updateitem
def updateitem(self, key, new_val):
    """
    Update the priority value of an existing item. Raises ``KeyError`` if
    key is not in the pqdict.

    """
    if key not in self._position:
        raise KeyError(key)
    self[key] = new_val
python
def updateitem(self, key, new_val):
    """
    Update the priority value of an existing item. Raises ``KeyError`` if
    key is not in the pqdict.

    """
    if key not in self._position:
        raise KeyError(key)
    self[key] = new_val
[ "def", "updateitem", "(", "self", ",", "key", ",", "new_val", ")", ":", "if", "key", "not", "in", "self", ".", "_position", ":", "raise", "KeyError", "(", "key", ")", "self", "[", "key", "]", "=", "new_val" ]
Update the priority value of an existing item. Raises ``KeyError`` if key is not in the pqdict.
[ "Update", "the", "priority", "value", "of", "an", "existing", "item", ".", "Raises", "KeyError", "if", "key", "is", "not", "in", "the", "pqdict", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L326-L334
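`updateitem` is the explicit form of a Dijkstra-style decrease-key. A hypothetical distance table, assuming min-first ordering, might use it like this:

from pqdict import pqdict

dist = pqdict({'s': 0, 'u': float('inf'), 'v': float('inf')})
dist.updateitem('u', 7)    # found a shorter tentative path to 'u'
dist.updateitem('v', 3)
node, d = dist.popitem()   # -> ('s', 0): settle the closest node first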
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.replace_key
def replace_key(self, key, new_key):
    """
    Replace the key of an existing heap node in place. Raises ``KeyError``
    if the key to replace does not exist or if the new key is already in
    the pqdict.

    """
    heap = self._heap
    position = self._position
    if new_key in self:
        raise KeyError('%s is already in the queue' % repr(new_key))
    pos = position.pop(key)  # raises appropriate KeyError
    position[new_key] = pos
    heap[pos].key = new_key
python
def replace_key(self, key, new_key):
    """
    Replace the key of an existing heap node in place. Raises ``KeyError``
    if the key to replace does not exist or if the new key is already in
    the pqdict.

    """
    heap = self._heap
    position = self._position
    if new_key in self:
        raise KeyError('%s is already in the queue' % repr(new_key))
    pos = position.pop(key)  # raises appropriate KeyError
    position[new_key] = pos
    heap[pos].key = new_key
[ "def", "replace_key", "(", "self", ",", "key", ",", "new_key", ")", ":", "heap", "=", "self", ".", "_heap", "position", "=", "self", ".", "_position", "if", "new_key", "in", "self", ":", "raise", "KeyError", "(", "'%s is already in the queue'", "%", "repr", "(", "new_key", ")", ")", "pos", "=", "position", ".", "pop", "(", "key", ")", "# raises appropriate KeyError", "position", "[", "new_key", "]", "=", "pos", "heap", "[", "pos", "]", ".", "key", "=", "new_key" ]
Replace the key of an existing heap node in place. Raises ``KeyError`` if the key to replace does not exist or if the new key is already in the pqdict.
[ "Replace", "the", "key", "of", "an", "existing", "heap", "node", "in", "place", ".", "Raises", "KeyError", "if", "the", "key", "to", "replace", "does", "not", "exist", "or", "if", "the", "new", "key", "is", "already", "in", "the", "pqdict", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L336-L349
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.swap_priority
def swap_priority(self, key1, key2):
    """
    Fast way to swap the priority level of two items in the pqdict.
    Raises ``KeyError`` if either key does not exist.

    """
    heap = self._heap
    position = self._position
    if key1 not in self or key2 not in self:
        raise KeyError
    pos1, pos2 = position[key1], position[key2]
    heap[pos1].key, heap[pos2].key = key2, key1
    position[key1], position[key2] = pos2, pos1
python
def swap_priority(self, key1, key2):
    """
    Fast way to swap the priority level of two items in the pqdict.
    Raises ``KeyError`` if either key does not exist.

    """
    heap = self._heap
    position = self._position
    if key1 not in self or key2 not in self:
        raise KeyError
    pos1, pos2 = position[key1], position[key2]
    heap[pos1].key, heap[pos2].key = key2, key1
    position[key1], position[key2] = pos2, pos1
[ "def", "swap_priority", "(", "self", ",", "key1", ",", "key2", ")", ":", "heap", "=", "self", ".", "_heap", "position", "=", "self", ".", "_position", "if", "key1", "not", "in", "self", "or", "key2", "not", "in", "self", ":", "raise", "KeyError", "pos1", ",", "pos2", "=", "position", "[", "key1", "]", ",", "position", "[", "key2", "]", "heap", "[", "pos1", "]", ".", "key", ",", "heap", "[", "pos2", "]", ".", "key", "=", "key2", ",", "key1", "position", "[", "key1", "]", ",", "position", "[", "key2", "]", "=", "pos2", ",", "pos1" ]
Fast way to swap the priority level of two items in the pqdict. Raises ``KeyError`` if either key does not exist.
[ "Fast", "way", "to", "swap", "the", "priority", "level", "of", "two", "items", "in", "the", "pqdict", ".", "Raises", "KeyError", "if", "either", "key", "does", "not", "exist", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L351-L363
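A sketch combining `swap_priority` with `replace_key` from the previous record. Per the code above, `swap_priority` relabels the heap nodes in place, so each key adopts the other's priority; the job names are illustrative.

from pqdict import pqdict

pq = pqdict({'job1': 2, 'job2': 8})
pq.swap_priority('job1', 'job2')        # 'job2' now carries priority 2
assert pq.topitem() == ('job2', 2)
pq.replace_key('job1', 'job1-renamed')  # rename without disturbing heap order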
nvictus/priority-queue-dictionary
pqdict/__init__.py
pqdict.heapify
def heapify(self, key=__marker):
    """
    Repair a broken heap. If the state of an item's priority value
    changes you can re-sort the relevant item only by providing ``key``.

    """
    if key is self.__marker:
        n = len(self._heap)
        for pos in reversed(range(n//2)):
            self._sink(pos)
    else:
        try:
            pos = self._position[key]
        except KeyError:
            raise KeyError(key)
        self._reheapify(pos)
python
def heapify(self, key=__marker):
    """
    Repair a broken heap. If the state of an item's priority value
    changes you can re-sort the relevant item only by providing ``key``.

    """
    if key is self.__marker:
        n = len(self._heap)
        for pos in reversed(range(n//2)):
            self._sink(pos)
    else:
        try:
            pos = self._position[key]
        except KeyError:
            raise KeyError(key)
        self._reheapify(pos)
[ "def", "heapify", "(", "self", ",", "key", "=", "__marker", ")", ":", "if", "key", "is", "self", ".", "__marker", ":", "n", "=", "len", "(", "self", ".", "_heap", ")", "for", "pos", "in", "reversed", "(", "range", "(", "n", "//", "2", ")", ")", ":", "self", ".", "_sink", "(", "pos", ")", "else", ":", "try", ":", "pos", "=", "self", ".", "_position", "[", "key", "]", "except", "KeyError", ":", "raise", "KeyError", "(", "key", ")", "self", ".", "_reheapify", "(", "pos", ")" ]
Repair a broken heap. If the state of an item's priority value changes you can re-sort the relevant item only by providing ``key``.
[ "Repair", "a", "broken", "heap", ".", "If", "the", "state", "of", "an", "item", "s", "priority", "value", "changes", "you", "can", "re", "-", "sort", "the", "relevant", "item", "only", "by", "providing", "key", "." ]
train
https://github.com/nvictus/priority-queue-dictionary/blob/577f9d3086058bec0e49cc2050dd9454b788d93b/pqdict/__init__.py#L398-L413
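A sketch of the repair scenario `heapify` targets. Without a key function, the value itself serves as the priority (see `pushpopitem` above), so mutating a mutable value can silently break the heap invariant; `heapify(key)` re-sorts just that item. This assumes min-first ordering and that values are compared directly.

from pqdict import pqdict

pq = pqdict({'a': [5], 'b': [1], 'c': [3]})  # lists compare element-wise
pq['a'][0] = 0          # mutate a priority behind the pqdict's back
pq.heapify('a')         # repair only the affected item
assert pq.topitem()[0] == 'a'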
ajk8/hatchery
hatchery/project.py
package_has_version_file
def package_has_version_file(package_name):
    """ Check to make sure _version.py is contained in the package """
    version_file_path = helpers.package_file_path('_version.py', package_name)
    return os.path.isfile(version_file_path)
python
def package_has_version_file(package_name):
    """ Check to make sure _version.py is contained in the package """
    version_file_path = helpers.package_file_path('_version.py', package_name)
    return os.path.isfile(version_file_path)
[ "def", "package_has_version_file", "(", "package_name", ")", ":", "version_file_path", "=", "helpers", ".", "package_file_path", "(", "'_version.py'", ",", "package_name", ")", "return", "os", ".", "path", ".", "isfile", "(", "version_file_path", ")" ]
Check to make sure _version.py is contained in the package
[ "Check", "to", "make", "sure", "_version", ".", "py", "is", "contained", "in", "the", "package" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L45-L48
ajk8/hatchery
hatchery/project.py
get_project_name
def get_project_name():
    """ Grab the project name out of setup.py """
    setup_py_content = helpers.get_file_content('setup.py')
    ret = helpers.value_of_named_argument_in_function(
        'name', 'setup', setup_py_content, resolve_varname=True
    )
    if ret and ret[0] == ret[-1] in ('"', "'"):
        ret = ret[1:-1]
    return ret
python
def get_project_name():
    """ Grab the project name out of setup.py """
    setup_py_content = helpers.get_file_content('setup.py')
    ret = helpers.value_of_named_argument_in_function(
        'name', 'setup', setup_py_content, resolve_varname=True
    )
    if ret and ret[0] == ret[-1] in ('"', "'"):
        ret = ret[1:-1]
    return ret
[ "def", "get_project_name", "(", ")", ":", "setup_py_content", "=", "helpers", ".", "get_file_content", "(", "'setup.py'", ")", "ret", "=", "helpers", ".", "value_of_named_argument_in_function", "(", "'name'", ",", "'setup'", ",", "setup_py_content", ",", "resolve_varname", "=", "True", ")", "if", "ret", "and", "ret", "[", "0", "]", "==", "ret", "[", "-", "1", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ":", "ret", "=", "ret", "[", "1", ":", "-", "1", "]", "return", "ret" ]
Grab the project name out of setup.py
[ "Grab", "the", "project", "name", "out", "of", "setup", ".", "py" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L78-L86
ajk8/hatchery
hatchery/project.py
get_version
def get_version(package_name, ignore_cache=False):
    """ Get the version which is currently configured by the package """
    if ignore_cache:
        with microcache.temporarily_disabled():
            found = helpers.regex_in_package_file(
                VERSION_SET_REGEX, '_version.py', package_name, return_match=True
            )
    else:
        found = helpers.regex_in_package_file(
            VERSION_SET_REGEX, '_version.py', package_name, return_match=True
        )
    if found is None:
        # the original message contained an unfilled '{}' placeholder
        raise ProjectError('found _version.py, but __version__ is not defined')
    current_version = found['version']
    return current_version
python
def get_version(package_name, ignore_cache=False):
    """ Get the version which is currently configured by the package """
    if ignore_cache:
        with microcache.temporarily_disabled():
            found = helpers.regex_in_package_file(
                VERSION_SET_REGEX, '_version.py', package_name, return_match=True
            )
    else:
        found = helpers.regex_in_package_file(
            VERSION_SET_REGEX, '_version.py', package_name, return_match=True
        )
    if found is None:
        # the original message contained an unfilled '{}' placeholder
        raise ProjectError('found _version.py, but __version__ is not defined')
    current_version = found['version']
    return current_version
[ "def", "get_version", "(", "package_name", ",", "ignore_cache", "=", "False", ")", ":", "if", "ignore_cache", ":", "with", "microcache", ".", "temporarily_disabled", "(", ")", ":", "found", "=", "helpers", ".", "regex_in_package_file", "(", "VERSION_SET_REGEX", ",", "'_version.py'", ",", "package_name", ",", "return_match", "=", "True", ")", "else", ":", "found", "=", "helpers", ".", "regex_in_package_file", "(", "VERSION_SET_REGEX", ",", "'_version.py'", ",", "package_name", ",", "return_match", "=", "True", ")", "if", "found", "is", "None", ":", "raise", "ProjectError", "(", "'found {}, but __version__ is not defined'", ")", "current_version", "=", "found", "[", "'version'", "]", "return", "current_version" ]
Get the version which is currently configured by the package
[ "Get", "the", "version", "which", "is", "currently", "configured", "by", "the", "package" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L89-L103
ajk8/hatchery
hatchery/project.py
set_version
def set_version(package_name, version_str):
    """ Set the version in _version.py to version_str """
    current_version = get_version(package_name)
    version_file_path = helpers.package_file_path('_version.py', package_name)
    version_file_content = helpers.get_file_content(version_file_path)
    version_file_content = version_file_content.replace(current_version, version_str)
    with open(version_file_path, 'w') as version_file:
        version_file.write(version_file_content)
python
def set_version(package_name, version_str):
    """ Set the version in _version.py to version_str """
    current_version = get_version(package_name)
    version_file_path = helpers.package_file_path('_version.py', package_name)
    version_file_content = helpers.get_file_content(version_file_path)
    version_file_content = version_file_content.replace(current_version, version_str)
    with open(version_file_path, 'w') as version_file:
        version_file.write(version_file_content)
[ "def", "set_version", "(", "package_name", ",", "version_str", ")", ":", "current_version", "=", "get_version", "(", "package_name", ")", "version_file_path", "=", "helpers", ".", "package_file_path", "(", "'_version.py'", ",", "package_name", ")", "version_file_content", "=", "helpers", ".", "get_file_content", "(", "version_file_path", ")", "version_file_content", "=", "version_file_content", ".", "replace", "(", "current_version", ",", "version_str", ")", "with", "open", "(", "version_file_path", ",", "'w'", ")", "as", "version_file", ":", "version_file", ".", "write", "(", "version_file_content", ")" ]
Set the version in _version.py to version_str
[ "Set", "the", "version", "in", "_version", ".", "py", "to", "version_str" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L106-L113
ajk8/hatchery
hatchery/project.py
version_is_valid
def version_is_valid(version_str):
    """ Check to see if the version specified is valid as far as
    pkg_resources is concerned

    >>> version_is_valid('blah')
    False
    >>> version_is_valid('1.2.3')
    True
    """
    try:
        packaging.version.Version(version_str)
    except packaging.version.InvalidVersion:
        return False
    return True
python
def version_is_valid(version_str):
    """ Check to see if the version specified is valid as far as
    pkg_resources is concerned

    >>> version_is_valid('blah')
    False
    >>> version_is_valid('1.2.3')
    True
    """
    try:
        packaging.version.Version(version_str)
    except packaging.version.InvalidVersion:
        return False
    return True
[ "def", "version_is_valid", "(", "version_str", ")", ":", "try", ":", "packaging", ".", "version", ".", "Version", "(", "version_str", ")", "except", "packaging", ".", "version", ".", "InvalidVersion", ":", "return", "False", "return", "True" ]
Check to see if the version specified is valid as far as pkg_resources is concerned >>> version_is_valid('blah') False >>> version_is_valid('1.2.3') True
[ "Check", "to", "see", "if", "the", "version", "specified", "is", "a", "valid", "as", "far", "as", "pkg_resources", "is", "concerned" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L116-L128
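The same check, inlined against the `packaging` library that the function above relies on; the candidate strings are examples only.

import packaging.version

for candidate in ('1.2.3', '2.0rc1', 'blah'):
    try:
        packaging.version.Version(candidate)  # PEP 440 parse
        print(candidate, '-> valid')
    except packaging.version.InvalidVersion:
        print(candidate, '-> invalid')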
ajk8/hatchery
hatchery/project.py
_get_uploaded_versions_warehouse
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using warehouse api to find all of the "releases" """
    url = '/'.join((index_url, project_name, 'json'))
    response = requests.get(url, verify=requests_verify)
    if response.status_code == 200:
        return response.json()['releases'].keys()
    return None
python
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using warehouse api to find all of the "releases" """
    url = '/'.join((index_url, project_name, 'json'))
    response = requests.get(url, verify=requests_verify)
    if response.status_code == 200:
        return response.json()['releases'].keys()
    return None
[ "def", "_get_uploaded_versions_warehouse", "(", "project_name", ",", "index_url", ",", "requests_verify", "=", "True", ")", ":", "url", "=", "'/'", ".", "join", "(", "(", "index_url", ",", "project_name", ",", "'json'", ")", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "requests_verify", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "response", ".", "json", "(", ")", "[", "'releases'", "]", ".", "keys", "(", ")", "return", "None" ]
Query the pypi index at index_url using warehouse api to find all of the "releases"
[ "Query", "the", "pypi", "index", "at", "index_url", "using", "warehouse", "api", "to", "find", "all", "of", "the", "releases" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L131-L137
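An illustrative, network-dependent sketch of the same query against the public PyPI warehouse. The index URL and project name here are examples only; `releases` is the JSON key the function above reads.

import requests

url = '/'.join(('https://pypi.org/pypi', 'requests', 'json'))
response = requests.get(url)
if response.status_code == 200:
    versions = sorted(response.json()['releases'].keys())
    print(versions[:3])  # a few of the available version strings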
ajk8/hatchery
hatchery/project.py
_get_uploaded_versions_pypicloud
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using pypicloud api to find all versions """
    api_url = index_url
    for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
        if api_url.endswith(suffix):
            api_url = api_url[:len(suffix) * -1] + '/api/package'
            break
    url = '/'.join((api_url, project_name))
    response = requests.get(url, verify=requests_verify)
    if response.status_code == 200:
        return [p['version'] for p in response.json()['packages']]
    return None
python
def _get_uploaded_versions_pypicloud(project_name, index_url, requests_verify=True):
    """ Query the pypi index at index_url using pypicloud api to find all versions """
    api_url = index_url
    for suffix in ('/pypi', '/pypi/', '/simple', '/simple/'):
        if api_url.endswith(suffix):
            api_url = api_url[:len(suffix) * -1] + '/api/package'
            break
    url = '/'.join((api_url, project_name))
    response = requests.get(url, verify=requests_verify)
    if response.status_code == 200:
        return [p['version'] for p in response.json()['packages']]
    return None
[ "def", "_get_uploaded_versions_pypicloud", "(", "project_name", ",", "index_url", ",", "requests_verify", "=", "True", ")", ":", "api_url", "=", "index_url", "for", "suffix", "in", "(", "'/pypi'", ",", "'/pypi/'", ",", "'/simple'", ",", "'/simple/'", ")", ":", "if", "api_url", ".", "endswith", "(", "suffix", ")", ":", "api_url", "=", "api_url", "[", ":", "len", "(", "suffix", ")", "*", "-", "1", "]", "+", "'/api/package'", "break", "url", "=", "'/'", ".", "join", "(", "(", "api_url", ",", "project_name", ")", ")", "response", "=", "requests", ".", "get", "(", "url", ",", "verify", "=", "requests_verify", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "[", "p", "[", "'version'", "]", "for", "p", "in", "response", ".", "json", "(", ")", "[", "'packages'", "]", "]", "return", "None" ]
Query the pypi index at index_url using pypicloud api to find all versions
[ "Query", "the", "pypi", "index", "at", "index_url", "using", "pypicloud", "api", "to", "find", "all", "versions" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L140-L151
ajk8/hatchery
hatchery/project.py
version_already_uploaded
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
    """ Check to see if the version specified has already been uploaded to the configured index """
    all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
    return version_str in all_versions
python
def version_already_uploaded(project_name, version_str, index_url, requests_verify=True):
    """ Check to see if the version specified has already been uploaded to the configured index """
    all_versions = _get_uploaded_versions(project_name, index_url, requests_verify)
    return version_str in all_versions
[ "def", "version_already_uploaded", "(", "project_name", ",", "version_str", ",", "index_url", ",", "requests_verify", "=", "True", ")", ":", "all_versions", "=", "_get_uploaded_versions", "(", "project_name", ",", "index_url", ",", "requests_verify", ")", "return", "version_str", "in", "all_versions" ]
Check to see if the version specified has already been uploaded to the configured index
[ "Check", "to", "see", "if", "the", "version", "specified", "has", "already", "been", "uploaded", "to", "the", "configured", "index" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L168-L172
ajk8/hatchery
hatchery/project.py
convert_readme_to_rst
def convert_readme_to_rst():
    """ Attempt to convert a README.md file into README.rst """
    project_files = os.listdir('.')
    for filename in project_files:
        if filename.lower() == 'readme':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'not sure what to do with it, refusing to convert'
            )
        elif filename.lower() == 'readme.rst':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'refusing to overwrite'
            )
    for filename in project_files:
        if filename.lower() == 'readme.md':
            rst_filename = 'README.rst'
            logger.info('converting {} to {}'.format(filename, rst_filename))
            try:
                rst_content = pypandoc.convert(filename, 'rst')
                with open('README.rst', 'w') as rst_file:
                    rst_file.write(rst_content)
                return
            except OSError as e:
                raise ProjectError(
                    'could not convert readme to rst due to pypandoc error:' +
                    os.linesep + str(e)
                )
    raise ProjectError('could not find any README.md file to convert')
python
def convert_readme_to_rst():
    """ Attempt to convert a README.md file into README.rst """
    project_files = os.listdir('.')
    for filename in project_files:
        if filename.lower() == 'readme':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'not sure what to do with it, refusing to convert'
            )
        elif filename.lower() == 'readme.rst':
            raise ProjectError(
                'found {} in project directory...'.format(filename) +
                'refusing to overwrite'
            )
    for filename in project_files:
        if filename.lower() == 'readme.md':
            rst_filename = 'README.rst'
            logger.info('converting {} to {}'.format(filename, rst_filename))
            try:
                rst_content = pypandoc.convert(filename, 'rst')
                with open('README.rst', 'w') as rst_file:
                    rst_file.write(rst_content)
                return
            except OSError as e:
                raise ProjectError(
                    'could not convert readme to rst due to pypandoc error:' +
                    os.linesep + str(e)
                )
    raise ProjectError('could not find any README.md file to convert')
[ "def", "convert_readme_to_rst", "(", ")", ":", "project_files", "=", "os", ".", "listdir", "(", "'.'", ")", "for", "filename", "in", "project_files", ":", "if", "filename", ".", "lower", "(", ")", "==", "'readme'", ":", "raise", "ProjectError", "(", "'found {} in project directory...'", ".", "format", "(", "filename", ")", "+", "'not sure what to do with it, refusing to convert'", ")", "elif", "filename", ".", "lower", "(", ")", "==", "'readme.rst'", ":", "raise", "ProjectError", "(", "'found {} in project directory...'", ".", "format", "(", "filename", ")", "+", "'refusing to overwrite'", ")", "for", "filename", "in", "project_files", ":", "if", "filename", ".", "lower", "(", ")", "==", "'readme.md'", ":", "rst_filename", "=", "'README.rst'", "logger", ".", "info", "(", "'converting {} to {}'", ".", "format", "(", "filename", ",", "rst_filename", ")", ")", "try", ":", "rst_content", "=", "pypandoc", ".", "convert", "(", "filename", ",", "'rst'", ")", "with", "open", "(", "'README.rst'", ",", "'w'", ")", "as", "rst_file", ":", "rst_file", ".", "write", "(", "rst_content", ")", "return", "except", "OSError", "as", "e", ":", "raise", "ProjectError", "(", "'could not convert readme to rst due to pypandoc error:'", "+", "os", ".", "linesep", "+", "str", "(", "e", ")", ")", "raise", "ProjectError", "(", "'could not find any README.md file to convert'", ")" ]
Attempt to convert a README.md file into README.rst
[ "Attempt", "to", "convert", "a", "README", ".", "md", "file", "into", "README", ".", "rst" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L208-L235
ajk8/hatchery
hatchery/project.py
get_packaged_files
def get_packaged_files(package_name):
    """ Collect relative paths to all files which have already been packaged """
    if not os.path.isdir('dist'):
        return []
    return [os.path.join('dist', filename) for filename in os.listdir('dist')]
python
def get_packaged_files(package_name):
    """ Collect relative paths to all files which have already been packaged """
    if not os.path.isdir('dist'):
        return []
    return [os.path.join('dist', filename) for filename in os.listdir('dist')]
[ "def", "get_packaged_files", "(", "package_name", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "'dist'", ")", ":", "return", "[", "]", "return", "[", "os", ".", "path", ".", "join", "(", "'dist'", ",", "filename", ")", "for", "filename", "in", "os", ".", "listdir", "(", "'dist'", ")", "]" ]
Collect relative paths to all files which have already been packaged
[ "Collect", "relative", "paths", "to", "all", "files", "which", "have", "already", "been", "packaged" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L238-L242
ajk8/hatchery
hatchery/project.py
multiple_packaged_versions
def multiple_packaged_versions(package_name):
    """ Look through built package directory and see if there are multiple versions there """
    dist_files = os.listdir('dist')
    versions = set()
    for filename in dist_files:
        version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
        if version:
            versions.add(version)
    return len(versions) > 1
python
def multiple_packaged_versions(package_name):
    """ Look through built package directory and see if there are multiple versions there """
    dist_files = os.listdir('dist')
    versions = set()
    for filename in dist_files:
        version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename)
        if version:
            versions.add(version)
    return len(versions) > 1
[ "def", "multiple_packaged_versions", "(", "package_name", ")", ":", "dist_files", "=", "os", ".", "listdir", "(", "'dist'", ")", "versions", "=", "set", "(", ")", "for", "filename", "in", "dist_files", ":", "version", "=", "funcy", ".", "re_find", "(", "r'{}-(.+).tar.gz'", ".", "format", "(", "package_name", ")", ",", "filename", ")", "if", "version", ":", "versions", ".", "add", "(", "version", ")", "return", "len", "(", "versions", ")", ">", "1" ]
Look through built package directory and see if there are multiple versions there
[ "Look", "through", "built", "package", "directory", "and", "see", "if", "there", "are", "multiple", "versions", "there" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L245-L253
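A plain-`re` equivalent of the `funcy.re_find` call above, run over a hypothetical dist/ listing for a package named 'mypkg'. Note the sketch escapes the dot that the original pattern leaves bare.

import re

dist_files = ['mypkg-1.0.0.tar.gz', 'mypkg-1.1.0.tar.gz',
              'mypkg-1.1.0-py3-none-any.whl']
versions = set()
for filename in dist_files:
    match = re.search(r'mypkg-(.+)\.tar\.gz', filename)
    if match:
        versions.add(match.group(1))
print(len(versions) > 1)  # True: two sdist versions are present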
djgagne/hagelslag
hagelslag/data/HailForecastGrid.py
HailForecastGrid.period_neighborhood_probability
def period_neighborhood_probability(self, radius, smoothing, threshold, stride, start_time, end_time):
    """
    Calculate the neighborhood probability over the full period of the forecast

    Args:
        radius: circular radius from each point in km
        smoothing: width of Gaussian smoother in km
        threshold: intensity of exceedance
        stride: number of grid points to skip for reduced neighborhood grid

    Returns:
        (neighborhood probabilities)
    """
    neighbor_x = self.x[::stride, ::stride]
    neighbor_y = self.y[::stride, ::stride]
    neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)
    neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1]))
    print('Forecast Hours: {0}-{1}'.format(start_time, end_time))
    for m in range(len(self.members)):
        period_max = self.data[m, start_time:end_time, :, :].max(axis=0)
        valid_i, valid_j = np.where(period_max >= threshold)
        print(self.members[m], len(valid_i))
        if len(valid_i) > 0:
            var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j], self.y[valid_i, valid_j])).T)
            exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int)
            exceed_i, exceed_j = np.unravel_index(exceed_points, neighbor_x.shape)
            neighbor_prob[m][exceed_i, exceed_j] = 1
            if smoothing > 0:
                neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing, mode='constant')
    return neighbor_prob
python
def period_neighborhood_probability(self, radius, smoothing, threshold, stride, start_time, end_time):
    """
    Calculate the neighborhood probability over the full period of the forecast

    Args:
        radius: circular radius from each point in km
        smoothing: width of Gaussian smoother in km
        threshold: intensity of exceedance
        stride: number of grid points to skip for reduced neighborhood grid

    Returns:
        (neighborhood probabilities)
    """
    neighbor_x = self.x[::stride, ::stride]
    neighbor_y = self.y[::stride, ::stride]
    neighbor_kd_tree = cKDTree(np.vstack((neighbor_x.ravel(), neighbor_y.ravel())).T)
    neighbor_prob = np.zeros((self.data.shape[0], neighbor_x.shape[0], neighbor_x.shape[1]))
    print('Forecast Hours: {0}-{1}'.format(start_time, end_time))
    for m in range(len(self.members)):
        period_max = self.data[m, start_time:end_time, :, :].max(axis=0)
        valid_i, valid_j = np.where(period_max >= threshold)
        print(self.members[m], len(valid_i))
        if len(valid_i) > 0:
            var_kd_tree = cKDTree(np.vstack((self.x[valid_i, valid_j], self.y[valid_i, valid_j])).T)
            exceed_points = np.unique(np.concatenate(var_kd_tree.query_ball_tree(neighbor_kd_tree, radius))).astype(int)
            exceed_i, exceed_j = np.unravel_index(exceed_points, neighbor_x.shape)
            neighbor_prob[m][exceed_i, exceed_j] = 1
            if smoothing > 0:
                neighbor_prob[m] = gaussian_filter(neighbor_prob[m], smoothing, mode='constant')
    return neighbor_prob
[ "def", "period_neighborhood_probability", "(", "self", ",", "radius", ",", "smoothing", ",", "threshold", ",", "stride", ",", "start_time", ",", "end_time", ")", ":", "neighbor_x", "=", "self", ".", "x", "[", ":", ":", "stride", ",", ":", ":", "stride", "]", "neighbor_y", "=", "self", ".", "y", "[", ":", ":", "stride", ",", ":", ":", "stride", "]", "neighbor_kd_tree", "=", "cKDTree", "(", "np", ".", "vstack", "(", "(", "neighbor_x", ".", "ravel", "(", ")", ",", "neighbor_y", ".", "ravel", "(", ")", ")", ")", ".", "T", ")", "neighbor_prob", "=", "np", ".", "zeros", "(", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ",", "neighbor_x", ".", "shape", "[", "0", "]", ",", "neighbor_x", ".", "shape", "[", "1", "]", ")", ")", "print", "(", "'Forecast Hours: {0}-{1}'", ".", "format", "(", "start_time", ",", "end_time", ")", ")", "for", "m", "in", "range", "(", "len", "(", "self", ".", "members", ")", ")", ":", "period_max", "=", "self", ".", "data", "[", "m", ",", "start_time", ":", "end_time", ",", ":", ",", ":", "]", ".", "max", "(", "axis", "=", "0", ")", "valid_i", ",", "valid_j", "=", "np", ".", "where", "(", "period_max", ">=", "threshold", ")", "print", "(", "self", ".", "members", "[", "m", "]", ",", "len", "(", "valid_i", ")", ")", "if", "len", "(", "valid_i", ")", ">", "0", ":", "var_kd_tree", "=", "cKDTree", "(", "np", ".", "vstack", "(", "(", "self", ".", "x", "[", "valid_i", ",", "valid_j", "]", ",", "self", ".", "y", "[", "valid_i", ",", "valid_j", "]", ")", ")", ".", "T", ")", "exceed_points", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "var_kd_tree", ".", "query_ball_tree", "(", "neighbor_kd_tree", ",", "radius", ")", ")", ")", ".", "astype", "(", "int", ")", "exceed_i", ",", "exceed_j", "=", "np", ".", "unravel_index", "(", "exceed_points", ",", "neighbor_x", ".", "shape", ")", "neighbor_prob", "[", "m", "]", "[", "exceed_i", ",", "exceed_j", "]", "=", "1", "if", "smoothing", ">", "0", ":", "neighbor_prob", "[", "m", "]", "=", "gaussian_filter", "(", "neighbor_prob", "[", "m", "]", ",", "smoothing", ",", "mode", "=", "'constant'", ")", "return", "neighbor_prob" ]
Calculate the neighborhood probability over the full period of the forecast Args: radius: circular radius from each point in km smoothing: width of Gaussian smoother in km threshold: intensity of exceedance stride: number of grid points to skip for reduced neighborhood grid Returns: (neighborhood probabilities)
[ "Calculate", "the", "neighborhood", "probability", "over", "the", "full", "period", "of", "the", "forecast" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/HailForecastGrid.py#L94-L123
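A toy, single-member version of the KD-tree neighborhood step above, on a 10x10 unit grid with stride 1; the field values, threshold, and radius are illustrative.

import numpy as np
from scipy.spatial import cKDTree
from scipy.ndimage import gaussian_filter

x, y = np.meshgrid(np.arange(10.0), np.arange(10.0))
field = np.zeros((10, 10))
field[4, 4] = 30.0                                   # one exceedance point

grid_tree = cKDTree(np.column_stack((x.ravel(), y.ravel())))
vi, vj = np.where(field >= 25.0)
event_tree = cKDTree(np.column_stack((x[vi, vj], y[vi, vj])))

# mark every grid point within radius 2 of an exceedance, then smooth
hits = np.unique(np.concatenate(
    event_tree.query_ball_tree(grid_tree, r=2.0))).astype(int)
prob = np.zeros_like(field)
prob[np.unravel_index(hits, field.shape)] = 1
prob = gaussian_filter(prob, sigma=1.0, mode='constant')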
base4sistemas/satcfe
satcfe/resposta/consultarnumerosessao.py
RespostaConsultarNumeroSessao.analisar
def analisar(retorno):
    """Builds a :class:`RespostaSAT`, or a specialization of it, depending
    on the SAT function found in the queried session.

    :param unicode retorno: Return value of the ``ConsultarNumeroSessao``
        function.
    """
    if '|' not in retorno:
        raise ErroRespostaSATInvalida('Resposta nao possui pipes '
                'separando os campos: {!r}'.format(retorno))
    resposta = _RespostaParcial(*(retorno.split('|')[:2]))
    for faixa, construtor in _RESPOSTAS_POSSIVEIS:
        if int(resposta.EEEEE) in xrange(faixa, faixa+1000):
            return construtor(retorno)
    return RespostaConsultarNumeroSessao._pos_analise(retorno)
python
def analisar(retorno):
    """Builds a :class:`RespostaSAT`, or a specialization of it, depending
    on the SAT function found in the queried session.

    :param unicode retorno: Return value of the ``ConsultarNumeroSessao``
        function.
    """
    if '|' not in retorno:
        raise ErroRespostaSATInvalida('Resposta nao possui pipes '
                'separando os campos: {!r}'.format(retorno))
    resposta = _RespostaParcial(*(retorno.split('|')[:2]))
    for faixa, construtor in _RESPOSTAS_POSSIVEIS:
        if int(resposta.EEEEE) in xrange(faixa, faixa+1000):
            return construtor(retorno)
    return RespostaConsultarNumeroSessao._pos_analise(retorno)
[ "def", "analisar", "(", "retorno", ")", ":", "if", "'|'", "not", "in", "retorno", ":", "raise", "ErroRespostaSATInvalida", "(", "'Resposta nao possui pipes '", "'separando os campos: {!r}'", ".", "format", "(", "retorno", ")", ")", "resposta", "=", "_RespostaParcial", "(", "*", "(", "retorno", ".", "split", "(", "'|'", ")", "[", ":", "2", "]", ")", ")", "for", "faixa", ",", "construtor", "in", "_RESPOSTAS_POSSIVEIS", ":", "if", "int", "(", "resposta", ".", "EEEEE", ")", "in", "xrange", "(", "faixa", ",", "faixa", "+", "1000", ")", ":", "return", "construtor", "(", "retorno", ")", "return", "RespostaConsultarNumeroSessao", ".", "_pos_analise", "(", "retorno", ")" ]
Builds a :class:`RespostaSAT`, or a specialization of it, depending on the SAT function found in the queried session. :param unicode retorno: Return value of the ``ConsultarNumeroSessao`` function.
[ "Constrói", "uma", ":", "class", ":", "RespostaSAT", "ou", "especialização", "dependendo", "da", "função", "SAT", "encontrada", "na", "sessão", "consultada", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/consultarnumerosessao.py#L65-L81
base4sistemas/satcfe
satcfe/resposta/padrao.py
analisar_retorno
def analisar_retorno(retorno, classe_resposta=RespostaSAT,
        campos=RespostaSAT.CAMPOS, campos_alternativos=[],
        funcao=None, manter_verbatim=True):
    """Parses the return value (presumably the return of a SAT function)
    against the expected pattern and fields. The return value must carry
    its data separated by pipes, and the number of fields must match the
    fields specified.

    Fields must be specified as a tuple in which each element is itself a
    two-element tuple: the field name and a conversion function from a
    unicode string. For example:

    .. sourcecode:: python

        >>> retorno = '123456|08000|SAT em operacao||'
        >>> resposta = analisar_retorno(retorno, funcao='ConsultarSAT')
        >>> resposta.numeroSessao
        123456
        >>> resposta.EEEEE
        u'08000'
        >>> resposta.mensagem
        u'SAT em operacao'
        >>> resposta.cod
        u''
        >>> resposta.mensagemSEFAZ
        u''
        >>> resposta.atributos.funcao
        'ConsultarSAT'
        >>> resposta.atributos.verbatim
        '123456|08000|SAT em operacao||'

    :param unicode retorno: The **unicode** content of the response
        returned by the SAT DLL function.

    :param type classe_resposta: The :class:`RespostaSAT` type, or a
        specialization of it, that will represent the return value after
        it has been split into fields.

    :param tuple campos: Specification of the field names and their
        converters from the ``unicode`` type.

    :param list campos_alternativos: Alternative sets of fields to be
        considered when the number of fields found in the response does
        not match the number of fields given in ``campos``. For the
        alternative sets to work, each of them must contain a different
        number of fields.

    :param str funcao: Name of the SAT DLL function that produced the
        return value, which will be available in the extra attributes of
        the response.

    :param bool manter_verbatim: Whether a verbatim copy of the response
        should be kept in the extra attributes of the response.

    :raises ErroRespostaSATInvalida: If the return value does not conform
        to the expected pattern or does not contain the specified fields.

    :return: An instance of :class:`RespostaSAT` or of a specialization.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    if '|' not in retorno:
        raise ErroRespostaSATInvalida('Resposta nao possui pipes separando os '
                'campos: "%s"' % as_ascii(retorno))

    partes = retorno.split('|')

    if len(partes) != len(campos):
        # look for an alternative field layout matching the return value
        for relacao_alternativa in campos_alternativos:
            if len(partes) == len(relacao_alternativa):
                relacao_campos = relacao_alternativa
                break
        else:
            raise ErroRespostaSATInvalida('Resposta nao possui o numero '
                    'esperado de campos. Esperados %d campos, mas '
                    'contem %d: "%s"' % (
                            len(campos), len(partes), as_ascii(retorno),))
    else:
        relacao_campos = campos

    resultado = {}

    def _enumerate(sequence):
        for index, value in enumerate(sequence):
            yield index, value[0], value[1]

    for indice, campo, conversor in _enumerate(relacao_campos):
        resultado[campo] = conversor(partes[indice])

    resposta = classe_resposta(**resultado)
    resposta.atributos.funcao = funcao
    resposta.atributos.verbatim = retorno if manter_verbatim else None

    return resposta
python
def analisar_retorno(retorno, classe_resposta=RespostaSAT,
        campos=RespostaSAT.CAMPOS, campos_alternativos=[],
        funcao=None, manter_verbatim=True):
    """Parses the return value (presumably the return of a SAT function)
    against the expected pattern and fields. The return value must carry
    its data separated by pipes, and the number of fields must match the
    fields specified.

    Fields must be specified as a tuple in which each element is itself a
    two-element tuple: the field name and a conversion function from a
    unicode string. For example:

    .. sourcecode:: python

        >>> retorno = '123456|08000|SAT em operacao||'
        >>> resposta = analisar_retorno(retorno, funcao='ConsultarSAT')
        >>> resposta.numeroSessao
        123456
        >>> resposta.EEEEE
        u'08000'
        >>> resposta.mensagem
        u'SAT em operacao'
        >>> resposta.cod
        u''
        >>> resposta.mensagemSEFAZ
        u''
        >>> resposta.atributos.funcao
        'ConsultarSAT'
        >>> resposta.atributos.verbatim
        '123456|08000|SAT em operacao||'

    :param unicode retorno: The **unicode** content of the response
        returned by the SAT DLL function.

    :param type classe_resposta: The :class:`RespostaSAT` type, or a
        specialization of it, that will represent the return value after
        it has been split into fields.

    :param tuple campos: Specification of the field names and their
        converters from the ``unicode`` type.

    :param list campos_alternativos: Alternative sets of fields to be
        considered when the number of fields found in the response does
        not match the number of fields given in ``campos``. For the
        alternative sets to work, each of them must contain a different
        number of fields.

    :param str funcao: Name of the SAT DLL function that produced the
        return value, which will be available in the extra attributes of
        the response.

    :param bool manter_verbatim: Whether a verbatim copy of the response
        should be kept in the extra attributes of the response.

    :raises ErroRespostaSATInvalida: If the return value does not conform
        to the expected pattern or does not contain the specified fields.

    :return: An instance of :class:`RespostaSAT` or of a specialization.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    if '|' not in retorno:
        raise ErroRespostaSATInvalida('Resposta nao possui pipes separando os '
                'campos: "%s"' % as_ascii(retorno))

    partes = retorno.split('|')

    if len(partes) != len(campos):
        # look for an alternative field layout matching the return value
        for relacao_alternativa in campos_alternativos:
            if len(partes) == len(relacao_alternativa):
                relacao_campos = relacao_alternativa
                break
        else:
            raise ErroRespostaSATInvalida('Resposta nao possui o numero '
                    'esperado de campos. Esperados %d campos, mas '
                    'contem %d: "%s"' % (
                            len(campos), len(partes), as_ascii(retorno),))
    else:
        relacao_campos = campos

    resultado = {}

    def _enumerate(sequence):
        for index, value in enumerate(sequence):
            yield index, value[0], value[1]

    for indice, campo, conversor in _enumerate(relacao_campos):
        resultado[campo] = conversor(partes[indice])

    resposta = classe_resposta(**resultado)
    resposta.atributos.funcao = funcao
    resposta.atributos.verbatim = retorno if manter_verbatim else None

    return resposta
[ "def", "analisar_retorno", "(", "retorno", ",", "classe_resposta", "=", "RespostaSAT", ",", "campos", "=", "RespostaSAT", ".", "CAMPOS", ",", "campos_alternativos", "=", "[", "]", ",", "funcao", "=", "None", ",", "manter_verbatim", "=", "True", ")", ":", "if", "'|'", "not", "in", "retorno", ":", "raise", "ErroRespostaSATInvalida", "(", "'Resposta nao possui pipes separando os '", "'campos: \"%s\"'", "%", "as_ascii", "(", "retorno", ")", ")", "partes", "=", "retorno", ".", "split", "(", "'|'", ")", "if", "len", "(", "partes", ")", "!=", "len", "(", "campos", ")", ":", "# procura por uma relação alternativa de campos do retorno", "for", "relacao_alternativa", "in", "campos_alternativos", ":", "if", "len", "(", "partes", ")", "==", "len", "(", "relacao_alternativa", ")", ":", "relacao_campos", "=", "relacao_alternativa", "break", "else", ":", "raise", "ErroRespostaSATInvalida", "(", "'Resposta nao possui o numero '", "'esperado de campos. Esperados %d campos, mas '", "'contem %d: \"%s\"'", "%", "(", "len", "(", "campos", ")", ",", "len", "(", "partes", ")", ",", "as_ascii", "(", "retorno", ")", ",", ")", ")", "else", ":", "relacao_campos", "=", "campos", "resultado", "=", "{", "}", "def", "_enumerate", "(", "sequence", ")", ":", "for", "index", ",", "value", "in", "enumerate", "(", "sequence", ")", ":", "yield", "index", ",", "value", "[", "0", "]", ",", "value", "[", "1", "]", "for", "indice", ",", "campo", ",", "conversor", "in", "_enumerate", "(", "relacao_campos", ")", ":", "resultado", "[", "campo", "]", "=", "conversor", "(", "partes", "[", "indice", "]", ")", "resposta", "=", "classe_resposta", "(", "*", "*", "resultado", ")", "resposta", ".", "atributos", ".", "funcao", "=", "funcao", "resposta", ".", "atributos", ".", "verbatim", "=", "retorno", "if", "manter_verbatim", "else", "None", "return", "resposta" ]
Parses the return value (presumably the return of a SAT function) against the expected pattern and fields. The return value must carry its data separated by pipes, and the number of fields must match the fields specified. Fields must be specified as a tuple in which each element is itself a two-element tuple: the field name and a conversion function from a unicode string. For example: .. sourcecode:: python >>> retorno = '123456|08000|SAT em operacao||' >>> resposta = analisar_retorno(retorno, funcao='ConsultarSAT') >>> resposta.numeroSessao 123456 >>> resposta.EEEEE u'08000' >>> resposta.mensagem u'SAT em operacao' >>> resposta.cod u'' >>> resposta.mensagemSEFAZ u'' >>> resposta.atributos.funcao 'ConsultarSAT' >>> resposta.atributos.verbatim '123456|08000|SAT em operacao||' :param unicode retorno: The **unicode** content of the response returned by the SAT DLL function. :param type classe_resposta: The :class:`RespostaSAT` type, or a specialization of it, that will represent the return value after it has been split into fields. :param tuple campos: Specification of the field names and their converters from the ``unicode`` type. :param list campos_alternativos: Alternative sets of fields to be considered when the number of fields found in the response does not match the number of fields given in ``campos``. For the alternative sets to work, each of them must contain a different number of fields. :param str funcao: Name of the SAT DLL function that produced the return value, which will be available in the extra attributes of the response. :param bool manter_verbatim: Whether a verbatim copy of the response should be kept in the extra attributes of the response. :raises ErroRespostaSATInvalida: If the return value does not conform to the expected pattern or does not contain the specified fields. :return: An instance of :class:`RespostaSAT` or of a specialization. :rtype: satcfe.resposta.padrao.RespostaSAT
[ "Analisa", "o", "retorno", "(", "supostamente", "um", "retorno", "de", "uma", "função", "do", "SAT", ")", "conforme", "o", "padrão", "e", "campos", "esperados", ".", "O", "retorno", "deverá", "possuir", "dados", "separados", "entre", "si", "através", "de", "pipes", "e", "o", "número", "de", "campos", "deverá", "coincidir", "com", "os", "campos", "especificados", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L175-L268
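A dependency-free sketch of the pipe-splitting contract described above. The field names mirror the doctest, and the (name, converter) tuple layout matches what `RespostaSAT.CAMPOS` is assumed to use.

retorno = u'123456|08000|SAT em operacao||'
campos = (
    ('numeroSessao', int),
    ('EEEEE', str),
    ('mensagem', str),
    ('cod', str),
    ('mensagemSEFAZ', str),
)
partes = retorno.split('|')
assert len(partes) == len(campos)
resultado = {nome: conv(parte)
             for (nome, conv), parte in zip(campos, partes)}
print(resultado['numeroSessao'], resultado['EEEEE'])  # 123456 08000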
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.comunicar_certificado_icpbrasil
def comunicar_certificado_icpbrasil(retorno):
    """Builds a :class:`RespostaSAT` for the (unicode) return value of the
    :meth:`~satcfe.base.FuncoesSAT.comunicar_certificado_icpbrasil` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ComunicarCertificadoICPBRASIL')
    if resposta.EEEEE not in ('05000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def comunicar_certificado_icpbrasil(retorno):
    """Builds a :class:`RespostaSAT` for the (unicode) return value of the
    :meth:`~satcfe.base.FuncoesSAT.comunicar_certificado_icpbrasil` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ComunicarCertificadoICPBRASIL')
    if resposta.EEEEE not in ('05000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "comunicar_certificado_icpbrasil", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'ComunicarCertificadoICPBRASIL'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'05000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Builds a :class:`RespostaSAT` for the (unicode) return value of the :meth:`~satcfe.base.FuncoesSAT.comunicar_certificado_icpbrasil` function.
[ "Constrói", "uma", ":", "class", ":", "RespostaSAT", "para", "o", "retorno", "(", "unicode", ")", "da", "função", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "comunicar_certificado_icpbrasil", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L80-L88
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.consultar_sat
def consultar_sat(retorno):
    """Builds a :class:`RespostaSAT` for the (unicode) return value of the
    :meth:`~satcfe.base.FuncoesSAT.consultar_sat` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ConsultarSAT')
    if resposta.EEEEE not in ('08000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def consultar_sat(retorno):
    """Builds a :class:`RespostaSAT` for the (unicode) return value of the
    :meth:`~satcfe.base.FuncoesSAT.consultar_sat` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ConsultarSAT')
    if resposta.EEEEE not in ('08000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "consultar_sat", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'ConsultarSAT'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'08000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Builds a :class:`RespostaSAT` for the (unicode) return value of the :meth:`~satcfe.base.FuncoesSAT.consultar_sat` function.
[ "Constrói", "uma", ":", "class", ":", "RespostaSAT", "para", "o", "retorno", "(", "unicode", ")", "da", "função", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "consultar_sat", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L92-L100
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.configurar_interface_de_rede
def configurar_interface_de_rede(retorno):
    """Builds a :class:`RespostaSAT` for the (unicode) return value of the
    :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ConfigurarInterfaceDeRede')
    if resposta.EEEEE not in ('12000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def configurar_interface_de_rede(retorno):
    """Builds a :class:`RespostaSAT` for the (unicode) return value of the
    :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede` function.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='ConfigurarInterfaceDeRede')
    if resposta.EEEEE not in ('12000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "configurar_interface_de_rede", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'ConfigurarInterfaceDeRede'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'12000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Constructs a :class:`RespostaSAT` for the return (unicode) of the function :meth:`~satcfe.base.FuncoesSAT.configurar_interface_de_rede`.
[ "Constructs", "a", ":", "class", ":", "RespostaSAT", "for", "the", "return", "(", "unicode", ")", "of", "the", "function", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "configurar_interface_de_rede", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L104-L112
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.associar_assinatura
def associar_assinatura(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.associar_assinatura`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='AssociarAssinatura')
    if resposta.EEEEE not in ('13000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def associar_assinatura(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.associar_assinatura`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='AssociarAssinatura')
    if resposta.EEEEE not in ('13000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "associar_assinatura", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'AssociarAssinatura'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'13000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Constructs a :class:`RespostaSAT` for the return (unicode) of the function :meth:`~satcfe.base.FuncoesSAT.associar_assinatura`.
[ "Constructs", "a", ":", "class", ":", "RespostaSAT", "for", "the", "return", "(", "unicode", ")", "of", "the", "function", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "associar_assinatura", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L116-L124
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.atualizar_software_sat
def atualizar_software_sat(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='AtualizarSoftwareSAT')
    if resposta.EEEEE not in ('14000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def atualizar_software_sat(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='AtualizarSoftwareSAT')
    if resposta.EEEEE not in ('14000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "atualizar_software_sat", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'AtualizarSoftwareSAT'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'14000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Constructs a :class:`RespostaSAT` for the return (unicode) of the function :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.
[ "Constructs", "a", ":", "class", ":", "RespostaSAT", "for", "the", "return", "(", "unicode", ")", "of", "the", "function", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "atualizar_software_sat", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L128-L136
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.bloquear_sat
def bloquear_sat(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.bloquear_sat`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='BloquearSAT')
    if resposta.EEEEE not in ('16000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def bloquear_sat(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.bloquear_sat`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='BloquearSAT')
    if resposta.EEEEE not in ('16000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "bloquear_sat", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'BloquearSAT'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'16000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Constructs a :class:`RespostaSAT` for the return (unicode) of the function :meth:`~satcfe.base.FuncoesSAT.bloquear_sat`.
[ "Constructs", "a", ":", "class", ":", "RespostaSAT", "for", "the", "return", "(", "unicode", ")", "of", "the", "function", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "bloquear_sat", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L140-L148
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.desbloquear_sat
def desbloquear_sat(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='DesbloquearSAT')
    if resposta.EEEEE not in ('17000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def desbloquear_sat(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='DesbloquearSAT')
    if resposta.EEEEE not in ('17000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "desbloquear_sat", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'DesbloquearSAT'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'17000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Constructs a :class:`RespostaSAT` for the return (unicode) of the function :meth:`~satcfe.base.FuncoesSAT.desbloquear_sat`.
[ "Constructs", "a", ":", "class", ":", "RespostaSAT", "for", "the", "return", "(", "unicode", ")", "of", "the", "function", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "desbloquear_sat", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L152-L160
base4sistemas/satcfe
satcfe/resposta/padrao.py
RespostaSAT.trocar_codigo_de_ativacao
def trocar_codigo_de_ativacao(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='TrocarCodigoDeAtivacao')
    if resposta.EEEEE not in ('18000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
python
def trocar_codigo_de_ativacao(retorno):
    """Constructs a :class:`RespostaSAT` for the return (unicode) of the
    function :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`.
    """
    resposta = analisar_retorno(forcar_unicode(retorno),
            funcao='TrocarCodigoDeAtivacao')
    if resposta.EEEEE not in ('18000',):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
[ "def", "trocar_codigo_de_ativacao", "(", "retorno", ")", ":", "resposta", "=", "analisar_retorno", "(", "forcar_unicode", "(", "retorno", ")", ",", "funcao", "=", "'TrocarCodigoDeAtivacao'", ")", "if", "resposta", ".", "EEEEE", "not", "in", "(", "'18000'", ",", ")", ":", "raise", "ExcecaoRespostaSAT", "(", "resposta", ")", "return", "resposta" ]
Constructs a :class:`RespostaSAT` for the return (unicode) of the function :meth:`~satcfe.base.FuncoesSAT.trocar_codigo_de_ativacao`.
[ "Constructs", "a", ":", "class", ":", "RespostaSAT", "for", "the", "return", "(", "unicode", ")", "of", "the", "function", ":", "meth", ":", "~satcfe", ".", "base", ".", "FuncoesSAT", ".", "trocar_codigo_de_ativacao", "." ]
train
https://github.com/base4sistemas/satcfe/blob/cb8e8815f4133d3e3d94cf526fa86767b4521ed9/satcfe/resposta/padrao.py#L164-L172
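Viewed together, the eight builders above differ only in the funcao label passed to analisar_retorno and the single EEEEE value treated as success. A summary dict collected directly from the code above (the dict itself is not part of the repository):

# Success codes checked by the wrappers above, taken straight from each function:
CODIGOS_SUCESSO = {
    'ComunicarCertificadoICPBRASIL': '05000',
    'ConsultarSAT': '08000',
    'ConfigurarInterfaceDeRede': '12000',
    'AssociarAssinatura': '13000',
    'AtualizarSoftwareSAT': '14000',
    'BloquearSAT': '16000',
    'DesbloquearSAT': '17000',
    'TrocarCodigoDeAtivacao': '18000',
}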
djgagne/hagelslag
hagelslag/data/ModelOutput.py
ModelOutput.load_data
def load_data(self):
    """
    Load the specified variable from the ensemble files, then close the files.
    """
    if self.ensemble_name.upper() == "SSEF":
        if self.variable[0:2] == "rh":
            pressure_level = self.variable[2:]
            relh_vars = ["sph", "tmp"]
            relh_vals = {}
            for var in relh_vars:
                mg = SSEFModelGrid(self.member_name, self.run_date, var + pressure_level,
                                   self.start_date, self.end_date, self.path,
                                   single_step=self.single_step)
                relh_vals[var], units = mg.load_data()
                mg.close()
            self.data = relative_humidity_pressure_level(relh_vals["tmp"], relh_vals["sph"],
                                                         float(pressure_level) * 100)
            self.units = "%"
        elif self.variable == "melth":
            input_vars = ["hgtsfc", "hgt700", "hgt500", "tmp700", "tmp500"]
            input_vals = {}
            for var in input_vars:
                mg = SSEFModelGrid(self.member_name, self.run_date, var,
                                   self.start_date, self.end_date, self.path,
                                   single_step=self.single_step)
                input_vals[var], units = mg.load_data()
                mg.close()
            self.data = melting_layer_height(input_vals["hgtsfc"], input_vals["hgt700"],
                                             input_vals["hgt500"], input_vals["tmp700"],
                                             input_vals["tmp500"])
            self.units = "m"
        else:
            mg = SSEFModelGrid(self.member_name, self.run_date, self.variable,
                               self.start_date, self.end_date, self.path,
                               single_step=self.single_step)
            self.data, self.units = mg.load_data()
            mg.close()
    elif self.ensemble_name.upper() == "NCAR":
        mg = NCARModelGrid(self.member_name, self.run_date, self.variable,
                           self.start_date, self.end_date, self.path,
                           single_step=self.single_step)
        self.data, self.units = mg.load_data()
        mg.close()
    elif self.ensemble_name.upper() == "HREFV2":
        proj_dict, grid_dict = read_ncar_map_file(self.map_file)
        mapping_data = make_proj_grids(proj_dict, grid_dict)
        mg = HREFv2ModelGrid(self.member_name, self.run_date, self.variable,
                             self.start_date, self.end_date, self.path,
                             mapping_data, self.sector_ind_path,
                             single_step=self.single_step)
        self.data, self.units = mg.load_data()
    elif self.ensemble_name.upper() == "VSE":
        mg = VSEModelGrid(self.member_name, self.run_date, self.variable,
                          self.start_date, self.end_date, self.path,
                          single_step=self.single_step)
        self.data, self.units = mg.load_data()
        mg.close()
    elif self.ensemble_name.upper() == "HRRR":
        mg = HRRRModelGrid(self.run_date, self.variable,
                           self.start_date, self.end_date, self.path)
        self.data, self.units = mg.load_data()
        mg.close()
    elif self.ensemble_name.upper() == "NCARSTORM":
        mg = NCARStormEventModelGrid(self.run_date, self.variable,
                                     self.start_date, self.end_date, self.path)
        self.data, self.units = mg.load_data()
        mg.close()
    else:
        print(self.ensemble_name + " not supported.")
python
def load_data(self):
    """
    Load the specified variable from the ensemble files, then close the files.
    """
    if self.ensemble_name.upper() == "SSEF":
        if self.variable[0:2] == "rh":
            pressure_level = self.variable[2:]
            relh_vars = ["sph", "tmp"]
            relh_vals = {}
            for var in relh_vars:
                mg = SSEFModelGrid(self.member_name, self.run_date, var + pressure_level,
                                   self.start_date, self.end_date, self.path,
                                   single_step=self.single_step)
                relh_vals[var], units = mg.load_data()
                mg.close()
            self.data = relative_humidity_pressure_level(relh_vals["tmp"], relh_vals["sph"],
                                                         float(pressure_level) * 100)
            self.units = "%"
        elif self.variable == "melth":
            input_vars = ["hgtsfc", "hgt700", "hgt500", "tmp700", "tmp500"]
            input_vals = {}
            for var in input_vars:
                mg = SSEFModelGrid(self.member_name, self.run_date, var,
                                   self.start_date, self.end_date, self.path,
                                   single_step=self.single_step)
                input_vals[var], units = mg.load_data()
                mg.close()
            self.data = melting_layer_height(input_vals["hgtsfc"], input_vals["hgt700"],
                                             input_vals["hgt500"], input_vals["tmp700"],
                                             input_vals["tmp500"])
            self.units = "m"
        else:
            mg = SSEFModelGrid(self.member_name, self.run_date, self.variable,
                               self.start_date, self.end_date, self.path,
                               single_step=self.single_step)
            self.data, self.units = mg.load_data()
            mg.close()
    elif self.ensemble_name.upper() == "NCAR":
        mg = NCARModelGrid(self.member_name, self.run_date, self.variable,
                           self.start_date, self.end_date, self.path,
                           single_step=self.single_step)
        self.data, self.units = mg.load_data()
        mg.close()
    elif self.ensemble_name.upper() == "HREFV2":
        proj_dict, grid_dict = read_ncar_map_file(self.map_file)
        mapping_data = make_proj_grids(proj_dict, grid_dict)
        mg = HREFv2ModelGrid(self.member_name, self.run_date, self.variable,
                             self.start_date, self.end_date, self.path,
                             mapping_data, self.sector_ind_path,
                             single_step=self.single_step)
        self.data, self.units = mg.load_data()
    elif self.ensemble_name.upper() == "VSE":
        mg = VSEModelGrid(self.member_name, self.run_date, self.variable,
                          self.start_date, self.end_date, self.path,
                          single_step=self.single_step)
        self.data, self.units = mg.load_data()
        mg.close()
    elif self.ensemble_name.upper() == "HRRR":
        mg = HRRRModelGrid(self.run_date, self.variable,
                           self.start_date, self.end_date, self.path)
        self.data, self.units = mg.load_data()
        mg.close()
    elif self.ensemble_name.upper() == "NCARSTORM":
        mg = NCARStormEventModelGrid(self.run_date, self.variable,
                                     self.start_date, self.end_date, self.path)
        self.data, self.units = mg.load_data()
        mg.close()
    else:
        print(self.ensemble_name + " not supported.")
[ "def", "load_data", "(", "self", ")", ":", "if", "self", ".", "ensemble_name", ".", "upper", "(", ")", "==", "\"SSEF\"", ":", "if", "self", ".", "variable", "[", "0", ":", "2", "]", "==", "\"rh\"", ":", "pressure_level", "=", "self", ".", "variable", "[", "2", ":", "]", "relh_vars", "=", "[", "\"sph\"", ",", "\"tmp\"", "]", "relh_vals", "=", "{", "}", "for", "var", "in", "relh_vars", ":", "mg", "=", "SSEFModelGrid", "(", "self", ".", "member_name", ",", "self", ".", "run_date", ",", "var", "+", "pressure_level", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ",", "single_step", "=", "self", ".", "single_step", ")", "relh_vals", "[", "var", "]", ",", "units", "=", "mg", ".", "load_data", "(", ")", "mg", ".", "close", "(", ")", "self", ".", "data", "=", "relative_humidity_pressure_level", "(", "relh_vals", "[", "\"tmp\"", "]", ",", "relh_vals", "[", "\"sph\"", "]", ",", "float", "(", "pressure_level", ")", "*", "100", ")", "self", ".", "units", "=", "\"%\"", "elif", "self", ".", "variable", "==", "\"melth\"", ":", "input_vars", "=", "[", "\"hgtsfc\"", ",", "\"hgt700\"", ",", "\"hgt500\"", ",", "\"tmp700\"", ",", "\"tmp500\"", "]", "input_vals", "=", "{", "}", "for", "var", "in", "input_vars", ":", "mg", "=", "SSEFModelGrid", "(", "self", ".", "member_name", ",", "self", ".", "run_date", ",", "var", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ",", "single_step", "=", "self", ".", "single_step", ")", "input_vals", "[", "var", "]", ",", "units", "=", "mg", ".", "load_data", "(", ")", "mg", ".", "close", "(", ")", "self", ".", "data", "=", "melting_layer_height", "(", "input_vals", "[", "\"hgtsfc\"", "]", ",", "input_vals", "[", "\"hgt700\"", "]", ",", "input_vals", "[", "\"hgt500\"", "]", ",", "input_vals", "[", "\"tmp700\"", "]", ",", "input_vals", "[", "\"tmp500\"", "]", ")", "self", ".", "units", "=", "\"m\"", "else", ":", "mg", "=", "SSEFModelGrid", "(", "self", ".", "member_name", ",", "self", ".", "run_date", ",", "self", ".", "variable", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ",", "single_step", "=", "self", ".", "single_step", ")", "self", ".", "data", ",", "self", ".", "units", "=", "mg", ".", "load_data", "(", ")", "mg", ".", "close", "(", ")", "elif", "self", ".", "ensemble_name", ".", "upper", "(", ")", "==", "\"NCAR\"", ":", "mg", "=", "NCARModelGrid", "(", "self", ".", "member_name", ",", "self", ".", "run_date", ",", "self", ".", "variable", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ",", "single_step", "=", "self", ".", "single_step", ")", "self", ".", "data", ",", "self", ".", "units", "=", "mg", ".", "load_data", "(", ")", "mg", ".", "close", "(", ")", "elif", "self", ".", "ensemble_name", ".", "upper", "(", ")", "==", "\"HREFV2\"", ":", "proj_dict", ",", "grid_dict", "=", "read_ncar_map_file", "(", "self", ".", "map_file", ")", "mapping_data", "=", "make_proj_grids", "(", "proj_dict", ",", "grid_dict", ")", "mg", "=", "HREFv2ModelGrid", "(", "self", ".", "member_name", ",", "self", ".", "run_date", ",", "self", ".", "variable", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ",", "mapping_data", ",", "self", ".", "sector_ind_path", ",", "single_step", "=", "self", ".", "single_step", ")", "self", ".", "data", ",", "self", ".", "units", "=", "mg", ".", "load_data", "(", ")", "elif", "self", ".", "ensemble_name", ".", "upper", "(", ")", "==", "\"VSE\"", ":", 
"mg", "=", "VSEModelGrid", "(", "self", ".", "member_name", ",", "self", ".", "run_date", ",", "self", ".", "variable", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ",", "single_step", "=", "self", ".", "single_step", ")", "self", ".", "data", ",", "self", ".", "units", "=", "mg", ".", "load_data", "(", ")", "mg", ".", "close", "(", ")", "elif", "self", ".", "ensemble_name", ".", "upper", "(", ")", "==", "\"HRRR\"", ":", "mg", "=", "HRRRModelGrid", "(", "self", ".", "run_date", ",", "self", ".", "variable", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ")", "self", ".", "data", ",", "self", ".", "units", "=", "mg", ".", "load_data", "(", ")", "mg", ".", "close", "(", ")", "elif", "self", ".", "ensemble_name", ".", "upper", "(", ")", "==", "\"NCARSTORM\"", ":", "mg", "=", "NCARStormEventModelGrid", "(", "self", ".", "run_date", ",", "self", ".", "variable", ",", "self", ".", "start_date", ",", "self", ".", "end_date", ",", "self", ".", "path", ")", "self", ".", "data", ",", "self", ".", "units", "=", "mg", ".", "load_data", "(", ")", "mg", ".", "close", "(", ")", "else", ":", "print", "(", "self", ".", "ensemble_name", "+", "\" not supported.\"", ")" ]
Load the specified variable from the ensemble files, then close the files.
[ "Load", "the", "specified", "variable", "from", "the", "ensemble", "files", "then", "close", "the", "files", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/ModelOutput.py#L67-L172
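In the SSEF branch above, derived variables such as relative humidity are built by loading the ingredient fields and combining them; the body of relative_humidity_pressure_level itself is not part of this record. Purely as an illustration, here is a standalone sketch of RH from specific humidity and temperature using the standard mixing-ratio plus Bolton/Magnus saturation formula -- an assumption on my part, not necessarily the library's exact method:

import numpy as np

def relative_humidity_sketch(temperature_k, specific_humidity, pressure_pa):
    # Vapor pressure from specific humidity q: e = q * p / (0.622 + 0.378 * q)
    e = specific_humidity * pressure_pa / (0.622 + 0.378 * specific_humidity)
    # Saturation vapor pressure via Bolton (1980), in Pa; T converted to Celsius
    t_c = temperature_k - 273.15
    e_s = 611.2 * np.exp(17.67 * t_c / (t_c + 243.5))
    return 100.0 * e / e_s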
djgagne/hagelslag
hagelslag/data/ModelOutput.py
ModelOutput.load_map_info
def load_map_info(self, map_file):
    """
    Load map projection information and create latitude, longitude, x, y, i, and j grids
    for the projection.

    Args:
        map_file: File specifying the projection information.
    """
    if self.ensemble_name.upper() == "SSEF":
        proj_dict, grid_dict = read_arps_map_file(map_file)
        self.dx = int(grid_dict["dx"])
        mapping_data = make_proj_grids(proj_dict, grid_dict)
        for m, v in mapping_data.items():
            setattr(self, m, v)
        self.i, self.j = np.indices(self.lon.shape)
        self.proj = get_proj_obj(proj_dict)
    elif self.ensemble_name.upper() in ["NCAR", "NCARSTORM", "HRRR", "VSE", "HREFV2"]:
        proj_dict, grid_dict = read_ncar_map_file(map_file)
        if self.member_name[0:7] == "1km_pbl":
            # Don't just look at the first 3 characters. You have to differentiate '1km_pbl1' and '1km_on_3km_pbl1'
            grid_dict["dx"] = 1000
            grid_dict["dy"] = 1000
            grid_dict["sw_lon"] = 258.697
            grid_dict["sw_lat"] = 23.999
            grid_dict["ne_lon"] = 282.868269206236
            grid_dict["ne_lat"] = 36.4822338520542
        self.dx = int(grid_dict["dx"])
        mapping_data = make_proj_grids(proj_dict, grid_dict)
        for m, v in mapping_data.items():
            setattr(self, m, v)
        self.i, self.j = np.indices(self.lon.shape)
        self.proj = get_proj_obj(proj_dict)
python
def load_map_info(self, map_file):
    """
    Load map projection information and create latitude, longitude, x, y, i, and j grids
    for the projection.

    Args:
        map_file: File specifying the projection information.
    """
    if self.ensemble_name.upper() == "SSEF":
        proj_dict, grid_dict = read_arps_map_file(map_file)
        self.dx = int(grid_dict["dx"])
        mapping_data = make_proj_grids(proj_dict, grid_dict)
        for m, v in mapping_data.items():
            setattr(self, m, v)
        self.i, self.j = np.indices(self.lon.shape)
        self.proj = get_proj_obj(proj_dict)
    elif self.ensemble_name.upper() in ["NCAR", "NCARSTORM", "HRRR", "VSE", "HREFV2"]:
        proj_dict, grid_dict = read_ncar_map_file(map_file)
        if self.member_name[0:7] == "1km_pbl":
            # Don't just look at the first 3 characters. You have to differentiate '1km_pbl1' and '1km_on_3km_pbl1'
            grid_dict["dx"] = 1000
            grid_dict["dy"] = 1000
            grid_dict["sw_lon"] = 258.697
            grid_dict["sw_lat"] = 23.999
            grid_dict["ne_lon"] = 282.868269206236
            grid_dict["ne_lat"] = 36.4822338520542
        self.dx = int(grid_dict["dx"])
        mapping_data = make_proj_grids(proj_dict, grid_dict)
        for m, v in mapping_data.items():
            setattr(self, m, v)
        self.i, self.j = np.indices(self.lon.shape)
        self.proj = get_proj_obj(proj_dict)
[ "def", "load_map_info", "(", "self", ",", "map_file", ")", ":", "if", "self", ".", "ensemble_name", ".", "upper", "(", ")", "==", "\"SSEF\"", ":", "proj_dict", ",", "grid_dict", "=", "read_arps_map_file", "(", "map_file", ")", "self", ".", "dx", "=", "int", "(", "grid_dict", "[", "\"dx\"", "]", ")", "mapping_data", "=", "make_proj_grids", "(", "proj_dict", ",", "grid_dict", ")", "for", "m", ",", "v", "in", "mapping_data", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "m", ",", "v", ")", "self", ".", "i", ",", "self", ".", "j", "=", "np", ".", "indices", "(", "self", ".", "lon", ".", "shape", ")", "self", ".", "proj", "=", "get_proj_obj", "(", "proj_dict", ")", "elif", "self", ".", "ensemble_name", ".", "upper", "(", ")", "in", "[", "\"NCAR\"", ",", "\"NCARSTORM\"", ",", "\"HRRR\"", ",", "\"VSE\"", ",", "\"HREFV2\"", "]", ":", "proj_dict", ",", "grid_dict", "=", "read_ncar_map_file", "(", "map_file", ")", "if", "self", ".", "member_name", "[", "0", ":", "7", "]", "==", "\"1km_pbl\"", ":", "# Don't just look at the first 3 characters. You have to differentiate '1km_pbl1' and '1km_on_3km_pbl1'", "grid_dict", "[", "\"dx\"", "]", "=", "1000", "grid_dict", "[", "\"dy\"", "]", "=", "1000", "grid_dict", "[", "\"sw_lon\"", "]", "=", "258.697", "grid_dict", "[", "\"sw_lat\"", "]", "=", "23.999", "grid_dict", "[", "\"ne_lon\"", "]", "=", "282.868269206236", "grid_dict", "[", "\"ne_lat\"", "]", "=", "36.4822338520542", "self", ".", "dx", "=", "int", "(", "grid_dict", "[", "\"dx\"", "]", ")", "mapping_data", "=", "make_proj_grids", "(", "proj_dict", ",", "grid_dict", ")", "for", "m", ",", "v", "in", "mapping_data", ".", "items", "(", ")", ":", "setattr", "(", "self", ",", "m", ",", "v", ")", "self", ".", "i", ",", "self", ".", "j", "=", "np", ".", "indices", "(", "self", ".", "lon", ".", "shape", ")", "self", ".", "proj", "=", "get_proj_obj", "(", "proj_dict", ")" ]
Load map projection information and create latitude, longitude, x, y, i, and j grids for the projection. Args: map_file: File specifying the projection information.
[ "Load", "map", "projection", "information", "and", "create", "latitude", "longitude", "x", "y", "i", "and", "j", "grids", "for", "the", "projection", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/ModelOutput.py#L174-L204
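The setattr loop above injects every array returned by make_proj_grids as an attribute; the immediate use of self.lon right afterward shows "lon" must be among its keys. A toy reproduction of just that mechanism -- GridHolder and the two dict keys are illustrative, not from the repository:

import numpy as np

class GridHolder(object):
    def attach(self, mapping_data):
        # Turn each grid in the dict into an attribute of this object
        for m, v in mapping_data.items():
            setattr(self, m, v)
        # i/j are plain row/column index grids with the same shape as lon
        self.i, self.j = np.indices(self.lon.shape)

holder = GridHolder()
holder.attach({"lon": np.zeros((3, 4)), "lat": np.zeros((3, 4))})
print(holder.i.shape, holder.j.shape)  # (3, 4) (3, 4)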
djgagne/hagelslag
hagelslag/processing/STObject.py
read_geojson
def read_geojson(filename):
    """
    Reads a geojson file containing an STObject and initializes a new STObject from the information in the file.

    Args:
        filename: Name of the geojson file

    Returns:
        an STObject
    """
    json_file = open(filename)
    data = json.load(json_file)
    json_file.close()
    times = data["properties"]["times"]
    main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])
    attribute_data = dict()
    for feature in data["features"]:
        for main_name in main_data.keys():
            main_data[main_name].append(np.array(feature["properties"][main_name]))
        for k, v in feature["properties"]["attributes"].items():
            if k not in attribute_data.keys():
                attribute_data[k] = [np.array(v)]
            else:
                attribute_data[k].append(np.array(v))
    kwargs = {}
    for kw in ["dx", "step", "u", "v"]:
        if kw in data["properties"].keys():
            kwargs[kw] = data["properties"][kw]
    sto = STObject(main_data["timesteps"], main_data["masks"], main_data["x"], main_data["y"],
                   main_data["i"], main_data["j"], times[0], times[-1], **kwargs)
    for k, v in attribute_data.items():
        sto.attributes[k] = v
    return sto
python
def read_geojson(filename):
    """
    Reads a geojson file containing an STObject and initializes a new STObject from the information in the file.

    Args:
        filename: Name of the geojson file

    Returns:
        an STObject
    """
    json_file = open(filename)
    data = json.load(json_file)
    json_file.close()
    times = data["properties"]["times"]
    main_data = dict(timesteps=[], masks=[], x=[], y=[], i=[], j=[])
    attribute_data = dict()
    for feature in data["features"]:
        for main_name in main_data.keys():
            main_data[main_name].append(np.array(feature["properties"][main_name]))
        for k, v in feature["properties"]["attributes"].items():
            if k not in attribute_data.keys():
                attribute_data[k] = [np.array(v)]
            else:
                attribute_data[k].append(np.array(v))
    kwargs = {}
    for kw in ["dx", "step", "u", "v"]:
        if kw in data["properties"].keys():
            kwargs[kw] = data["properties"][kw]
    sto = STObject(main_data["timesteps"], main_data["masks"], main_data["x"], main_data["y"],
                   main_data["i"], main_data["j"], times[0], times[-1], **kwargs)
    for k, v in attribute_data.items():
        sto.attributes[k] = v
    return sto
[ "def", "read_geojson", "(", "filename", ")", ":", "json_file", "=", "open", "(", "filename", ")", "data", "=", "json", ".", "load", "(", "json_file", ")", "json_file", ".", "close", "(", ")", "times", "=", "data", "[", "\"properties\"", "]", "[", "\"times\"", "]", "main_data", "=", "dict", "(", "timesteps", "=", "[", "]", ",", "masks", "=", "[", "]", ",", "x", "=", "[", "]", ",", "y", "=", "[", "]", ",", "i", "=", "[", "]", ",", "j", "=", "[", "]", ")", "attribute_data", "=", "dict", "(", ")", "for", "feature", "in", "data", "[", "\"features\"", "]", ":", "for", "main_name", "in", "main_data", ".", "keys", "(", ")", ":", "main_data", "[", "main_name", "]", ".", "append", "(", "np", ".", "array", "(", "feature", "[", "\"properties\"", "]", "[", "main_name", "]", ")", ")", "for", "k", ",", "v", "in", "feature", "[", "\"properties\"", "]", "[", "\"attributes\"", "]", ".", "items", "(", ")", ":", "if", "k", "not", "in", "attribute_data", ".", "keys", "(", ")", ":", "attribute_data", "[", "k", "]", "=", "[", "np", ".", "array", "(", "v", ")", "]", "else", ":", "attribute_data", "[", "k", "]", ".", "append", "(", "np", ".", "array", "(", "v", ")", ")", "kwargs", "=", "{", "}", "for", "kw", "in", "[", "\"dx\"", ",", "\"step\"", ",", "\"u\"", ",", "\"v\"", "]", ":", "if", "kw", "in", "data", "[", "\"properties\"", "]", ".", "keys", "(", ")", ":", "kwargs", "[", "kw", "]", "=", "data", "[", "\"properties\"", "]", "[", "kw", "]", "sto", "=", "STObject", "(", "main_data", "[", "\"timesteps\"", "]", ",", "main_data", "[", "\"masks\"", "]", ",", "main_data", "[", "\"x\"", "]", ",", "main_data", "[", "\"y\"", "]", ",", "main_data", "[", "\"i\"", "]", ",", "main_data", "[", "\"j\"", "]", ",", "times", "[", "0", "]", ",", "times", "[", "-", "1", "]", ",", "*", "*", "kwargs", ")", "for", "k", ",", "v", "in", "attribute_data", ".", "items", "(", ")", ":", "sto", ".", "attributes", "[", "k", "]", "=", "v", "return", "sto" ]
Reads a geojson file containing an STObject and initializes a new STObject from the information in the file. Args: filename: Name of the geojson file Returns: an STObject
[ "Reads", "a", "geojson", "file", "containing", "an", "STObject", "and", "initializes", "a", "new", "STObject", "from", "the", "information", "in", "the", "file", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L540-L572
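No sample file ships with this record, but the reads in read_geojson pin down the layout it expects. Reconstructed from the code above; all field values here are placeholders:

# Input shape inferred from read_geojson (one feature per time step):
geojson_doc = {
    "properties": {
        "times": [0, 1],          # first/last entries become start/end time
        "dx": 3000, "step": 1,    # optional; forwarded to STObject as kwargs
        "u": [0.0, 0.0], "v": [0.0, 0.0],
    },
    "features": [
        {"properties": {
            "timesteps": [[0.0]], "masks": [[1]],
            "x": [[0.0]], "y": [[0.0]], "i": [[0]], "j": [[0]],
            "attributes": {"some_var": [[0.0]]},   # any per-step extras
        }},
    ],
}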
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.center_of_mass
def center_of_mass(self, time):
    """
    Calculate the center of mass at a given timestep.

    Args:
        time: Time at which the center of mass calculation is performed

    Returns:
        The x- and y-coordinates of the center of mass.
    """
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        valid = np.flatnonzero(self.masks[diff] != 0)
        if valid.size > 0:
            com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.x[diff].ravel()[valid])
            com_y = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.y[diff].ravel()[valid])
        else:
            com_x = np.mean(self.x[diff])
            com_y = np.mean(self.y[diff])
    else:
        com_x = None
        com_y = None
    return com_x, com_y
python
def center_of_mass(self, time):
    """
    Calculate the center of mass at a given timestep.

    Args:
        time: Time at which the center of mass calculation is performed

    Returns:
        The x- and y-coordinates of the center of mass.
    """
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        valid = np.flatnonzero(self.masks[diff] != 0)
        if valid.size > 0:
            com_x = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.x[diff].ravel()[valid])
            com_y = 1.0 / self.timesteps[diff].ravel()[valid].sum() * np.sum(
                self.timesteps[diff].ravel()[valid] * self.y[diff].ravel()[valid])
        else:
            com_x = np.mean(self.x[diff])
            com_y = np.mean(self.y[diff])
    else:
        com_x = None
        com_y = None
    return com_x, com_y
[ "def", "center_of_mass", "(", "self", ",", "time", ")", ":", "if", "self", ".", "start_time", "<=", "time", "<=", "self", ".", "end_time", ":", "diff", "=", "time", "-", "self", ".", "start_time", "valid", "=", "np", ".", "flatnonzero", "(", "self", ".", "masks", "[", "diff", "]", "!=", "0", ")", "if", "valid", ".", "size", ">", "0", ":", "com_x", "=", "1.0", "/", "self", ".", "timesteps", "[", "diff", "]", ".", "ravel", "(", ")", "[", "valid", "]", ".", "sum", "(", ")", "*", "np", ".", "sum", "(", "self", ".", "timesteps", "[", "diff", "]", ".", "ravel", "(", ")", "[", "valid", "]", "*", "self", ".", "x", "[", "diff", "]", ".", "ravel", "(", ")", "[", "valid", "]", ")", "com_y", "=", "1.0", "/", "self", ".", "timesteps", "[", "diff", "]", ".", "ravel", "(", ")", "[", "valid", "]", ".", "sum", "(", ")", "*", "np", ".", "sum", "(", "self", ".", "timesteps", "[", "diff", "]", ".", "ravel", "(", ")", "[", "valid", "]", "*", "self", ".", "y", "[", "diff", "]", ".", "ravel", "(", ")", "[", "valid", "]", ")", "else", ":", "com_x", "=", "np", ".", "mean", "(", "self", ".", "x", "[", "diff", "]", ")", "com_y", "=", "np", ".", "mean", "(", "self", ".", "y", "[", "diff", "]", ")", "else", ":", "com_x", "=", "None", "com_y", "=", "None", "return", "com_x", ",", "com_y" ]
Calculate the center of mass at a given timestep. Args: time: Time at which the center of mass calculation is performed Returns: The x- and y-coordinates of the center of mass.
[ "Calculate", "the", "center", "of", "mass", "at", "a", "given", "timestep", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L78-L102
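When any mask point is set, the calculation above is the intensity-weighted mean, com_x = sum(w * x) / sum(w) with the timestep intensities as weights w; otherwise it falls back to the unweighted mean of the coordinate grid. A two-point check of the weighted branch:

import numpy as np

intensity = np.array([1.0, 3.0])   # weights w
x = np.array([0.0, 10.0])
com_x = np.sum(intensity * x) / intensity.sum()
print(com_x)  # 7.5 -- pulled toward the higher-intensity point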
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.trajectory
def trajectory(self):
    """
    Calculates the center of mass for each time step and outputs an array

    Returns:

    """
    traj = np.zeros((2, self.times.size))
    for t, time in enumerate(self.times):
        traj[:, t] = self.center_of_mass(time)
    return traj
python
def trajectory(self):
    """
    Calculates the center of mass for each time step and outputs an array

    Returns:

    """
    traj = np.zeros((2, self.times.size))
    for t, time in enumerate(self.times):
        traj[:, t] = self.center_of_mass(time)
    return traj
[ "def", "trajectory", "(", "self", ")", ":", "traj", "=", "np", ".", "zeros", "(", "(", "2", ",", "self", ".", "times", ".", "size", ")", ")", "for", "t", ",", "time", "in", "enumerate", "(", "self", ".", "times", ")", ":", "traj", "[", ":", ",", "t", "]", "=", "self", ".", "center_of_mass", "(", "time", ")", "return", "traj" ]
Calculates the center of mass for each time step and outputs an array Returns:
[ "Calculates", "the", "center", "of", "mass", "for", "each", "time", "step", "and", "outputs", "an", "array" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L143-L153
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.get_corner
def get_corner(self, time):
    """
    Gets the corner array indices of the STObject at a given time that corresponds
    to the upper left corner of the bounding box for the STObject.

    Args:
        time: time at which the corner is being extracted.

    Returns:
        corner index.
    """
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        return self.i[diff][0, 0], self.j[diff][0, 0]
    else:
        return -1, -1
python
def get_corner(self, time):
    """
    Gets the corner array indices of the STObject at a given time that corresponds
    to the upper left corner of the bounding box for the STObject.

    Args:
        time: time at which the corner is being extracted.

    Returns:
        corner index.
    """
    if self.start_time <= time <= self.end_time:
        diff = time - self.start_time
        return self.i[diff][0, 0], self.j[diff][0, 0]
    else:
        return -1, -1
[ "def", "get_corner", "(", "self", ",", "time", ")", ":", "if", "self", ".", "start_time", "<=", "time", "<=", "self", ".", "end_time", ":", "diff", "=", "time", "-", "self", ".", "start_time", "return", "self", ".", "i", "[", "diff", "]", "[", "0", ",", "0", "]", ",", "self", ".", "j", "[", "diff", "]", "[", "0", ",", "0", "]", "else", ":", "return", "-", "1", ",", "-", "1" ]
Gets the corner array indices of the STObject at a given time that corresponds to the upper left corner of the bounding box for the STObject. Args: time: time at which the corner is being extracted. Returns: corner index.
[ "Gets", "the", "corner", "array", "indices", "of", "the", "STObject", "at", "a", "given", "time", "that", "corresponds", "to", "the", "upper", "left", "corner", "of", "the", "bounding", "box", "for", "the", "STObject", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L155-L170
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.size
def size(self, time):
    """
    Gets the size of the object at a given time.

    Args:
        time: Time value being queried.

    Returns:
        size of the object in pixels
    """
    if self.start_time <= time <= self.end_time:
        return self.masks[time - self.start_time].sum()
    else:
        return 0
python
def size(self, time):
    """
    Gets the size of the object at a given time.

    Args:
        time: Time value being queried.

    Returns:
        size of the object in pixels
    """
    if self.start_time <= time <= self.end_time:
        return self.masks[time - self.start_time].sum()
    else:
        return 0
[ "def", "size", "(", "self", ",", "time", ")", ":", "if", "self", ".", "start_time", "<=", "time", "<=", "self", ".", "end_time", ":", "return", "self", ".", "masks", "[", "time", "-", "self", ".", "start_time", "]", ".", "sum", "(", ")", "else", ":", "return", "0" ]
Gets the size of the object at a given time. Args: time: Time value being queried. Returns: size of the object in pixels
[ "Gets", "the", "size", "of", "the", "object", "at", "a", "given", "time", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L172-L185
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.max_size
def max_size(self):
    """
    Gets the largest size of the object over all timesteps.

    Returns:
        Maximum size of the object in pixels
    """
    sizes = np.array([m.sum() for m in self.masks])
    return sizes.max()
python
def max_size(self):
    """
    Gets the largest size of the object over all timesteps.

    Returns:
        Maximum size of the object in pixels
    """
    sizes = np.array([m.sum() for m in self.masks])
    return sizes.max()
[ "def", "max_size", "(", "self", ")", ":", "sizes", "=", "np", ".", "array", "(", "[", "m", ".", "sum", "(", ")", "for", "m", "in", "self", ".", "masks", "]", ")", "return", "sizes", ".", "max", "(", ")" ]
Gets the largest size of the object over all timesteps. Returns: Maximum size of the object in pixels
[ "Gets", "the", "largest", "size", "of", "the", "object", "over", "all", "timesteps", ".", "Returns", ":", "Maximum", "size", "of", "the", "object", "in", "pixels" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L187-L195
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.max_intensity
def max_intensity(self, time):
    """
    Calculate the maximum intensity found at a timestep.
    """
    ti = np.where(time == self.times)[0][0]
    return self.timesteps[ti].max()
python
def max_intensity(self, time):
    """
    Calculate the maximum intensity found at a timestep.
    """
    ti = np.where(time == self.times)[0][0]
    return self.timesteps[ti].max()
[ "def", "max_intensity", "(", "self", ",", "time", ")", ":", "ti", "=", "np", ".", "where", "(", "time", "==", "self", ".", "times", ")", "[", "0", "]", "[", "0", "]", "return", "self", ".", "timesteps", "[", "ti", "]", ".", "max", "(", ")" ]
Calculate the maximum intensity found at a timestep.
[ "Calculate", "the", "maximum", "intensity", "found", "at", "a", "timestep", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L197-L203
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.extend
def extend(self, step):
    """
    Adds the data from another STObject to this object.

    Args:
        step: another STObject being added after the current one in time.
    """
    self.timesteps.extend(step.timesteps)
    self.masks.extend(step.masks)
    self.x.extend(step.x)
    self.y.extend(step.y)
    self.i.extend(step.i)
    self.j.extend(step.j)
    self.end_time = step.end_time
    self.times = np.arange(self.start_time, self.end_time + self.step, self.step)
    self.u = np.concatenate((self.u, step.u))
    self.v = np.concatenate((self.v, step.v))
    for attr in self.attributes.keys():
        if attr in step.attributes.keys():
            self.attributes[attr].extend(step.attributes[attr])
python
def extend(self, step):
    """
    Adds the data from another STObject to this object.

    Args:
        step: another STObject being added after the current one in time.
    """
    self.timesteps.extend(step.timesteps)
    self.masks.extend(step.masks)
    self.x.extend(step.x)
    self.y.extend(step.y)
    self.i.extend(step.i)
    self.j.extend(step.j)
    self.end_time = step.end_time
    self.times = np.arange(self.start_time, self.end_time + self.step, self.step)
    self.u = np.concatenate((self.u, step.u))
    self.v = np.concatenate((self.v, step.v))
    for attr in self.attributes.keys():
        if attr in step.attributes.keys():
            self.attributes[attr].extend(step.attributes[attr])
[ "def", "extend", "(", "self", ",", "step", ")", ":", "self", ".", "timesteps", ".", "extend", "(", "step", ".", "timesteps", ")", "self", ".", "masks", ".", "extend", "(", "step", ".", "masks", ")", "self", ".", "x", ".", "extend", "(", "step", ".", "x", ")", "self", ".", "y", ".", "extend", "(", "step", ".", "y", ")", "self", ".", "i", ".", "extend", "(", "step", ".", "i", ")", "self", ".", "j", ".", "extend", "(", "step", ".", "j", ")", "self", ".", "end_time", "=", "step", ".", "end_time", "self", ".", "times", "=", "np", ".", "arange", "(", "self", ".", "start_time", ",", "self", ".", "end_time", "+", "self", ".", "step", ",", "self", ".", "step", ")", "self", ".", "u", "=", "np", ".", "concatenate", "(", "(", "self", ".", "u", ",", "step", ".", "u", ")", ")", "self", ".", "v", "=", "np", ".", "concatenate", "(", "(", "self", ".", "v", ",", "step", ".", "v", ")", ")", "for", "attr", "in", "self", ".", "attributes", ".", "keys", "(", ")", ":", "if", "attr", "in", "step", ".", "attributes", ".", "keys", "(", ")", ":", "self", ".", "attributes", "[", "attr", "]", ".", "extend", "(", "step", ".", "attributes", "[", "attr", "]", ")" ]
Adds the data from another STObject to this object. Args: step: another STObject being added after the current one in time.
[ "Adds", "the", "data", "from", "another", "STObject", "to", "this", "object", ".", "Args", ":", "step", ":", "another", "STObject", "being", "added", "after", "the", "current", "one", "in", "time", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L205-L224
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.boundary_polygon
def boundary_polygon(self, time):
    """
    Get coordinates of object boundary in counter-clockwise order
    """
    ti = np.where(time == self.times)[0][0]
    com_x, com_y = self.center_of_mass(time)
    # If at least one point along perimeter of the mask rectangle is unmasked, find_boundaries() works.
    # But if all perimeter points are masked, find_boundaries() does not find the object.
    # Therefore, pad the mask with zeroes first and run find_boundaries on the padded array.
    padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)
    chull = convex_hull_image(padded_mask)
    boundary_image = find_boundaries(chull, mode='inner', background=0)
    # Now remove the padding.
    boundary_image = boundary_image[1:-1, 1:-1]
    boundary_x = self.x[ti].ravel()[boundary_image.ravel()]
    boundary_y = self.y[ti].ravel()[boundary_image.ravel()]
    r = np.sqrt((boundary_x - com_x) ** 2 + (boundary_y - com_y) ** 2)
    theta = np.arctan2((boundary_y - com_y), (boundary_x - com_x)) * 180.0 / np.pi + 360
    polar_coords = np.array([(r[x], theta[x]) for x in range(r.size)],
                            dtype=[('r', 'f4'), ('theta', 'f4')])
    coord_order = np.argsort(polar_coords, order=['theta', 'r'])
    ordered_coords = np.vstack([boundary_x[coord_order], boundary_y[coord_order]])
    return ordered_coords
python
def boundary_polygon(self, time):
    """
    Get coordinates of object boundary in counter-clockwise order
    """
    ti = np.where(time == self.times)[0][0]
    com_x, com_y = self.center_of_mass(time)
    # If at least one point along perimeter of the mask rectangle is unmasked, find_boundaries() works.
    # But if all perimeter points are masked, find_boundaries() does not find the object.
    # Therefore, pad the mask with zeroes first and run find_boundaries on the padded array.
    padded_mask = np.pad(self.masks[ti], 1, 'constant', constant_values=0)
    chull = convex_hull_image(padded_mask)
    boundary_image = find_boundaries(chull, mode='inner', background=0)
    # Now remove the padding.
    boundary_image = boundary_image[1:-1, 1:-1]
    boundary_x = self.x[ti].ravel()[boundary_image.ravel()]
    boundary_y = self.y[ti].ravel()[boundary_image.ravel()]
    r = np.sqrt((boundary_x - com_x) ** 2 + (boundary_y - com_y) ** 2)
    theta = np.arctan2((boundary_y - com_y), (boundary_x - com_x)) * 180.0 / np.pi + 360
    polar_coords = np.array([(r[x], theta[x]) for x in range(r.size)],
                            dtype=[('r', 'f4'), ('theta', 'f4')])
    coord_order = np.argsort(polar_coords, order=['theta', 'r'])
    ordered_coords = np.vstack([boundary_x[coord_order], boundary_y[coord_order]])
    return ordered_coords
[ "def", "boundary_polygon", "(", "self", ",", "time", ")", ":", "ti", "=", "np", ".", "where", "(", "time", "==", "self", ".", "times", ")", "[", "0", "]", "[", "0", "]", "com_x", ",", "com_y", "=", "self", ".", "center_of_mass", "(", "time", ")", "# If at least one point along perimeter of the mask rectangle is unmasked, find_boundaries() works.", "# But if all perimeter points are masked, find_boundaries() does not find the object.", "# Therefore, pad the mask with zeroes first and run find_boundaries on the padded array.", "padded_mask", "=", "np", ".", "pad", "(", "self", ".", "masks", "[", "ti", "]", ",", "1", ",", "'constant'", ",", "constant_values", "=", "0", ")", "chull", "=", "convex_hull_image", "(", "padded_mask", ")", "boundary_image", "=", "find_boundaries", "(", "chull", ",", "mode", "=", "'inner'", ",", "background", "=", "0", ")", "# Now remove the padding.", "boundary_image", "=", "boundary_image", "[", "1", ":", "-", "1", ",", "1", ":", "-", "1", "]", "boundary_x", "=", "self", ".", "x", "[", "ti", "]", ".", "ravel", "(", ")", "[", "boundary_image", ".", "ravel", "(", ")", "]", "boundary_y", "=", "self", ".", "y", "[", "ti", "]", ".", "ravel", "(", ")", "[", "boundary_image", ".", "ravel", "(", ")", "]", "r", "=", "np", ".", "sqrt", "(", "(", "boundary_x", "-", "com_x", ")", "**", "2", "+", "(", "boundary_y", "-", "com_y", ")", "**", "2", ")", "theta", "=", "np", ".", "arctan2", "(", "(", "boundary_y", "-", "com_y", ")", ",", "(", "boundary_x", "-", "com_x", ")", ")", "*", "180.0", "/", "np", ".", "pi", "+", "360", "polar_coords", "=", "np", ".", "array", "(", "[", "(", "r", "[", "x", "]", ",", "theta", "[", "x", "]", ")", "for", "x", "in", "range", "(", "r", ".", "size", ")", "]", ",", "dtype", "=", "[", "(", "'r'", ",", "'f4'", ")", ",", "(", "'theta'", ",", "'f4'", ")", "]", ")", "coord_order", "=", "np", ".", "argsort", "(", "polar_coords", ",", "order", "=", "[", "'theta'", ",", "'r'", "]", ")", "ordered_coords", "=", "np", ".", "vstack", "(", "[", "boundary_x", "[", "coord_order", "]", ",", "boundary_y", "[", "coord_order", "]", "]", ")", "return", "ordered_coords" ]
Get coordinates of object boundary in counter-clockwise order
[ "Get", "coordinates", "of", "object", "boundary", "in", "counter", "-", "clockwise", "order" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L226-L247
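A toy run of the pad / convex hull / boundary steps above, using a mask that touches the array edge -- exactly the case the zero-padding guards against:

import numpy as np
from skimage.morphology import convex_hull_image
from skimage.segmentation import find_boundaries

# 3x3 all-ones mask: every cell lies on the array perimeter except the center
mask = np.ones((3, 3), dtype=int)
padded = np.pad(mask, 1, 'constant', constant_values=0)
chull = convex_hull_image(padded)
# 'inner' marks object pixels adjacent to background; slicing drops the pad
boundary = find_boundaries(chull, mode='inner', background=0)[1:-1, 1:-1]
print(boundary.astype(int))  # ring of 1s around a single interior 0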
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.estimate_motion
def estimate_motion(self, time, intensity_grid, max_u, max_v):
    """
    Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.

    Args:
        time: time being evaluated.
        intensity_grid: 2D array of intensities used in cross correlation.
        max_u: Maximum x-component of motion. Used to limit search area.
        max_v: Maximum y-component of motion. Used to limit search area

    Returns:
        u, v, and the minimum error.
    """
    ti = np.where(time == self.times)[0][0]
    mask_vals = np.where(self.masks[ti].ravel() == 1)
    i_vals = self.i[ti].ravel()[mask_vals]
    j_vals = self.j[ti].ravel()[mask_vals]
    obj_vals = self.timesteps[ti].ravel()[mask_vals]
    u_shifts = np.arange(-max_u, max_u + 1)
    v_shifts = np.arange(-max_v, max_v + 1)
    min_error = 99999999999.0
    best_u = 0
    best_v = 0
    for u in u_shifts:
        j_shift = j_vals - u
        for v in v_shifts:
            i_shift = i_vals - v
            if np.all((0 <= i_shift) & (i_shift < intensity_grid.shape[0]) &
                      (0 <= j_shift) & (j_shift < intensity_grid.shape[1])):
                shift_vals = intensity_grid[i_shift, j_shift]
            else:
                shift_vals = np.zeros(i_shift.shape)
            # This isn't correlation; it is mean absolute error.
            error = np.abs(shift_vals - obj_vals).mean()
            if error < min_error:
                min_error = error
                best_u = u * self.dx
                best_v = v * self.dx
    # 60 seems arbitrarily high
    # if min_error > 60:
    #     best_u = 0
    #     best_v = 0
    self.u[ti] = best_u
    self.v[ti] = best_v
    return best_u, best_v, min_error
python
def estimate_motion(self, time, intensity_grid, max_u, max_v):
    """
    Estimate the motion of the object with cross-correlation on the intensity values from the previous time step.

    Args:
        time: time being evaluated.
        intensity_grid: 2D array of intensities used in cross correlation.
        max_u: Maximum x-component of motion. Used to limit search area.
        max_v: Maximum y-component of motion. Used to limit search area

    Returns:
        u, v, and the minimum error.
    """
    ti = np.where(time == self.times)[0][0]
    mask_vals = np.where(self.masks[ti].ravel() == 1)
    i_vals = self.i[ti].ravel()[mask_vals]
    j_vals = self.j[ti].ravel()[mask_vals]
    obj_vals = self.timesteps[ti].ravel()[mask_vals]
    u_shifts = np.arange(-max_u, max_u + 1)
    v_shifts = np.arange(-max_v, max_v + 1)
    min_error = 99999999999.0
    best_u = 0
    best_v = 0
    for u in u_shifts:
        j_shift = j_vals - u
        for v in v_shifts:
            i_shift = i_vals - v
            if np.all((0 <= i_shift) & (i_shift < intensity_grid.shape[0]) &
                      (0 <= j_shift) & (j_shift < intensity_grid.shape[1])):
                shift_vals = intensity_grid[i_shift, j_shift]
            else:
                shift_vals = np.zeros(i_shift.shape)
            # This isn't correlation; it is mean absolute error.
            error = np.abs(shift_vals - obj_vals).mean()
            if error < min_error:
                min_error = error
                best_u = u * self.dx
                best_v = v * self.dx
    # 60 seems arbitrarily high
    # if min_error > 60:
    #     best_u = 0
    #     best_v = 0
    self.u[ti] = best_u
    self.v[ti] = best_v
    return best_u, best_v, min_error
[ "def", "estimate_motion", "(", "self", ",", "time", ",", "intensity_grid", ",", "max_u", ",", "max_v", ")", ":", "ti", "=", "np", ".", "where", "(", "time", "==", "self", ".", "times", ")", "[", "0", "]", "[", "0", "]", "mask_vals", "=", "np", ".", "where", "(", "self", ".", "masks", "[", "ti", "]", ".", "ravel", "(", ")", "==", "1", ")", "i_vals", "=", "self", ".", "i", "[", "ti", "]", ".", "ravel", "(", ")", "[", "mask_vals", "]", "j_vals", "=", "self", ".", "j", "[", "ti", "]", ".", "ravel", "(", ")", "[", "mask_vals", "]", "obj_vals", "=", "self", ".", "timesteps", "[", "ti", "]", ".", "ravel", "(", ")", "[", "mask_vals", "]", "u_shifts", "=", "np", ".", "arange", "(", "-", "max_u", ",", "max_u", "+", "1", ")", "v_shifts", "=", "np", ".", "arange", "(", "-", "max_v", ",", "max_v", "+", "1", ")", "min_error", "=", "99999999999.0", "best_u", "=", "0", "best_v", "=", "0", "for", "u", "in", "u_shifts", ":", "j_shift", "=", "j_vals", "-", "u", "for", "v", "in", "v_shifts", ":", "i_shift", "=", "i_vals", "-", "v", "if", "np", ".", "all", "(", "(", "0", "<=", "i_shift", ")", "&", "(", "i_shift", "<", "intensity_grid", ".", "shape", "[", "0", "]", ")", "&", "(", "0", "<=", "j_shift", ")", "&", "(", "j_shift", "<", "intensity_grid", ".", "shape", "[", "1", "]", ")", ")", ":", "shift_vals", "=", "intensity_grid", "[", "i_shift", ",", "j_shift", "]", "else", ":", "shift_vals", "=", "np", ".", "zeros", "(", "i_shift", ".", "shape", ")", "# This isn't correlation; it is mean absolute error.", "error", "=", "np", ".", "abs", "(", "shift_vals", "-", "obj_vals", ")", ".", "mean", "(", ")", "if", "error", "<", "min_error", ":", "min_error", "=", "error", "best_u", "=", "u", "*", "self", ".", "dx", "best_v", "=", "v", "*", "self", ".", "dx", "# 60 seems arbitrarily high", "#if min_error > 60:", "# best_u = 0", "# best_v = 0", "self", ".", "u", "[", "ti", "]", "=", "best_u", "self", ".", "v", "[", "ti", "]", "=", "best_v", "return", "best_u", ",", "best_v", ",", "min_error" ]
Estimate the motion of the object with cross-correlation on the intensity values from the previous time step. Args: time: time being evaluated. intensity_grid: 2D array of intensities used in cross correlation. max_u: Maximum x-component of motion. Used to limit search area. max_v: Maximum y-component of motion. Used to limit search area Returns: u, v, and the minimum error.
[ "Estimate", "the", "motion", "of", "the", "object", "with", "cross", "-", "correlation", "on", "the", "intensity", "values", "from", "the", "previous", "time", "step", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L249-L293
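Despite the docstring's mention of cross-correlation, the inline comment is right: the score is mean absolute error over candidate shifts, with the object's indices shifted back into the previous frame. A one-pixel toy search along u only:

import numpy as np

# The feature sat at column 2 in the previous frame and at column 3 now,
# i.e. a true motion of u = +1 grid cell.
prev_frame = np.zeros((5, 5))
prev_frame[2, 2] = 1.0
obj_i = np.array([2])          # current-time object indices
obj_j = np.array([3])
obj_vals = np.array([1.0])     # current-time intensities
errors = {u: np.abs(prev_frame[obj_i, obj_j - u] - obj_vals).mean()
          for u in (-1, 0, 1)}
print(min(errors, key=errors.get))  # 1 -- MAE is zero only at the true shift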
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.count_overlap
def count_overlap(self, time, other_object, other_time):
    """
    Counts the number of points that overlap between this STObject and another STObject. Used for tracking.
    """
    ti = np.where(time == self.times)[0][0]
    ma = np.where(self.masks[ti].ravel() == 1)
    oti = np.where(other_time == other_object.times)[0]
    obj_coords = np.zeros(self.masks[ti].sum(), dtype=[('x', int), ('y', int)])
    other_obj_coords = np.zeros(other_object.masks[oti].sum(), dtype=[('x', int), ('y', int)])
    obj_coords['x'] = self.i[ti].ravel()[ma]
    obj_coords['y'] = self.j[ti].ravel()[ma]
    other_obj_coords['x'] = other_object.i[oti][other_object.masks[oti] == 1]
    other_obj_coords['y'] = other_object.j[oti][other_object.masks[oti] == 1]
    return float(np.intersect1d(obj_coords, other_obj_coords).size) / np.maximum(
        self.masks[ti].sum(), other_object.masks[oti].sum())
python
def count_overlap(self, time, other_object, other_time):
    """
    Counts the number of points that overlap between this STObject and another STObject. Used for tracking.
    """
    ti = np.where(time == self.times)[0][0]
    ma = np.where(self.masks[ti].ravel() == 1)
    oti = np.where(other_time == other_object.times)[0]
    obj_coords = np.zeros(self.masks[ti].sum(), dtype=[('x', int), ('y', int)])
    other_obj_coords = np.zeros(other_object.masks[oti].sum(), dtype=[('x', int), ('y', int)])
    obj_coords['x'] = self.i[ti].ravel()[ma]
    obj_coords['y'] = self.j[ti].ravel()[ma]
    other_obj_coords['x'] = other_object.i[oti][other_object.masks[oti] == 1]
    other_obj_coords['y'] = other_object.j[oti][other_object.masks[oti] == 1]
    return float(np.intersect1d(obj_coords, other_obj_coords).size) / np.maximum(
        self.masks[ti].sum(), other_object.masks[oti].sum())
[ "def", "count_overlap", "(", "self", ",", "time", ",", "other_object", ",", "other_time", ")", ":", "ti", "=", "np", ".", "where", "(", "time", "==", "self", ".", "times", ")", "[", "0", "]", "[", "0", "]", "ma", "=", "np", ".", "where", "(", "self", ".", "masks", "[", "ti", "]", ".", "ravel", "(", ")", "==", "1", ")", "oti", "=", "np", ".", "where", "(", "other_time", "==", "other_object", ".", "times", ")", "[", "0", "]", "obj_coords", "=", "np", ".", "zeros", "(", "self", ".", "masks", "[", "ti", "]", ".", "sum", "(", ")", ",", "dtype", "=", "[", "(", "'x'", ",", "int", ")", ",", "(", "'y'", ",", "int", ")", "]", ")", "other_obj_coords", "=", "np", ".", "zeros", "(", "other_object", ".", "masks", "[", "oti", "]", ".", "sum", "(", ")", ",", "dtype", "=", "[", "(", "'x'", ",", "int", ")", ",", "(", "'y'", ",", "int", ")", "]", ")", "obj_coords", "[", "'x'", "]", "=", "self", ".", "i", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", "obj_coords", "[", "'y'", "]", "=", "self", ".", "j", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", "other_obj_coords", "[", "'x'", "]", "=", "other_object", ".", "i", "[", "oti", "]", "[", "other_object", ".", "masks", "[", "oti", "]", "==", "1", "]", "other_obj_coords", "[", "'y'", "]", "=", "other_object", ".", "j", "[", "oti", "]", "[", "other_object", ".", "masks", "[", "oti", "]", "==", "1", "]", "return", "float", "(", "np", ".", "intersect1d", "(", "obj_coords", ",", "other_obj_coords", ")", ".", "size", ")", "/", "np", ".", "maximum", "(", "self", ".", "masks", "[", "ti", "]", ".", "sum", "(", ")", ",", "other_object", ".", "masks", "[", "oti", "]", ".", "sum", "(", ")", ")" ]
Counts the number of points that overlap between this STObject and another STObject. Used for tracking.
[ "Counts", "the", "number", "of", "points", "that", "overlap", "between", "this", "STObject", "and", "another", "STObject", ".", "Used", "for", "tracking", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L295-L310
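The structured dtype above is what lets np.intersect1d compare (x, y) index pairs as single items, so the return value is the count of shared grid points divided by the larger object's size. A toy check of that trick:

import numpy as np

a = np.array([(0, 0), (1, 1), (2, 2)], dtype=[('x', int), ('y', int)])
b = np.array([(1, 1), (2, 2), (3, 3)], dtype=[('x', int), ('y', int)])
# intersect1d sorts/compares whole records, so pairs match as units
overlap = float(np.intersect1d(a, b).size) / max(a.size, b.size)
print(overlap)  # 0.666... -- two of three grid points shared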
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.extract_attribute_grid
def extract_attribute_grid(self, model_grid, potential=False, future=False):
    """
    Extracts the data from a ModelOutput or ModelGrid object within the bounding box region of the STObject.

    Args:
        model_grid: A ModelGrid or ModelOutput Object
        potential: Extracts from the time before instead of the same time as the object
    """
    if potential:
        var_name = model_grid.variable + "-potential"
        timesteps = np.arange(self.start_time - 1, self.end_time)
    elif future:
        var_name = model_grid.variable + "-future"
        timesteps = np.arange(self.start_time + 1, self.end_time + 2)
    else:
        var_name = model_grid.variable
        timesteps = np.arange(self.start_time, self.end_time + 1)
    self.attributes[var_name] = []
    for ti, t in enumerate(timesteps):
        self.attributes[var_name].append(
            model_grid.data[t - model_grid.start_hour, self.i[ti], self.j[ti]])
python
def extract_attribute_grid(self, model_grid, potential=False, future=False):
    """
    Extracts the data from a ModelOutput or ModelGrid object within the bounding box region of the STObject.

    Args:
        model_grid: A ModelGrid or ModelOutput Object
        potential: Extracts from the time before instead of the same time as the object
    """
    if potential:
        var_name = model_grid.variable + "-potential"
        timesteps = np.arange(self.start_time - 1, self.end_time)
    elif future:
        var_name = model_grid.variable + "-future"
        timesteps = np.arange(self.start_time + 1, self.end_time + 2)
    else:
        var_name = model_grid.variable
        timesteps = np.arange(self.start_time, self.end_time + 1)
    self.attributes[var_name] = []
    for ti, t in enumerate(timesteps):
        self.attributes[var_name].append(
            model_grid.data[t - model_grid.start_hour, self.i[ti], self.j[ti]])
[ "def", "extract_attribute_grid", "(", "self", ",", "model_grid", ",", "potential", "=", "False", ",", "future", "=", "False", ")", ":", "if", "potential", ":", "var_name", "=", "model_grid", ".", "variable", "+", "\"-potential\"", "timesteps", "=", "np", ".", "arange", "(", "self", ".", "start_time", "-", "1", ",", "self", ".", "end_time", ")", "elif", "future", ":", "var_name", "=", "model_grid", ".", "variable", "+", "\"-future\"", "timesteps", "=", "np", ".", "arange", "(", "self", ".", "start_time", "+", "1", ",", "self", ".", "end_time", "+", "2", ")", "else", ":", "var_name", "=", "model_grid", ".", "variable", "timesteps", "=", "np", ".", "arange", "(", "self", ".", "start_time", ",", "self", ".", "end_time", "+", "1", ")", "self", ".", "attributes", "[", "var_name", "]", "=", "[", "]", "for", "ti", ",", "t", "in", "enumerate", "(", "timesteps", ")", ":", "self", ".", "attributes", "[", "var_name", "]", ".", "append", "(", "model_grid", ".", "data", "[", "t", "-", "model_grid", ".", "start_hour", ",", "self", ".", "i", "[", "ti", "]", ",", "self", ".", "j", "[", "ti", "]", "]", ")" ]
Extracts the data from a ModelOutput or ModelGrid object within the bounding box region of the STObject. Args: model_grid: A ModelGrid or ModelOutput Object potential: Extracts from the time before instead of the same time as the object future: Extracts from the time after instead of the same time as the object
[ "Extracts", "the", "data", "from", "a", "ModelOutput", "or", "ModelGrid", "object", "within", "the", "bounding", "box", "region", "of", "the", "STObject", ".", "Args", ":", "model_grid", ":", "A", "ModelGrid", "or", "ModelOutput", "Object", "potential", ":", "Extracts", "from", "the", "time", "before", "instead", "of", "the", "same", "time", "as", "the", "object" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L312-L333
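Note: the potential and future flags in extract_attribute_grid only shift the extraction window one timestep earlier or later relative to the object's lifetime. A sketch of the three window computations with hypothetical start and end times:

import numpy as np

start_time, end_time = 3, 6  # hypothetical object lifetime in forecast hours

same = np.arange(start_time, end_time + 1)        # [3 4 5 6]
potential = np.arange(start_time - 1, end_time)   # [2 3 4 5], one step earlier
future = np.arange(start_time + 1, end_time + 2)  # [4 5 6 7], one step later
print(same, potential, future)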
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.extract_attribute_array
def extract_attribute_array(self, data_array, var_name): """ Extracts data from a 2D array that has the same dimensions as the grid used to identify the object. Args: data_array: 2D numpy array var_name: Name under which the extracted values are stored in the attributes dictionary """ if var_name not in self.attributes.keys(): self.attributes[var_name] = [] for t in range(self.times.size): self.attributes[var_name].append(data_array[self.i[t], self.j[t]])
python
def extract_attribute_array(self, data_array, var_name): """ Extracts data from a 2D array that has the same dimensions as the grid used to identify the object. Args: data_array: 2D numpy array var_name: Name under which the extracted values are stored in the attributes dictionary """ if var_name not in self.attributes.keys(): self.attributes[var_name] = [] for t in range(self.times.size): self.attributes[var_name].append(data_array[self.i[t], self.j[t]])
[ "def", "extract_attribute_array", "(", "self", ",", "data_array", ",", "var_name", ")", ":", "if", "var_name", "not", "in", "self", ".", "attributes", ".", "keys", "(", ")", ":", "self", ".", "attributes", "[", "var_name", "]", "=", "[", "]", "for", "t", "in", "range", "(", "self", ".", "times", ".", "size", ")", ":", "self", ".", "attributes", "[", "var_name", "]", ".", "append", "(", "data_array", "[", "self", ".", "i", "[", "t", "]", ",", "self", ".", "j", "[", "t", "]", "]", ")" ]
Extracts data from a 2D array that has the same dimensions as the grid used to identify the object. Args: data_array: 2D numpy array var_name: Name under which the extracted values are stored in the attributes dictionary
[ "Extracts", "data", "from", "a", "2D", "array", "that", "has", "the", "same", "dimensions", "as", "the", "grid", "used", "to", "identify", "the", "object", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L335-L346
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.extract_tendency_grid
def extract_tendency_grid(self, model_grid): """ Extracts the difference in model outputs Args: model_grid: ModelOutput or ModelGrid object. """ var_name = model_grid.variable + "-tendency" self.attributes[var_name] = [] timesteps = np.arange(self.start_time, self.end_time + 1) for ti, t in enumerate(timesteps): t_index = t - model_grid.start_hour self.attributes[var_name].append( model_grid.data[t_index, self.i[ti], self.j[ti]] - model_grid.data[t_index - 1, self.i[ti], self.j[ti]] )
python
def extract_tendency_grid(self, model_grid): """ Extracts the difference in model outputs Args: model_grid: ModelOutput or ModelGrid object. """ var_name = model_grid.variable + "-tendency" self.attributes[var_name] = [] timesteps = np.arange(self.start_time, self.end_time + 1) for ti, t in enumerate(timesteps): t_index = t - model_grid.start_hour self.attributes[var_name].append( model_grid.data[t_index, self.i[ti], self.j[ti]] - model_grid.data[t_index - 1, self.i[ti], self.j[ti]] )
[ "def", "extract_tendency_grid", "(", "self", ",", "model_grid", ")", ":", "var_name", "=", "model_grid", ".", "variable", "+", "\"-tendency\"", "self", ".", "attributes", "[", "var_name", "]", "=", "[", "]", "timesteps", "=", "np", ".", "arange", "(", "self", ".", "start_time", ",", "self", ".", "end_time", "+", "1", ")", "for", "ti", ",", "t", "in", "enumerate", "(", "timesteps", ")", ":", "t_index", "=", "t", "-", "model_grid", ".", "start_hour", "self", ".", "attributes", "[", "var_name", "]", ".", "append", "(", "model_grid", ".", "data", "[", "t_index", ",", "self", ".", "i", "[", "ti", "]", ",", "self", ".", "j", "[", "ti", "]", "]", "-", "model_grid", ".", "data", "[", "t_index", "-", "1", ",", "self", ".", "i", "[", "ti", "]", ",", "self", ".", "j", "[", "ti", "]", "]", ")" ]
Extracts the difference in model outputs Args: model_grid: ModelOutput or ModelGrid object.
[ "Extracts", "the", "difference", "in", "model", "outputs" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L349-L364
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.calc_attribute_statistics
def calc_attribute_statistics(self, statistic_name): """ Calculates summary statistics over the domains of each attribute. Args: statistic_name (string): numpy statistic, such as mean, std, max, min Returns: dict of statistics from each attribute grid. """ stats = {} for var, grids in self.attributes.items(): if len(grids) > 1: stats[var] = getattr(np.array([getattr(np.ma.array(x, mask=self.masks[t] == 0), statistic_name)() for t, x in enumerate(grids)]), statistic_name)() else: stats[var] = getattr(np.ma.array(grids[0], mask=self.masks[0] == 0), statistic_name)() return stats
python
def calc_attribute_statistics(self, statistic_name): """ Calculates summary statistics over the domains of each attribute. Args: statistic_name (string): numpy statistic, such as mean, std, max, min Returns: dict of statistics from each attribute grid. """ stats = {} for var, grids in self.attributes.items(): if len(grids) > 1: stats[var] = getattr(np.array([getattr(np.ma.array(x, mask=self.masks[t] == 0), statistic_name)() for t, x in enumerate(grids)]), statistic_name)() else: stats[var] = getattr(np.ma.array(grids[0], mask=self.masks[0] == 0), statistic_name)() return stats
[ "def", "calc_attribute_statistics", "(", "self", ",", "statistic_name", ")", ":", "stats", "=", "{", "}", "for", "var", ",", "grids", "in", "self", ".", "attributes", ".", "items", "(", ")", ":", "if", "len", "(", "grids", ")", ">", "1", ":", "stats", "[", "var", "]", "=", "getattr", "(", "np", ".", "array", "(", "[", "getattr", "(", "np", ".", "ma", ".", "array", "(", "x", ",", "mask", "=", "self", ".", "masks", "[", "t", "]", "==", "0", ")", ",", "statistic_name", ")", "(", ")", "for", "t", ",", "x", "in", "enumerate", "(", "grids", ")", "]", ")", ",", "statistic_name", ")", "(", ")", "else", ":", "stats", "[", "var", "]", "=", "getattr", "(", "np", ".", "ma", ".", "array", "(", "grids", "[", "0", "]", ",", "mask", "=", "self", ".", "masks", "[", "0", "]", "==", "0", ")", ",", "statistic_name", ")", "(", ")", "return", "stats" ]
Calculates summary statistics over the domains of each attribute. Args: statistic_name (string): numpy statistic, such as mean, std, max, min Returns: dict of statistics from each attribute grid.
[ "Calculates", "summary", "statistics", "over", "the", "domains", "of", "each", "attribute", ".", "Args", ":", "statistic_name", "(", "string", ")", ":", "numpy", "statistic", "such", "as", "mean", "std", "max", "min" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L366-L383
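Note: calc_attribute_statistics dispatches the same numpy method name twice through getattr: once per masked timestep grid, then again across the per-timestep values. A minimal sketch of that pattern with made-up grids and masks:

import numpy as np

grids = [np.arange(9.0).reshape(3, 3), np.arange(9.0, 18.0).reshape(3, 3)]
masks = [np.eye(3, dtype=int), np.eye(3, dtype=int)]
statistic_name = "mean"

# Masked per-timestep statistic (mask == 0 points are excluded), then
# the same statistic aggregated across timesteps
per_step = np.array([getattr(np.ma.array(g, mask=m == 0), statistic_name)()
                     for g, m in zip(grids, masks)])
overall = getattr(per_step, statistic_name)()
print(per_step, overall)  # [ 4. 13.] 8.5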
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.calc_attribute_statistic
def calc_attribute_statistic(self, attribute, statistic, time): """ Calculate statistics based on the values of an attribute. The following statistics are supported: mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value). Args: attribute: Attribute extracted from model grid statistic: Name of statistic being used. time: timestep of the object being investigated Returns: The value of the statistic """ ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.attributes[attribute][ti].ravel()[ma]) elif statistic == "skew": stat_val = np.mean(self.attributes[attribute][ti].ravel()[ma]) - \ np.median(self.attributes[attribute][ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.attributes[attribute][ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_attribute_statistic(attribute, stat_name, time) \ - self.calc_attribute_statistic(attribute, stat_name, time - 1) else: stat_val = np.nan return stat_val
python
def calc_attribute_statistic(self, attribute, statistic, time): """ Calculate statistics based on the values of an attribute. The following statistics are supported: mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value). Args: attribute: Attribute extracted from model grid statistic: Name of statistic being used. time: timestep of the object being investigated Returns: The value of the statistic """ ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.attributes[attribute][ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.attributes[attribute][ti].ravel()[ma]) elif statistic == "skew": stat_val = np.mean(self.attributes[attribute][ti].ravel()[ma]) - \ np.median(self.attributes[attribute][ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.attributes[attribute][ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_attribute_statistic(attribute, stat_name, time) \ - self.calc_attribute_statistic(attribute, stat_name, time - 1) else: stat_val = np.nan return stat_val
[ "def", "calc_attribute_statistic", "(", "self", ",", "attribute", ",", "statistic", ",", "time", ")", ":", "ti", "=", "np", ".", "where", "(", "self", ".", "times", "==", "time", ")", "[", "0", "]", "[", "0", "]", "ma", "=", "np", ".", "where", "(", "self", ".", "masks", "[", "ti", "]", ".", "ravel", "(", ")", "==", "1", ")", "if", "statistic", "in", "[", "'mean'", ",", "'max'", ",", "'min'", ",", "'std'", ",", "'ptp'", "]", ":", "stat_val", "=", "getattr", "(", "self", ".", "attributes", "[", "attribute", "]", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ",", "statistic", ")", "(", ")", "elif", "statistic", "==", "'median'", ":", "stat_val", "=", "np", ".", "median", "(", "self", ".", "attributes", "[", "attribute", "]", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ")", "elif", "statistic", "==", "\"skew\"", ":", "stat_val", "=", "np", ".", "mean", "(", "self", ".", "attributes", "[", "attribute", "]", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ")", "-", "np", ".", "median", "(", "self", ".", "attributes", "[", "attribute", "]", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ")", "elif", "'percentile'", "in", "statistic", ":", "per", "=", "int", "(", "statistic", ".", "split", "(", "\"_\"", ")", "[", "1", "]", ")", "stat_val", "=", "np", ".", "percentile", "(", "self", ".", "attributes", "[", "attribute", "]", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ",", "per", ")", "elif", "'dt'", "in", "statistic", ":", "stat_name", "=", "statistic", "[", ":", "-", "3", "]", "if", "ti", "==", "0", ":", "stat_val", "=", "0", "else", ":", "stat_val", "=", "self", ".", "calc_attribute_statistic", "(", "attribute", ",", "stat_name", ",", "time", ")", "-", "self", ".", "calc_attribute_statistic", "(", "attribute", ",", "stat_name", ",", "time", "-", "1", ")", "else", ":", "stat_val", "=", "np", ".", "nan", "return", "stat_val" ]
Calculate statistics based on the values of an attribute. The following statistics are supported: mean, max, min, std, ptp (range), median, skew (mean - median), and percentile_(percentile value). Args: attribute: Attribute extracted from model grid statistic: Name of statistic being used. time: timestep of the object being investigated Returns: The value of the statistic
[ "Calculate", "statistics", "based", "on", "the", "values", "of", "an", "attribute", ".", "The", "following", "statistics", "are", "supported", ":", "mean", "max", "min", "std", "ptp", "(", "range", ")", "median", "skew", "(", "mean", "-", "median", ")", "and", "percentile_", "(", "percentile", "value", ")", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L385-L419
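Note: the statistic argument above is parsed purely by string convention: 'percentile_<p>' splits on the underscore, and names ending in '_dt' strip the 3-character suffix and difference the underlying statistic between consecutive times. A short sketch of just the name handling, with hypothetical values:

import numpy as np

values = np.array([1.0, 4.0, 7.0, 10.0])

statistic = "percentile_90"
per = int(statistic.split("_")[1])   # -> 90
print(np.percentile(values, per))    # -> 9.1

statistic = "mean_dt"
stat_name = statistic[:-3]           # -> "mean", then stat(t) - stat(t - 1)
print(stat_name)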
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.calc_timestep_statistic
def calc_timestep_statistic(self, statistic, time): """ Calculate statistics from the primary attribute of the STObject. Args: statistic: statistic being calculated time: Timestep being investigated Returns: Value of the statistic """ ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.timesteps[ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_timestep_statistic(stat_name, time) -\ self.calc_timestep_statistic(stat_name, time - 1) else: stat_val = np.nan return stat_val
python
def calc_timestep_statistic(self, statistic, time): """ Calculate statistics from the primary attribute of the STObject. Args: statistic: statistic being calculated time: Timestep being investigated Returns: Value of the statistic """ ti = np.where(self.times == time)[0][0] ma = np.where(self.masks[ti].ravel() == 1) if statistic in ['mean', 'max', 'min', 'std', 'ptp']: stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)() elif statistic == 'median': stat_val = np.median(self.timesteps[ti].ravel()[ma]) elif 'percentile' in statistic: per = int(statistic.split("_")[1]) stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per) elif 'dt' in statistic: stat_name = statistic[:-3] if ti == 0: stat_val = 0 else: stat_val = self.calc_timestep_statistic(stat_name, time) -\ self.calc_timestep_statistic(stat_name, time - 1) else: stat_val = np.nan return stat_val
[ "def", "calc_timestep_statistic", "(", "self", ",", "statistic", ",", "time", ")", ":", "ti", "=", "np", ".", "where", "(", "self", ".", "times", "==", "time", ")", "[", "0", "]", "[", "0", "]", "ma", "=", "np", ".", "where", "(", "self", ".", "masks", "[", "ti", "]", ".", "ravel", "(", ")", "==", "1", ")", "if", "statistic", "in", "[", "'mean'", ",", "'max'", ",", "'min'", ",", "'std'", ",", "'ptp'", "]", ":", "stat_val", "=", "getattr", "(", "self", ".", "timesteps", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ",", "statistic", ")", "(", ")", "elif", "statistic", "==", "'median'", ":", "stat_val", "=", "np", ".", "median", "(", "self", ".", "timesteps", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ")", "elif", "'percentile'", "in", "statistic", ":", "per", "=", "int", "(", "statistic", ".", "split", "(", "\"_\"", ")", "[", "1", "]", ")", "stat_val", "=", "np", ".", "percentile", "(", "self", ".", "timesteps", "[", "ti", "]", ".", "ravel", "(", ")", "[", "ma", "]", ",", "per", ")", "elif", "'dt'", "in", "statistic", ":", "stat_name", "=", "statistic", "[", ":", "-", "3", "]", "if", "ti", "==", "0", ":", "stat_val", "=", "0", "else", ":", "stat_val", "=", "self", ".", "calc_timestep_statistic", "(", "stat_name", ",", "time", ")", "-", "self", ".", "calc_timestep_statistic", "(", "stat_name", ",", "time", "-", "1", ")", "else", ":", "stat_val", "=", "np", ".", "nan", "return", "stat_val" ]
Calculate statistics from the primary attribute of the STObject. Args: statistic: statistic being calculated time: Timestep being investigated Returns: Value of the statistic
[ "Calculate", "statistics", "from", "the", "primary", "attribute", "of", "the", "StObject", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L421-L450
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.calc_shape_statistics
def calc_shape_statistics(self, stat_names): """ Calculate shape statistics using regionprops applied to the object mask. Args: stat_names: List of statistics to be extracted from those calculated by regionprops. Returns: Dictionary of shape statistics """ stats = {} try: all_props = [regionprops(m) for m in self.masks] except TypeError: print(self.masks) exit() for stat in stat_names: stats[stat] = np.mean([p[0][stat] for p in all_props]) return stats
python
def calc_shape_statistics(self, stat_names): """ Calculate shape statistics using regionprops applied to the object mask. Args: stat_names: List of statistics to be extracted from those calculated by regionprops. Returns: Dictionary of shape statistics """ stats = {} try: all_props = [regionprops(m) for m in self.masks] except TypeError: print(self.masks) exit() for stat in stat_names: stats[stat] = np.mean([p[0][stat] for p in all_props]) return stats
[ "def", "calc_shape_statistics", "(", "self", ",", "stat_names", ")", ":", "stats", "=", "{", "}", "try", ":", "all_props", "=", "[", "regionprops", "(", "m", ")", "for", "m", "in", "self", ".", "masks", "]", "except", "TypeError", ":", "print", "(", "self", ".", "masks", ")", "exit", "(", ")", "for", "stat", "in", "stat_names", ":", "stats", "[", "stat", "]", "=", "np", ".", "mean", "(", "[", "p", "[", "0", "]", "[", "stat", "]", "for", "p", "in", "all_props", "]", ")", "return", "stats" ]
Calculate shape statistics using regionprops applied to the object mask. Args: stat_names: List of statistics to be extracted from those calculated by regionprops. Returns: Dictionary of shape statistics
[ "Calculate", "shape", "statistics", "using", "regionprops", "applied", "to", "the", "object", "mask", ".", "Args", ":", "stat_names", ":", "List", "of", "statistics", "to", "be", "extracted", "from", "those", "calculated", "by", "regionprops", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L452-L470
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.calc_shape_step
def calc_shape_step(self, stat_names, time): """ Calculate shape statistics for a single time step Args: stat_names: List of shape statistics calculated from region props time: Time being investigated Returns: List of shape statistics """ ti = np.where(self.times == time)[0][0] props = regionprops(self.masks[ti], self.timesteps[ti])[0] shape_stats = [] for stat_name in stat_names: if "moments_hu" in stat_name: hu_index = int(stat_name.split("_")[-1]) hu_name = "_".join(stat_name.split("_")[:-1]) hu_val = np.log(props[hu_name][hu_index]) if np.isnan(hu_val): shape_stats.append(0) else: shape_stats.append(hu_val) else: shape_stats.append(props[stat_name]) return shape_stats
python
def calc_shape_step(self, stat_names, time): """ Calculate shape statistics for a single time step Args: stat_names: List of shape statistics calculated from region props time: Time being investigated Returns: List of shape statistics """ ti = np.where(self.times == time)[0][0] props = regionprops(self.masks[ti], self.timesteps[ti])[0] shape_stats = [] for stat_name in stat_names: if "moments_hu" in stat_name: hu_index = int(stat_name.split("_")[-1]) hu_name = "_".join(stat_name.split("_")[:-1]) hu_val = np.log(props[hu_name][hu_index]) if np.isnan(hu_val): shape_stats.append(0) else: shape_stats.append(hu_val) else: shape_stats.append(props[stat_name]) return shape_stats
[ "def", "calc_shape_step", "(", "self", ",", "stat_names", ",", "time", ")", ":", "ti", "=", "np", ".", "where", "(", "self", ".", "times", "==", "time", ")", "[", "0", "]", "[", "0", "]", "props", "=", "regionprops", "(", "self", ".", "masks", "[", "ti", "]", ",", "self", ".", "timesteps", "[", "ti", "]", ")", "[", "0", "]", "shape_stats", "=", "[", "]", "for", "stat_name", "in", "stat_names", ":", "if", "\"moments_hu\"", "in", "stat_name", ":", "hu_index", "=", "int", "(", "stat_name", ".", "split", "(", "\"_\"", ")", "[", "-", "1", "]", ")", "hu_name", "=", "\"_\"", ".", "join", "(", "stat_name", ".", "split", "(", "\"_\"", ")", "[", ":", "-", "1", "]", ")", "hu_val", "=", "np", ".", "log", "(", "props", "[", "hu_name", "]", "[", "hu_index", "]", ")", "if", "np", ".", "isnan", "(", "hu_val", ")", ":", "shape_stats", ".", "append", "(", "0", ")", "else", ":", "shape_stats", ".", "append", "(", "hu_val", ")", "else", ":", "shape_stats", ".", "append", "(", "props", "[", "stat_name", "]", ")", "return", "shape_stats" ]
Calculate shape statistics for a single time step Args: stat_names: List of shape statistics calculated from region props time: Time being investigated Returns: List of shape statistics
[ "Calculate", "shape", "statistics", "for", "a", "single", "time", "step" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L472-L498
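Note: calc_shape_step log-transforms Hu moment invariants, which span many orders of magnitude, and zero-fills the NaNs that np.log produces for non-positive moments. A hedged sketch of just that step with scikit-image regionprops (the mask is a made-up footprint):

import numpy as np
from skimage.measure import regionprops

mask = np.zeros((8, 8), dtype=int)
mask[2:6, 2:7] = 1  # hypothetical object footprint as a label image

props = regionprops(mask)[0]
hu = props["moments_hu"]         # the seven Hu invariants
log_hu = np.log(hu)              # compress the dynamic range
log_hu[np.isnan(log_hu)] = 0.0   # guard against log of non-positive values
print(log_hu)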
djgagne/hagelslag
hagelslag/processing/STObject.py
STObject.to_geojson
def to_geojson(self, filename, proj, metadata=None): """ Output the data in the STObject to a geoJSON file. Args: filename: Name of the file proj: PyProj object for converting the x and y coordinates back to latitude and longitude values. metadata: Metadata describing the object to be included in the top-level properties. """ if metadata is None: metadata = {} json_obj = {"type": "FeatureCollection", "features": [], "properties": {}} json_obj['properties']['times'] = self.times.tolist() json_obj['properties']['dx'] = self.dx json_obj['properties']['step'] = self.step json_obj['properties']['u'] = self.u.tolist() json_obj['properties']['v'] = self.v.tolist() for k, v in metadata.items(): json_obj['properties'][k] = v for t, time in enumerate(self.times): feature = {"type": "Feature", "geometry": {"type": "Polygon"}, "properties": {}} boundary_coords = self.boundary_polygon(time) lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True)) lonlat_list = lonlat.T.tolist() if len(lonlat_list) > 0: lonlat_list.append(lonlat_list[0]) feature["geometry"]["coordinates"] = [lonlat_list] for attr in ["timesteps", "masks", "x", "y", "i", "j"]: feature["properties"][attr] = getattr(self, attr)[t].tolist() feature["properties"]["attributes"] = {} for attr_name, steps in self.attributes.items(): feature["properties"]["attributes"][attr_name] = steps[t].tolist() json_obj['features'].append(feature) file_obj = open(filename, "w") json.dump(json_obj, file_obj, indent=1, sort_keys=True) file_obj.close() return
python
def to_geojson(self, filename, proj, metadata=None): """ Output the data in the STObject to a geoJSON file. Args: filename: Name of the file proj: PyProj object for converting the x and y coordinates back to latitude and longitude values. metadata: Metadata describing the object to be included in the top-level properties. """ if metadata is None: metadata = {} json_obj = {"type": "FeatureCollection", "features": [], "properties": {}} json_obj['properties']['times'] = self.times.tolist() json_obj['properties']['dx'] = self.dx json_obj['properties']['step'] = self.step json_obj['properties']['u'] = self.u.tolist() json_obj['properties']['v'] = self.v.tolist() for k, v in metadata.items(): json_obj['properties'][k] = v for t, time in enumerate(self.times): feature = {"type": "Feature", "geometry": {"type": "Polygon"}, "properties": {}} boundary_coords = self.boundary_polygon(time) lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True)) lonlat_list = lonlat.T.tolist() if len(lonlat_list) > 0: lonlat_list.append(lonlat_list[0]) feature["geometry"]["coordinates"] = [lonlat_list] for attr in ["timesteps", "masks", "x", "y", "i", "j"]: feature["properties"][attr] = getattr(self, attr)[t].tolist() feature["properties"]["attributes"] = {} for attr_name, steps in self.attributes.items(): feature["properties"]["attributes"][attr_name] = steps[t].tolist() json_obj['features'].append(feature) file_obj = open(filename, "w") json.dump(json_obj, file_obj, indent=1, sort_keys=True) file_obj.close() return
[ "def", "to_geojson", "(", "self", ",", "filename", ",", "proj", ",", "metadata", "=", "None", ")", ":", "if", "metadata", "is", "None", ":", "metadata", "=", "{", "}", "json_obj", "=", "{", "\"type\"", ":", "\"FeatureCollection\"", ",", "\"features\"", ":", "[", "]", ",", "\"properties\"", ":", "{", "}", "}", "json_obj", "[", "'properties'", "]", "[", "'times'", "]", "=", "self", ".", "times", ".", "tolist", "(", ")", "json_obj", "[", "'properties'", "]", "[", "'dx'", "]", "=", "self", ".", "dx", "json_obj", "[", "'properties'", "]", "[", "'step'", "]", "=", "self", ".", "step", "json_obj", "[", "'properties'", "]", "[", "'u'", "]", "=", "self", ".", "u", ".", "tolist", "(", ")", "json_obj", "[", "'properties'", "]", "[", "'v'", "]", "=", "self", ".", "v", ".", "tolist", "(", ")", "for", "k", ",", "v", "in", "metadata", ".", "items", "(", ")", ":", "json_obj", "[", "'properties'", "]", "[", "k", "]", "=", "v", "for", "t", ",", "time", "in", "enumerate", "(", "self", ".", "times", ")", ":", "feature", "=", "{", "\"type\"", ":", "\"Feature\"", ",", "\"geometry\"", ":", "{", "\"type\"", ":", "\"Polygon\"", "}", ",", "\"properties\"", ":", "{", "}", "}", "boundary_coords", "=", "self", ".", "boundary_polygon", "(", "time", ")", "lonlat", "=", "np", ".", "vstack", "(", "proj", "(", "boundary_coords", "[", "0", "]", ",", "boundary_coords", "[", "1", "]", ",", "inverse", "=", "True", ")", ")", "lonlat_list", "=", "lonlat", ".", "T", ".", "tolist", "(", ")", "if", "len", "(", "lonlat_list", ")", ">", "0", ":", "lonlat_list", ".", "append", "(", "lonlat_list", "[", "0", "]", ")", "feature", "[", "\"geometry\"", "]", "[", "\"coordinates\"", "]", "=", "[", "lonlat_list", "]", "for", "attr", "in", "[", "\"timesteps\"", ",", "\"masks\"", ",", "\"x\"", ",", "\"y\"", ",", "\"i\"", ",", "\"j\"", "]", ":", "feature", "[", "\"properties\"", "]", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "[", "t", "]", ".", "tolist", "(", ")", "feature", "[", "\"properties\"", "]", "[", "\"attributes\"", "]", "=", "{", "}", "for", "attr_name", ",", "steps", "in", "self", ".", "attributes", ".", "items", "(", ")", ":", "feature", "[", "\"properties\"", "]", "[", "\"attributes\"", "]", "[", "attr_name", "]", "=", "steps", "[", "t", "]", ".", "tolist", "(", ")", "json_obj", "[", "'features'", "]", ".", "append", "(", "feature", ")", "file_obj", "=", "open", "(", "filename", ",", "\"w\"", ")", "json", ".", "dump", "(", "json_obj", ",", "file_obj", ",", "indent", "=", "1", ",", "sort_keys", "=", "True", ")", "file_obj", ".", "close", "(", ")", "return" ]
Output the data in the STObject to a geoJSON file. Args: filename: Name of the file proj: PyProj object for converting the x and y coordinates back to latitude and longitude values. metadata: Metadata describing the object to be included in the top-level properties.
[ "Output", "the", "data", "in", "the", "STObject", "to", "a", "geoJSON", "file", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L500-L538
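Note: to_geojson expects a projection callable whose inverse maps the object's x/y coordinates back to longitude/latitude, which is what the np.vstack(proj(...)).T.tolist() call and ring closing above rely on. A hedged sketch of that conversion with pyproj (the Lambert conformal parameters and coordinates are made up):

import numpy as np
import pyproj

# Hypothetical Lambert conformal projection for a model grid
proj = pyproj.Proj(proj="lcc", lat_1=32.0, lat_2=46.0, lat_0=39.0, lon_0=-95.0)

# Boundary coordinates in projection space (meters); values are invented
x = np.array([0.0, 50000.0, 50000.0, 0.0])
y = np.array([0.0, 0.0, 50000.0, 50000.0])

lon, lat = proj(x, y, inverse=True)   # back to geographic coordinates
lonlat_list = np.vstack([lon, lat]).T.tolist()
lonlat_list.append(lonlat_list[0])    # close the polygon ring for GeoJSON
print(lonlat_list)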
nion-software/nionswift
nion/swift/model/MemoryStorageSystem.py
MemoryStorageSystem.rewrite_properties
def rewrite_properties(self, properties): """Set the properties and write to disk.""" with self.__library_storage_lock: self.__library_storage = properties self.__write_properties(None)
python
def rewrite_properties(self, properties): """Set the properties and write to disk.""" with self.__library_storage_lock: self.__library_storage = properties self.__write_properties(None)
[ "def", "rewrite_properties", "(", "self", ",", "properties", ")", ":", "with", "self", ".", "__library_storage_lock", ":", "self", ".", "__library_storage", "=", "properties", "self", ".", "__write_properties", "(", "None", ")" ]
Set the properties and write to disk.
[ "Set", "the", "properties", "and", "write", "to", "disk", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/MemoryStorageSystem.py#L113-L117
mgraffg/EvoDAG
EvoDAG/population.py
BasePopulation.model
def model(self, v=None): "Returns the model of node v" if v is None: v = self.estopping hist = self.hist trace = self.trace(v) ins = None if self._base._probability_calibration is not None: node = hist[-1] node.normalize() X = np.array([x.full_array() for x in node.hy]).T y = np.array(self._base._y_klass.full_array()) mask = np.ones(X.shape[0], dtype=bool) mask[np.array(self._base._mask_ts.index)] = False ins = self._base._probability_calibration().fit(X[mask], y[mask]) if self._classifier: nclasses = self._labels.shape[0] else: nclasses = None m = Model(trace, hist, nvar=self._base._nvar, classifier=self._classifier, labels=self._labels, probability_calibration=ins, nclasses=nclasses) return m
python
def model(self, v=None): "Returns the model of node v" if v is None: v = self.estopping hist = self.hist trace = self.trace(v) ins = None if self._base._probability_calibration is not None: node = hist[-1] node.normalize() X = np.array([x.full_array() for x in node.hy]).T y = np.array(self._base._y_klass.full_array()) mask = np.ones(X.shape[0], dtype=bool) mask[np.array(self._base._mask_ts.index)] = False ins = self._base._probability_calibration().fit(X[mask], y[mask]) if self._classifier: nclasses = self._labels.shape[0] else: nclasses = None m = Model(trace, hist, nvar=self._base._nvar, classifier=self._classifier, labels=self._labels, probability_calibration=ins, nclasses=nclasses) return m
[ "def", "model", "(", "self", ",", "v", "=", "None", ")", ":", "if", "v", "is", "None", ":", "v", "=", "self", ".", "estopping", "hist", "=", "self", ".", "hist", "trace", "=", "self", ".", "trace", "(", "v", ")", "ins", "=", "None", "if", "self", ".", "_base", ".", "_probability_calibration", "is", "not", "None", ":", "node", "=", "hist", "[", "-", "1", "]", "node", ".", "normalize", "(", ")", "X", "=", "np", ".", "array", "(", "[", "x", ".", "full_array", "(", ")", "for", "x", "in", "node", ".", "hy", "]", ")", ".", "T", "y", "=", "np", ".", "array", "(", "self", ".", "_base", ".", "_y_klass", ".", "full_array", "(", ")", ")", "mask", "=", "np", ".", "ones", "(", "X", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "bool", ")", "mask", "[", "np", ".", "array", "(", "self", ".", "_base", ".", "_mask_ts", ".", "index", ")", "]", "=", "False", "ins", "=", "self", ".", "_base", ".", "_probability_calibration", "(", ")", ".", "fit", "(", "X", "[", "mask", "]", ",", "y", "[", "mask", "]", ")", "if", "self", ".", "_classifier", ":", "nclasses", "=", "self", ".", "_labels", ".", "shape", "[", "0", "]", "else", ":", "nclasses", "=", "None", "m", "=", "Model", "(", "trace", ",", "hist", ",", "nvar", "=", "self", ".", "_base", ".", "_nvar", ",", "classifier", "=", "self", ".", "_classifier", ",", "labels", "=", "self", ".", "_labels", ",", "probability_calibration", "=", "ins", ",", "nclasses", "=", "nclasses", ")", "return", "m" ]
Returns the model of node v
[ "Returns", "the", "model", "of", "node", "v" ]
train
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L252-L274
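Note: the probability-calibration branch in model fits on rows outside the internal test-set indices by flipping entries of a boolean mask; np.bool was deprecated in NumPy 1.20 and removed in 1.24, so the record above is written with the plain bool dtype. A minimal sketch of the masking, with hypothetical indices:

import numpy as np

n_samples = 10
test_index = np.array([2, 5, 7])   # hypothetical held-out rows

mask = np.ones(n_samples, dtype=bool)
mask[test_index] = False           # exclude the held-out rows

X = np.arange(n_samples * 2).reshape(n_samples, 2)
print(X[mask])                     # rows available for calibration fitting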
mgraffg/EvoDAG
EvoDAG/population.py
BasePopulation.trace
def trace(self, n): "Return the sorted positions in the history of individual n's nodes" trace_map = {} self._trace(n, trace_map) s = list(trace_map.keys()) s.sort() return s
python
def trace(self, n): "Return the sorted positions in the history of individual n's nodes" trace_map = {} self._trace(n, trace_map) s = list(trace_map.keys()) s.sort() return s
[ "def", "trace", "(", "self", ",", "n", ")", ":", "trace_map", "=", "{", "}", "self", ".", "_trace", "(", "n", ",", "trace_map", ")", "s", "=", "list", "(", "trace_map", ".", "keys", "(", ")", ")", "s", ".", "sort", "(", ")", "return", "s" ]
Return the sorted positions in the history of individual n's nodes
[ "Restore", "the", "position", "in", "the", "history", "of", "individual", "v", "s", "nodes" ]
train
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L276-L282
mgraffg/EvoDAG
EvoDAG/population.py
BasePopulation.tournament
def tournament(self, negative=False): """Tournament selection and when negative is True it performs negative tournament selection""" if self.generation <= self._random_generations and not negative: return self.random_selection() if not self._negative_selection and negative: return self.random_selection(negative=negative) vars = self.random() fit = [(k, self.population[x].fitness) for k, x in enumerate(vars)] if negative: fit = min(fit, key=lambda x: x[1]) else: fit = max(fit, key=lambda x: x[1]) index = fit[0] return vars[index]
python
def tournament(self, negative=False): """Tournament selection and when negative is True it performs negative tournament selection""" if self.generation <= self._random_generations and not negative: return self.random_selection() if not self._negative_selection and negative: return self.random_selection(negative=negative) vars = self.random() fit = [(k, self.population[x].fitness) for k, x in enumerate(vars)] if negative: fit = min(fit, key=lambda x: x[1]) else: fit = max(fit, key=lambda x: x[1]) index = fit[0] return vars[index]
[ "def", "tournament", "(", "self", ",", "negative", "=", "False", ")", ":", "if", "self", ".", "generation", "<=", "self", ".", "_random_generations", "and", "not", "negative", ":", "return", "self", ".", "random_selection", "(", ")", "if", "not", "self", ".", "_negative_selection", "and", "negative", ":", "return", "self", ".", "random_selection", "(", "negative", "=", "negative", ")", "vars", "=", "self", ".", "random", "(", ")", "fit", "=", "[", "(", "k", ",", "self", ".", "population", "[", "x", "]", ".", "fitness", ")", "for", "k", ",", "x", "in", "enumerate", "(", "vars", ")", "]", "if", "negative", ":", "fit", "=", "min", "(", "fit", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "else", ":", "fit", "=", "max", "(", "fit", ",", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "index", "=", "fit", "[", "0", "]", "return", "vars", "[", "index", "]" ]
Tournament selection and when negative is True it performs negative tournament selection
[ "Tournament", "selection", "and", "when", "negative", "is", "True", "it", "performs", "negative", "tournament", "selection" ]
train
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L319-L333
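Note: tournament selection as used above draws a random subset of the population and keeps the fittest member; the negative variant keeps the least fit and is what replace uses to pick a victim. A standalone sketch with made-up fitness values:

import numpy as np

fitness = np.array([0.2, 0.9, 0.5, 0.7, 0.1])  # hypothetical population fitness
tournament_size = 2

def tournament(fitness, size, negative=False, rng=np.random):
    # Sample distinct contestants, then keep the best (or worst) index
    contestants = rng.choice(len(fitness), size=size, replace=False)
    pick = min if negative else max
    return pick(contestants, key=lambda k: fitness[k])

winner = tournament(fitness, tournament_size)                 # survivor candidate
victim = tournament(fitness, tournament_size, negative=True)  # replacement slot
print(winner, victim)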
mgraffg/EvoDAG
EvoDAG/population.py
BasePopulation.create_population
def create_population(self): "Create the initial population" base = self._base if base._share_inputs: used_inputs_var = SelectNumbers([x for x in range(base.nvar)]) used_inputs_naive = used_inputs_var if base._pr_variable == 0: used_inputs_var = SelectNumbers([]) used_inputs_naive = SelectNumbers([x for x in range(base.nvar)]) elif base._pr_variable == 1: used_inputs_var = SelectNumbers([x for x in range(base.nvar)]) used_inputs_naive = SelectNumbers([]) else: used_inputs_var = SelectNumbers([x for x in range(base.nvar)]) used_inputs_naive = SelectNumbers([x for x in range(base.nvar)]) nb_input = Inputs(base, used_inputs_naive, functions=base._input_functions) while ((base._all_inputs and not base.stopping_criteria_tl()) or (self.popsize < base.popsize and not base.stopping_criteria())): if base._all_inputs and used_inputs_var.empty() and used_inputs_naive.empty(): base._init_popsize = self.popsize break if nb_input.use_all_variables(): v = nb_input.all_variables() if v is None: continue elif not used_inputs_var.empty() and np.random.random() < base._pr_variable: v = self.variable_input(used_inputs_var) if v is None: used_inputs_var.pos = used_inputs_var.size continue elif not used_inputs_naive.empty(): v = nb_input.input() if not used_inputs_var.empty() and used_inputs_naive.empty(): base._pr_variable = 1 if v is None: used_inputs_naive.pos = used_inputs_naive.size if not used_inputs_var.empty(): base._pr_variable = 1 continue else: gen = self.generation self.generation = 0 v = base.random_offspring() self.generation = gen self.add(v)
python
def create_population(self): "Create the initial population" base = self._base if base._share_inputs: used_inputs_var = SelectNumbers([x for x in range(base.nvar)]) used_inputs_naive = used_inputs_var if base._pr_variable == 0: used_inputs_var = SelectNumbers([]) used_inputs_naive = SelectNumbers([x for x in range(base.nvar)]) elif base._pr_variable == 1: used_inputs_var = SelectNumbers([x for x in range(base.nvar)]) used_inputs_naive = SelectNumbers([]) else: used_inputs_var = SelectNumbers([x for x in range(base.nvar)]) used_inputs_naive = SelectNumbers([x for x in range(base.nvar)]) nb_input = Inputs(base, used_inputs_naive, functions=base._input_functions) while ((base._all_inputs and not base.stopping_criteria_tl()) or (self.popsize < base.popsize and not base.stopping_criteria())): if base._all_inputs and used_inputs_var.empty() and used_inputs_naive.empty(): base._init_popsize = self.popsize break if nb_input.use_all_variables(): v = nb_input.all_variables() if v is None: continue elif not used_inputs_var.empty() and np.random.random() < base._pr_variable: v = self.variable_input(used_inputs_var) if v is None: used_inputs_var.pos = used_inputs_var.size continue elif not used_inputs_naive.empty(): v = nb_input.input() if not used_inputs_var.empty() and used_inputs_naive.empty(): base._pr_variable = 1 if v is None: used_inputs_naive.pos = used_inputs_naive.size if not used_inputs_var.empty(): base._pr_variable = 1 continue else: gen = self.generation self.generation = 0 v = base.random_offspring() self.generation = gen self.add(v)
[ "def", "create_population", "(", "self", ")", ":", "base", "=", "self", ".", "_base", "if", "base", ".", "_share_inputs", ":", "used_inputs_var", "=", "SelectNumbers", "(", "[", "x", "for", "x", "in", "range", "(", "base", ".", "nvar", ")", "]", ")", "used_inputs_naive", "=", "used_inputs_var", "if", "base", ".", "_pr_variable", "==", "0", ":", "used_inputs_var", "=", "SelectNumbers", "(", "[", "]", ")", "used_inputs_naive", "=", "SelectNumbers", "(", "[", "x", "for", "x", "in", "range", "(", "base", ".", "nvar", ")", "]", ")", "elif", "base", ".", "_pr_variable", "==", "1", ":", "used_inputs_var", "=", "SelectNumbers", "(", "[", "x", "for", "x", "in", "range", "(", "base", ".", "nvar", ")", "]", ")", "used_inputs_naive", "=", "SelectNumbers", "(", "[", "]", ")", "else", ":", "used_inputs_var", "=", "SelectNumbers", "(", "[", "x", "for", "x", "in", "range", "(", "base", ".", "nvar", ")", "]", ")", "used_inputs_naive", "=", "SelectNumbers", "(", "[", "x", "for", "x", "in", "range", "(", "base", ".", "nvar", ")", "]", ")", "nb_input", "=", "Inputs", "(", "base", ",", "used_inputs_naive", ",", "functions", "=", "base", ".", "_input_functions", ")", "while", "(", "(", "base", ".", "_all_inputs", "and", "not", "base", ".", "stopping_criteria_tl", "(", ")", ")", "or", "(", "self", ".", "popsize", "<", "base", ".", "popsize", "and", "not", "base", ".", "stopping_criteria", "(", ")", ")", ")", ":", "if", "base", ".", "_all_inputs", "and", "used_inputs_var", ".", "empty", "(", ")", "and", "used_inputs_naive", ".", "empty", "(", ")", ":", "base", ".", "_init_popsize", "=", "self", ".", "popsize", "break", "if", "nb_input", ".", "use_all_variables", "(", ")", ":", "v", "=", "nb_input", ".", "all_variables", "(", ")", "if", "v", "is", "None", ":", "continue", "elif", "not", "used_inputs_var", ".", "empty", "(", ")", "and", "np", ".", "random", ".", "random", "(", ")", "<", "base", ".", "_pr_variable", ":", "v", "=", "self", ".", "variable_input", "(", "used_inputs_var", ")", "if", "v", "is", "None", ":", "used_inputs_var", ".", "pos", "=", "used_inputs_var", ".", "size", "continue", "elif", "not", "used_inputs_naive", ".", "empty", "(", ")", ":", "v", "=", "nb_input", ".", "input", "(", ")", "if", "not", "used_inputs_var", ".", "empty", "(", ")", "and", "used_inputs_naive", ".", "empty", "(", ")", ":", "base", ".", "_pr_variable", "=", "1", "if", "v", "is", "None", ":", "used_inputs_naive", ".", "pos", "=", "used_inputs_naive", ".", "size", "if", "not", "used_inputs_var", ".", "empty", "(", ")", ":", "base", ".", "_pr_variable", "=", "1", "continue", "else", ":", "gen", "=", "self", ".", "generation", "self", ".", "generation", "=", "0", "v", "=", "base", ".", "random_offspring", "(", ")", "self", ".", "generation", "=", "gen", "self", ".", "add", "(", "v", ")" ]
Create the initial population
[ "Create", "the", "initial", "population" ]
train
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L345-L390
mgraffg/EvoDAG
EvoDAG/population.py
BasePopulation.add
def add(self, v): "Add an individual to the population" self.population.append(v) self._current_popsize += 1 v.position = len(self._hist) self._hist.append(v) self.bsf = v self.estopping = v self._density += self.get_density(v)
python
def add(self, v): "Add an individual to the population" self.population.append(v) self._current_popsize += 1 v.position = len(self._hist) self._hist.append(v) self.bsf = v self.estopping = v self._density += self.get_density(v)
[ "def", "add", "(", "self", ",", "v", ")", ":", "self", ".", "population", ".", "append", "(", "v", ")", "self", ".", "_current_popsize", "+=", "1", "v", ".", "position", "=", "len", "(", "self", ".", "_hist", ")", "self", ".", "_hist", ".", "append", "(", "v", ")", "self", ".", "bsf", "=", "v", "self", ".", "estopping", "=", "v", "self", ".", "_density", "+=", "self", ".", "get_density", "(", "v", ")" ]
Add an individual to the population
[ "Add", "an", "individual", "to", "the", "population" ]
train
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L392-L400
mgraffg/EvoDAG
EvoDAG/population.py
BasePopulation.replace
def replace(self, v): """Replace an individual selected by negative tournament selection with individual v""" if self.popsize < self._popsize: return self.add(v) k = self.tournament(negative=True) self.clean(self.population[k]) self.population[k] = v v.position = len(self._hist) self._hist.append(v) self.bsf = v self.estopping = v self._inds_replace += 1 self._density += self.get_density(v) if self._inds_replace == self._popsize: self._inds_replace = 0 self.generation += 1 gc.collect()
python
def replace(self, v): """Replace an individual selected by negative tournament selection with individual v""" if self.popsize < self._popsize: return self.add(v) k = self.tournament(negative=True) self.clean(self.population[k]) self.population[k] = v v.position = len(self._hist) self._hist.append(v) self.bsf = v self.estopping = v self._inds_replace += 1 self._density += self.get_density(v) if self._inds_replace == self._popsize: self._inds_replace = 0 self.generation += 1 gc.collect()
[ "def", "replace", "(", "self", ",", "v", ")", ":", "if", "self", ".", "popsize", "<", "self", ".", "_popsize", ":", "return", "self", ".", "add", "(", "v", ")", "k", "=", "self", ".", "tournament", "(", "negative", "=", "True", ")", "self", ".", "clean", "(", "self", ".", "population", "[", "k", "]", ")", "self", ".", "population", "[", "k", "]", "=", "v", "v", ".", "position", "=", "len", "(", "self", ".", "_hist", ")", "self", ".", "_hist", ".", "append", "(", "v", ")", "self", ".", "bsf", "=", "v", "self", ".", "estopping", "=", "v", "self", ".", "_inds_replace", "+=", "1", "self", ".", "_density", "+=", "self", ".", "get_density", "(", "v", ")", "if", "self", ".", "_inds_replace", "==", "self", ".", "_popsize", ":", "self", ".", "_inds_replace", "=", "0", "self", ".", "generation", "+=", "1", "gc", ".", "collect", "(", ")" ]
Replace an individual selected by negative tournament selection with individual v
[ "Replace", "an", "individual", "selected", "by", "negative", "tournament", "selection", "with", "individual", "v" ]
train
https://github.com/mgraffg/EvoDAG/blob/e11fa1fd1ca9e69cca92696c86661a3dc7b3a1d5/EvoDAG/population.py#L402-L419
nion-software/nionswift
nion/swift/model/HDF5Handler.py
make_directory_if_needed
def make_directory_if_needed(directory_path): """ Make the directory path, if needed. """ if os.path.exists(directory_path): if not os.path.isdir(directory_path): raise OSError("Path is not a directory:", directory_path) else: os.makedirs(directory_path)
python
def make_directory_if_needed(directory_path): """ Make the directory path, if needed. """ if os.path.exists(directory_path): if not os.path.isdir(directory_path): raise OSError("Path is not a directory:", directory_path) else: os.makedirs(directory_path)
[ "def", "make_directory_if_needed", "(", "directory_path", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "directory_path", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory_path", ")", ":", "raise", "OSError", "(", "\"Path is not a directory:\"", ",", "directory_path", ")", "else", ":", "os", ".", "makedirs", "(", "directory_path", ")" ]
Make the directory path, if needed.
[ "Make", "the", "directory", "path", "if", "needed", "." ]
train
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HDF5Handler.py#L18-L26
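Note: make_directory_if_needed predates common use of the exist_ok flag; since Python 3.2 a single call gives the same effect, still raising an OSError subclass (FileExistsError) when a non-directory occupies the path:

import os

# Creates intermediate directories and is a no-op if the directory already
# exists; the path here is hypothetical
os.makedirs("data/cache/tiles", exist_ok=True)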
ajk8/hatchery
hatchery/main.py
hatchery
def hatchery(): """ Main entry point for the hatchery program """ args = docopt.docopt(__doc__) task_list = args['<task>'] if not task_list or 'help' in task_list or args['--help']: print(__doc__.format(version=_version.__version__, config_files=config.CONFIG_LOCATIONS)) return 0 level_str = args['--log-level'] try: level_const = getattr(logging, level_str.upper()) logging.basicConfig(level=level_const) if level_const == logging.DEBUG: workdir.options.debug = True except LookupError: logging.basicConfig() logger.error('received invalid log level: ' + level_str) return 1 for task in task_list: if task not in ORDERED_TASKS: logger.error('received invalid task: ' + task) return 1 for task in CHECK_TASKS: if task in task_list: logger.info('starting task: check') task_check(args) break if 'package' in task_list and not args['--release-version']: logger.error('--release-version is required for the package task') return 1 config_dict = _get_config_or_die( calling_task='hatchery', required_params=['auto_push_tag'] ) if config_dict['auto_push_tag'] and 'upload' in task_list: logger.info('adding task: tag (auto_push_tag==True)') task_list.append('tag') # all commands will raise a SystemExit if they fail # check will have already been run for task in ORDERED_TASKS: if task in task_list and task != 'check': logger.info('starting task: ' + task) globals()['task_' + task](args) logger.info("all's well that ends well...hatchery out") return 0
python
def hatchery(): """ Main entry point for the hatchery program """ args = docopt.docopt(__doc__) task_list = args['<task>'] if not task_list or 'help' in task_list or args['--help']: print(__doc__.format(version=_version.__version__, config_files=config.CONFIG_LOCATIONS)) return 0 level_str = args['--log-level'] try: level_const = getattr(logging, level_str.upper()) logging.basicConfig(level=level_const) if level_const == logging.DEBUG: workdir.options.debug = True except LookupError: logging.basicConfig() logger.error('received invalid log level: ' + level_str) return 1 for task in task_list: if task not in ORDERED_TASKS: logger.error('received invalid task: ' + task) return 1 for task in CHECK_TASKS: if task in task_list: logger.info('starting task: check') task_check(args) break if 'package' in task_list and not args['--release-version']: logger.error('--release-version is required for the package task') return 1 config_dict = _get_config_or_die( calling_task='hatchery', required_params=['auto_push_tag'] ) if config_dict['auto_push_tag'] and 'upload' in task_list: logger.info('adding task: tag (auto_push_tag==True)') task_list.append('tag') # all commands will raise a SystemExit if they fail # check will have already been run for task in ORDERED_TASKS: if task in task_list and task != 'check': logger.info('starting task: ' + task) globals()['task_' + task](args) logger.info("all's well that ends well...hatchery out") return 0
[ "def", "hatchery", "(", ")", ":", "args", "=", "docopt", ".", "docopt", "(", "__doc__", ")", "task_list", "=", "args", "[", "'<task>'", "]", "if", "not", "task_list", "or", "'help'", "in", "task_list", "or", "args", "[", "'--help'", "]", ":", "print", "(", "__doc__", ".", "format", "(", "version", "=", "_version", ".", "__version__", ",", "config_files", "=", "config", ".", "CONFIG_LOCATIONS", ")", ")", "return", "0", "level_str", "=", "args", "[", "'--log-level'", "]", "try", ":", "level_const", "=", "getattr", "(", "logging", ",", "level_str", ".", "upper", "(", ")", ")", "logging", ".", "basicConfig", "(", "level", "=", "level_const", ")", "if", "level_const", "==", "logging", ".", "DEBUG", ":", "workdir", ".", "options", ".", "debug", "=", "True", "except", "LookupError", ":", "logging", ".", "basicConfig", "(", ")", "logger", ".", "error", "(", "'received invalid log level: '", "+", "level_str", ")", "return", "1", "for", "task", "in", "task_list", ":", "if", "task", "not", "in", "ORDERED_TASKS", ":", "logger", ".", "info", "(", "'starting task: check'", ")", "logger", ".", "error", "(", "'received invalid task: '", "+", "task", ")", "return", "1", "for", "task", "in", "CHECK_TASKS", ":", "if", "task", "in", "task_list", ":", "task_check", "(", "args", ")", "break", "if", "'package'", "in", "task_list", "and", "not", "args", "[", "'--release-version'", "]", ":", "logger", ".", "error", "(", "'--release-version is required for the package task'", ")", "return", "1", "config_dict", "=", "_get_config_or_die", "(", "calling_task", "=", "'hatchery'", ",", "required_params", "=", "[", "'auto_push_tag'", "]", ")", "if", "config_dict", "[", "'auto_push_tag'", "]", "and", "'upload'", "in", "task_list", ":", "logger", ".", "info", "(", "'adding task: tag (auto_push_tag==True)'", ")", "task_list", ".", "append", "(", "'tag'", ")", "# all commands will raise a SystemExit if they fail", "# check will have already been run", "for", "task", "in", "ORDERED_TASKS", ":", "if", "task", "in", "task_list", "and", "task", "!=", "'check'", ":", "logger", ".", "info", "(", "'starting task: '", "+", "task", ")", "globals", "(", ")", "[", "'task_'", "+", "task", "]", "(", "args", ")", "logger", ".", "info", "(", "\"all's well that ends well...hatchery out\"", ")", "return", "0" ]
Main entry point for the hatchery program
[ "Main", "entry", "point", "for", "the", "hatchery", "program" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/main.py#L379-L430
ajk8/hatchery
hatchery/executor.py
call
def call(cmd_args, suppress_output=False): """ Call an arbitrary command and return the exit value, stdout, and stderr as a tuple Command can be passed in as either a string or iterable >>> result = call('hatchery', suppress_output=True) >>> result.exitval 0 >>> result = call(['hatchery', 'notreal']) >>> result.exitval 1 """ if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args): cmd_args = shlex.split(cmd_args) logger.info('executing `{}`'.format(' '.join(cmd_args))) call_request = CallRequest(cmd_args, suppress_output=suppress_output) call_result = call_request.run() if call_result.exitval: logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval)) return call_result
python
def call(cmd_args, suppress_output=False): """ Call an arbitrary command and return the exit value, stdout, and stderr as a tuple Command can be passed in as either a string or iterable >>> result = call('hatchery', suppress_output=True) >>> result.exitval 0 >>> result = call(['hatchery', 'notreal']) >>> result.exitval 1 """ if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args): cmd_args = shlex.split(cmd_args) logger.info('executing `{}`'.format(' '.join(cmd_args))) call_request = CallRequest(cmd_args, suppress_output=suppress_output) call_result = call_request.run() if call_result.exitval: logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval)) return call_result
[ "def", "call", "(", "cmd_args", ",", "suppress_output", "=", "False", ")", ":", "if", "not", "funcy", ".", "is_list", "(", "cmd_args", ")", "and", "not", "funcy", ".", "is_tuple", "(", "cmd_args", ")", ":", "cmd_args", "=", "shlex", ".", "split", "(", "cmd_args", ")", "logger", ".", "info", "(", "'executing `{}`'", ".", "format", "(", "' '", ".", "join", "(", "cmd_args", ")", ")", ")", "call_request", "=", "CallRequest", "(", "cmd_args", ",", "suppress_output", "=", "suppress_output", ")", "call_result", "=", "call_request", ".", "run", "(", ")", "if", "call_result", ".", "exitval", ":", "logger", ".", "error", "(", "'`{}` returned error code {}'", ".", "format", "(", "' '", ".", "join", "(", "cmd_args", ")", ",", "call_result", ".", "exitval", ")", ")", "return", "call_result" ]
Call an arbitrary command and return the exit value, stdout, and stderr as a tuple Command can be passed in as either a string or iterable >>> result = call('hatchery', suppress_output=True) >>> result.exitval 0 >>> result = call(['hatchery', 'notreal']) >>> result.exitval 1
[ "Call", "an", "arbitary", "command", "and", "return", "the", "exit", "value", "stdout", "and", "stderr", "as", "a", "tuple" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/executor.py#L66-L85
ajk8/hatchery
hatchery/executor.py
setup
def setup(cmd_args, suppress_output=False): """ Call a setup.py command or list of commands >>> result = setup('--name', suppress_output=True) >>> result.exitval 0 >>> result = setup('notreal') >>> result.exitval 1 """ if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args): cmd_args = shlex.split(cmd_args) cmd_args = [sys.executable, 'setup.py'] + [x for x in cmd_args] return call(cmd_args, suppress_output=suppress_output)
python
def setup(cmd_args, suppress_output=False): """ Call a setup.py command or list of commands >>> result = setup('--name', suppress_output=True) >>> result.exitval 0 >>> result = setup('notreal') >>> result.exitval 1 """ if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args): cmd_args = shlex.split(cmd_args) cmd_args = [sys.executable, 'setup.py'] + [x for x in cmd_args] return call(cmd_args, suppress_output=suppress_output)
[ "def", "setup", "(", "cmd_args", ",", "suppress_output", "=", "False", ")", ":", "if", "not", "funcy", ".", "is_list", "(", "cmd_args", ")", "and", "not", "funcy", ".", "is_tuple", "(", "cmd_args", ")", ":", "cmd_args", "=", "shlex", ".", "split", "(", "cmd_args", ")", "cmd_args", "=", "[", "sys", ".", "executable", ",", "'setup.py'", "]", "+", "[", "x", "for", "x", "in", "cmd_args", "]", "return", "call", "(", "cmd_args", ",", "suppress_output", "=", "suppress_output", ")" ]
Call a setup.py command or list of commands >>> result = setup('--name', suppress_output=True) >>> result.exitval 0 >>> result = setup('notreal') >>> result.exitval 1
[ "Call", "a", "setup", ".", "py", "command", "or", "list", "of", "commands" ]
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/executor.py#L88-L101
djgagne/hagelslag
hagelslag/data/MRMSGrid.py
MRMSGrid.load_data
def load_data(self): """ Loads data files and stores the output in the data attribute. """ data = [] valid_dates = [] mrms_files = np.array(sorted(os.listdir(self.path + self.variable + "/"))) mrms_file_dates = np.array([m_file.split("_")[-2].split("-")[0] for m_file in mrms_files]) old_mrms_file = None file_obj = None for t in range(self.all_dates.shape[0]): file_index = np.where(mrms_file_dates == self.all_dates[t].strftime("%Y%m%d"))[0] if len(file_index) > 0: mrms_file = mrms_files[file_index][0] if mrms_file is not None: if file_obj is not None: file_obj.close() file_obj = Dataset(self.path + self.variable + "/" + mrms_file) #old_mrms_file = mrms_file if "time" in file_obj.variables.keys(): time_var = "time" else: time_var = "date" file_valid_dates = pd.DatetimeIndex(num2date(file_obj.variables[time_var][:], file_obj.variables[time_var].units)) else: file_valid_dates = pd.DatetimeIndex([]) time_index = np.where(file_valid_dates.values == self.all_dates.values[t])[0] if len(time_index) > 0: data.append(file_obj.variables[self.variable][time_index[0]]) valid_dates.append(self.all_dates[t]) if file_obj is not None: file_obj.close() self.data = np.array(data) self.data[self.data < 0] = 0 self.data[self.data > 150] = 150 self.valid_dates = pd.DatetimeIndex(valid_dates)
python
def load_data(self): """ Loads data files and stores the output in the data attribute. """ data = [] valid_dates = [] mrms_files = np.array(sorted(os.listdir(self.path + self.variable + "/"))) mrms_file_dates = np.array([m_file.split("_")[-2].split("-")[0] for m_file in mrms_files]) old_mrms_file = None file_obj = None for t in range(self.all_dates.shape[0]): file_index = np.where(mrms_file_dates == self.all_dates[t].strftime("%Y%m%d"))[0] if len(file_index) > 0: mrms_file = mrms_files[file_index][0] if mrms_file is not None: if file_obj is not None: file_obj.close() file_obj = Dataset(self.path + self.variable + "/" + mrms_file) #old_mrms_file = mrms_file if "time" in file_obj.variables.keys(): time_var = "time" else: time_var = "date" file_valid_dates = pd.DatetimeIndex(num2date(file_obj.variables[time_var][:], file_obj.variables[time_var].units)) else: file_valid_dates = pd.DatetimeIndex([]) time_index = np.where(file_valid_dates.values == self.all_dates.values[t])[0] if len(time_index) > 0: data.append(file_obj.variables[self.variable][time_index[0]]) valid_dates.append(self.all_dates[t]) if file_obj is not None: file_obj.close() self.data = np.array(data) self.data[self.data < 0] = 0 self.data[self.data > 150] = 150 self.valid_dates = pd.DatetimeIndex(valid_dates)
[ "def", "load_data", "(", "self", ")", ":", "data", "=", "[", "]", "valid_dates", "=", "[", "]", "mrms_files", "=", "np", ".", "array", "(", "sorted", "(", "os", ".", "listdir", "(", "self", ".", "path", "+", "self", ".", "variable", "+", "\"/\"", ")", ")", ")", "mrms_file_dates", "=", "np", ".", "array", "(", "[", "m_file", ".", "split", "(", "\"_\"", ")", "[", "-", "2", "]", ".", "split", "(", "\"-\"", ")", "[", "0", "]", "for", "m_file", "in", "mrms_files", "]", ")", "old_mrms_file", "=", "None", "file_obj", "=", "None", "for", "t", "in", "range", "(", "self", ".", "all_dates", ".", "shape", "[", "0", "]", ")", ":", "file_index", "=", "np", ".", "where", "(", "mrms_file_dates", "==", "self", ".", "all_dates", "[", "t", "]", ".", "strftime", "(", "\"%Y%m%d\"", ")", ")", "[", "0", "]", "if", "len", "(", "file_index", ")", ">", "0", ":", "mrms_file", "=", "mrms_files", "[", "file_index", "]", "[", "0", "]", "if", "mrms_file", "is", "not", "None", ":", "if", "file_obj", "is", "not", "None", ":", "file_obj", ".", "close", "(", ")", "file_obj", "=", "Dataset", "(", "self", ".", "path", "+", "self", ".", "variable", "+", "\"/\"", "+", "mrms_file", ")", "#old_mrms_file = mrms_file", "if", "\"time\"", "in", "file_obj", ".", "variables", ".", "keys", "(", ")", ":", "time_var", "=", "\"time\"", "else", ":", "time_var", "=", "\"date\"", "file_valid_dates", "=", "pd", ".", "DatetimeIndex", "(", "num2date", "(", "file_obj", ".", "variables", "[", "time_var", "]", "[", ":", "]", ",", "file_obj", ".", "variables", "[", "time_var", "]", ".", "units", ")", ")", "else", ":", "file_valid_dates", "=", "pd", ".", "DatetimeIndex", "(", "[", "]", ")", "time_index", "=", "np", ".", "where", "(", "file_valid_dates", ".", "values", "==", "self", ".", "all_dates", ".", "values", "[", "t", "]", ")", "[", "0", "]", "if", "len", "(", "time_index", ")", ">", "0", ":", "data", ".", "append", "(", "file_obj", ".", "variables", "[", "self", ".", "variable", "]", "[", "time_index", "[", "0", "]", "]", ")", "valid_dates", ".", "append", "(", "self", ".", "all_dates", "[", "t", "]", ")", "if", "file_obj", "is", "not", "None", ":", "file_obj", ".", "close", "(", ")", "self", ".", "data", "=", "np", ".", "array", "(", "data", ")", "self", ".", "data", "[", "self", ".", "data", "<", "0", "]", "=", "0", "self", ".", "data", "[", "self", ".", "data", ">", "150", "]", "=", "150", "self", ".", "valid_dates", "=", "pd", ".", "DatetimeIndex", "(", "valid_dates", ")" ]
Loads data files and stores the output in the data attribute.
[ "Loads", "data", "files", "and", "stores", "the", "output", "in", "the", "data", "attribute", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/data/MRMSGrid.py#L44-L82
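The date matching in MRMSGrid.load_data above hinges on the filename convention parsed by m_file.split("_")[-2].split("-")[0]. A small sketch of that parsing step, using a hypothetical filename chosen only to fit the split pattern:

# Hypothetical filename; real MRMS file names may differ, but the second-to-last
# underscore-delimited field is assumed to start with a YYYYMMDD date.
m_file = "MESH_Max_60min_20150520-00-00_20150521-00-00.nc"
file_date = m_file.split("_")[-2].split("-")[0]
print(file_date)  # -> '20150520', compared against all_dates[t].strftime("%Y%m%d")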
djgagne/hagelslag
hagelslag/processing/EnhancedWatershedSegmenter.py
rescale_data
def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0): """ Rescale your input data so that it ranges over integer values, which will perform better in the watershed. Args: data: 2D or 3D ndarray being rescaled data_min: minimum value of input data for scaling purposes data_max: maximum value of input data for scaling purposes out_min: minimum value of scaled data out_max: maximum value of scaled data Returns: Linearly scaled ndarray """ return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min
python
def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0): """ Rescale your input data so that it ranges over integer values, which will perform better in the watershed. Args: data: 2D or 3D ndarray being rescaled data_min: minimum value of input data for scaling purposes data_max: maximum value of input data for scaling purposes out_min: minimum value of scaled data out_max: maximum value of scaled data Returns: Linearly scaled ndarray """ return (out_max - out_min) / (data_max - data_min) * (data - data_min) + out_min
[ "def", "rescale_data", "(", "data", ",", "data_min", ",", "data_max", ",", "out_min", "=", "0.0", ",", "out_max", "=", "100.0", ")", ":", "return", "(", "out_max", "-", "out_min", ")", "/", "(", "data_max", "-", "data_min", ")", "*", "(", "data", "-", "data_min", ")", "+", "out_min" ]
Rescale your input data so that it ranges over integer values, which will perform better in the watershed. Args: data: 2D or 3D ndarray being rescaled data_min: minimum value of input data for scaling purposes data_max: maximum value of input data for scaling purposes out_min: minimum value of scaled data out_max: maximum value of scaled data Returns: Linearly scaled ndarray
[ "Rescale", "your", "input", "data", "so", "that", "it", "ranges", "over", "integer", "values", "which", "will", "perform", "better", "in", "the", "watershed", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnhancedWatershedSegmenter.py#L297-L311
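A worked example of the linear map in rescale_data: with data_min=0, data_max=75 and the default output range 0 to 100, a value of 30 scales as (100 - 0) / (75 - 0) * (30 - 0) + 0 = 40. The import path below follows the file path shown in the record.

import numpy as np
from hagelslag.processing.EnhancedWatershedSegmenter import rescale_data

data = np.array([0.0, 30.0, 75.0])
print(rescale_data(data, data_min=0.0, data_max=75.0))  # -> [  0.  40. 100.]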
djgagne/hagelslag
hagelslag/processing/EnhancedWatershedSegmenter.py
EnhancedWatershed.label
def label(self, input_grid): """ Labels input grid using enhanced watershed algorithm. Args: input_grid (numpy.ndarray): Grid to be labeled. Returns: Array of labeled pixels """ marked = self.find_local_maxima(input_grid) marked = np.where(marked >= 0, 1, 0) # splabel returns two things in a tuple: an array and an integer # assign the first thing (array) to markers markers = splabel(marked)[0] return markers
python
def label(self, input_grid): """ Labels input grid using enhanced watershed algorithm. Args: input_grid (numpy.ndarray): Grid to be labeled. Returns: Array of labeled pixels """ marked = self.find_local_maxima(input_grid) marked = np.where(marked >= 0, 1, 0) # splabel returns two things in a tuple: an array and an integer # assign the first thing (array) to markers markers = splabel(marked)[0] return markers
[ "def", "label", "(", "self", ",", "input_grid", ")", ":", "marked", "=", "self", ".", "find_local_maxima", "(", "input_grid", ")", "marked", "=", "np", ".", "where", "(", "marked", ">=", "0", ",", "1", ",", "0", ")", "# splabel returns two things in a tuple: an array and an integer", "# assign the first thing (array) to markers", "markers", "=", "splabel", "(", "marked", ")", "[", "0", "]", "return", "markers" ]
Labels input grid using enhanced watershed algorithm. Args: input_grid (numpy.ndarray): Grid to be labeled. Returns: Array of labeled pixels
[ "Labels", "input", "grid", "using", "enhanced", "watershed", "algorithm", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnhancedWatershedSegmenter.py#L42-L57
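The comment in label() about a tuple return matches the behavior of scipy.ndimage.label, so splabel is presumably that function under an alias; a standalone sketch under that assumption:

import numpy as np
from scipy.ndimage import label as splabel  # assumed alias, consistent with the tuple noted above

marked = np.array([[1, 1, 0, 0],
                   [0, 0, 0, 1]])
markers, num_objects = splabel(marked)  # (labeled array, number of objects)
print(markers)       # connected regions numbered 1..num_objects
print(num_objects)   # -> 2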
djgagne/hagelslag
hagelslag/processing/EnhancedWatershedSegmenter.py
EnhancedWatershed.find_local_maxima
def find_local_maxima(self, input_grid): """ Finds the local maxima in the inputGrid and perform region growing to identify objects. Args: input_grid: Raw input data. Returns: array with labeled objects. """ pixels, q_data = self.quantize(input_grid) centers = OrderedDict() for p in pixels.keys(): centers[p] = [] marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size))) MAX_INFL = 2 * MIN_INFL marked_so_far = [] # Find the maxima. These are high-values with enough clearance # around them. # Work from high to low bins. The pixels in the highest bin mark their # neighborhoods first. If you did it from low to high the lowest maxima # would mark their neighborhoods first and interfere with the identification of higher maxima. for b in sorted(pixels.keys(),reverse=True): # Square starts large with high intensity bins and gets smaller with low intensity bins. infl_dist = MIN_INFL + int(np.round(float(b) / self.max_bin * (MAX_INFL - MIN_INFL))) for p in pixels[b]: if marked[p] == self.UNMARKED: ok = False del marked_so_far[:] # Temporarily mark unmarked points in square around point (keep track of them in list marked_so_far). # If none of the points in square were marked already from a higher intensity center, # this counts as a new center and ok=True and points will remain marked. # Otherwise ok=False and marked points that were previously unmarked will be unmarked. for (i, j), v in np.ndenumerate(marked[p[0] - infl_dist:p[0] + infl_dist + 1, p[1] - infl_dist:p[1]+ infl_dist + 1]): if v == self.UNMARKED: ok = True marked[i - infl_dist + p[0],j - infl_dist + p[1]] = b marked_so_far.append((i - infl_dist + p[0],j - infl_dist + p[1])) else: # neighborhood already taken ok = False break # ok if point and surrounding square were not marked already. if ok: # highest point in its neighborhood centers[b].append(p) else: for m in marked_so_far: marked[m] = self.UNMARKED # Erase marks and start over. You have a list of centers now. marked[:, :] = self.UNMARKED deferred_from_last = [] deferred_to_next = [] # delta (int): maximum number of increments the cluster is allowed to range over. Larger d results in clusters over larger scales. for delta in range(0, self.delta + 1): # Work from high to low bins. for b in sorted(centers.keys(), reverse=True): bin_lower = b - delta deferred_from_last[:] = deferred_to_next[:] del deferred_to_next[:] foothills = [] n_centers = len(centers[b]) tot_centers = n_centers + len(deferred_from_last) for i in range(tot_centers): # done this way to minimize memory overhead of maintaining two lists if i < n_centers: center = centers[b][i] else: center = deferred_from_last[i - n_centers] if bin_lower < 0: bin_lower = 0 if marked[center] == self.UNMARKED: captured = self.set_maximum(q_data, marked, center, bin_lower, foothills) if not captured: # decrement to lower value to see if it'll get big enough deferred_to_next.append(center) else: pass # this is the last one for this bin self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills) del deferred_from_last[:] del deferred_to_next[:] return marked
python
def find_local_maxima(self, input_grid): """ Finds the local maxima in the inputGrid and perform region growing to identify objects. Args: input_grid: Raw input data. Returns: array with labeled objects. """ pixels, q_data = self.quantize(input_grid) centers = OrderedDict() for p in pixels.keys(): centers[p] = [] marked = np.ones(q_data.shape, dtype=int) * self.UNMARKED MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(self.max_size))) MAX_INFL = 2 * MIN_INFL marked_so_far = [] # Find the maxima. These are high-values with enough clearance # around them. # Work from high to low bins. The pixels in the highest bin mark their # neighborhoods first. If you did it from low to high the lowest maxima # would mark their neighborhoods first and interfere with the identification of higher maxima. for b in sorted(pixels.keys(),reverse=True): # Square starts large with high intensity bins and gets smaller with low intensity bins. infl_dist = MIN_INFL + int(np.round(float(b) / self.max_bin * (MAX_INFL - MIN_INFL))) for p in pixels[b]: if marked[p] == self.UNMARKED: ok = False del marked_so_far[:] # Temporarily mark unmarked points in square around point (keep track of them in list marked_so_far). # If none of the points in square were marked already from a higher intensity center, # this counts as a new center and ok=True and points will remain marked. # Otherwise ok=False and marked points that were previously unmarked will be unmarked. for (i, j), v in np.ndenumerate(marked[p[0] - infl_dist:p[0] + infl_dist + 1, p[1] - infl_dist:p[1]+ infl_dist + 1]): if v == self.UNMARKED: ok = True marked[i - infl_dist + p[0],j - infl_dist + p[1]] = b marked_so_far.append((i - infl_dist + p[0],j - infl_dist + p[1])) else: # neighborhood already taken ok = False break # ok if point and surrounding square were not marked already. if ok: # highest point in its neighborhood centers[b].append(p) else: for m in marked_so_far: marked[m] = self.UNMARKED # Erase marks and start over. You have a list of centers now. marked[:, :] = self.UNMARKED deferred_from_last = [] deferred_to_next = [] # delta (int): maximum number of increments the cluster is allowed to range over. Larger d results in clusters over larger scales. for delta in range(0, self.delta + 1): # Work from high to low bins. for b in sorted(centers.keys(), reverse=True): bin_lower = b - delta deferred_from_last[:] = deferred_to_next[:] del deferred_to_next[:] foothills = [] n_centers = len(centers[b]) tot_centers = n_centers + len(deferred_from_last) for i in range(tot_centers): # done this way to minimize memory overhead of maintaining two lists if i < n_centers: center = centers[b][i] else: center = deferred_from_last[i - n_centers] if bin_lower < 0: bin_lower = 0 if marked[center] == self.UNMARKED: captured = self.set_maximum(q_data, marked, center, bin_lower, foothills) if not captured: # decrement to lower value to see if it'll get big enough deferred_to_next.append(center) else: pass # this is the last one for this bin self.remove_foothills(q_data, marked, b, bin_lower, centers, foothills) del deferred_from_last[:] del deferred_to_next[:] return marked
[ "def", "find_local_maxima", "(", "self", ",", "input_grid", ")", ":", "pixels", ",", "q_data", "=", "self", ".", "quantize", "(", "input_grid", ")", "centers", "=", "OrderedDict", "(", ")", "for", "p", "in", "pixels", ".", "keys", "(", ")", ":", "centers", "[", "p", "]", "=", "[", "]", "marked", "=", "np", ".", "ones", "(", "q_data", ".", "shape", ",", "dtype", "=", "int", ")", "*", "self", ".", "UNMARKED", "MIN_INFL", "=", "int", "(", "np", ".", "round", "(", "1", "+", "0.5", "*", "np", ".", "sqrt", "(", "self", ".", "max_size", ")", ")", ")", "MAX_INFL", "=", "2", "*", "MIN_INFL", "marked_so_far", "=", "[", "]", "# Find the maxima. These are high-values with enough clearance", "# around them.", "# Work from high to low bins. The pixels in the highest bin mark their", "# neighborhoods first. If you did it from low to high the lowest maxima", "# would mark their neighborhoods first and interfere with the identification of higher maxima.", "for", "b", "in", "sorted", "(", "pixels", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", ":", "# Square starts large with high intensity bins and gets smaller with low intensity bins.", "infl_dist", "=", "MIN_INFL", "+", "int", "(", "np", ".", "round", "(", "float", "(", "b", ")", "/", "self", ".", "max_bin", "*", "(", "MAX_INFL", "-", "MIN_INFL", ")", ")", ")", "for", "p", "in", "pixels", "[", "b", "]", ":", "if", "marked", "[", "p", "]", "==", "self", ".", "UNMARKED", ":", "ok", "=", "False", "del", "marked_so_far", "[", ":", "]", "# Temporarily mark unmarked points in square around point (keep track of them in list marked_so_far).", "# If none of the points in square were marked already from a higher intensity center, ", "# this counts as a new center and ok=True and points will remain marked.", "# Otherwise ok=False and marked points that were previously unmarked will be unmarked.", "for", "(", "i", ",", "j", ")", ",", "v", "in", "np", ".", "ndenumerate", "(", "marked", "[", "p", "[", "0", "]", "-", "infl_dist", ":", "p", "[", "0", "]", "+", "infl_dist", "+", "1", ",", "p", "[", "1", "]", "-", "infl_dist", ":", "p", "[", "1", "]", "+", "infl_dist", "+", "1", "]", ")", ":", "if", "v", "==", "self", ".", "UNMARKED", ":", "ok", "=", "True", "marked", "[", "i", "-", "infl_dist", "+", "p", "[", "0", "]", ",", "j", "-", "infl_dist", "+", "p", "[", "1", "]", "]", "=", "b", "marked_so_far", ".", "append", "(", "(", "i", "-", "infl_dist", "+", "p", "[", "0", "]", ",", "j", "-", "infl_dist", "+", "p", "[", "1", "]", ")", ")", "else", ":", "# neighborhood already taken", "ok", "=", "False", "break", "# ok if point and surrounding square were not marked already.", "if", "ok", ":", "# highest point in its neighborhood", "centers", "[", "b", "]", ".", "append", "(", "p", ")", "else", ":", "for", "m", "in", "marked_so_far", ":", "marked", "[", "m", "]", "=", "self", ".", "UNMARKED", "# Erase marks and start over. You have a list of centers now.", "marked", "[", ":", ",", ":", "]", "=", "self", ".", "UNMARKED", "deferred_from_last", "=", "[", "]", "deferred_to_next", "=", "[", "]", "# delta (int): maximum number of increments the cluster is allowed to range over. 
Larger d results in clusters over larger scales.", "for", "delta", "in", "range", "(", "0", ",", "self", ".", "delta", "+", "1", ")", ":", "# Work from high to low bins.", "for", "b", "in", "sorted", "(", "centers", ".", "keys", "(", ")", ",", "reverse", "=", "True", ")", ":", "bin_lower", "=", "b", "-", "delta", "deferred_from_last", "[", ":", "]", "=", "deferred_to_next", "[", ":", "]", "del", "deferred_to_next", "[", ":", "]", "foothills", "=", "[", "]", "n_centers", "=", "len", "(", "centers", "[", "b", "]", ")", "tot_centers", "=", "n_centers", "+", "len", "(", "deferred_from_last", ")", "for", "i", "in", "range", "(", "tot_centers", ")", ":", "# done this way to minimize memory overhead of maintaining two lists", "if", "i", "<", "n_centers", ":", "center", "=", "centers", "[", "b", "]", "[", "i", "]", "else", ":", "center", "=", "deferred_from_last", "[", "i", "-", "n_centers", "]", "if", "bin_lower", "<", "0", ":", "bin_lower", "=", "0", "if", "marked", "[", "center", "]", "==", "self", ".", "UNMARKED", ":", "captured", "=", "self", ".", "set_maximum", "(", "q_data", ",", "marked", ",", "center", ",", "bin_lower", ",", "foothills", ")", "if", "not", "captured", ":", "# decrement to lower value to see if it'll get big enough", "deferred_to_next", ".", "append", "(", "center", ")", "else", ":", "pass", "# this is the last one for this bin", "self", ".", "remove_foothills", "(", "q_data", ",", "marked", ",", "b", ",", "bin_lower", ",", "centers", ",", "foothills", ")", "del", "deferred_from_last", "[", ":", "]", "del", "deferred_to_next", "[", ":", "]", "return", "marked" ]
Finds the local maxima in the inputGrid and performs region growing to identify objects. Args: input_grid: Raw input data. Returns: array with labeled objects.
[ "Finds", "the", "local", "maxima", "in", "the", "inputGrid", "and", "performs", "region", "growing", "to", "identify", "objects", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnhancedWatershedSegmenter.py#L81-L166
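The neighborhood half-width in find_local_maxima grows linearly with bin intensity between MIN_INFL and MAX_INFL. A numeric sketch of that formula with illustrative max_size=50 and max_bin=10 (not values from the source):

import numpy as np

max_size, max_bin = 50, 10  # illustrative parameters only
MIN_INFL = int(np.round(1 + 0.5 * np.sqrt(max_size)))  # -> 5
MAX_INFL = 2 * MIN_INFL                                # -> 10
for b in (max_bin, max_bin // 2, 0):
    infl_dist = MIN_INFL + int(np.round(float(b) / max_bin * (MAX_INFL - MIN_INFL)))
    print(b, infl_dist)  # 10 -> 10, 5 -> 7 (np.round(2.5) rounds half to even), 0 -> 5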
djgagne/hagelslag
hagelslag/processing/EnhancedWatershedSegmenter.py
EnhancedWatershed.set_maximum
def set_maximum(self, q_data, marked, center, bin_lower, foothills): """ Grow a region at a certain bin level and check if the region has reached the maximum size. Args: q_data: Quantized data array marked: Array marking points that are objects center: Coordinates of the center pixel of the region being grown bin_lower: Intensity level of lower bin being evaluated foothills: List of points that are associated with a center but fall outside the the size or intensity criteria Returns: True if the object is finished growing and False if the object should be grown again at the next threshold level. """ as_bin = [] # pixels to be included in peak as_glob = [] # pixels to be globbed up as part of foothills marked_so_far = [] # pixels that have already been marked will_be_considered_again = False as_bin.append(center) center_data = q_data[center] while len(as_bin) > 0: p = as_bin.pop(-1) # remove and return last pixel in as_bin if marked[p] != self.UNMARKED: # already processed continue marked[p] = q_data[center] marked_so_far.append(p) # check neighbors for index,val in np.ndenumerate(marked[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]): # is neighbor part of peak or part of mountain? if val == self.UNMARKED: pixel = (index[0] - 1 + p[0],index[1] - 1 + p[1]) p_data = q_data[pixel] if (not will_be_considered_again) and (p_data >= 0) and (p_data < center_data): will_be_considered_again = True if p_data >= bin_lower and (np.abs(center_data - p_data) <= self.delta): as_bin.append(pixel) # Do not check that this is the closest: this way, a narrow channel of globbed pixels form elif p_data >= 0: as_glob.append(pixel) if bin_lower == 0: will_be_considered_again = False big_enough = len(marked_so_far) >= self.max_size if big_enough: # remove lower values within region of influence foothills.append((center, as_glob)) elif will_be_considered_again: # remove the check if you want to ignore regions smaller than max_size for m in marked_so_far: marked[m] = self.UNMARKED del as_bin[:] del as_glob[:] del marked_so_far[:] return big_enough or (not will_be_considered_again)
python
def set_maximum(self, q_data, marked, center, bin_lower, foothills): """ Grow a region at a certain bin level and check if the region has reached the maximum size. Args: q_data: Quantized data array marked: Array marking points that are objects center: Coordinates of the center pixel of the region being grown bin_lower: Intensity level of lower bin being evaluated foothills: List of points that are associated with a center but fall outside the the size or intensity criteria Returns: True if the object is finished growing and False if the object should be grown again at the next threshold level. """ as_bin = [] # pixels to be included in peak as_glob = [] # pixels to be globbed up as part of foothills marked_so_far = [] # pixels that have already been marked will_be_considered_again = False as_bin.append(center) center_data = q_data[center] while len(as_bin) > 0: p = as_bin.pop(-1) # remove and return last pixel in as_bin if marked[p] != self.UNMARKED: # already processed continue marked[p] = q_data[center] marked_so_far.append(p) # check neighbors for index,val in np.ndenumerate(marked[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]): # is neighbor part of peak or part of mountain? if val == self.UNMARKED: pixel = (index[0] - 1 + p[0],index[1] - 1 + p[1]) p_data = q_data[pixel] if (not will_be_considered_again) and (p_data >= 0) and (p_data < center_data): will_be_considered_again = True if p_data >= bin_lower and (np.abs(center_data - p_data) <= self.delta): as_bin.append(pixel) # Do not check that this is the closest: this way, a narrow channel of globbed pixels form elif p_data >= 0: as_glob.append(pixel) if bin_lower == 0: will_be_considered_again = False big_enough = len(marked_so_far) >= self.max_size if big_enough: # remove lower values within region of influence foothills.append((center, as_glob)) elif will_be_considered_again: # remove the check if you want to ignore regions smaller than max_size for m in marked_so_far: marked[m] = self.UNMARKED del as_bin[:] del as_glob[:] del marked_so_far[:] return big_enough or (not will_be_considered_again)
[ "def", "set_maximum", "(", "self", ",", "q_data", ",", "marked", ",", "center", ",", "bin_lower", ",", "foothills", ")", ":", "as_bin", "=", "[", "]", "# pixels to be included in peak", "as_glob", "=", "[", "]", "# pixels to be globbed up as part of foothills", "marked_so_far", "=", "[", "]", "# pixels that have already been marked", "will_be_considered_again", "=", "False", "as_bin", ".", "append", "(", "center", ")", "center_data", "=", "q_data", "[", "center", "]", "while", "len", "(", "as_bin", ")", ">", "0", ":", "p", "=", "as_bin", ".", "pop", "(", "-", "1", ")", "# remove and return last pixel in as_bin", "if", "marked", "[", "p", "]", "!=", "self", ".", "UNMARKED", ":", "# already processed", "continue", "marked", "[", "p", "]", "=", "q_data", "[", "center", "]", "marked_so_far", ".", "append", "(", "p", ")", "# check neighbors", "for", "index", ",", "val", "in", "np", ".", "ndenumerate", "(", "marked", "[", "p", "[", "0", "]", "-", "1", ":", "p", "[", "0", "]", "+", "2", ",", "p", "[", "1", "]", "-", "1", ":", "p", "[", "1", "]", "+", "2", "]", ")", ":", "# is neighbor part of peak or part of mountain?", "if", "val", "==", "self", ".", "UNMARKED", ":", "pixel", "=", "(", "index", "[", "0", "]", "-", "1", "+", "p", "[", "0", "]", ",", "index", "[", "1", "]", "-", "1", "+", "p", "[", "1", "]", ")", "p_data", "=", "q_data", "[", "pixel", "]", "if", "(", "not", "will_be_considered_again", ")", "and", "(", "p_data", ">=", "0", ")", "and", "(", "p_data", "<", "center_data", ")", ":", "will_be_considered_again", "=", "True", "if", "p_data", ">=", "bin_lower", "and", "(", "np", ".", "abs", "(", "center_data", "-", "p_data", ")", "<=", "self", ".", "delta", ")", ":", "as_bin", ".", "append", "(", "pixel", ")", "# Do not check that this is the closest: this way, a narrow channel of globbed pixels form", "elif", "p_data", ">=", "0", ":", "as_glob", ".", "append", "(", "pixel", ")", "if", "bin_lower", "==", "0", ":", "will_be_considered_again", "=", "False", "big_enough", "=", "len", "(", "marked_so_far", ")", ">=", "self", ".", "max_size", "if", "big_enough", ":", "# remove lower values within region of influence", "foothills", ".", "append", "(", "(", "center", ",", "as_glob", ")", ")", "elif", "will_be_considered_again", ":", "# remove the check if you want to ignore regions smaller than max_size", "for", "m", "in", "marked_so_far", ":", "marked", "[", "m", "]", "=", "self", ".", "UNMARKED", "del", "as_bin", "[", ":", "]", "del", "as_glob", "[", ":", "]", "del", "marked_so_far", "[", ":", "]", "return", "big_enough", "or", "(", "not", "will_be_considered_again", ")" ]
Grow a region at a certain bin level and check if the region has reached the maximum size. Args: q_data: Quantized data array marked: Array marking points that are objects center: Coordinates of the center pixel of the region being grown bin_lower: Intensity level of lower bin being evaluated foothills: List of points that are associated with a center but fall outside the size or intensity criteria Returns: True if the object is finished growing and False if the object should be grown again at the next threshold level.
[ "Grow", "a", "region", "at", "a", "certain", "bin", "level", "and", "check", "if", "the", "region", "has", "reached", "the", "maximum", "size", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnhancedWatershedSegmenter.py#L168-L221
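Both set_maximum above and remove_foothills below walk a 3x3 window with np.ndenumerate and shift the window indices by -1 to recover grid coordinates. A self-contained sketch of that index arithmetic on a stand-in grid:

import numpy as np

marked = np.arange(25).reshape(5, 5)  # stand-in grid, interior point only
p = (2, 3)                            # current pixel
for index, val in np.ndenumerate(marked[p[0] - 1:p[0] + 2, p[1] - 1:p[1] + 2]):
    pixel = (index[0] - 1 + p[0], index[1] - 1 + p[1])  # neighbor in grid coordinates
    assert marked[pixel] == val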
djgagne/hagelslag
hagelslag/processing/EnhancedWatershedSegmenter.py
EnhancedWatershed.remove_foothills
def remove_foothills(self, q_data, marked, bin_num, bin_lower, centers, foothills): """ Mark points determined to be foothills as globbed, so that they are not included in future searches. Also searches neighboring points to foothill points to determine if they should also be considered foothills. Args: q_data: Quantized data marked: Marked bin_num: Current bin being searched bin_lower: Next bin being searched centers: dictionary of local maxima considered to be object centers foothills: List of foothill points being removed. """ hills = [] for foot in foothills: center = foot[0] hills[:] = foot[1][:] # remove all foothills while len(hills) > 0: # mark this point pt = hills.pop(-1) marked[pt] = self.GLOBBED for s_index, val in np.ndenumerate(marked[pt[0]-1:pt[0]+2,pt[1]-1:pt[1]+2]): index = (s_index[0] - 1 + pt[0], s_index[1] - 1 + pt[1]) # is neighbor part of peak or part of mountain? if val == self.UNMARKED: # will let in even minor peaks if (q_data[index] >= 0) and \ (q_data[index] < bin_lower) and \ ((q_data[index] <= q_data[pt]) or self.is_closest(index, center, centers, bin_num)): hills.append(index) del foothills[:]
python
def remove_foothills(self, q_data, marked, bin_num, bin_lower, centers, foothills): """ Mark points determined to be foothills as globbed, so that they are not included in future searches. Also searches neighboring points to foothill points to determine if they should also be considered foothills. Args: q_data: Quantized data marked: Marked bin_num: Current bin being searched bin_lower: Next bin being searched centers: dictionary of local maxima considered to be object centers foothills: List of foothill points being removed. """ hills = [] for foot in foothills: center = foot[0] hills[:] = foot[1][:] # remove all foothills while len(hills) > 0: # mark this point pt = hills.pop(-1) marked[pt] = self.GLOBBED for s_index, val in np.ndenumerate(marked[pt[0]-1:pt[0]+2,pt[1]-1:pt[1]+2]): index = (s_index[0] - 1 + pt[0], s_index[1] - 1 + pt[1]) # is neighbor part of peak or part of mountain? if val == self.UNMARKED: # will let in even minor peaks if (q_data[index] >= 0) and \ (q_data[index] < bin_lower) and \ ((q_data[index] <= q_data[pt]) or self.is_closest(index, center, centers, bin_num)): hills.append(index) del foothills[:]
[ "def", "remove_foothills", "(", "self", ",", "q_data", ",", "marked", ",", "bin_num", ",", "bin_lower", ",", "centers", ",", "foothills", ")", ":", "hills", "=", "[", "]", "for", "foot", "in", "foothills", ":", "center", "=", "foot", "[", "0", "]", "hills", "[", ":", "]", "=", "foot", "[", "1", "]", "[", ":", "]", "# remove all foothills", "while", "len", "(", "hills", ")", ">", "0", ":", "# mark this point", "pt", "=", "hills", ".", "pop", "(", "-", "1", ")", "marked", "[", "pt", "]", "=", "self", ".", "GLOBBED", "for", "s_index", ",", "val", "in", "np", ".", "ndenumerate", "(", "marked", "[", "pt", "[", "0", "]", "-", "1", ":", "pt", "[", "0", "]", "+", "2", ",", "pt", "[", "1", "]", "-", "1", ":", "pt", "[", "1", "]", "+", "2", "]", ")", ":", "index", "=", "(", "s_index", "[", "0", "]", "-", "1", "+", "pt", "[", "0", "]", ",", "s_index", "[", "1", "]", "-", "1", "+", "pt", "[", "1", "]", ")", "# is neighbor part of peak or part of mountain?", "if", "val", "==", "self", ".", "UNMARKED", ":", "# will let in even minor peaks", "if", "(", "q_data", "[", "index", "]", ">=", "0", ")", "and", "(", "q_data", "[", "index", "]", "<", "bin_lower", ")", "and", "(", "(", "q_data", "[", "index", "]", "<=", "q_data", "[", "pt", "]", ")", "or", "self", ".", "is_closest", "(", "index", ",", "center", ",", "centers", ",", "bin_num", ")", ")", ":", "hills", ".", "append", "(", "index", ")", "del", "foothills", "[", ":", "]" ]
Mark points determined to be foothills as globbed, so that they are not included in future searches. Also searches neighboring points to foothill points to determine if they should also be considered foothills. Args: q_data: Quantized data marked: Array marking points that are objects bin_num: Current bin being searched bin_lower: Next bin being searched centers: Dictionary of local maxima considered to be object centers foothills: List of foothill points being removed.
[ "Mark", "points", "determined", "to", "be", "foothills", "as", "globbed", "so", "that", "they", "are", "not", "included", "in", "future", "searches", ".", "Also", "searches", "neighboring", "points", "to", "foothill", "points", "to", "determine", "if", "they", "should", "also", "be", "considered", "foothills", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnhancedWatershedSegmenter.py#L223-L255
djgagne/hagelslag
hagelslag/processing/EnhancedWatershedSegmenter.py
EnhancedWatershed.quantize
def quantize(self, input_grid): """ Quantize a grid into discrete steps based on input parameters. Args: input_grid: 2-d array of values Returns: Dictionary of value pointing to pixel locations, and quantized 2-d array of data """ pixels = {} for i in range(self.max_bin+1): pixels[i] = [] data = (np.array(input_grid, dtype=int) - self.min_thresh) / self.data_increment data[data < 0] = -1 data[data > self.max_bin] = self.max_bin good_points = np.where(data >= 0) for g in np.arange(good_points[0].shape[0]): pixels[data[(good_points[0][g], good_points[1][g])]].append((good_points[0][g], good_points[1][g])) return pixels, data
python
def quantize(self, input_grid): """ Quantize a grid into discrete steps based on input parameters. Args: input_grid: 2-d array of values Returns: Dictionary of value pointing to pixel locations, and quantized 2-d array of data """ pixels = {} for i in range(self.max_bin+1): pixels[i] = [] data = (np.array(input_grid, dtype=int) - self.min_thresh) / self.data_increment data[data < 0] = -1 data[data > self.max_bin] = self.max_bin good_points = np.where(data >= 0) for g in np.arange(good_points[0].shape[0]): pixels[data[(good_points[0][g], good_points[1][g])]].append((good_points[0][g], good_points[1][g])) return pixels, data
[ "def", "quantize", "(", "self", ",", "input_grid", ")", ":", "pixels", "=", "{", "}", "for", "i", "in", "range", "(", "self", ".", "max_bin", "+", "1", ")", ":", "pixels", "[", "i", "]", "=", "[", "]", "data", "=", "(", "np", ".", "array", "(", "input_grid", ",", "dtype", "=", "int", ")", "-", "self", ".", "min_thresh", ")", "/", "self", ".", "data_increment", "data", "[", "data", "<", "0", "]", "=", "-", "1", "data", "[", "data", ">", "self", ".", "max_bin", "]", "=", "self", ".", "max_bin", "good_points", "=", "np", ".", "where", "(", "data", ">=", "0", ")", "for", "g", "in", "np", ".", "arange", "(", "good_points", "[", "0", "]", ".", "shape", "[", "0", "]", ")", ":", "pixels", "[", "data", "[", "(", "good_points", "[", "0", "]", "[", "g", "]", ",", "good_points", "[", "1", "]", "[", "g", "]", ")", "]", "]", ".", "append", "(", "(", "good_points", "[", "0", "]", "[", "g", "]", ",", "good_points", "[", "1", "]", "[", "g", "]", ")", ")", "return", "pixels", ",", "data" ]
Quantize a grid into discrete steps based on input parameters. Args: input_grid: 2-d array of values Returns: Dictionary mapping bin values to pixel locations, and quantized 2-d array of data
[ "Quantize", "a", "grid", "into", "discrete", "steps", "based", "on", "input", "parameters", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/EnhancedWatershedSegmenter.py#L270-L290
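A worked example of the binning in quantize: with illustrative min_thresh=5 and data_increment=10, a value of 35 falls in bin (35 - 5) / 10 = 3. Note the original line uses /, which floor-divides integer arrays under the Python 2 era numpy this code targets; a Python 3 port would use // to keep integer bin keys, as sketched here:

import numpy as np

min_thresh, data_increment, max_bin = 5, 10, 6  # illustrative parameters only
input_grid = np.array([[0, 35], [70, 120]])
data = (input_grid.astype(int) - min_thresh) // data_increment  # // keeps integer bins on Python 3
data[data < 0] = -1
data[data > max_bin] = max_bin
print(data)  # [[-1  3] [ 6  6]]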
softvar/simplegist
simplegist/mygist.py
Mygist.listall
def listall(self): ''' will display all the filenames. Result can be stored in an array for easy fetching of gistNames for future purposes. eg. a = Gist().mygists().listall() print a[0] #to fetch first gistName ''' file_name = [] r = requests.get( '%s/users/%s/gists' % (BASE_URL, self.user), headers=self.gist.header ) r_text = json.loads(r.text) limit = len(r.json()) if (r.status_code == 200 ): for g,no in zip(r_text, range(0,limit)): for key,value in r.json()[no]['files'].iteritems(): file_name.append(value['filename']) return file_name raise Exception('Username not found')
python
def listall(self): ''' will display all the filenames. Result can be stored in an array for easy fetching of gistNames for future purposes. eg. a = Gist().mygists().listall() print a[0] #to fetch first gistName ''' file_name = [] r = requests.get( '%s/users/%s/gists' % (BASE_URL, self.user), headers=self.gist.header ) r_text = json.loads(r.text) limit = len(r.json()) if (r.status_code == 200 ): for g,no in zip(r_text, range(0,limit)): for key,value in r.json()[no]['files'].iteritems(): file_name.append(value['filename']) return file_name raise Exception('Username not found')
[ "def", "listall", "(", "self", ")", ":", "file_name", "=", "[", "]", "r", "=", "requests", ".", "get", "(", "'%s/users/%s/gists'", "%", "(", "BASE_URL", ",", "self", ".", "user", ")", ",", "headers", "=", "self", ".", "gist", ".", "header", ")", "r_text", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "limit", "=", "len", "(", "r", ".", "json", "(", ")", ")", "if", "(", "r", ".", "status_code", "==", "200", ")", ":", "for", "g", ",", "no", "in", "zip", "(", "r_text", ",", "range", "(", "0", ",", "limit", ")", ")", ":", "for", "key", ",", "value", "in", "r", ".", "json", "(", ")", "[", "no", "]", "[", "'files'", "]", ".", "iteritems", "(", ")", ":", "file_name", ".", "append", "(", "value", "[", "'filename'", "]", ")", "return", "file_name", "raise", "Exception", "(", "'Username not found'", ")" ]
will display all the filenames. Result can be stored in an array for easy fetching of gistNames for future purposes. eg. a = Gist().mygists().listall() print a[0] #to fetch first gistName
[ "will", "display", "all", "the", "filenames", ".", "Result", "can", "be", "stored", "in", "an", "array", "for", "easy", "fetching", "of", "gistNames", "for", "future", "purposes", ".", "eg", ".", "a", "=", "Gist", "()", ".", "mygists", "()", ".", "listall", "()", "print", "a", "[", "0", "]", "#to", "fetch", "first", "gistName" ]
train
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/mygist.py#L13-L34
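listall above iterates the gists payload with dict.iteritems(), a Python 2 idiom. The same filename collection on Python 3, with the payload shape inferred from the code (a list of gist objects, each holding a 'files' dict keyed by filename):

# Stand-in payload mimicking the inferred response shape.
gists = [{"files": {"a.py": {"filename": "a.py"}}},
         {"files": {"b.md": {"filename": "b.md"}}}]
file_name = [value["filename"]
             for gist in gists
             for value in gist["files"].values()]  # .values()/.items() replace iteritems()
print(file_name)  # -> ['a.py', 'b.md']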
softvar/simplegist
simplegist/mygist.py
Mygist.content
def content(self, **args): ''' Doesn't require manual fetching of gistID of a gist; passing gistName will return the content of gist. In case names are ambiguous, provide GistID or it will return the contents of recent ambiguous gistname ''' self.gist_name = '' if 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Either provide authenticated user\'s Unambigious Gistname or any unique Gistid') if self.gist_id: r = requests.get( '%s'%BASE_URL+'/gists/%s' %self.gist_id, headers=self.gist.header ) if (r.status_code == 200): r_text = json.loads(r.text) if self.gist_name!='': content = r.json()['files'][self.gist_name]['content'] else: for key,value in r.json()['files'].iteritems(): content = r.json()['files'][value['filename']]['content'] return content raise Exception('No such gist found')
python
def content(self, **args): ''' Doesn't require manual fetching of gistID of a gist; passing gistName will return the content of gist. In case names are ambiguous, provide GistID or it will return the contents of recent ambiguous gistname ''' self.gist_name = '' if 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Either provide authenticated user\'s Unambigious Gistname or any unique Gistid') if self.gist_id: r = requests.get( '%s'%BASE_URL+'/gists/%s' %self.gist_id, headers=self.gist.header ) if (r.status_code == 200): r_text = json.loads(r.text) if self.gist_name!='': content = r.json()['files'][self.gist_name]['content'] else: for key,value in r.json()['files'].iteritems(): content = r.json()['files'][value['filename']]['content'] return content raise Exception('No such gist found')
[ "def", "content", "(", "self", ",", "*", "*", "args", ")", ":", "self", ".", "gist_name", "=", "''", "if", "'name'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "self", ".", "getMyID", "(", "self", ".", "gist_name", ")", "elif", "'id'", "in", "args", ":", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "else", ":", "raise", "Exception", "(", "'Either provide authenticated user\\'s Unambigious Gistname or any unique Gistid'", ")", "if", "self", ".", "gist_id", ":", "r", "=", "requests", ".", "get", "(", "'%s'", "%", "BASE_URL", "+", "'/gists/%s'", "%", "self", ".", "gist_id", ",", "headers", "=", "self", ".", "gist", ".", "header", ")", "if", "(", "r", ".", "status_code", "==", "200", ")", ":", "r_text", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "if", "self", ".", "gist_name", "!=", "''", ":", "content", "=", "r", ".", "json", "(", ")", "[", "'files'", "]", "[", "self", ".", "gist_name", "]", "[", "'content'", "]", "else", ":", "for", "key", ",", "value", "in", "r", ".", "json", "(", ")", "[", "'files'", "]", ".", "iteritems", "(", ")", ":", "content", "=", "r", ".", "json", "(", ")", "[", "'files'", "]", "[", "value", "[", "'filename'", "]", "]", "[", "'content'", "]", "return", "content", "raise", "Exception", "(", "'No such gist found'", ")" ]
Doesn't require manual fetching of gistID of a gist; passing gistName will return the content of gist. In case names are ambiguous, provide GistID or it will return the contents of recent ambiguous gistname
[ "Doesn", "t", "require", "manual", "fetching", "of", "gistID", "of", "a", "gist", "passing", "gistName", "will", "return", "the", "content", "of", "gist", ".", "In", "case", "names", "are", "ambiguous", "provide", "GistID", "or", "it", "will", "return", "the", "contents", "of", "recent", "ambiguous", "gistname" ]
train
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/mygist.py#L78-L109
softvar/simplegist
simplegist/mygist.py
Mygist.edit
def edit(self, **args): ''' Doesn't require manual fetching of gistID of a gist passing gistName will return edit the gist ''' self.gist_name = '' if 'description' in args: self.description = args['description'] else: self.description = '' if 'name' in args and 'id' in args: self.gist_name = args['name'] self.gist_id = args['id'] elif 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Gist Name/ID must be provided') if 'content' in args: self.content = args['content'] else: raise Exception('Gist content can\'t be empty') if (self.gist_name == ''): self.gist_name = self.getgist(id=self.gist_id) data = {"description": self.description, "files": { self.gist_name: { "content": self.content } } } else: data = {"description": self.description, "files": { self.gist_name: { "content": self.content } } } if self.gist_id: r = requests.patch( '%s/gists/%s'%(BASE_URL,self.gist_id), headers=self.gist.header, data=json.dumps(data), ) if (r.status_code == 200): r_text = json.loads(r.text) response = { 'updated_content': self.content, 'created_at': r.json()['created_at'], 'comments':r.json()['comments'] } return response raise Exception('No such gist found')
python
def edit(self, **args): ''' Doesn't require manual fetching of gistID of a gist passing gistName will return edit the gist ''' self.gist_name = '' if 'description' in args: self.description = args['description'] else: self.description = '' if 'name' in args and 'id' in args: self.gist_name = args['name'] self.gist_id = args['id'] elif 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Gist Name/ID must be provided') if 'content' in args: self.content = args['content'] else: raise Exception('Gist content can\'t be empty') if (self.gist_name == ''): self.gist_name = self.getgist(id=self.gist_id) data = {"description": self.description, "files": { self.gist_name: { "content": self.content } } } else: data = {"description": self.description, "files": { self.gist_name: { "content": self.content } } } if self.gist_id: r = requests.patch( '%s/gists/%s'%(BASE_URL,self.gist_id), headers=self.gist.header, data=json.dumps(data), ) if (r.status_code == 200): r_text = json.loads(r.text) response = { 'updated_content': self.content, 'created_at': r.json()['created_at'], 'comments':r.json()['comments'] } return response raise Exception('No such gist found')
[ "def", "edit", "(", "self", ",", "*", "*", "args", ")", ":", "self", ".", "gist_name", "=", "''", "if", "'description'", "in", "args", ":", "self", ".", "description", "=", "args", "[", "'description'", "]", "else", ":", "self", ".", "description", "=", "''", "if", "'name'", "in", "args", "and", "'id'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "elif", "'name'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "self", ".", "getMyID", "(", "self", ".", "gist_name", ")", "elif", "'id'", "in", "args", ":", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "else", ":", "raise", "Exception", "(", "'Gist Name/ID must be provided'", ")", "if", "'content'", "in", "args", ":", "self", ".", "content", "=", "args", "[", "'content'", "]", "else", ":", "raise", "Exception", "(", "'Gist content can\\'t be empty'", ")", "if", "(", "self", ".", "gist_name", "==", "''", ")", ":", "self", ".", "gist_name", "=", "self", ".", "getgist", "(", "id", "=", "self", ".", "gist_id", ")", "data", "=", "{", "\"description\"", ":", "self", ".", "description", ",", "\"files\"", ":", "{", "self", ".", "gist_name", ":", "{", "\"content\"", ":", "self", ".", "content", "}", "}", "}", "else", ":", "data", "=", "{", "\"description\"", ":", "self", ".", "description", ",", "\"files\"", ":", "{", "self", ".", "gist_name", ":", "{", "\"content\"", ":", "self", ".", "content", "}", "}", "}", "if", "self", ".", "gist_id", ":", "r", "=", "requests", ".", "patch", "(", "'%s/gists/%s'", "%", "(", "BASE_URL", ",", "self", ".", "gist_id", ")", ",", "headers", "=", "self", ".", "gist", ".", "header", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", ")", "if", "(", "r", ".", "status_code", "==", "200", ")", ":", "r_text", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "response", "=", "{", "'updated_content'", ":", "self", ".", "content", ",", "'created_at'", ":", "r", ".", "json", "(", ")", "[", "'created_at'", "]", ",", "'comments'", ":", "r", ".", "json", "(", ")", "[", "'comments'", "]", "}", "return", "response", "raise", "Exception", "(", "'No such gist found'", ")" ]
Doesn't require manual fetching of gistID of a gist; passing gistName will edit the gist
[ "Doesn", "t", "require", "manual", "fetching", "of", "gistID", "of", "a", "gist", "passing", "gistName", "will", "edit", "the", "gist" ]
train
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/mygist.py#L132-L195
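A usage sketch of edit() following the keyword handling above. The Gist().mygists() instantiation mirrors the listall docstring's example; the gist name, description, and content are illustrative values only:

mygist = Gist().mygists()  # authenticated instance, per the listall docstring's example
response = mygist.edit(name='notes.md',              # resolved to an id via getMyID()
                       description='updated notes',  # optional, defaults to ''
                       content='new gist body')      # required, else an Exception is raised
print(response['updated_content'], response['created_at'], response['comments'])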
softvar/simplegist
simplegist/mygist.py
Mygist.delete
def delete(self, **args): ''' Delete a gist by gistname/gistID ''' if 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Provide GistName to delete') url = 'gists' if self.gist_id: r = requests.delete( '%s/%s/%s'%(BASE_URL,url,self.gist_id), headers=self.gist.header ) if (r.status_code == 204): response = { 'id': self.gist_id, } return response raise Exception('Can not delete gist')
python
def delete(self, **args): ''' Delete a gist by gistname/gistID ''' if 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Provide GistName to delete') url = 'gists' if self.gist_id: r = requests.delete( '%s/%s/%s'%(BASE_URL,url,self.gist_id), headers=self.gist.header ) if (r.status_code == 204): response = { 'id': self.gist_id, } return response raise Exception('Can not delete gist')
[ "def", "delete", "(", "self", ",", "*", "*", "args", ")", ":", "if", "'name'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "self", ".", "getMyID", "(", "self", ".", "gist_name", ")", "elif", "'id'", "in", "args", ":", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "else", ":", "raise", "Exception", "(", "'Provide GistName to delete'", ")", "url", "=", "'gists'", "if", "self", ".", "gist_id", ":", "r", "=", "requests", ".", "delete", "(", "'%s/%s/%s'", "%", "(", "BASE_URL", ",", "url", ",", "self", ".", "gist_id", ")", ",", "headers", "=", "self", ".", "gist", ".", "header", ")", "if", "(", "r", ".", "status_code", "==", "204", ")", ":", "response", "=", "{", "'id'", ":", "self", ".", "gist_id", ",", "}", "return", "response", "raise", "Exception", "(", "'Can not delete gist'", ")" ]
Delete a gist by gistname/gistID
[ "Delete", "a", "gist", "by", "gistname", "/", "gistID" ]
train
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/mygist.py#L198-L223
softvar/simplegist
simplegist/mygist.py
Mygist.starred
def starred(self, **args): ''' List the authenticated user's starred gists ''' ids =[] r = requests.get( '%s/gists/starred'%BASE_URL, headers=self.gist.header ) if 'limit' in args: limit = args['limit'] else: limit = len(r.json()) if (r.status_code == 200): for g in range(0,limit ): ids.append('%s/%s/%s' %(GIST_URL,r.json()[g]['user']['login'],r.json()[g]['id'])) return ids raise Exception('Username not found')
python
def starred(self, **args): ''' List the authenticated user's starred gists ''' ids =[] r = requests.get( '%s/gists/starred'%BASE_URL, headers=self.gist.header ) if 'limit' in args: limit = args['limit'] else: limit = len(r.json()) if (r.status_code == 200): for g in range(0,limit ): ids.append('%s/%s/%s' %(GIST_URL,r.json()[g]['user']['login'],r.json()[g]['id'])) return ids raise Exception('Username not found')
[ "def", "starred", "(", "self", ",", "*", "*", "args", ")", ":", "ids", "=", "[", "]", "r", "=", "requests", ".", "get", "(", "'%s/gists/starred'", "%", "BASE_URL", ",", "headers", "=", "self", ".", "gist", ".", "header", ")", "if", "'limit'", "in", "args", ":", "limit", "=", "args", "[", "'limit'", "]", "else", ":", "limit", "=", "len", "(", "r", ".", "json", "(", ")", ")", "if", "(", "r", ".", "status_code", "==", "200", ")", ":", "for", "g", "in", "range", "(", "0", ",", "limit", ")", ":", "ids", ".", "append", "(", "'%s/%s/%s'", "%", "(", "GIST_URL", ",", "r", ".", "json", "(", ")", "[", "g", "]", "[", "'user'", "]", "[", "'login'", "]", ",", "r", ".", "json", "(", ")", "[", "g", "]", "[", "'id'", "]", ")", ")", "return", "ids", "raise", "Exception", "(", "'Username not found'", ")" ]
List the authenticated user's starred gists
[ "List", "the", "authenticated", "user", "s", "starred", "gists" ]
train
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/mygist.py#L226-L246
softvar/simplegist
simplegist/mygist.py
Mygist.links
def links(self,**args): ''' Return Gist URL-Link, Clone-Link and Script-Link to embed ''' if 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Gist Name/ID must be provided') if self.gist_id: r = requests.get( '%s/gists/%s'%(BASE_URL,self.gist_id), headers=self.gist.header, ) if (r.status_code == 200): content = { 'Github-User': r.json()['user']['login'], 'GistID': r.json()['id'], 'Gist-Link': '%s/%s/%s' %(GIST_URL,self.gist.username,r.json()['id']), 'Clone-Link': '%s/%s.git' %(GIST_URL,r.json()['id']), 'Embed-Script': '<script src="%s/%s/%s.js"></script>' %(GIST_URL,self.gist.username,r.json()['id']) } return content raise Exception('No such gist found')
python
def links(self,**args): ''' Return Gist URL-Link, Clone-Link and Script-Link to embed ''' if 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Gist Name/ID must be provided') if self.gist_id: r = requests.get( '%s/gists/%s'%(BASE_URL,self.gist_id), headers=self.gist.header, ) if (r.status_code == 200): content = { 'Github-User': r.json()['user']['login'], 'GistID': r.json()['id'], 'Gist-Link': '%s/%s/%s' %(GIST_URL,self.gist.username,r.json()['id']), 'Clone-Link': '%s/%s.git' %(GIST_URL,r.json()['id']), 'Embed-Script': '<script src="%s/%s/%s.js"></script>' %(GIST_URL,self.gist.username,r.json()['id']) } return content raise Exception('No such gist found')
[ "def", "links", "(", "self", ",", "*", "*", "args", ")", ":", "if", "'name'", "in", "args", ":", "self", ".", "gist_name", "=", "args", "[", "'name'", "]", "self", ".", "gist_id", "=", "self", ".", "getMyID", "(", "self", ".", "gist_name", ")", "elif", "'id'", "in", "args", ":", "self", ".", "gist_id", "=", "args", "[", "'id'", "]", "else", ":", "raise", "Exception", "(", "'Gist Name/ID must be provided'", ")", "if", "self", ".", "gist_id", ":", "r", "=", "requests", ".", "get", "(", "'%s/gists/%s'", "%", "(", "BASE_URL", ",", "self", ".", "gist_id", ")", ",", "headers", "=", "self", ".", "gist", ".", "header", ",", ")", "if", "(", "r", ".", "status_code", "==", "200", ")", ":", "content", "=", "{", "'Github-User'", ":", "r", ".", "json", "(", ")", "[", "'user'", "]", "[", "'login'", "]", ",", "'GistID'", ":", "r", ".", "json", "(", ")", "[", "'id'", "]", ",", "'Gist-Link'", ":", "'%s/%s/%s'", "%", "(", "GIST_URL", ",", "self", ".", "gist", ".", "username", ",", "r", ".", "json", "(", ")", "[", "'id'", "]", ")", ",", "'Clone-Link'", ":", "'%s/%s.git'", "%", "(", "GIST_URL", ",", "r", ".", "json", "(", ")", "[", "'id'", "]", ")", ",", "'Embed-Script'", ":", "'<script src=\"%s/%s/%s.js\"></script>'", "%", "(", "GIST_URL", ",", "self", ".", "gist", ".", "username", ",", "r", ".", "json", "(", ")", "[", "'id'", "]", ")", "}", "return", "content", "raise", "Exception", "(", "'No such gist found'", ")" ]
Return Gist URL-Link, Clone-Link and Script-Link to embed
[ "Return", "Gist", "URL", "-", "Link", "Clone", "-", "Link", "and", "Script", "-", "Link", "to", "embed" ]
train
https://github.com/softvar/simplegist/blob/8d53edd15d76c7b10fb963a659c1cf9f46f5345d/simplegist/mygist.py#L248-L275
djgagne/hagelslag
hagelslag/evaluation/NeighborEvaluator.py
NeighborEvaluator.load_forecasts
def load_forecasts(self): """ Load neighborhood probability forecasts. """ run_date_str = self.run_date.strftime("%Y%m%d") forecast_file = self.forecast_path + "{0}/{1}_{2}_{3}_consensus_{0}.nc".format(run_date_str, self.ensemble_name, self.model_name, self.forecast_variable) print("Forecast file: " + forecast_file) forecast_data = Dataset(forecast_file) for size_threshold in self.size_thresholds: for smoothing_radius in self.smoothing_radii: for neighbor_radius in self.neighbor_radii: hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius, smoothing_radius, self.forecast_variable, float(size_threshold)) period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(self.end_hour - self.start_hour + 1, neighbor_radius, smoothing_radius, self.forecast_variable, float(size_threshold)) print("Loading forecasts {0} {1} {2} {3} {4}".format(self.run_date, self.model_name, self.forecast_variable, size_threshold, smoothing_radius)) if hour_var in forecast_data.variables.keys(): self.hourly_forecasts[hour_var] = forecast_data.variables[hour_var][:] if period_var in forecast_data.variables.keys(): self.period_forecasts[period_var] = forecast_data.variables[period_var][:] forecast_data.close()
python
def load_forecasts(self): """ Load neighborhood probability forecasts. """ run_date_str = self.run_date.strftime("%Y%m%d") forecast_file = self.forecast_path + "{0}/{1}_{2}_{3}_consensus_{0}.nc".format(run_date_str, self.ensemble_name, self.model_name, self.forecast_variable) print("Forecast file: " + forecast_file) forecast_data = Dataset(forecast_file) for size_threshold in self.size_thresholds: for smoothing_radius in self.smoothing_radii: for neighbor_radius in self.neighbor_radii: hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius, smoothing_radius, self.forecast_variable, float(size_threshold)) period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(self.end_hour - self.start_hour + 1, neighbor_radius, smoothing_radius, self.forecast_variable, float(size_threshold)) print("Loading forecasts {0} {1} {2} {3} {4}".format(self.run_date, self.model_name, self.forecast_variable, size_threshold, smoothing_radius)) if hour_var in forecast_data.variables.keys(): self.hourly_forecasts[hour_var] = forecast_data.variables[hour_var][:] if period_var in forecast_data.variables.keys(): self.period_forecasts[period_var] = forecast_data.variables[period_var][:] forecast_data.close()
[ "def", "load_forecasts", "(", "self", ")", ":", "run_date_str", "=", "self", ".", "run_date", ".", "strftime", "(", "\"%Y%m%d\"", ")", "forecast_file", "=", "self", ".", "forecast_path", "+", "\"{0}/{1}_{2}_{3}_consensus_{0}.nc\"", ".", "format", "(", "run_date_str", ",", "self", ".", "ensemble_name", ",", "self", ".", "model_name", ",", "self", ".", "forecast_variable", ")", "print", "(", "\"Forecast file: \"", "+", "forecast_file", ")", "forecast_data", "=", "Dataset", "(", "forecast_file", ")", "for", "size_threshold", "in", "self", ".", "size_thresholds", ":", "for", "smoothing_radius", "in", "self", ".", "smoothing_radii", ":", "for", "neighbor_radius", "in", "self", ".", "neighbor_radii", ":", "hour_var", "=", "\"neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}\"", ".", "format", "(", "neighbor_radius", ",", "smoothing_radius", ",", "self", ".", "forecast_variable", ",", "float", "(", "size_threshold", ")", ")", "period_var", "=", "\"neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}\"", ".", "format", "(", "self", ".", "end_hour", "-", "self", ".", "start_hour", "+", "1", ",", "neighbor_radius", ",", "smoothing_radius", ",", "self", ".", "forecast_variable", ",", "float", "(", "size_threshold", ")", ")", "print", "(", "\"Loading forecasts {0} {1} {2} {3} {4}\"", ".", "format", "(", "self", ".", "run_date", ",", "self", ".", "model_name", ",", "self", ".", "forecast_variable", ",", "size_threshold", ",", "smoothing_radius", ")", ")", "if", "hour_var", "in", "forecast_data", ".", "variables", ".", "keys", "(", ")", ":", "self", ".", "hourly_forecasts", "[", "hour_var", "]", "=", "forecast_data", ".", "variables", "[", "hour_var", "]", "[", ":", "]", "if", "period_var", "in", "forecast_data", ".", "variables", ".", "keys", "(", ")", ":", "self", ".", "period_forecasts", "[", "period_var", "]", "=", "forecast_data", ".", "variables", "[", "period_var", "]", "[", ":", "]", "forecast_data", ".", "close", "(", ")" ]
Load neighborhood probability forecasts.
[ "Load", "neighborhood", "probability", "forecasts", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/NeighborEvaluator.py#L62-L93
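To make the naming scheme above easy to verify outside the class, here is a minimal standalone sketch of the variable-name construction used by load_forecasts; the radius, threshold, hour, and variable values below are illustrative, not taken from the record:

# Rebuild the hourly and full-period netCDF variable names exactly as
# load_forecasts does (all inputs below are made-up examples).
neighbor_radius = 14
smoothing_radius = 21
forecast_variable = "hail"
size_threshold = 25
start_hour, end_hour = 12, 35
hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(
    neighbor_radius, smoothing_radius, forecast_variable, float(size_threshold))
period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(
    end_hour - start_hour + 1, neighbor_radius, smoothing_radius,
    forecast_variable, float(size_threshold))
print(hour_var)    # neighbor_prob_r_14_s_21_hail_25.00
print(period_var)  # neighbor_prob_24-hour_r_14_s_21_hail_25.00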
djgagne/hagelslag
hagelslag/evaluation/NeighborEvaluator.py
NeighborEvaluator.load_obs
def load_obs(self, mask_threshold=0.5):
        """
        Loads observations and masking grid (if needed).

        Args:
            mask_threshold: Values greater than the threshold are kept, others are masked.
        """
        print("Loading obs ", self.run_date, self.model_name, self.forecast_variable)
        start_date = self.run_date + timedelta(hours=self.start_hour)
        end_date = self.run_date + timedelta(hours=self.end_hour)
        mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)
        mrms_grid.load_data()
        if len(mrms_grid.data) > 0:
            self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)
            self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0)
            if self.obs_mask:
                mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)
                mask_grid.load_data()
                self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)
                self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)
python
def load_obs(self, mask_threshold=0.5):
        """
        Loads observations and masking grid (if needed).

        Args:
            mask_threshold: Values greater than the threshold are kept, others are masked.
        """
        print("Loading obs ", self.run_date, self.model_name, self.forecast_variable)
        start_date = self.run_date + timedelta(hours=self.start_hour)
        end_date = self.run_date + timedelta(hours=self.end_hour)
        mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)
        mrms_grid.load_data()
        if len(mrms_grid.data) > 0:
            self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)
            self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0)
            if self.obs_mask:
                mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)
                mask_grid.load_data()
                self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)
                self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)
[ "def", "load_obs", "(", "self", ",", "mask_threshold", "=", "0.5", ")", ":", "print", "(", "\"Loading obs \"", ",", "self", ".", "run_date", ",", "self", ".", "model_name", ",", "self", ".", "forecast_variable", ")", "start_date", "=", "self", ".", "run_date", "+", "timedelta", "(", "hours", "=", "self", ".", "start_hour", ")", "end_date", "=", "self", ".", "run_date", "+", "timedelta", "(", "hours", "=", "self", ".", "end_hour", ")", "mrms_grid", "=", "MRMSGrid", "(", "start_date", ",", "end_date", ",", "self", ".", "mrms_variable", ",", "self", ".", "mrms_path", ")", "mrms_grid", ".", "load_data", "(", ")", "if", "len", "(", "mrms_grid", ".", "data", ")", ">", "0", ":", "self", ".", "raw_obs", "[", "self", ".", "mrms_variable", "]", "=", "np", ".", "where", "(", "mrms_grid", ".", "data", ">", "100", ",", "100", ",", "mrms_grid", ".", "data", ")", "self", ".", "period_obs", "[", "self", ".", "mrms_variable", "]", "=", "self", ".", "raw_obs", "[", "self", ".", "mrms_variable", "]", ".", "max", "(", "axis", "=", "0", ")", "if", "self", ".", "obs_mask", ":", "mask_grid", "=", "MRMSGrid", "(", "start_date", ",", "end_date", ",", "self", ".", "mask_variable", ",", "self", ".", "mrms_path", ")", "mask_grid", ".", "load_data", "(", ")", "self", ".", "raw_obs", "[", "self", ".", "mask_variable", "]", "=", "np", ".", "where", "(", "mask_grid", ".", "data", ">=", "mask_threshold", ",", "1", ",", "0", ")", "self", ".", "period_obs", "[", "self", ".", "mask_variable", "]", "=", "self", ".", "raw_obs", "[", "self", ".", "mask_variable", "]", ".", "max", "(", "axis", "=", "0", ")" ]
Loads observations and masking grid (if needed).

Args:
    mask_threshold: Values greater than the threshold are kept, others are masked.
[ "Loads", "observations", "and", "masking", "grid", "(", "if", "needed", ")", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/NeighborEvaluator.py#L95-L114
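A short sketch of the two array operations at the heart of load_obs, using a synthetic 3-hour stack so the value capping and the per-pixel period maximum can be seen directly (all values below are invented):

import numpy as np

# Synthetic 3-hour stack of 2x2 MRMS-like grids.
data = np.array([[[30., 120.], [5., 80.]],
                 [[60., 40.], [110., 0.]],
                 [[10., 90.], [20., 50.]]])
raw_obs = np.where(data > 100, 100, data)  # cap hourly values at 100
period_obs = raw_obs.max(axis=0)           # per-pixel max over the period
print(period_obs)
# [[ 60. 100.]
#  [100.  80.]]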
djgagne/hagelslag
hagelslag/evaluation/NeighborEvaluator.py
NeighborEvaluator.load_coordinates
def load_coordinates(self):
        """
        Loads lat-lon coordinates from a netCDF file.
        """
        coord_file = Dataset(self.coordinate_file)
        if "lon" in coord_file.variables.keys():
            self.coordinates["lon"] = coord_file.variables["lon"][:]
            self.coordinates["lat"] = coord_file.variables["lat"][:]
        else:
            self.coordinates["lon"] = coord_file.variables["XLONG"][0]
            self.coordinates["lat"] = coord_file.variables["XLAT"][0]
        coord_file.close()
python
def load_coordinates(self):
        """
        Loads lat-lon coordinates from a netCDF file.
        """
        coord_file = Dataset(self.coordinate_file)
        if "lon" in coord_file.variables.keys():
            self.coordinates["lon"] = coord_file.variables["lon"][:]
            self.coordinates["lat"] = coord_file.variables["lat"][:]
        else:
            self.coordinates["lon"] = coord_file.variables["XLONG"][0]
            self.coordinates["lat"] = coord_file.variables["XLAT"][0]
        coord_file.close()
[ "def", "load_coordinates", "(", "self", ")", ":", "coord_file", "=", "Dataset", "(", "self", ".", "coordinate_file", ")", "if", "\"lon\"", "in", "coord_file", ".", "variables", ".", "keys", "(", ")", ":", "self", ".", "coordinates", "[", "\"lon\"", "]", "=", "coord_file", ".", "variables", "[", "\"lon\"", "]", "[", ":", "]", "self", ".", "coordinates", "[", "\"lat\"", "]", "=", "coord_file", ".", "variables", "[", "\"lat\"", "]", "[", ":", "]", "else", ":", "self", ".", "coordinates", "[", "\"lon\"", "]", "=", "coord_file", ".", "variables", "[", "\"XLONG\"", "]", "[", "0", "]", "self", ".", "coordinates", "[", "\"lat\"", "]", "=", "coord_file", ".", "variables", "[", "\"XLAT\"", "]", "[", "0", "]", "coord_file", ".", "close", "(", ")" ]
Loads lat-lon coordinates from a netCDF file.
[ "Loads", "lat", "-", "lon", "coordinates", "from", "a", "netCDF", "file", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/NeighborEvaluator.py#L116-L127
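load_coordinates falls back to WRF-style 3D XLONG/XLAT fields (taking the first time slice) when no plain lon/lat variables exist. A standalone sketch of the same fallback, assuming Dataset comes from the netCDF4 library and using a hypothetical file name:

from netCDF4 import Dataset

def read_coords(path="coords.nc"):  # "coords.nc" is a placeholder path
    coord_file = Dataset(path)
    try:
        if "lon" in coord_file.variables.keys():
            return coord_file.variables["lon"][:], coord_file.variables["lat"][:]
        # WRF output stores coordinates as (time, y, x); take the first slice.
        return coord_file.variables["XLONG"][0], coord_file.variables["XLAT"][0]
    finally:
        coord_file.close()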
djgagne/hagelslag
hagelslag/evaluation/NeighborEvaluator.py
NeighborEvaluator.evaluate_hourly_forecasts
def evaluate_hourly_forecasts(self):
        """
        Calculates ROC curves and Reliability scores for each forecast hour.

        Returns:
            A pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects.
        """
        score_columns = ["Run_Date", "Forecast_Hour", "Ensemble Name", "Model_Name", "Forecast_Variable",
                         "Neighbor_Radius", "Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"]
        all_scores = pd.DataFrame(columns=score_columns)
        for h, hour in enumerate(range(self.start_hour, self.end_hour + 1)):
            for neighbor_radius in self.neighbor_radii:
                n_filter = disk(neighbor_radius)
                for s, size_threshold in enumerate(self.size_thresholds):
                    print("Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}".format(hour, self.model_name,
                                                                                        self.forecast_variable,
                                                                                        self.run_date,
                                                                                        neighbor_radius,
                                                                                        size_threshold))
                    hour_obs = fftconvolve(self.raw_obs[self.mrms_variable][h] >= self.obs_thresholds[s],
                                           n_filter, mode="same")
                    hour_obs[hour_obs > 1] = 1
                    hour_obs[hour_obs < 1] = 0
                    if self.obs_mask:
                        hour_obs = hour_obs[self.raw_obs[self.mask_variable][h] > 0]
                    for smoothing_radius in self.smoothing_radii:
                        hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius,
                                                                                       smoothing_radius,
                                                                                       self.forecast_variable,
                                                                                       size_threshold)
                        if self.obs_mask:
                            hour_forecast = self.hourly_forecasts[hour_var][h][self.raw_obs[self.mask_variable][h] > 0]
                        else:
                            hour_forecast = self.hourly_forecasts[hour_var][h]
                        roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
                        roc.update(hour_forecast, hour_obs)
                        rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
                        rel.update(hour_forecast, hour_obs)
                        row = [self.run_date, hour, self.ensemble_name, self.model_name, self.forecast_variable,
                               neighbor_radius, smoothing_radius, size_threshold, roc, rel]
                        all_scores.loc[hour_var + "_{0:d}".format(hour)] = row
        return all_scores
python
def evaluate_hourly_forecasts(self):
        """
        Calculates ROC curves and Reliability scores for each forecast hour.

        Returns:
            A pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects.
        """
        score_columns = ["Run_Date", "Forecast_Hour", "Ensemble Name", "Model_Name", "Forecast_Variable",
                         "Neighbor_Radius", "Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"]
        all_scores = pd.DataFrame(columns=score_columns)
        for h, hour in enumerate(range(self.start_hour, self.end_hour + 1)):
            for neighbor_radius in self.neighbor_radii:
                n_filter = disk(neighbor_radius)
                for s, size_threshold in enumerate(self.size_thresholds):
                    print("Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}".format(hour, self.model_name,
                                                                                        self.forecast_variable,
                                                                                        self.run_date,
                                                                                        neighbor_radius,
                                                                                        size_threshold))
                    hour_obs = fftconvolve(self.raw_obs[self.mrms_variable][h] >= self.obs_thresholds[s],
                                           n_filter, mode="same")
                    hour_obs[hour_obs > 1] = 1
                    hour_obs[hour_obs < 1] = 0
                    if self.obs_mask:
                        hour_obs = hour_obs[self.raw_obs[self.mask_variable][h] > 0]
                    for smoothing_radius in self.smoothing_radii:
                        hour_var = "neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}".format(neighbor_radius,
                                                                                       smoothing_radius,
                                                                                       self.forecast_variable,
                                                                                       size_threshold)
                        if self.obs_mask:
                            hour_forecast = self.hourly_forecasts[hour_var][h][self.raw_obs[self.mask_variable][h] > 0]
                        else:
                            hour_forecast = self.hourly_forecasts[hour_var][h]
                        roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
                        roc.update(hour_forecast, hour_obs)
                        rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
                        rel.update(hour_forecast, hour_obs)
                        row = [self.run_date, hour, self.ensemble_name, self.model_name, self.forecast_variable,
                               neighbor_radius, smoothing_radius, size_threshold, roc, rel]
                        all_scores.loc[hour_var + "_{0:d}".format(hour)] = row
        return all_scores
[ "def", "evaluate_hourly_forecasts", "(", "self", ")", ":", "score_columns", "=", "[", "\"Run_Date\"", ",", "\"Forecast_Hour\"", ",", "\"Ensemble Name\"", ",", "\"Model_Name\"", ",", "\"Forecast_Variable\"", ",", "\"Neighbor_Radius\"", ",", "\"Smoothing_Radius\"", ",", "\"Size_Threshold\"", ",", "\"ROC\"", ",", "\"Reliability\"", "]", "all_scores", "=", "pd", ".", "DataFrame", "(", "columns", "=", "score_columns", ")", "for", "h", ",", "hour", "in", "enumerate", "(", "range", "(", "self", ".", "start_hour", ",", "self", ".", "end_hour", "+", "1", ")", ")", ":", "for", "neighbor_radius", "in", "self", ".", "neighbor_radii", ":", "n_filter", "=", "disk", "(", "neighbor_radius", ")", "for", "s", ",", "size_threshold", "in", "enumerate", "(", "self", ".", "size_thresholds", ")", ":", "print", "(", "\"Eval hourly forecast {0:02d} {1} {2} {3} {4:d} {5:d}\"", ".", "format", "(", "hour", ",", "self", ".", "model_name", ",", "self", ".", "forecast_variable", ",", "self", ".", "run_date", ",", "neighbor_radius", ",", "size_threshold", ")", ")", "hour_obs", "=", "fftconvolve", "(", "self", ".", "raw_obs", "[", "self", ".", "mrms_variable", "]", "[", "h", "]", ">=", "self", ".", "obs_thresholds", "[", "s", "]", ",", "n_filter", ",", "mode", "=", "\"same\"", ")", "hour_obs", "[", "hour_obs", ">", "1", "]", "=", "1", "hour_obs", "[", "hour_obs", "<", "1", "]", "=", "0", "if", "self", ".", "obs_mask", ":", "hour_obs", "=", "hour_obs", "[", "self", ".", "raw_obs", "[", "self", ".", "mask_variable", "]", "[", "h", "]", ">", "0", "]", "for", "smoothing_radius", "in", "self", ".", "smoothing_radii", ":", "hour_var", "=", "\"neighbor_prob_r_{0:d}_s_{1:d}_{2}_{3:0.2f}\"", ".", "format", "(", "neighbor_radius", ",", "smoothing_radius", ",", "self", ".", "forecast_variable", ",", "size_threshold", ")", "if", "self", ".", "obs_mask", ":", "hour_forecast", "=", "self", ".", "hourly_forecasts", "[", "hour_var", "]", "[", "h", "]", "[", "self", ".", "raw_obs", "[", "self", ".", "mask_variable", "]", "[", "h", "]", ">", "0", "]", "else", ":", "hour_forecast", "=", "self", ".", "hourly_forecasts", "[", "hour_var", "]", "[", "h", "]", "roc", "=", "DistributedROC", "(", "thresholds", "=", "self", ".", "probability_levels", ",", "obs_threshold", "=", "0.5", ")", "roc", ".", "update", "(", "hour_forecast", ",", "hour_obs", ")", "rel", "=", "DistributedReliability", "(", "thresholds", "=", "self", ".", "probability_levels", ",", "obs_threshold", "=", "0.5", ")", "rel", ".", "update", "(", "hour_forecast", ",", "hour_obs", ")", "row", "=", "[", "self", ".", "run_date", ",", "hour", ",", "self", ".", "ensemble_name", ",", "self", ".", "model_name", ",", "self", ".", "forecast_variable", ",", "neighbor_radius", ",", "smoothing_radius", ",", "size_threshold", ",", "roc", ",", "rel", "]", "all_scores", ".", "loc", "[", "hour_var", "+", "\"_{0:d}\"", ".", "format", "(", "hour", ")", "]", "=", "row", "return", "all_scores" ]
Calculates ROC curves and Reliability scores for each forecast hour.

Returns:
    A pandas DataFrame containing forecast metadata as well as DistributedROC and Reliability objects.
[ "Calculates", "ROC", "curves", "and", "Reliability", "scores", "for", "each", "forecast", "hour", "." ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/NeighborEvaluator.py#L129-L170
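The key transformation in evaluate_hourly_forecasts is turning a binary exceedance grid into a neighborhood-event grid: convolving with a disk footprint and then binarizing, so any pixel within the neighborhood radius of an observed event counts as an event. A self-contained sketch with a synthetic grid:

import numpy as np
from scipy.signal import fftconvolve
from skimage.morphology import disk

obs = np.zeros((7, 7))
obs[3, 3] = 1  # a single observed exceedance
hour_obs = fftconvolve(obs, disk(2), mode="same")
hour_obs[hour_obs > 1] = 1
hour_obs[hour_obs < 1] = 0  # note: FFT roundoff can leave values a hair below
                            # 1, which this strict threshold would zero out
print(hour_obs.sum())  # ~13: the pixels covered by the radius-2 disk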
djgagne/hagelslag
hagelslag/evaluation/NeighborEvaluator.py
NeighborEvaluator.evaluate_period_forecasts
def evaluate_period_forecasts(self):
        """
        Evaluates ROC and Reliability scores for forecasts over the full period from start hour to end hour

        Returns:
            A pandas DataFrame with full-period metadata and verification statistics
        """
        score_columns = ["Run_Date", "Ensemble Name", "Model_Name", "Forecast_Variable", "Neighbor_Radius",
                         "Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"]
        all_scores = pd.DataFrame(columns=score_columns)
        if self.coordinate_file is not None:
            coord_mask = np.where((self.coordinates["lon"] >= self.lon_bounds[0]) &
                                  (self.coordinates["lon"] <= self.lon_bounds[1]) &
                                  (self.coordinates["lat"] >= self.lat_bounds[0]) &
                                  (self.coordinates["lat"] <= self.lat_bounds[1]) &
                                  (self.period_obs[self.mask_variable] > 0))
        else:
            coord_mask = None
        for neighbor_radius in self.neighbor_radii:
            n_filter = disk(neighbor_radius)
            for s, size_threshold in enumerate(self.size_thresholds):
                period_obs = fftconvolve(self.period_obs[self.mrms_variable] >= self.obs_thresholds[s],
                                         n_filter, mode="same")
                period_obs[period_obs > 1] = 1
                if self.obs_mask and self.coordinate_file is None:
                    period_obs = period_obs[self.period_obs[self.mask_variable] > 0]
                elif self.obs_mask and self.coordinate_file is not None:
                    period_obs = period_obs[coord_mask[0], coord_mask[1]]
                else:
                    period_obs = period_obs.ravel()
                for smoothing_radius in self.smoothing_radii:
                    print("Eval period forecast {0} {1} {2} {3} {4} {5}".format(self.model_name,
                                                                                self.forecast_variable,
                                                                                self.run_date, neighbor_radius,
                                                                                size_threshold, smoothing_radius))
                    period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(
                        self.end_hour - self.start_hour + 1, neighbor_radius, smoothing_radius,
                        self.forecast_variable, size_threshold)
                    if self.obs_mask and self.coordinate_file is None:
                        period_forecast = self.period_forecasts[period_var][self.period_obs[self.mask_variable] > 0]
                    elif self.obs_mask and self.coordinate_file is not None:
                        period_forecast = self.period_forecasts[period_var][coord_mask[0], coord_mask[1]]
                    else:
                        period_forecast = self.period_forecasts[period_var].ravel()
                    roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
                    roc.update(period_forecast, period_obs)
                    rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
                    rel.update(period_forecast, period_obs)
                    row = [self.run_date, self.ensemble_name, self.model_name, self.forecast_variable,
                           neighbor_radius, smoothing_radius, size_threshold, roc, rel]
                    all_scores.loc[period_var] = row
        return all_scores
python
def evaluate_period_forecasts(self):
        """
        Evaluates ROC and Reliability scores for forecasts over the full period from start hour to end hour

        Returns:
            A pandas DataFrame with full-period metadata and verification statistics
        """
        score_columns = ["Run_Date", "Ensemble Name", "Model_Name", "Forecast_Variable", "Neighbor_Radius",
                         "Smoothing_Radius", "Size_Threshold", "ROC", "Reliability"]
        all_scores = pd.DataFrame(columns=score_columns)
        if self.coordinate_file is not None:
            coord_mask = np.where((self.coordinates["lon"] >= self.lon_bounds[0]) &
                                  (self.coordinates["lon"] <= self.lon_bounds[1]) &
                                  (self.coordinates["lat"] >= self.lat_bounds[0]) &
                                  (self.coordinates["lat"] <= self.lat_bounds[1]) &
                                  (self.period_obs[self.mask_variable] > 0))
        else:
            coord_mask = None
        for neighbor_radius in self.neighbor_radii:
            n_filter = disk(neighbor_radius)
            for s, size_threshold in enumerate(self.size_thresholds):
                period_obs = fftconvolve(self.period_obs[self.mrms_variable] >= self.obs_thresholds[s],
                                         n_filter, mode="same")
                period_obs[period_obs > 1] = 1
                if self.obs_mask and self.coordinate_file is None:
                    period_obs = period_obs[self.period_obs[self.mask_variable] > 0]
                elif self.obs_mask and self.coordinate_file is not None:
                    period_obs = period_obs[coord_mask[0], coord_mask[1]]
                else:
                    period_obs = period_obs.ravel()
                for smoothing_radius in self.smoothing_radii:
                    print("Eval period forecast {0} {1} {2} {3} {4} {5}".format(self.model_name,
                                                                                self.forecast_variable,
                                                                                self.run_date, neighbor_radius,
                                                                                size_threshold, smoothing_radius))
                    period_var = "neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}".format(
                        self.end_hour - self.start_hour + 1, neighbor_radius, smoothing_radius,
                        self.forecast_variable, size_threshold)
                    if self.obs_mask and self.coordinate_file is None:
                        period_forecast = self.period_forecasts[period_var][self.period_obs[self.mask_variable] > 0]
                    elif self.obs_mask and self.coordinate_file is not None:
                        period_forecast = self.period_forecasts[period_var][coord_mask[0], coord_mask[1]]
                    else:
                        period_forecast = self.period_forecasts[period_var].ravel()
                    roc = DistributedROC(thresholds=self.probability_levels, obs_threshold=0.5)
                    roc.update(period_forecast, period_obs)
                    rel = DistributedReliability(thresholds=self.probability_levels, obs_threshold=0.5)
                    rel.update(period_forecast, period_obs)
                    row = [self.run_date, self.ensemble_name, self.model_name, self.forecast_variable,
                           neighbor_radius, smoothing_radius, size_threshold, roc, rel]
                    all_scores.loc[period_var] = row
        return all_scores
[ "def", "evaluate_period_forecasts", "(", "self", ")", ":", "score_columns", "=", "[", "\"Run_Date\"", ",", "\"Ensemble Name\"", ",", "\"Model_Name\"", ",", "\"Forecast_Variable\"", ",", "\"Neighbor_Radius\"", ",", "\"Smoothing_Radius\"", ",", "\"Size_Threshold\"", ",", "\"ROC\"", ",", "\"Reliability\"", "]", "all_scores", "=", "pd", ".", "DataFrame", "(", "columns", "=", "score_columns", ")", "if", "self", ".", "coordinate_file", "is", "not", "None", ":", "coord_mask", "=", "np", ".", "where", "(", "(", "self", ".", "coordinates", "[", "\"lon\"", "]", ">=", "self", ".", "lon_bounds", "[", "0", "]", ")", "&", "(", "self", ".", "coordinates", "[", "\"lon\"", "]", "<=", "self", ".", "lon_bounds", "[", "1", "]", ")", "&", "(", "self", ".", "coordinates", "[", "\"lat\"", "]", ">=", "self", ".", "lat_bounds", "[", "0", "]", ")", "&", "(", "self", ".", "coordinates", "[", "\"lat\"", "]", "<=", "self", ".", "lat_bounds", "[", "1", "]", ")", "&", "(", "self", ".", "period_obs", "[", "self", ".", "mask_variable", "]", ">", "0", ")", ")", "else", ":", "coord_mask", "=", "None", "for", "neighbor_radius", "in", "self", ".", "neighbor_radii", ":", "n_filter", "=", "disk", "(", "neighbor_radius", ")", "for", "s", ",", "size_threshold", "in", "enumerate", "(", "self", ".", "size_thresholds", ")", ":", "period_obs", "=", "fftconvolve", "(", "self", ".", "period_obs", "[", "self", ".", "mrms_variable", "]", ">=", "self", ".", "obs_thresholds", "[", "s", "]", ",", "n_filter", ",", "mode", "=", "\"same\"", ")", "period_obs", "[", "period_obs", ">", "1", "]", "=", "1", "if", "self", ".", "obs_mask", "and", "self", ".", "coordinate_file", "is", "None", ":", "period_obs", "=", "period_obs", "[", "self", ".", "period_obs", "[", "self", ".", "mask_variable", "]", ">", "0", "]", "elif", "self", ".", "obs_mask", "and", "self", ".", "coordinate_file", "is", "not", "None", ":", "period_obs", "=", "period_obs", "[", "coord_mask", "[", "0", "]", ",", "coord_mask", "[", "1", "]", "]", "else", ":", "period_obs", "=", "period_obs", ".", "ravel", "(", ")", "for", "smoothing_radius", "in", "self", ".", "smoothing_radii", ":", "print", "(", "\"Eval period forecast {0} {1} {2} {3} {4} {5}\"", ".", "format", "(", "self", ".", "model_name", ",", "self", ".", "forecast_variable", ",", "self", ".", "run_date", ",", "neighbor_radius", ",", "size_threshold", ",", "smoothing_radius", ")", ")", "period_var", "=", "\"neighbor_prob_{0:d}-hour_r_{1:d}_s_{2:d}_{3}_{4:0.2f}\"", ".", "format", "(", "self", ".", "end_hour", "-", "self", ".", "start_hour", "+", "1", ",", "neighbor_radius", ",", "smoothing_radius", ",", "self", ".", "forecast_variable", ",", "size_threshold", ")", "if", "self", ".", "obs_mask", "and", "self", ".", "coordinate_file", "is", "None", ":", "period_forecast", "=", "self", ".", "period_forecasts", "[", "period_var", "]", "[", "self", ".", "period_obs", "[", "self", ".", "mask_variable", "]", ">", "0", "]", "elif", "self", ".", "obs_mask", "and", "self", ".", "coordinate_file", "is", "not", "None", ":", "period_forecast", "=", "self", ".", "period_forecasts", "[", "period_var", "]", "[", "coord_mask", "[", "0", "]", ",", "coord_mask", "[", "1", "]", "]", "else", ":", "period_forecast", "=", "self", ".", "period_forecasts", "[", "period_var", "]", ".", "ravel", "(", ")", "roc", "=", "DistributedROC", "(", "thresholds", "=", "self", ".", "probability_levels", ",", "obs_threshold", "=", "0.5", ")", "roc", ".", "update", "(", "period_forecast", ",", "period_obs", ")", "rel", "=", "DistributedReliability", "(", 
"thresholds", "=", "self", ".", "probability_levels", ",", "obs_threshold", "=", "0.5", ")", "rel", ".", "update", "(", "period_forecast", ",", "period_obs", ")", "row", "=", "[", "self", ".", "run_date", ",", "self", ".", "ensemble_name", ",", "self", ".", "model_name", ",", "self", ".", "forecast_variable", ",", "neighbor_radius", ",", "smoothing_radius", ",", "size_threshold", ",", "roc", ",", "rel", "]", "all_scores", ".", "loc", "[", "period_var", "]", "=", "row", "return", "all_scores" ]
Evaluates ROC and Reliability scores for forecasts over the full period from start hour to end hour

Returns:
    A pandas DataFrame with full-period metadata and verification statistics
[ "Evaluates", "ROC", "and", "Reliability", "scores", "for", "forecasts", "over", "the", "full", "period", "from", "start", "hour", "to", "end", "hour" ]
train
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/evaluation/NeighborEvaluator.py#L172-L227
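The coordinate-mask branch in evaluate_period_forecasts combines a lat-lon bounding box with the observation mask via np.where, then uses the resulting index arrays for fancy indexing. A self-contained sketch with tiny synthetic grids (all values below are invented):

import numpy as np

lon = np.array([[-105., -100.], [-95., -90.]])
lat = np.array([[35., 36.], [37., 38.]])
mask = np.array([[1, 0], [1, 1]])
lon_bounds, lat_bounds = (-101., -89.), (34., 39.)
coord_mask = np.where((lon >= lon_bounds[0]) & (lon <= lon_bounds[1]) &
                      (lat >= lat_bounds[0]) & (lat <= lat_bounds[1]) &
                      (mask > 0))
print(coord_mask)  # (array([1, 1]), array([0, 1]))
field = np.array([[0.1, 0.4], [0.7, 0.9]])
print(field[coord_mask[0], coord_mask[1]])  # [0.7 0.9]: only in-box, unmasked pixels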