_id: string (lengths 2 to 7)
title: string (lengths 1 to 88)
partition: string (3 classes)
text: string (lengths 75 to 19.8k)
language: string (1 class)
meta_information: dict
q23500
Env.cache_url
train
def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None): """Returns a config dictionary, defaulting to CACHE_URL. :rtype: dict """ return self.cache_url_config(self.url(var, default=default), backend=backend)
python
{ "resource": "" }
q23501
Env.email_url
train
def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None): """Returns a config dictionary, defaulting to EMAIL_URL. :rtype: dict """ return self.email_url_config(self.url(var, default=default), backend=backend)
python
{ "resource": "" }
q23502
Env.search_url
train
def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None): """Returns a config dictionary, defaulting to SEARCH_URL. :rtype: dict """ return self.search_url_config(self.url(var, default=default), engine=engine)
python
{ "resource": "" }
q23503
Env.parse_value
train
def parse_value(cls, value, cast): """Parse and cast provided value :param value: String value. :param cast: Type to cast return value as. :returns: Cast value """ if cast is None: return value elif cast is bool: try: value = int(value) != 0 except ValueError: value = value.lower() in cls.BOOLEAN_TRUE_STRINGS elif isinstance(cast, list): value = list(map(cast[0], [x for x in value.split(',') if x])) elif isinstance(cast, tuple): val = value.strip('(').strip(')').split(',') value = tuple(map(cast[0], [x for x in val if x])) elif isinstance(cast, dict): key_cast = cast.get('key', str) value_cast = cast.get('value', str) value_cast_by_key = cast.get('cast', dict()) value = dict(map( lambda kv: ( key_cast(kv[0]), cls.parse_value(kv[1], value_cast_by_key.get(kv[0], value_cast)) ), [val.split('=') for val in value.split(';') if val] )) elif cast is dict: value = dict([val.split('=') for val in value.split(',') if val]) elif cast is list: value = [x for x in value.split(',') if x] elif cast is tuple: val = value.strip('(').strip(')').split(',') value = tuple([x for x in val if x]) elif cast is float: # clean string float_str = re.sub(r'[^\d,\.]', '', value) # split to avoid thousand separators and locale-dependent comma/dot symbols parts = re.split(r'[,\.]', float_str) if len(parts) == 1: float_str = parts[0] else: float_str = "{0}.{1}".format(''.join(parts[0:-1]), parts[-1]) value = float(float_str) else: value = cast(value) return value
python
{ "resource": "" }
q23504
Env.db_url_config
train
def db_url_config(cls, url, engine=None): """Pulled from DJ-Database-URL, parse an arbitrary Database URL. Support currently exists for PostgreSQL, PostGIS, MySQL, Oracle and SQLite. SQLite connects to file based databases. The same URL format is used, omitting the hostname, and using the "file" portion as the filename of the database. This has the effect of four slashes being present for an absolute file path: >>> from environ import Env >>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite') {'ENGINE': 'django.db.backends.sqlite3', 'HOST': '', 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': '', 'PORT': '', 'USER': ''} >>> Env.db_url_config('postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn') {'ENGINE': 'django.db.backends.postgresql', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'} """ if not isinstance(url, cls.URL_CLASS): if url == 'sqlite://:memory:': # this is a special case, because if we pass this URL into # urlparse, urlparse will choke trying to interpret "memory" # as a port number return { 'ENGINE': cls.DB_SCHEMES['sqlite'], 'NAME': ':memory:' } # note: no other settings are required for sqlite url = urlparse(url) config = {} # Remove query strings. path = url.path[1:] path = unquote_plus(path.split('?', 2)[0]) if url.scheme == 'sqlite': if path == '': # if we are using sqlite and we have no path, then assume we # want an in-memory database (this is the behaviour of sqlalchemy) path = ':memory:' if url.netloc: warnings.warn( 'SQLite URL contains host component %r, it will be ignored' % url.netloc, stacklevel=3) if url.scheme == 'ldap': path = '{scheme}://{hostname}'.format(scheme=url.scheme, hostname=url.hostname) if url.port: path += ':{port}'.format(port=url.port) # Update with environment configuration. config.update({ 'NAME': path or '', 'USER': _cast_urlstr(url.username) or '', 'PASSWORD': _cast_urlstr(url.password) or '', 'HOST': url.hostname or '', 'PORT': _cast_int(url.port) or '', }) if url.scheme == 'postgres' and path.startswith('/'): config['HOST'], config['NAME'] = path.rsplit('/', 1) if url.scheme == 'oracle' and path == '': config['NAME'] = config['HOST'] config['HOST'] = '' if url.scheme == 'oracle': # Django oracle/base.py strips port and fails on non-string value if not config['PORT']: del(config['PORT']) else: config['PORT'] = str(config['PORT']) if url.query: config_options = {} for k, v in parse_qs(url.query).items(): if k.upper() in cls._DB_BASE_OPTIONS: config.update({k.upper(): _cast(v[0])}) else: config_options.update({k: _cast_int(v[0])}) config['OPTIONS'] = config_options if engine: config['ENGINE'] = engine else: config['ENGINE'] = url.scheme if config['ENGINE'] in Env.DB_SCHEMES: config['ENGINE'] = Env.DB_SCHEMES[config['ENGINE']] if not config.get('ENGINE', False): warnings.warn("Engine not recognized from url: {0}".format(config)) return {} return config
python
{ "resource": "" }
q23505
Env.cache_url_config
train
def cache_url_config(cls, url, backend=None): """Pulled from DJ-Cache-URL, parse an arbitrary Cache URL. :param url: Cache URL to parse. :param backend: Optional backend to override the one derived from the URL scheme. :return: Config dictionary. """ url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url location = url.netloc.split(',') if len(location) == 1: location = location[0] config = { 'BACKEND': cls.CACHE_SCHEMES[url.scheme], 'LOCATION': location, } # Add the drive to LOCATION if url.scheme == 'filecache': config.update({ 'LOCATION': url.netloc + url.path, }) if url.path and url.scheme in ['memcache', 'pymemcache']: config.update({ 'LOCATION': 'unix:' + url.path, }) elif url.scheme.startswith('redis'): if url.hostname: scheme = url.scheme.replace('cache', '') else: scheme = 'unix' locations = [scheme + '://' + loc + url.path for loc in url.netloc.split(',')] config['LOCATION'] = locations[0] if len(locations) == 1 else locations if url.query: config_options = {} for k, v in parse_qs(url.query).items(): opt = {k.upper(): _cast(v[0])} if k.upper() in cls._CACHE_BASE_OPTIONS: config.update(opt) else: config_options.update(opt) config['OPTIONS'] = config_options if backend: config['BACKEND'] = backend return config
python
{ "resource": "" }
q23506
Path.path
train
def path(self, *paths, **kwargs): """Create new Path based on self.root and provided paths. :param paths: List of sub paths :param kwargs: required=False :rtype: Path """ return self.__class__(self.__root__, *paths, **kwargs)
python
{ "resource": "" }
q23507
warn_deprecated
train
def warn_deprecated(since, message='', name='', alternative='', pending=False, obj_type='attribute', addendum=''): """Display deprecation warning in a standard way. Parameters ---------- since : str The release at which this API became deprecated. message : str, optional Override the default deprecation message. The format specifier `%(name)s` may be used for the name of the function, and `%(alternative)s` may be used in the deprecation message to insert the name of an alternative to the deprecated function. `%(obj_type)s` may be used to insert a friendly name for the type of object being deprecated. name : str, optional The name of the deprecated object. alternative : str, optional An alternative function that the user may use in place of the deprecated function. The deprecation warning will tell the user about this alternative if provided. pending : bool, optional If True, uses a PendingDeprecationWarning instead of a DeprecationWarning. obj_type : str, optional The object type being deprecated. addendum : str, optional Additional text appended directly to the final message. Examples -------- Basic example:: # To warn of the deprecation of "metpy.name_of_module" warn_deprecated('0.6.0', name='metpy.name_of_module', obj_type='module') """ message = _generate_deprecation_message(since, message, name, alternative, pending, obj_type) warnings.warn(message, metpyDeprecation, stacklevel=1)
python
{ "resource": "" }
q23508
generate_grid
train
def generate_grid(horiz_dim, bbox): r"""Generate a meshgrid based on bounding box and x & y resolution. Parameters ---------- horiz_dim: integer Horizontal resolution bbox: dictionary Dictionary containing coordinates for corners of study area. Returns ------- grid_x: (X, Y) ndarray X dimension meshgrid defined by given bounding box grid_y: (X, Y) ndarray Y dimension meshgrid defined by given bounding box """ x_steps, y_steps = get_xy_steps(bbox, horiz_dim) grid_x = np.linspace(bbox['west'], bbox['east'], x_steps) grid_y = np.linspace(bbox['south'], bbox['north'], y_steps) gx, gy = np.meshgrid(grid_x, grid_y) return gx, gy
python
{ "resource": "" }
q23509
generate_grid_coords
train
def generate_grid_coords(gx, gy): r"""Calculate x,y coordinates of each grid cell. Parameters ---------- gx: numeric x coordinates in meshgrid gy: numeric y coordinates in meshgrid Returns ------- (X, Y) ndarray List of coordinates in meshgrid """ return np.vstack([gx.ravel(), gy.ravel()]).T
python
{ "resource": "" }
q23510
get_xy_range
train
def get_xy_range(bbox): r"""Return x and y ranges in meters based on bounding box. bbox: dictionary dictionary containing coordinates for corners of study area Returns ------- x_range: float Range in meters in x dimension. y_range: float Range in meters in y dimension. """ x_range = bbox['east'] - bbox['west'] y_range = bbox['north'] - bbox['south'] return x_range, y_range
python
{ "resource": "" }
q23511
get_xy_steps
train
def get_xy_steps(bbox, h_dim): r"""Return meshgrid spacing based on bounding box. bbox: dictionary Dictionary containing coordinates for corners of study area. h_dim: integer Horizontal resolution in meters. Returns ------- x_steps: int Number of grid steps in x dimension. y_steps: int Number of grid steps in y dimension. """ x_range, y_range = get_xy_range(bbox) x_steps = np.ceil(x_range / h_dim) y_steps = np.ceil(y_range / h_dim) return int(x_steps), int(y_steps)
python
{ "resource": "" }
q23512
get_boundary_coords
train
def get_boundary_coords(x, y, spatial_pad=0): r"""Return bounding box based on given x and y coordinates assuming northern hemisphere. x: numeric x coordinates. y: numeric y coordinates. spatial_pad: numeric Number of meters to add to the x and y dimensions to reduce edge effects. Returns ------- bbox: dictionary dictionary containing coordinates for corners of study area """ west = np.min(x) - spatial_pad east = np.max(x) + spatial_pad north = np.max(y) + spatial_pad south = np.min(y) - spatial_pad return {'west': west, 'south': south, 'east': east, 'north': north}
python
{ "resource": "" }
q23513
natural_neighbor_to_grid
train
def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y): r"""Generate a natural neighbor interpolation of the given points to a regular grid. This assigns values to the given grid using the Liang and Hale [Liang2010]_ approach. Parameters ---------- xp: (N, ) ndarray x-coordinates of observations yp: (N, ) ndarray y-coordinates of observations variable: (N, ) ndarray observation values associated with (xp, yp) pairs. IE, variable[i] is a unique observation at (xp[i], yp[i]) grid_x: (M, 2) ndarray Meshgrid associated with x dimension grid_y: (M, 2) ndarray Meshgrid associated with y dimension Returns ------- img: (M, N) ndarray Interpolated values on a 2-dimensional grid See Also -------- natural_neighbor_to_points """ # Handle grid-to-points conversion, and use function from `interpolation` points_obs = list(zip(xp, yp)) points_grid = generate_grid_coords(grid_x, grid_y) img = natural_neighbor_to_points(points_obs, variable, points_grid) return img.reshape(grid_x.shape)
python
{ "resource": "" }
q23514
natural_neighbor
train
def natural_neighbor(xp, yp, variable, grid_x, grid_y): """Wrap natural_neighbor_to_grid for deprecated natural_neighbor function.""" return natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y)
python
{ "resource": "" }
q23515
inverse_distance_to_grid
train
def inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None, min_neighbors=3, kind='cressman'): r"""Generate an inverse distance interpolation of the given points to a regular grid. Values are assigned to the given grid using inverse distance weighting based on either [Cressman1959]_ or [Barnes1964]_. The Barnes implementation used here is based on [Koch1983]_. Parameters ---------- xp: (N, ) ndarray x-coordinates of observations. yp: (N, ) ndarray y-coordinates of observations. variable: (N, ) ndarray observation values associated with (xp, yp) pairs. IE, variable[i] is a unique observation at (xp[i], yp[i]). grid_x: (M, 2) ndarray Meshgrid associated with x dimension. grid_y: (M, 2) ndarray Meshgrid associated with y dimension. r: float Radius from grid center, within which observations are considered and weighted. gamma: float Adjustable smoothing parameter for the barnes interpolation. Default None. kappa: float Response parameter for barnes interpolation. Default None. min_neighbors: int Minimum number of neighbors needed to perform barnes or cressman interpolation for a point. Default is 3. kind: str Specify what inverse distance weighting interpolation to use. Options: 'cressman' or 'barnes'. Default 'cressman' Returns ------- img: (M, N) ndarray Interpolated values on a 2-dimensional grid See Also -------- inverse_distance_to_points """ # Handle grid-to-points conversion, and use function from `interpolation` points_obs = list(zip(xp, yp)) points_grid = generate_grid_coords(grid_x, grid_y) img = inverse_distance_to_points(points_obs, variable, points_grid, r, gamma=gamma, kappa=kappa, min_neighbors=min_neighbors, kind=kind) return img.reshape(grid_x.shape)
python
{ "resource": "" }
q23516
inverse_distance
train
def inverse_distance(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None, min_neighbors=3, kind='cressman'): """Wrap inverse_distance_to_grid for deprecated inverse_distance function.""" return inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=gamma, kappa=kappa, min_neighbors=min_neighbors, kind=kind)
python
{ "resource": "" }
q23517
interpolate_to_isosurface
train
def interpolate_to_isosurface(level_var, interp_var, level, **kwargs): r"""Linear interpolation of a variable to a given vertical level from given values. This function assumes that highest vertical level (lowest pressure) is zeroth index. A classic use of this function would be to compute the potential temperature on the dynamic tropopause (2 PVU surface). Parameters ---------- level_var: array_like (P, M, N) Level values in 3D grid on common vertical coordinate (e.g., PV values on isobaric levels). Assumes height dimension is highest to lowest in atmosphere. interp_var: array_like (P, M, N) Variable on 3D grid with same vertical coordinate as level_var to interpolate to given level (e.g., potential temperature on isobaric levels) level: int or float Desired interpolated level (e.g., 2 PVU surface) Other Parameters ---------------- bottom_up_search : bool, optional Controls whether to search for levels bottom-up, or top-down. Defaults to True, which is bottom-up search. Returns ------- interp_level: (M, N) ndarray The interpolated variable (e.g., potential temperature) on the desired level (e.g., 2 PVU surface) Notes ----- This function implements a linear interpolation to estimate values on a given surface. The prototypical example is interpolation of potential temperature to the dynamic tropopause (e.g., 2 PVU surface) """ # Change when Python 2.7 no longer supported # Pull out keyword arguments bottom_up_search = kwargs.pop('bottom_up_search', True) # Find index values above and below desired interpolated surface value above, below, good = metpy.calc.find_bounding_indices(level_var, [level], axis=0, from_below=bottom_up_search) # Linear interpolation of variable to interpolated surface value interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above])) * (interp_var[below] - interp_var[above])) + interp_var[above] # Handle missing values and instances where no values for surface exist above and below interp_level[~good] = np.nan minvar = (np.min(level_var, axis=0) >= level) maxvar = (np.max(level_var, axis=0) <= level) interp_level[0][minvar] = interp_var[-1][minvar] interp_level[0][maxvar] = interp_var[0][maxvar] return interp_level.squeeze()
python
{ "resource": "" }
q23518
interpolate
train
def interpolate(x, y, z, interp_type='linear', hres=50000, minimum_neighbors=3, gamma=0.25, kappa_star=5.052, search_radius=None, rbf_func='linear', rbf_smooth=0, boundary_coords=None): """Wrap interpolate_to_grid for deprecated interpolate function.""" return interpolate_to_grid(x, y, z, interp_type=interp_type, hres=hres, minimum_neighbors=minimum_neighbors, gamma=gamma, kappa_star=kappa_star, search_radius=search_radius, rbf_func=rbf_func, rbf_smooth=rbf_smooth, boundary_coords=boundary_coords)
python
{ "resource": "" }
q23519
interpolate_nans_1d
train
def interpolate_nans_1d(x, y, kind='linear'): """Interpolate NaN values in y. Interpolate NaN values in the y dimension. Works with unsorted x values. Parameters ---------- x : array-like 1-dimensional array of numeric x-values y : array-like 1-dimensional array of numeric y-values kind : string specifies the kind of interpolation x coordinate - 'linear' or 'log', optional. Defaults to 'linear'. Returns ------- An array of the y coordinate data with NaN values interpolated. """ x_sort_args = np.argsort(x) x = x[x_sort_args] y = y[x_sort_args] nans = np.isnan(y) if kind == 'linear': y[nans] = np.interp(x[nans], x[~nans], y[~nans]) elif kind == 'log': y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans]) else: raise ValueError('Unknown option for kind: {0}'.format(str(kind))) return y[x_sort_args]
python
{ "resource": "" }
q23520
interpolate_1d
train
def interpolate_1d(x, xp, *args, **kwargs): r"""Interpolates data with any shape over a specified axis. Interpolation over a specified axis for arrays of any shape. Parameters ---------- x : array-like 1-D array of desired interpolated values. xp : array-like The x-coordinates of the data points. args : array-like The data to be interpolated. Can be multiple arguments, all must be the same shape as xp. axis : int, optional The axis to interpolate over. Defaults to 0. fill_value: float, optional Specify handling of interpolation points out of data bounds. If None, will raise ValueError if points are out of bounds. Defaults to nan. Returns ------- array-like Interpolated values for each point with coordinates sorted in ascending order. Examples -------- >>> x = np.array([1., 2., 3., 4.]) >>> y = np.array([1., 2., 3., 4.]) >>> x_interp = np.array([2.5, 3.5]) >>> metpy.calc.interp(x_interp, x, y) array([2.5, 3.5]) Notes ----- xp and args must be the same shape. """ # Pull out keyword args fill_value = kwargs.pop('fill_value', np.nan) axis = kwargs.pop('axis', 0) # Make x an array x = np.asanyarray(x).reshape(-1) # Save number of dimensions in xp ndim = xp.ndim # Sort input data sort_args = np.argsort(xp, axis=axis) sort_x = np.argsort(x) # indices for sorting sorter = broadcast_indices(xp, sort_args, ndim, axis) # sort xp xp = xp[sorter] # Ensure pressure in increasing order variables = [arr[sorter] for arr in args] # Make x broadcast with xp x_array = x[sort_x] expand = [np.newaxis] * ndim expand[axis] = slice(None) x_array = x_array[tuple(expand)] # Calculate value above interpolated value minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x]) minv2 = np.copy(minv) # If fill_value is None and data is out of bounds, raise value error if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == 0)) and fill_value is None: raise ValueError('Interpolation point out of data bounds encountered') # Warn if interpolated values are outside data bounds, will make these the values # at end of data range. if np.max(minv) == xp.shape[axis]: warnings.warn('Interpolation point out of data bounds encountered') minv2[minv == xp.shape[axis]] = xp.shape[axis] - 1 if np.min(minv) == 0: minv2[minv == 0] = 1 # Get indices for broadcasting arrays above = broadcast_indices(xp, minv2, ndim, axis) below = broadcast_indices(xp, minv2 - 1, ndim, axis) if np.any(x_array < xp[below]): warnings.warn('Interpolation point out of data bounds encountered') # Create empty output list ret = [] # Calculate interpolation for each variable for var in variables: # Var needs to be on the *left* of the multiply to ensure that if it's a pint # Quantity, it gets to control the operation--at least until we make sure # masked arrays and pint play together better. See https://github.com/hgrecco/pint#633 var_interp = var[below] + (var[above] - var[below]) * ((x_array - xp[below]) / (xp[above] - xp[below])) # Set points out of bounds to fill value. var_interp[minv == xp.shape[axis]] = fill_value var_interp[x_array < xp[below]] = fill_value # Check for input points in decreasing order and return output to match. if x[0] > x[-1]: var_interp = np.swapaxes(np.swapaxes(var_interp, 0, axis)[::-1], 0, axis) # Output to list ret.append(var_interp) if len(ret) == 1: return ret[0] else: return ret
python
{ "resource": "" }
q23521
log_interpolate_1d
train
def log_interpolate_1d(x, xp, *args, **kwargs): r"""Interpolates data with logarithmic x-scale over a specified axis. Interpolation on a logarithmic x-scale for interpolation values in pressure coordinates. Parameters ---------- x : array-like 1-D array of desired interpolated values. xp : array-like The x-coordinates of the data points. args : array-like The data to be interpolated. Can be multiple arguments, all must be the same shape as xp. axis : int, optional The axis to interpolate over. Defaults to 0. fill_value: float, optional Specify handling of interpolation points out of data bounds. If None, will raise ValueError if points are out of bounds. Defaults to nan. Returns ------- array-like Interpolated values for each point with coordinates sorted in ascending order. Examples -------- >>> x_log = np.array([1e3, 1e4, 1e5, 1e6]) >>> y_log = np.log(x_log) * 2 + 3 >>> x_interp = np.array([5e3, 5e4, 5e5]) >>> metpy.calc.log_interp(x_interp, x_log, y_log) array([20.03438638, 24.63955657, 29.24472675]) Notes ----- xp and args must be the same shape. """ # Pull out kwargs fill_value = kwargs.pop('fill_value', np.nan) axis = kwargs.pop('axis', 0) # Log x and xp log_x = np.log(x) log_xp = np.log(xp) return interpolate_1d(log_x, log_xp, *args, axis=axis, fill_value=fill_value)
python
{ "resource": "" }
q23522
distances_from_cross_section
train
def distances_from_cross_section(cross): """Calculate the distances in the x and y directions along a cross-section. Parameters ---------- cross : `xarray.DataArray` The input DataArray of a cross-section from which to obtain geometric distances in the x and y directions. Returns ------- x, y : tuple of `xarray.DataArray` A tuple of the x and y distances as DataArrays """ if (CFConventionHandler.check_axis(cross.metpy.x, 'lon') and CFConventionHandler.check_axis(cross.metpy.y, 'lat')): # Use pyproj to obtain x and y distances from pyproj import Geod g = Geod(cross.metpy.cartopy_crs.proj4_init) lon = cross.metpy.x lat = cross.metpy.y forward_az, _, distance = g.inv(lon[0].values * np.ones_like(lon), lat[0].values * np.ones_like(lat), lon.values, lat.values) x = distance * np.sin(np.deg2rad(forward_az)) y = distance * np.cos(np.deg2rad(forward_az)) # Build into DataArrays x = xr.DataArray(x, coords=lon.coords, dims=lon.dims, attrs={'units': 'meters'}) y = xr.DataArray(y, coords=lat.coords, dims=lat.dims, attrs={'units': 'meters'}) elif (CFConventionHandler.check_axis(cross.metpy.x, 'x') and CFConventionHandler.check_axis(cross.metpy.y, 'y')): # Simply return what we have x = cross.metpy.x y = cross.metpy.y else: raise AttributeError('Sufficient horizontal coordinates not defined.') return x, y
python
{ "resource": "" }
q23523
latitude_from_cross_section
train
def latitude_from_cross_section(cross): """Calculate the latitude of points in a cross-section. Parameters ---------- cross : `xarray.DataArray` The input DataArray of a cross-section from which to obtain latitudes. Returns ------- latitude : `xarray.DataArray` Latitude of points """ y = cross.metpy.y if CFConventionHandler.check_axis(y, 'lat'): return y else: import cartopy.crs as ccrs latitude = ccrs.Geodetic().transform_points(cross.metpy.cartopy_crs, cross.metpy.x.values, y.values)[..., 1] latitude = xr.DataArray(latitude, coords=y.coords, dims=y.dims, attrs={'units': 'degrees_north'}) return latitude
python
{ "resource": "" }
q23524
unit_vectors_from_cross_section
train
def unit_vectors_from_cross_section(cross, index='index'): r"""Calculate the unit tangent and unit normal vectors from a cross-section. Given a path described parametrically by :math:`\vec{l}(i) = (x(i), y(i))`, we can find the unit tangent vector by the formula .. math:: \vec{T}(i) = \frac{1}{\sqrt{\left( \frac{dx}{di} \right)^2 + \left( \frac{dy}{di} \right)^2}} \left( \frac{dx}{di}, \frac{dy}{di} \right) From this, because this is a two-dimensional path, the normal vector can be obtained by a simple :math:`\frac{\pi}{2}` rotation. Parameters ---------- cross : `xarray.DataArray` The input DataArray of a cross-section from which to obtain latitudes. index : `str`, optional A string denoting the index coordinate of the cross section, defaults to 'index' as set by `metpy.interpolate.cross_section`. Returns ------- unit_tangent_vector, unit_normal_vector : tuple of `numpy.ndarray` Arrays describing the unit tangent and unit normal vectors (in x,y) for all points along the cross section. """ x, y = distances_from_cross_section(cross) dx_di = first_derivative(x, axis=index).values dy_di = first_derivative(y, axis=index).values tangent_vector_mag = np.hypot(dx_di, dy_di) unit_tangent_vector = np.vstack([dx_di / tangent_vector_mag, dy_di / tangent_vector_mag]) unit_normal_vector = np.vstack([-dy_di / tangent_vector_mag, dx_di / tangent_vector_mag]) return unit_tangent_vector, unit_normal_vector
python
{ "resource": "" }
q23525
cross_section_components
train
def cross_section_components(data_x, data_y, index='index'): r"""Obtain the tangential and normal components of a cross-section of a vector field. Parameters ---------- data_x : `xarray.DataArray` The input DataArray of the x-component (in terms of data projection) of the vector field. data_y : `xarray.DataArray` The input DataArray of the y-component (in terms of data projection) of the vector field. Returns ------- component_tangential, component_normal: tuple of `xarray.DataArray` The components of the vector field in the tangential and normal directions, respectively. See Also -------- tangential_component, normal_component Notes ----- The coordinates of `data_x` and `data_y` must match. """ # Get the unit vectors unit_tang, unit_norm = unit_vectors_from_cross_section(data_x, index=index) # Take the dot products component_tang = data_x * unit_tang[0] + data_y * unit_tang[1] component_norm = data_x * unit_norm[0] + data_y * unit_norm[1] # Reattach units (only reliable attribute after operation) component_tang.attrs = {'units': data_x.attrs['units']} component_norm.attrs = {'units': data_x.attrs['units']} return component_tang, component_norm
python
{ "resource": "" }
q23526
normal_component
train
def normal_component(data_x, data_y, index='index'): r"""Obtain the normal component of a cross-section of a vector field. Parameters ---------- data_x : `xarray.DataArray` The input DataArray of the x-component (in terms of data projection) of the vector field. data_y : `xarray.DataArray` The input DataArray of the y-component (in terms of data projection) of the vector field. Returns ------- component_normal: `xarray.DataArray` The component of the vector field in the normal directions. See Also -------- cross_section_components, tangential_component Notes ----- The coordinates of `data_x` and `data_y` must match. """ # Get the unit vectors _, unit_norm = unit_vectors_from_cross_section(data_x, index=index) # Take the dot products component_norm = data_x * unit_norm[0] + data_y * unit_norm[1] # Reattach only reliable attributes after operation for attr in ('units', 'grid_mapping'): if attr in data_x.attrs: component_norm.attrs[attr] = data_x.attrs[attr] return component_norm
python
{ "resource": "" }
q23527
tangential_component
train
def tangential_component(data_x, data_y, index='index'): r"""Obtain the tangential component of a cross-section of a vector field. Parameters ---------- data_x : `xarray.DataArray` The input DataArray of the x-component (in terms of data projection) of the vector field. data_y : `xarray.DataArray` The input DataArray of the y-component (in terms of data projection) of the vector field. Returns ------- component_tangential: `xarray.DataArray` The component of the vector field in the tangential directions. See Also -------- cross_section_components, normal_component Notes ----- The coordinates of `data_x` and `data_y` must match. """ # Get the unit vectors unit_tang, _ = unit_vectors_from_cross_section(data_x, index=index) # Take the dot products component_tang = data_x * unit_tang[0] + data_y * unit_tang[1] # Reattach only reliable attributes after operation for attr in ('units', 'grid_mapping'): if attr in data_x.attrs: component_tang.attrs[attr] = data_x.attrs[attr] return component_tang
python
{ "resource": "" }
q23528
read_colortable
train
def read_colortable(fobj): r"""Read colortable information from a file. Reads a colortable, which consists of one color per line of the file, where a color can be one of: a tuple of 3 floats, a string with a HTML color name, or a string with a HTML hex color. Parameters ---------- fobj : a file-like object A file-like object to read the colors from Returns ------- List of tuples A list of the RGB color values, where each RGB color is a tuple of 3 floats in the range of [0, 1]. """ ret = [] try: for line in fobj: literal = _parse(line) if literal: ret.append(mcolors.colorConverter.to_rgb(literal)) return ret except (SyntaxError, ValueError): raise RuntimeError('Malformed colortable.')
python
{ "resource": "" }
q23529
convert_gempak_table
train
def convert_gempak_table(infile, outfile): r"""Convert a GEMPAK color table to one MetPy can read. Reads lines from a GEMPAK-style color table file, and writes them to another file in a format that MetPy can parse. Parameters ---------- infile : file-like object The file-like object to read from outfile : file-like object The file-like object to write to """ for line in infile: if not line.startswith('!') and line.strip(): r, g, b = map(int, line.split()) outfile.write('({0:f}, {1:f}, {2:f})\n'.format(r / 255, g / 255, b / 255))
python
{ "resource": "" }
q23530
ColortableRegistry.scan_resource
train
def scan_resource(self, pkg, path): r"""Scan a resource directory for colortable files and add them to the registry. Parameters ---------- pkg : str The package containing the resource directory path : str The path to the directory with the color tables """ for fname in resource_listdir(pkg, path): if fname.endswith(TABLE_EXT): table_path = posixpath.join(path, fname) with contextlib.closing(resource_stream(pkg, table_path)) as stream: self.add_colortable(stream, posixpath.splitext(posixpath.basename(fname))[0])
python
{ "resource": "" }
q23531
ColortableRegistry.scan_dir
train
def scan_dir(self, path): r"""Scan a directory on disk for color table files and add them to the registry. Parameters ---------- path : str The path to the directory with the color tables """ for fname in glob.glob(os.path.join(path, '*' + TABLE_EXT)): if os.path.isfile(fname): with open(fname, 'r') as fobj: try: self.add_colortable(fobj, os.path.splitext(os.path.basename(fname))[0]) log.debug('Added colortable from file: %s', fname) except RuntimeError: # If we get a file we can't handle, assume we weren't meant to. log.info('Skipping unparsable file: %s', fname)
python
{ "resource": "" }
q23532
ColortableRegistry.add_colortable
train
def add_colortable(self, fobj, name): r"""Add a color table from a file to the registry. Parameters ---------- fobj : file-like object The file to read the color table from name : str The name under which the color table will be stored """ self[name] = read_colortable(fobj) self[name + '_r'] = self[name][::-1]
python
{ "resource": "" }
q23533
cressman_point
train
def cressman_point(sq_dist, values, radius): r"""Generate a Cressman interpolation value for a point. The calculated value is based on the given distances and search radius. Parameters ---------- sq_dist: (N, ) ndarray Squared distance between observations and grid point values: (N, ) ndarray Observation values in same order as sq_dist radius: float Maximum distance to search for observations to use for interpolation. Returns ------- value: float Interpolation value for grid point. """ weights = tools.cressman_weights(sq_dist, radius) total_weights = np.sum(weights) return sum(v * (w / total_weights) for (w, v) in zip(weights, values))
python
{ "resource": "" }
q23534
barnes_point
train
def barnes_point(sq_dist, values, kappa, gamma=None): r"""Generate a single pass barnes interpolation value for a point. The calculated value is based on the given distances, kappa and gamma values. Parameters ---------- sq_dist: (N, ) ndarray Squared distance between observations and grid point values: (N, ) ndarray Observation values in same order as sq_dist kappa: float Response parameter for barnes interpolation. gamma: float Adjustable smoothing parameter for the barnes interpolation. Default 1. Returns ------- value: float Interpolation value for grid point. """ if gamma is None: gamma = 1 weights = tools.barnes_weights(sq_dist, kappa, gamma) total_weights = np.sum(weights) return sum(v * (w / total_weights) for (w, v) in zip(weights, values))
python
{ "resource": "" }
q23535
natural_neighbor_point
train
def natural_neighbor_point(xp, yp, variable, grid_loc, tri, neighbors, triangle_info): r"""Generate a natural neighbor interpolation of the observations to the given point. This uses the Liang and Hale approach [Liang2010]_. The interpolation will fail if the grid point has no natural neighbors. Parameters ---------- xp: (N, ) ndarray x-coordinates of observations yp: (N, ) ndarray y-coordinates of observations variable: (N, ) ndarray observation values associated with (xp, yp) pairs. IE, variable[i] is a unique observation at (xp[i], yp[i]) grid_loc: (float, float) Coordinates of the grid point at which to calculate the interpolation. tri: object Delaunay triangulation of the observations. neighbors: (N, ) ndarray Simplex codes of the grid point's natural neighbors. The codes will correspond to codes in the triangulation. triangle_info: dictionary Pre-calculated triangle attributes for quick look ups. Requires items 'cc' (circumcenters) and 'r' (radii) to be associated with each simplex code key from the delaunay triangulation. Returns ------- value: float Interpolated value for the grid location """ edges = geometry.find_local_boundary(tri, neighbors) edge_vertices = [segment[0] for segment in geometry.order_edges(edges)] num_vertices = len(edge_vertices) p1 = edge_vertices[0] p2 = edge_vertices[1] c1 = geometry.circumcenter(grid_loc, tri.points[p1], tri.points[p2]) polygon = [c1] area_list = [] total_area = 0.0 for i in range(num_vertices): p3 = edge_vertices[(i + 2) % num_vertices] try: c2 = geometry.circumcenter(grid_loc, tri.points[p3], tri.points[p2]) polygon.append(c2) for check_tri in neighbors: if p2 in tri.simplices[check_tri]: polygon.append(triangle_info[check_tri]['cc']) pts = [polygon[i] for i in ConvexHull(polygon).vertices] value = variable[(tri.points[p2][0] == xp) & (tri.points[p2][1] == yp)] cur_area = geometry.area(pts) total_area += cur_area area_list.append(cur_area * value[0]) except (ZeroDivisionError, qhull.QhullError) as e: message = ('Error during processing of a grid. ' 'Interpolation will continue but be mindful ' 'of errors in output. ') + str(e) log.warning(message) return np.nan polygon = [c2] p2 = p3 return sum(x / total_area for x in area_list)
python
{ "resource": "" }
q23536
natural_neighbor_to_points
train
def natural_neighbor_to_points(points, values, xi): r"""Generate a natural neighbor interpolation to the given points. This assigns values to the given interpolation points using the Liang and Hale [Liang2010]_ approach. Parameters ---------- points: array_like, shape (n, 2) Coordinates of the data points. values: array_like, shape (n,) Values of the data points. xi: array_like, shape (M, 2) Points to interpolate the data onto. Returns ------- img: (M,) ndarray Array representing the interpolated values for each input point in `xi` See Also -------- natural_neighbor_to_grid """ tri = Delaunay(points) members, triangle_info = geometry.find_natural_neighbors(tri, xi) img = np.empty(shape=(xi.shape[0]), dtype=values.dtype) img.fill(np.nan) for ind, (grid, neighbors) in enumerate(members.items()): if len(neighbors) > 0: points_transposed = np.array(points).transpose() img[ind] = natural_neighbor_point(points_transposed[0], points_transposed[1], values, xi[grid], tri, neighbors, triangle_info) return img
python
{ "resource": "" }
q23537
inverse_distance_to_points
train
def inverse_distance_to_points(points, values, xi, r, gamma=None, kappa=None, min_neighbors=3, kind='cressman'): r"""Generate an inverse distance weighting interpolation to the given points. Values are assigned to the given interpolation points based on either [Cressman1959]_ or [Barnes1964]_. The Barnes implementation used here is based on [Koch1983]_. Parameters ---------- points: array_like, shape (n, 2) Coordinates of the data points. values: array_like, shape (n,) Values of the data points. xi: array_like, shape (M, 2) Points to interpolate the data onto. r: float Radius from grid center, within which observations are considered and weighted. gamma: float Adjustable smoothing parameter for the barnes interpolation. Default None. kappa: float Response parameter for barnes interpolation. Default None. min_neighbors: int Minimum number of neighbors needed to perform barnes or cressman interpolation for a point. Default is 3. kind: str Specify what inverse distance weighting interpolation to use. Options: 'cressman' or 'barnes'. Default 'cressman' Returns ------- img: (M,) ndarray Array representing the interpolated values for each input point in `xi` See Also -------- inverse_distance_to_grid """ obs_tree = cKDTree(points) indices = obs_tree.query_ball_point(xi, r=r) img = np.empty(shape=(xi.shape[0]), dtype=values.dtype) img.fill(np.nan) for idx, (matches, grid) in enumerate(zip(indices, xi)): if len(matches) >= min_neighbors: x1, y1 = obs_tree.data[matches].T values_subset = values[matches] dists = geometry.dist_2(grid[0], grid[1], x1, y1) if kind == 'cressman': img[idx] = cressman_point(dists, values_subset, r) elif kind == 'barnes': img[idx] = barnes_point(dists, values_subset, kappa, gamma) else: raise ValueError(str(kind) + ' interpolation not supported.') return img
python
{ "resource": "" }
q23538
interpolate_to_points
train
def interpolate_to_points(points, values, xi, interp_type='linear', minimum_neighbors=3, gamma=0.25, kappa_star=5.052, search_radius=None, rbf_func='linear', rbf_smooth=0): r"""Interpolate unstructured point data to the given points. This function interpolates the given `values` valid at `points` to the points `xi`. This is modeled after `scipy.interpolate.griddata`, but acts as a generalization of it by including the following types of interpolation: - Linear - Nearest Neighbor - Cubic - Radial Basis Function - Natural Neighbor (2D Only) - Barnes (2D Only) - Cressman (2D Only) Parameters ---------- points: array_like, shape (n, D) Coordinates of the data points. values: array_like, shape (n,) Values of the data points. xi: array_like, shape (M, D) Points to interpolate the data onto. interp_type: str What type of interpolation to use. Available options include: 1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`. 2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`. Default "linear". minimum_neighbors: int Minimum number of neighbors needed to perform barnes or cressman interpolation for a point. Default is 3. gamma: float Adjustable smoothing parameter for the barnes interpolation. Default 0.25. kappa_star: float Response parameter for barnes interpolation, specified nondimensionally in terms of the Nyquist. Default 5.052 search_radius: float A search radius to use for the barnes and cressman interpolation schemes. If search_radius is not specified, it will default to the average spacing of observations. rbf_func: str Specifies which function to use for Rbf interpolation. Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic', 'quintic', and 'thin_plate'. Default 'linear'. See `scipy.interpolate.Rbf` for more information. rbf_smooth: float Smoothing value applied to rbf interpolation. Higher values result in more smoothing. Returns ------- values_interpolated: (M,) ndarray Array representing the interpolated values for each input point in `xi`. Notes ----- This function primarily acts as a wrapper for the individual interpolation routines. The individual functions are also available for direct use. See Also -------- interpolate_to_grid """ # If this is a type that `griddata` handles, hand it along to `griddata` if interp_type in ['linear', 'nearest', 'cubic']: return griddata(points, values, xi, method=interp_type) # If this is natural neighbor, hand it along to `natural_neighbor` elif interp_type == 'natural_neighbor': return natural_neighbor_to_points(points, values, xi) # If this is Barnes/Cressman, determine search_radius and hand it along to # `inverse_distance` elif interp_type in ['cressman', 'barnes']: ave_spacing = cdist(points, points).mean() if search_radius is None: search_radius = ave_spacing if interp_type == 'cressman': return inverse_distance_to_points(points, values, xi, search_radius, min_neighbors=minimum_neighbors, kind=interp_type) else: kappa = tools.calc_kappa(ave_spacing, kappa_star) return inverse_distance_to_points(points, values, xi, search_radius, gamma, kappa, min_neighbors=minimum_neighbors, kind=interp_type) # If this is radial basis function, make the interpolator and apply it elif interp_type == 'rbf': points_transposed = np.array(points).transpose() xi_transposed = np.array(xi).transpose() rbfi = Rbf(points_transposed[0], points_transposed[1], values, function=rbf_func, smooth=rbf_smooth) return rbfi(xi_transposed[0], xi_transposed[1]) else: raise ValueError('Interpolation option not available. ' 'Try: linear, nearest, cubic, natural_neighbor, ' 'barnes, cressman, rbf')
python
{ "resource": "" }
q23539
_make_datetime
train
def _make_datetime(s): r"""Convert 7 bytes from a GINI file to a `datetime` instance.""" s = bytearray(s) # For Python 2 year, month, day, hour, minute, second, cs = s if year < 70: year += 100 return datetime(1900 + year, month, day, hour, minute, second, 10000 * cs)
python
{ "resource": "" }
q23540
_scaled_int
train
def _scaled_int(s): r"""Convert a 3 byte string to a signed integer value.""" s = bytearray(s) # For Python 2 # Get leftmost bit (sign) as 1 (if 0) or -1 (if 1) sign = 1 - ((s[0] & 0x80) >> 6) # Combine remaining bits int_val = (((s[0] & 0x7f) << 16) | (s[1] << 8) | s[2]) log.debug('Source: %s Int: %x Sign: %d', ' '.join(hex(c) for c in s), int_val, sign) # Return scaled and with proper sign return (sign * int_val) / 10000.
python
{ "resource": "" }
q23541
_name_lookup
train
def _name_lookup(names): r"""Create an io helper to convert an integer to a named value.""" mapper = dict(zip(range(len(names)), names)) def lookup(val): return mapper.get(val, 'Unknown') return lookup
python
{ "resource": "" }
q23542
cf_to_proj
train
def cf_to_proj(var): r"""Convert a Variable with projection information to a Proj.4 Projection instance. The attributes of this Variable must conform to the Climate and Forecasting (CF) netCDF conventions. Parameters ---------- var : Variable The projection variable with appropriate attributes. """ import pyproj kwargs = {'lat_0': var.latitude_of_projection_origin, 'a': var.earth_radius, 'b': var.earth_radius} if var.grid_mapping_name == 'lambert_conformal_conic': kwargs['proj'] = 'lcc' kwargs['lon_0'] = var.longitude_of_central_meridian kwargs['lat_1'] = var.standard_parallel kwargs['lat_2'] = var.standard_parallel elif var.grid_mapping_name == 'polar_stereographic': kwargs['proj'] = 'stere' kwargs['lon_0'] = var.straight_vertical_longitude_from_pole kwargs['lat_0'] = var.latitude_of_projection_origin kwargs['lat_ts'] = var.standard_parallel kwargs['x_0'] = False # Easting kwargs['y_0'] = False # Northing elif var.grid_mapping_name == 'mercator': kwargs['proj'] = 'merc' kwargs['lon_0'] = var.longitude_of_projection_origin kwargs['lat_ts'] = var.standard_parallel kwargs['x_0'] = False # Easting kwargs['y_0'] = False # Northing return pyproj.Proj(**kwargs)
python
{ "resource": "" }
q23543
get_perturbation
train
def get_perturbation(ts, axis=-1): r"""Compute the perturbation from the mean of a time series. Parameters ---------- ts : array_like The time series from which you wish to find the perturbation time series (perturbation from the mean). Returns ------- array_like The perturbation time series. Other Parameters ---------------- axis : int The index of the time axis. Default is -1 Notes ----- The perturbation time series produced by this function is defined as the perturbations about the mean: .. math:: x(t)^{\prime} = x(t) - \overline{x(t)} """ slices = [slice(None)] * ts.ndim slices[axis] = None mean = ts.mean(axis=axis)[tuple(slices)] return ts - mean
python
{ "resource": "" }
q23544
tke
train
def tke(u, v, w, perturbation=False, axis=-1): r"""Compute turbulence kinetic energy. Compute the turbulence kinetic energy (e) from the time series of the velocity components. Parameters ---------- u : array_like The wind component along the x-axis v : array_like The wind component along the y-axis w : array_like The wind component along the z-axis perturbation : {False, True}, optional True if the `u`, `v`, and `w` components of wind speed supplied to the function are perturbation velocities. If False, perturbation velocities will be calculated by removing the mean value from each component. Returns ------- array_like The corresponding turbulence kinetic energy value Other Parameters ---------------- axis : int The index of the time axis. Default is -1 See Also -------- get_perturbation : Used to compute perturbations if `perturbation` is False. Notes ----- Turbulence Kinetic Energy is computed as: .. math:: e = 0.5 \sqrt{\overline{u^{\prime2}} + \overline{v^{\prime2}} + \overline{w^{\prime2}}}, where the velocity components .. math:: u^{\prime}, v^{\prime}, w^{\prime} are perturbation velocities. For more information on the subject, please see [Garratt1994]_. """ if not perturbation: u = get_perturbation(u, axis=axis) v = get_perturbation(v, axis=axis) w = get_perturbation(w, axis=axis) u_cont = np.mean(u * u, axis=axis) v_cont = np.mean(v * v, axis=axis) w_cont = np.mean(w * w, axis=axis) return 0.5 * np.sqrt(u_cont + v_cont + w_cont)
python
{ "resource": "" }
q23545
kinematic_flux
train
def kinematic_flux(vel, b, perturbation=False, axis=-1): r"""Compute the kinematic flux from two time series. Compute the kinematic flux from the time series of two variables `vel` and b. Note that to be a kinematic flux, at least one variable must be a component of velocity. Parameters ---------- vel : array_like A component of velocity b : array_like May be a component of velocity or a scalar variable (e.g. Temperature) perturbation : bool, optional `True` if the `vel` and `b` variables are perturbations. If `False`, perturbations will be calculated by removing the mean value from each variable. Defaults to `False`. Returns ------- array_like The corresponding kinematic flux Other Parameters ---------------- axis : int, optional The index of the time axis, along which the calculations will be performed. Defaults to -1 Notes ----- A kinematic flux is computed as .. math:: \overline{u^{\prime} s^{\prime}} where the prime notation denotes perturbation variables, and at least one variable is a perturbation velocity. For example, the vertical kinematic momentum flux (two velocity components): .. math:: \overline{u^{\prime} w^{\prime}} or the vertical kinematic heat flux (one velocity component, and one scalar): .. math:: \overline{w^{\prime} T^{\prime}} If perturbation variables are passed into this function (i.e. `perturbation` is True), the kinematic flux is computed using the equation above. However, the equation above can be rewritten as .. math:: \overline{us} - \overline{u}~\overline{s} which is computationally more efficient. This is how the kinematic flux is computed in this function if `perturbation` is False. For more information on the subject, please see [Garratt1994]_. """ kf = np.mean(vel * b, axis=axis) if not perturbation: kf -= np.mean(vel, axis=axis) * np.mean(b, axis=axis) return np.atleast_1d(kf)
python
{ "resource": "" }
q23546
friction_velocity
train
def friction_velocity(u, w, v=None, perturbation=False, axis=-1): r"""Compute the friction velocity from the time series of velocity components. Compute the friction velocity from the time series of the x, z, and optionally y, velocity components. Parameters ---------- u : array_like The wind component along the x-axis w : array_like The wind component along the z-axis v : array_like, optional The wind component along the y-axis. perturbation : {False, True}, optional True if the `u`, `w`, and `v` components of wind speed supplied to the function are perturbation velocities. If False, perturbation velocities will be calculated by removing the mean value from each component. Returns ------- array_like The corresponding friction velocity Other Parameters ---------------- axis : int The index of the time axis. Default is -1 See Also -------- kinematic_flux : Used to compute the x-component and y-component vertical kinematic momentum flux(es) used in the computation of the friction velocity. Notes ----- The Friction Velocity is computed as: .. math:: u_{*} = \sqrt[4]{\left(\overline{u^{\prime}w^{\prime}}\right)^2 + \left(\overline{v^{\prime}w^{\prime}}\right)^2}, where :math: \overline{u^{\prime}w^{\prime}} and :math: \overline{v^{\prime}w^{\prime}} are the x-component and y-components of the vertical kinematic momentum flux, respectively. If the optional v component of velocity is not supplied to the function, the computation of the friction velocity is reduced to .. math:: u_{*} = \sqrt[4]{\left(\overline{u^{\prime}w^{\prime}}\right)^2} For more information on the subject, please see [Garratt1994]_. """ uw = kinematic_flux(u, w, perturbation=perturbation, axis=axis) kf = uw * uw if v is not None: vw = kinematic_flux(v, w, perturbation=perturbation, axis=axis) kf += vw * vw # the friction velocity is the 4th root of the kinematic momentum flux # As an optimization, first do inplace square root, then return the # square root of that. This is faster than np.power(..., 0.25) np.sqrt(kf, out=kf) return np.sqrt(kf)
python
{ "resource": "" }
q23547
open_as_needed
train
def open_as_needed(filename): """Return a file-object given either a filename or an object. Handles opening with the right class based on the file extension. """ if hasattr(filename, 'read'): return filename if filename.endswith('.bz2'): return bz2.BZ2File(filename, 'rb') elif filename.endswith('.gz'): return gzip.GzipFile(filename, 'rb') else: return open(filename, 'rb')
python
{ "resource": "" }
q23548
zlib_decompress_all_frames
train
def zlib_decompress_all_frames(data): """Decompress all frames of zlib-compressed bytes. Repeatedly tries to decompress `data` until all data are decompressed, or decompression fails. This will skip over bytes that are not compressed with zlib. Parameters ---------- data : bytearray or bytes Binary data compressed using zlib. Returns ------- bytearray All decompressed bytes """ frames = bytearray() data = bytes(data) while data: decomp = zlib.decompressobj() try: frames.extend(decomp.decompress(data)) data = decomp.unused_data except zlib.error: frames.extend(data) break return frames
python
{ "resource": "" }
q23549
hexdump
train
def hexdump(buf, num_bytes, offset=0, width=32): """Perform a hexdump of the buffer. Returns the hexdump as a canonically-formatted string. """ ind = offset end = offset + num_bytes lines = [] while ind < end: chunk = buf[ind:ind + width] actual_width = len(chunk) hexfmt = '{:02X}' blocksize = 4 blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)] # Need to get any partial lines num_left = actual_width % blocksize # noqa: S001 Fix false alarm if num_left: blocks += [hexfmt * num_left + '--' * (blocksize - num_left)] blocks += ['--' * blocksize] * (width // blocksize - len(blocks)) hexoutput = ' '.join(blocks) printable = tuple(chunk) lines.append(' '.join((hexoutput.format(*printable), str(ind).ljust(len(str(end))), str(ind - offset).ljust(len(str(end))), ''.join(chr(c) if 31 < c < 128 else '.' for c in chunk)))) ind += width return '\n'.join(lines)
python
{ "resource": "" }
q23550
UnitLinker.units
train
def units(self, val): """Override the units on the underlying variable.""" if isinstance(val, units.Unit): self._unit = val else: self._unit = units(val)
python
{ "resource": "" }
q23551
NamedStruct.unpack_from
train
def unpack_from(self, buff, offset=0): """Read bytes from a buffer and return as a namedtuple.""" return self._create(super(NamedStruct, self).unpack_from(buff, offset))
python
{ "resource": "" }
q23552
DictStruct.unpack_from
train
def unpack_from(self, buff, offset=0): """Unpack the next bytes from a file object.""" return self._create(super(DictStruct, self).unpack_from(buff, offset))
python
{ "resource": "" }
q23553
IOBuffer.set_mark
train
def set_mark(self): """Mark the current location and return its id so that the buffer can return later.""" self._bookmarks.append(self._offset) return len(self._bookmarks) - 1
python
{ "resource": "" }
q23554
IOBuffer.jump_to
train
def jump_to(self, mark, offset=0): """Jump to a previously set mark.""" self._offset = self._bookmarks[mark] + offset
python
{ "resource": "" }
q23555
IOBuffer.splice
train
def splice(self, mark, newdata): """Replace the data after the marked location with the specified data.""" self.jump_to(mark) self._data = self._data[:self._offset] + bytearray(newdata)
python
{ "resource": "" }
q23556
IOBuffer.read_struct
train
def read_struct(self, struct_class): """Parse and return a structure from the current buffer offset.""" struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset) self.skip(struct_class.size) return struct
python
{ "resource": "" }
q23557
IOBuffer.read_func
train
def read_func(self, func, num_bytes=None): """Parse data from the current buffer offset using a function.""" # only advance if func succeeds res = func(self.get_next(num_bytes)) self.skip(num_bytes) return res
python
{ "resource": "" }
q23558
IOBuffer.read_binary
train
def read_binary(self, num, item_type='B'): """Parse the current buffer offset as the specified code.""" if 'B' in item_type: return self.read(num) if item_type[0] in ('@', '=', '<', '>', '!'): order = item_type[0] item_type = item_type[1:] else: order = '@' return list(self.read_struct(Struct(order + '{:d}'.format(int(num)) + item_type)))
python
{ "resource": "" }
q23559
IOBuffer.read
train
def read(self, num_bytes=None): """Read and return the specified bytes from the buffer.""" res = self.get_next(num_bytes) self.skip(len(res)) return res
python
{ "resource": "" }
q23560
IOBuffer.get_next
train
def get_next(self, num_bytes=None): """Get the next bytes in the buffer without modifying the offset.""" if num_bytes is None: return self._data[self._offset:] else: return self._data[self._offset:self._offset + num_bytes]
python
{ "resource": "" }
q23561
IOBuffer.skip
train
def skip(self, num_bytes): """Jump ahead the specified number of bytes in the buffer.""" if num_bytes is None: self._offset = len(self._data) else: self._offset += num_bytes
python
{ "resource": "" }
q23562
draw_polygon_with_info
train
def draw_polygon_with_info(ax, polygon, off_x=0, off_y=0): """Draw one of the natural neighbor polygons with some information.""" pts = np.array(polygon)[ConvexHull(polygon).vertices] for i, pt in enumerate(pts): ax.plot([pt[0], pts[(i + 1) % len(pts)][0]], [pt[1], pts[(i + 1) % len(pts)][1]], 'k-') avex, avey = np.mean(pts, axis=0) ax.annotate('area: {:.3f}'.format(geometry.area(pts)), xy=(avex + off_x, avey + off_y), fontsize=12)
python
{ "resource": "" }
q23563
_check_and_flip
train
def _check_and_flip(arr):
    """Transpose array or list of arrays if they are 2D."""
    if hasattr(arr, 'ndim'):
        if arr.ndim >= 2:
            return arr.T
        else:
            return arr
    elif not is_string_like(arr) and iterable(arr):
        return tuple(_check_and_flip(a) for a in arr)
    else:
        return arr
python
{ "resource": "" }
q23564
ensure_yx_order
train
def ensure_yx_order(func):
    """Wrap a function to ensure all array arguments are y, x ordered, based on kwarg."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Check what order we're given
        dim_order = kwargs.pop('dim_order', None)
        x_first = _is_x_first_dim(dim_order)

        # If x is the first dimension, flip (transpose) every array within the function args.
        if x_first:
            args = tuple(_check_and_flip(arr) for arr in args)
            for k, v in kwargs.items():
                kwargs[k] = _check_and_flip(v)

        ret = func(*args, **kwargs)

        # If we flipped on the way in, need to flip on the way out so that output array(s)
        # match the dimension order of the original input.
        if x_first:
            return _check_and_flip(ret)
        else:
            return ret

    # Inject a docstring for the dim_order argument into the function's docstring.
    dim_order_doc = """
    dim_order : str or ``None``, optional
        The ordering of dimensions in passed in arrays. Can be one of ``None``, ``'xy'``,
        or ``'yx'``. ``'xy'`` indicates that the dimension corresponding to x is the leading
        dimension, followed by y. ``'yx'`` indicates that x is the last dimension, preceded
        by y. ``None`` indicates that the default ordering should be assumed,
        which is 'yx'. Can only be passed as a keyword argument, i.e.
        func(..., dim_order='xy')."""

    # Find the first blank line after the start of the parameters section
    params = wrapper.__doc__.find('Parameters')
    blank = wrapper.__doc__.find('\n\n', params)
    wrapper.__doc__ = wrapper.__doc__[:blank] + dim_order_doc + wrapper.__doc__[blank:]

    return wrapper
python
{ "resource": "" }
q23565
vorticity
train
def vorticity(u, v, dx, dy):
    r"""Calculate the vertical vorticity of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.

    Returns
    -------
    (M, N) ndarray
        vertical vorticity

    See Also
    --------
    divergence

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    dudy = first_derivative(u, delta=dy, axis=-2)
    dvdx = first_derivative(v, delta=dx, axis=-1)
    return dvdx - dudy
python
{ "resource": "" }
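A quick sanity check of vorticity using solid-body rotation (u = -y, v = x), for which the vertical vorticity is a uniform 2 s^-1. Pint-style units are assumed, as used throughout this code:

import numpy as np
from metpy.units import units

yy, xx = np.meshgrid(np.arange(5.), np.arange(5.), indexing='ij')
u = -yy * units('m/s')
v = xx * units('m/s')
vort = vorticity(u, v, 1 * units.m, 1 * units.m)
# dvdx - dudy = 1 - (-1) = 2 s^-1 everywhere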
q23566
divergence
train
def divergence(u, v, dx, dy):
    r"""Calculate the horizontal divergence of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.

    Returns
    -------
    (M, N) ndarray
        The horizontal divergence

    See Also
    --------
    vorticity

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    dudx = first_derivative(u, delta=dx, axis=-1)
    dvdy = first_derivative(v, delta=dy, axis=-2)
    return dudx + dvdy
python
{ "resource": "" }
q23567
total_deformation
train
def total_deformation(u, v, dx, dy):
    r"""Calculate the horizontal total deformation of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.

    Returns
    -------
    (M, N) ndarray
        Total Deformation

    See Also
    --------
    shearing_deformation, stretching_deformation

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    dudy, dudx = gradient(u, deltas=(dy, dx), axes=(-2, -1))
    dvdy, dvdx = gradient(v, deltas=(dy, dx), axes=(-2, -1))
    return np.sqrt((dvdx + dudy)**2 + (dudx - dvdy)**2)
python
{ "resource": "" }
q23568
advection
train
def advection(scalar, wind, deltas):
    r"""Calculate the advection of a scalar field by the wind.

    The order of the dimensions of the arrays must match the order in which
    the wind components are given. For example, if the winds are given [u, v],
    then the scalar and wind arrays must be indexed as x,y (which puts x as the
    rows, not columns).

    Parameters
    ----------
    scalar : N-dimensional array
        Array (with N-dimensions) with the quantity to be advected.
    wind : sequence of arrays
        Length M sequence of N-dimensional arrays. Represents the flow,
        with a component of the wind in each dimension. For example, for
        horizontal advection, this could be a list: [u, v], where u and v
        are each a 2-dimensional array.
    deltas : sequence of float or ndarray
        A (length M) sequence containing the grid spacing(s) in each dimension. If using
        arrays, in each array there should be one item less than the size of `scalar` along
        the applicable axis.

    Returns
    -------
    N-dimensional array
        An N-dimensional array containing the advection at all grid points.

    """
    # This allows passing in a list of wind components or an array.
    wind = _stack(wind)

    # If we have more than one component, we need to reverse the order along the first
    # dimension so that the wind components line up with the order of the gradients from the
    # ..., y, x ordered array.
    if wind.ndim > scalar.ndim:
        wind = wind[::-1]

    # Gradient returns a list of derivatives along each dimension. We convert
    # this to an array with dimension as the first index. Reverse the deltas to line up
    # with the order of the dimensions.
    grad = _stack(gradient(scalar, deltas=deltas[::-1]))

    # Make them be at least 2D (handling the 1D case) so that we can do the
    # multiply and sum below
    grad, wind = atleast_2d(grad, wind)

    return (-grad * wind).sum(axis=0)
python
{ "resource": "" }
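A one-dimensional sketch of advection: temperature increasing downwind of a westerly flow yields cold advection (a negative result). The call matches the signature above; values are illustrative:

import numpy as np
from metpy.units import units

temperature = np.array([270., 272., 274., 276.]) * units.kelvin
u = np.ones(4) * units('m/s')
adv = advection(temperature, [u], (2. * units.km,))
# gradient is +1 K/km and the wind is +1 m/s, so advection is -0.001 K/s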
q23569
frontogenesis
train
def frontogenesis(thta, u, v, dx, dy, dim_order='yx'):
    r"""Calculate the 2D kinematic frontogenesis of a temperature field.

    The implementation is a form of the Petterssen Frontogenesis and uses the formula
    outlined in [Bluestein1993]_ pg.248-253.

    .. math:: F = \frac{1}{2}\left|\nabla \theta\right|[D \cos(2\beta) - \delta]

    * :math:`F` is 2D kinematic frontogenesis
    * :math:`\theta` is potential temperature
    * :math:`D` is the total deformation
    * :math:`\beta` is the angle between the axis of dilatation and the isentropes
    * :math:`\delta` is the divergence

    Parameters
    ----------
    thta : (M, N) ndarray
        Potential temperature
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.

    Returns
    -------
    (M, N) ndarray
        2D Frontogenesis in [temperature units]/m/s

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    Conversion factor to go from [temperature units]/m/s to [temperature units]/100km/3h
    :math:`1.08e4*1.e5`

    """
    # Get gradients of potential temperature in both x and y
    ddy_thta = first_derivative(thta, delta=dy, axis=-2)
    ddx_thta = first_derivative(thta, delta=dx, axis=-1)

    # Compute the magnitude of the potential temperature gradient
    mag_thta = np.sqrt(ddx_thta**2 + ddy_thta**2)

    # Get the shearing, stretching, and total deformation of the wind field
    shrd = shearing_deformation(u, v, dx, dy, dim_order=dim_order)
    strd = stretching_deformation(u, v, dx, dy, dim_order=dim_order)
    tdef = total_deformation(u, v, dx, dy, dim_order=dim_order)

    # Get the divergence of the wind field
    div = divergence(u, v, dx, dy, dim_order=dim_order)

    # Compute the angle (beta) between the axis of dilatation and the isentropes
    psi = 0.5 * np.arctan2(shrd, strd)
    beta = np.arcsin((-ddx_thta * np.cos(psi) - ddy_thta * np.sin(psi)) / mag_thta)

    return 0.5 * mag_thta * (tdef * np.cos(2 * beta) - div)
python
{ "resource": "" }
q23570
geostrophic_wind
train
def geostrophic_wind(heights, f, dx, dy):
    r"""Calculate the geostrophic wind given from the heights or geopotential.

    Parameters
    ----------
    heights : (M, N) ndarray
        The height field, with either leading dimensions of (x, y) or trailing dimensions
        of (y, x), depending on the value of ``dim_order``.
    f : array_like
        The coriolis parameter. This can be a scalar to be applied
        everywhere or an array of values.
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `heights` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `heights` along the applicable axis.

    Returns
    -------
    A 2-item tuple of arrays
        A tuple of the u-component and v-component of the geostrophic wind.

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    if heights.dimensionality['[length]'] == 2.0:
        norm_factor = 1. / f
    else:
        norm_factor = mpconsts.g / f

    dhdy = first_derivative(heights, delta=dy, axis=-2)
    dhdx = first_derivative(heights, delta=dx, axis=-1)
    return -norm_factor * dhdy, norm_factor * dhdx
python
{ "resource": "" }
q23571
ageostrophic_wind
train
def ageostrophic_wind(heights, f, dx, dy, u, v, dim_order='yx'):
    r"""Calculate the ageostrophic wind given from the heights or geopotential.

    Parameters
    ----------
    heights : (M, N) ndarray
        The height field.
    f : array_like
        The coriolis parameter. This can be a scalar to be applied
        everywhere or an array of values.
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `heights` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `heights` along the applicable axis.
    u : (M, N) ndarray
        The u wind field.
    v : (M, N) ndarray
        The v wind field.

    Returns
    -------
    A 2-item tuple of arrays
        A tuple of the u-component and v-component of the ageostrophic wind.

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    u_geostrophic, v_geostrophic = geostrophic_wind(heights, f, dx, dy, dim_order=dim_order)
    return u - u_geostrophic, v - v_geostrophic
python
{ "resource": "" }
q23572
storm_relative_helicity
train
def storm_relative_helicity(u, v, heights, depth, bottom=0 * units.m,
                            storm_u=0 * units('m/s'), storm_v=0 * units('m/s')):
    # Partially adapted from similar SharpPy code
    r"""Calculate storm relative helicity.

    Calculates storm relative helicity following [Markowski2010]_ 230-231.

    .. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz

    This is applied to the data from a hodograph with the following summation:

    .. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
                                  (u_{n} - c_{x})(v_{n+1} - c_{y})]

    Parameters
    ----------
    u : array-like
        u component winds
    v : array-like
        v component winds
    heights : array-like
        atmospheric heights, will be converted to AGL
    depth : number
        depth of the layer
    bottom : number
        height of layer bottom AGL (default is surface)
    storm_u : number
        u component of storm motion (default is 0 m/s)
    storm_v : number
        v component of storm motion (default is 0 m/s)

    Returns
    -------
    `pint.Quantity`, `pint.Quantity`, `pint.Quantity`
        positive, negative, total storm-relative helicity

    """
    _, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)

    storm_relative_u = u - storm_u
    storm_relative_v = v - storm_v

    int_layers = (storm_relative_u[1:] * storm_relative_v[:-1]
                  - storm_relative_u[:-1] * storm_relative_v[1:])

    # Need to manually check for masked value because sum() on masked array with non-default
    # mask will return a masked value rather than 0. See numpy/numpy#11736
    positive_srh = int_layers[int_layers.magnitude > 0.].sum()
    if np.ma.is_masked(positive_srh):
        positive_srh = 0.0 * units('meter**2 / second**2')
    negative_srh = int_layers[int_layers.magnitude < 0.].sum()
    if np.ma.is_masked(negative_srh):
        negative_srh = 0.0 * units('meter**2 / second**2')

    return (positive_srh.to('meter ** 2 / second ** 2'),
            negative_srh.to('meter ** 2 / second ** 2'),
            (positive_srh + negative_srh).to('meter ** 2 / second ** 2'))
python
{ "resource": "" }
q23573
absolute_vorticity
train
def absolute_vorticity(u, v, dx, dy, lats, dim_order='yx'):
    """Calculate the absolute vorticity of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    lats : (M, N) ndarray
        latitudes of the wind data in radians or with appropriate unit information attached

    Returns
    -------
    (M, N) ndarray
        absolute vorticity

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    f = coriolis_parameter(lats)
    relative_vorticity = vorticity(u, v, dx, dy, dim_order=dim_order)
    return relative_vorticity + f
python
{ "resource": "" }
q23574
potential_vorticity_baroclinic
train
def potential_vorticity_baroclinic(potential_temperature, pressure, u, v, dx, dy, lats):
    r"""Calculate the baroclinic potential vorticity.

    .. math:: PV = -g \left(\frac{\partial u}{\partial p}\frac{\partial \theta}{\partial y}
              - \frac{\partial v}{\partial p}\frac{\partial \theta}{\partial x}
              + \frac{\partial \theta}{\partial p}(\zeta + f) \right)

    This formula is based on equation 4.5.93 [Bluestein1993]_.

    Parameters
    ----------
    potential_temperature : (P, M, N) ndarray
        potential temperature
    pressure : (P, M, N) ndarray
        vertical pressures
    u : (P, M, N) ndarray
        x component of the wind
    v : (P, M, N) ndarray
        y component of the wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    lats : (M, N) ndarray
        latitudes of the wind data in radians or with appropriate unit information attached

    Returns
    -------
    (P, M, N) ndarray
        baroclinic potential vorticity

    Notes
    -----
    This function will only work with data that is in (P, Y, X) format. If your data
    is in a different order you will need to re-order your data in order to get correct
    results from this function.

    The same function can be used for isobaric and isentropic PV analysis. Provide winds
    for vorticity calculations on the desired isobaric or isentropic surface. At least three
    layers of pressure/potential temperature are required in order to calculate the vertical
    derivative (one above and below the desired surface). The first two terms will be zero
    if isentropic level data is used, since the gradient of theta in both the x and
    y-directions is zero on an isentropic surface.

    This function expects pressure/isentropic level to increase with increasing array element
    (e.g., from higher in the atmosphere to closer to the surface). If the pressure array is
    one-dimensional, p[:, None, None] can be used to make it appear multi-dimensional.

    """
    if ((np.shape(potential_temperature)[-3] < 3) or (np.shape(pressure)[-3] < 3)
            or (np.shape(potential_temperature)[-3] != (np.shape(pressure)[-3]))):
        raise ValueError('Length of potential temperature along the pressure axis '
                         '{} must be at least 3.'.format(-3))

    avor = absolute_vorticity(u, v, dx, dy, lats, dim_order='yx')
    dthtadp = first_derivative(potential_temperature, x=pressure, axis=-3)

    if ((np.shape(potential_temperature)[-2] == 1)
            and (np.shape(potential_temperature)[-1] == 1)):
        dthtady = 0 * units.K / units.m  # axis=-2 only has one dimension
        dthtadx = 0 * units.K / units.m  # axis=-1 only has one dimension
    else:
        dthtady = first_derivative(potential_temperature, delta=dy, axis=-2)
        dthtadx = first_derivative(potential_temperature, delta=dx, axis=-1)
    dudp = first_derivative(u, x=pressure, axis=-3)
    dvdp = first_derivative(v, x=pressure, axis=-3)

    return (-mpconsts.g * (dudp * dthtady - dvdp * dthtadx + avor * dthtadp)).to(
        units.kelvin * units.meter**2 / (units.second * units.kilogram))
python
{ "resource": "" }
q23575
inertial_advective_wind
train
def inertial_advective_wind(u, v, u_geostrophic, v_geostrophic, dx, dy, lats):
    r"""Calculate the inertial advective wind.

    .. math:: \frac{\hat k}{f} \times (\vec V \cdot \nabla)\hat V_g

    .. math:: \frac{\hat k}{f} \times \left[ \left( u \frac{\partial u_g}{\partial x} + v
              \frac{\partial u_g}{\partial y} \right) \hat i + \left( u \frac{\partial v_g}
              {\partial x} + v \frac{\partial v_g}{\partial y} \right) \hat j \right]

    .. math:: \left[ -\frac{1}{f}\left(u \frac{\partial v_g}{\partial x} + v
              \frac{\partial v_g}{\partial y} \right) \right] \hat i + \left[ \frac{1}{f}
              \left( u \frac{\partial u_g}{\partial x} + v \frac{\partial u_g}{\partial y}
              \right) \right] \hat j

    This formula is based on equation 27 of [Rochette2006]_.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the advecting wind
    v : (M, N) ndarray
        y component of the advecting wind
    u_geostrophic : (M, N) ndarray
        x component of the geostrophic (advected) wind
    v_geostrophic : (M, N) ndarray
        y component of the geostrophic (advected) wind
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    lats : (M, N) ndarray
        latitudes of the wind data in radians or with appropriate unit information attached

    Returns
    -------
    (M, N) ndarray
        x component of inertial advective wind
    (M, N) ndarray
        y component of inertial advective wind

    Notes
    -----
    Many forms of the inertial advective wind assume the advecting and advected
    wind to both be the geostrophic wind. To do so, pass the x and y components
    of the geostrophic wind for u and u_geostrophic/v and v_geostrophic.

    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    f = coriolis_parameter(lats)

    dugdy, dugdx = gradient(u_geostrophic, deltas=(dy, dx), axes=(-2, -1))
    dvgdy, dvgdx = gradient(v_geostrophic, deltas=(dy, dx), axes=(-2, -1))

    u_component = -(u * dvgdx + v * dvgdy) / f
    v_component = (u * dugdx + v * dugdy) / f

    return u_component, v_component
python
{ "resource": "" }
q23576
q_vector
train
def q_vector(u, v, temperature, pressure, dx, dy, static_stability=1):
    r"""Calculate Q-vector at a given pressure level using the u, v winds and temperature.

    .. math:: \vec{Q} = (Q_1, Q_2)
                      = - \frac{R}{\sigma p}\left(
                              \frac{\partial \vec{v}_g}{\partial x} \cdot \nabla_p T,
                              \frac{\partial \vec{v}_g}{\partial y} \cdot \nabla_p T
                          \right)

    This formula follows equation 5.7.55 from [Bluestein1992]_, and can be used with the
    below form of the quasigeostrophic omega equation to assess vertical motion
    ([Bluestein1992]_ equation 5.7.54):

    .. math:: \left( \nabla_p^2 + \frac{f_0^2}{\sigma} \frac{\partial^2}{\partial p^2}
                  \right) \omega =
              - 2 \nabla_p \cdot \vec{Q} -
                  \frac{R}{\sigma p} \beta \frac{\partial T}{\partial x}.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind (geostrophic in QG-theory)
    v : (M, N) ndarray
        y component of the wind (geostrophic in QG-theory)
    temperature : (M, N) ndarray
        Array of temperature at pressure level
    pressure : `pint.Quantity`
        Pressure at level
    dx : float or ndarray
        The grid spacing(s) in the x-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    dy : float or ndarray
        The grid spacing(s) in the y-direction. If an array, there should be one item less
        than the size of `u` along the applicable axis.
    static_stability : `pint.Quantity`, optional
        The static stability at the pressure level. Defaults to 1 if not given to calculate
        the Q-vector without factoring in static stability.

    Returns
    -------
    tuple of (M, N) ndarrays
        The components of the Q-vector in the u- and v-directions respectively

    See Also
    --------
    static_stability

    Notes
    -----
    If inputs have more than two dimensions, they are assumed to have either leading dimensions
    of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.

    """
    dudy, dudx = gradient(u, deltas=(dy, dx), axes=(-2, -1))
    dvdy, dvdx = gradient(v, deltas=(dy, dx), axes=(-2, -1))
    dtempdy, dtempdx = gradient(temperature, deltas=(dy, dx), axes=(-2, -1))

    q1 = -mpconsts.Rd / (pressure * static_stability) * (dudx * dtempdx + dvdx * dtempdy)
    q2 = -mpconsts.Rd / (pressure * static_stability) * (dudy * dtempdx + dvdy * dtempdy)

    return q1.to_base_units(), q2.to_base_units()
python
{ "resource": "" }
q23577
basic_map
train
def basic_map(proj):
    """Make our basic default map for plotting."""
    fig = plt.figure(figsize=(15, 10))
    add_metpy_logo(fig, 0, 80, size='large')
    view = fig.add_axes([0, 0, 1, 1], projection=proj)
    view.set_extent([-120, -70, 20, 50])
    view.add_feature(cfeature.STATES.with_scale('50m'))
    view.add_feature(cfeature.OCEAN)
    view.add_feature(cfeature.COASTLINE)
    view.add_feature(cfeature.BORDERS, linestyle=':')
    return fig, view
python
{ "resource": "" }
q23578
get_points_within_r
train
def get_points_within_r(center_points, target_points, r):
    r"""Get all target_points within a specified radius of a center point.

    All data must be in same coordinate system, or you will get undetermined results.

    Parameters
    ----------
    center_points : (X, Y) ndarray
        location from which to grab surrounding points within r
    target_points : (X, Y) ndarray
        points from which to return if they are within r of center_points
    r : integer
        search radius around center_points to grab target_points

    Returns
    -------
    matches : (X, Y) ndarray
        A list of points within r distance of, and in the same order as,
        center_points

    """
    tree = cKDTree(target_points)
    indices = tree.query_ball_point(center_points, r)
    return tree.data[indices].T
python
{ "resource": "" }
q23579
get_point_count_within_r
train
def get_point_count_within_r(center_points, target_points, r):
    r"""Get count of target points within a specified radius from center points.

    All data must be in same coordinate system, or you will get undetermined results.

    Parameters
    ----------
    center_points : (X, Y) ndarray
        locations from which to grab surrounding points within r
    target_points : (X, Y) ndarray
        points from which to return if they are within r of center_points
    r : integer
        search radius around center_points to grab target_points

    Returns
    -------
    matches : (N, ) ndarray
        A list of point counts within r distance of, and in the same order as,
        center_points

    """
    tree = cKDTree(target_points)
    indices = tree.query_ball_point(center_points, r)
    return np.array([len(x) for x in indices])
python
{ "resource": "" }
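A hand-checkable sketch of the two radius queries above (scipy's cKDTree does the lookup). As written, get_points_within_r indexes the tree with the raw query result, so a single center point is the safe input, while get_point_count_within_r accepts an array of centers:

import numpy as np

targets = np.array([[0., 1.], [0., 2.], [3., 3.]])
get_points_within_r([0., 0.], targets, r=2.5)                   # the two nearby points, as columns
get_point_count_within_r(np.array([[0., 0.]]), targets, r=2.5)  # array([2])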
q23580
triangle_area
train
def triangle_area(pt1, pt2, pt3):
    r"""Return the area of a triangle.

    Parameters
    ----------
    pt1 : (X, Y) ndarray
        Starting vertex of a triangle
    pt2 : (X, Y) ndarray
        Second vertex of a triangle
    pt3 : (X, Y) ndarray
        Ending vertex of a triangle

    Returns
    -------
    area : float
        Area of the given triangle.

    """
    a = 0.0

    a += pt1[0] * pt2[1] - pt2[0] * pt1[1]
    a += pt2[0] * pt3[1] - pt3[0] * pt2[1]
    a += pt3[0] * pt1[1] - pt1[0] * pt3[1]

    return abs(a) / 2
python
{ "resource": "" }
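A quick check of the cross-product area formula above using a 3-4-5 right triangle:

triangle_area((0, 0), (3, 0), (0, 4))   # 6.0 == 0.5 * base * height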
q23581
dist_2
train
def dist_2(x0, y0, x1, y1):
    r"""Return the squared distance between two points.

    This is faster than calculating distance but should
    only be used with comparable ratios.

    Parameters
    ----------
    x0 : float
        Starting x coordinate
    y0 : float
        Starting y coordinate
    x1 : float
        Ending x coordinate
    y1 : float
        Ending y coordinate

    Returns
    -------
    d2 : float
        squared distance

    See Also
    --------
    distance

    """
    d0 = x1 - x0
    d1 = y1 - y0
    return d0 * d0 + d1 * d1
python
{ "resource": "" }
q23582
distance
train
def distance(p0, p1):
    r"""Return the distance between two points.

    Parameters
    ----------
    p0 : (X, Y) ndarray
        Starting coordinate
    p1 : (X, Y) ndarray
        Ending coordinate

    Returns
    -------
    d : float
        distance

    See Also
    --------
    dist_2

    """
    return math.sqrt(dist_2(p0[0], p0[1], p1[0], p1[1]))
python
{ "resource": "" }
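The intended division of labor between the two helpers above: dist_2 for comparisons (it skips the square root), distance when the true value is needed:

dist_2(0, 0, 3, 4)        # 25.0 -- cheap, good for nearest-point tests
distance((0, 0), (3, 4))  # 5.0  -- the actual Euclidean distance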
q23583
circumcircle_radius_2
train
def circumcircle_radius_2(pt0, pt1, pt2):
    r"""Calculate and return the squared radius of a given triangle's circumcircle.

    This is faster than calculating the radius but should
    only be used with comparable ratios.

    Parameters
    ----------
    pt0 : (x, y)
        Starting vertex of triangle
    pt1 : (x, y)
        Second vertex of triangle
    pt2 : (x, y)
        Final vertex of a triangle

    Returns
    -------
    r : float
        squared circumcircle radius

    See Also
    --------
    circumcenter

    """
    a = distance(pt0, pt1)
    b = distance(pt1, pt2)
    c = distance(pt2, pt0)

    t_area = triangle_area(pt0, pt1, pt2)

    prod2 = a * b * c

    if t_area > 0:
        radius = prod2 * prod2 / (16 * t_area * t_area)
    else:
        radius = np.nan

    return radius
python
{ "resource": "" }
q23584
circumcenter
train
def circumcenter(pt0, pt1, pt2):
    r"""Calculate and return the circumcenter of a circumcircle generated by a given triangle.

    All three points must be unique or a division by zero error will be raised.

    Parameters
    ----------
    pt0 : (x, y)
        Starting vertex of triangle
    pt1 : (x, y)
        Second vertex of triangle
    pt2 : (x, y)
        Final vertex of a triangle

    Returns
    -------
    cc : (x, y)
        circumcenter coordinates

    See Also
    --------
    circumcircle_radius_2

    """
    a_x = pt0[0]
    a_y = pt0[1]
    b_x = pt1[0]
    b_y = pt1[1]
    c_x = pt2[0]
    c_y = pt2[1]

    bc_y_diff = b_y - c_y
    ca_y_diff = c_y - a_y
    ab_y_diff = a_y - b_y
    cb_x_diff = c_x - b_x
    ac_x_diff = a_x - c_x
    ba_x_diff = b_x - a_x

    d_div = (a_x * bc_y_diff + b_x * ca_y_diff + c_x * ab_y_diff)

    if d_div == 0:
        raise ZeroDivisionError

    d_inv = 0.5 / d_div

    a_mag = a_x * a_x + a_y * a_y
    b_mag = b_x * b_x + b_y * b_y
    c_mag = c_x * c_x + c_y * c_y

    cx = (a_mag * bc_y_diff + b_mag * ca_y_diff + c_mag * ab_y_diff) * d_inv
    cy = (a_mag * cb_x_diff + b_mag * ac_x_diff + c_mag * ba_x_diff) * d_inv

    return cx, cy
python
{ "resource": "" }
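For a right triangle the circumcenter falls at the midpoint of the hypotenuse, which gives an easy check of the formula above:

circumcenter((0, 0), (4, 0), (0, 3))   # (2.0, 1.5), the hypotenuse midpoint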
q23585
find_natural_neighbors
train
def find_natural_neighbors(tri, grid_points):
    r"""Return the natural neighbor triangles for each given grid cell.

    These are determined by the properties of the given Delaunay triangulation.
    A triangle is a natural neighbor of a grid cell if that triangle's circumcenter
    is within the circumradius of the grid cell center.

    Parameters
    ----------
    tri : Object
        A Delaunay Triangulation.
    grid_points : (X, Y) ndarray
        Locations of grids.

    Returns
    -------
    members : dictionary
        List of simplex codes for natural neighbor triangles in `tri` for each grid cell.
    triangle_info : dictionary
        Circumcenter and radius information for each triangle in `tri`.

    """
    tree = cKDTree(grid_points)

    in_triangulation = tri.find_simplex(tree.data) >= 0

    triangle_info = {}

    members = {key: [] for key in range(len(tree.data))}

    for i, simplices in enumerate(tri.simplices):
        ps = tri.points[simplices]

        cc = circumcenter(*ps)
        r = circumcircle_radius(*ps)

        triangle_info[i] = {'cc': cc, 'r': r}

        qualifiers = tree.query_ball_point(cc, r)

        for qualifier in qualifiers:
            if in_triangulation[qualifier]:
                members[qualifier].append(i)

    return members, triangle_info
python
{ "resource": "" }
q23586
find_nn_triangles_point
train
def find_nn_triangles_point(tri, cur_tri, point):
    r"""Return the natural neighbors of a triangle containing a point.

    This is based on the provided Delaunay Triangulation.

    Parameters
    ----------
    tri : Object
        A Delaunay Triangulation
    cur_tri : int
        Simplex code for Delaunay Triangulation lookup of a given triangle that
        contains `point`.
    point : (x, y)
        Coordinates used to calculate distances to simplexes in `tri`.

    Returns
    -------
    nn : (N, ) array
        List of simplex codes for natural neighbor triangles in `tri`.

    """
    nn = []

    candidates = set(tri.neighbors[cur_tri])

    # Find the union of the two sets
    candidates |= set(tri.neighbors[tri.neighbors[cur_tri]].flat)

    # Remove instances of the "no neighbors" code
    candidates.discard(-1)

    for neighbor in candidates:
        triangle = tri.points[tri.simplices[neighbor]]
        cur_x, cur_y = circumcenter(triangle[0], triangle[1], triangle[2])
        r = circumcircle_radius_2(triangle[0], triangle[1], triangle[2])

        if dist_2(point[0], point[1], cur_x, cur_y) < r:
            nn.append(neighbor)

    return nn
python
{ "resource": "" }
q23587
find_local_boundary
train
def find_local_boundary(tri, triangles):
    r"""Find and return the outside edges of a collection of natural neighbor triangles.

    There is no guarantee that this boundary is convex, so ConvexHull is not
    sufficient in some situations.

    Parameters
    ----------
    tri : Object
        A Delaunay Triangulation
    triangles : (N, ) array
        List of natural neighbor triangles.

    Returns
    -------
    edges : (2, N) ndarray
        List of vertex codes that form outer edges of a group of natural neighbor triangles.

    """
    edges = []

    for triangle in triangles:
        for i in range(3):
            pt1 = tri.simplices[triangle][i]
            pt2 = tri.simplices[triangle][(i + 1) % 3]

            if (pt1, pt2) in edges:
                edges.remove((pt1, pt2))
            elif (pt2, pt1) in edges:
                edges.remove((pt2, pt1))
            else:
                edges.append((pt1, pt2))

    return edges
python
{ "resource": "" }
q23588
area
train
def area(poly):
    r"""Find the area of a given polygon using the shoelace algorithm.

    Parameters
    ----------
    poly : (2, N) ndarray
        2-dimensional coordinates representing an ordered
        traversal around the edge of a polygon.

    Returns
    -------
    area : float

    """
    a = 0.0
    n = len(poly)

    for i in range(n):
        a += poly[i][0] * poly[(i + 1) % n][1] - poly[(i + 1) % n][0] * poly[i][1]

    return abs(a) / 2.0
python
{ "resource": "" }
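A unit-square check of the shoelace implementation above; the abs() makes the traversal direction irrelevant:

area([(0, 0), (1, 0), (1, 1), (0, 1)])   # 1.0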
q23589
order_edges
train
def order_edges(edges):
    r"""Return an ordered traversal of the edges of a two-dimensional polygon.

    Parameters
    ----------
    edges : (2, N) ndarray
        List of unordered line segments, where each
        line segment is represented by two unique
        vertex codes.

    Returns
    -------
    ordered_edges : (2, N) ndarray

    """
    edge = edges[0]
    edges = edges[1:]

    ordered_edges = [edge]

    num_max = len(edges)
    while len(edges) > 0 and num_max > 0:

        match = edge[1]

        for search_edge in edges:
            vertex = search_edge[0]
            if match == vertex:
                edge = search_edge
                edges.remove(edge)
                ordered_edges.append(search_edge)
                break
        num_max -= 1

    return ordered_edges
python
{ "resource": "" }
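A sketch of order_edges stitching unordered segments into a closed traversal, matching each edge's end vertex to the next edge's start vertex:

order_edges([(1, 2), (3, 1), (2, 3)])   # [(1, 2), (2, 3), (3, 1)]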
q23590
precipitable_water
train
def precipitable_water(dewpt, pressure, bottom=None, top=None):
    r"""Calculate precipitable water through the depth of a sounding.

    Formula used is:

    .. math:: -\frac{1}{\rho_l g} \int\limits_{p_\text{bottom}}^{p_\text{top}} r dp

    from [Salby1996]_, p. 28.

    Parameters
    ----------
    dewpt : `pint.Quantity`
        Atmospheric dewpoint profile
    pressure : `pint.Quantity`
        Atmospheric pressure profile
    bottom : `pint.Quantity`, optional
        Bottom of the layer, specified in pressure. Defaults to None (highest pressure).
    top : `pint.Quantity`, optional
        The top of the layer, specified in pressure. Defaults to None (lowest pressure).

    Returns
    -------
    `pint.Quantity`
        The precipitable water in the layer

    """
    # Sort pressure and dewpoint to be in decreasing pressure order (increasing height)
    sort_inds = np.argsort(pressure)[::-1]
    pressure = pressure[sort_inds]
    dewpt = dewpt[sort_inds]

    if top is None:
        top = np.nanmin(pressure) * pressure.units

    if bottom is None:
        bottom = np.nanmax(pressure) * pressure.units

    pres_layer, dewpt_layer = get_layer(pressure, dewpt, bottom=bottom, depth=bottom - top)

    w = mixing_ratio(saturation_vapor_pressure(dewpt_layer), pres_layer)

    # Since pressure is in decreasing order, pw will be the opposite sign of that expected.
    pw = -1. * (np.trapz(w.magnitude, pres_layer.magnitude) * (w.units * pres_layer.units)
                / (mpconsts.g * mpconsts.rho_l))
    return pw.to('millimeters')
python
{ "resource": "" }
q23591
mean_pressure_weighted
train
def mean_pressure_weighted(pressure, *args, **kwargs):
    r"""Calculate pressure-weighted mean of an arbitrary variable through a layer.

    Layer top and bottom specified in height or pressure.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure profile
    *args : `pint.Quantity`
        Parameters for which the pressure-weighted mean is to be calculated.
    heights : `pint.Quantity`, optional
        Heights from sounding. Standard atmosphere heights assumed (if needed)
        if no heights are given.
    bottom : `pint.Quantity`, optional
        The bottom of the layer in either the provided height coordinate
        or in pressure. Don't provide in meters AGL unless the provided
        height coordinate is meters AGL. Default is the first observation,
        assumed to be the surface.
    depth : `pint.Quantity`, optional
        The depth of the layer in meters or hPa.

    Returns
    -------
    list of `pint.Quantity`
        The pressure-weighted mean of each variable passed in `*args`, in order.

    """
    heights = kwargs.pop('heights', None)
    bottom = kwargs.pop('bottom', None)
    depth = kwargs.pop('depth', None)
    ret = []  # Returned variable means in layer
    layer_arg = get_layer(pressure, *args, heights=heights,
                          bottom=bottom, depth=depth)
    layer_p = layer_arg[0]
    layer_arg = layer_arg[1:]
    # Taking the integral of the weights (pressure) to feed into the weighting
    # function. Said integral works out to this function:
    pres_int = 0.5 * (layer_p[-1].magnitude**2 - layer_p[0].magnitude**2)
    for i, datavar in enumerate(args):
        arg_mean = np.trapz(layer_arg[i] * layer_p, x=layer_p) / pres_int
        ret.append(arg_mean * datavar.units)

    return ret
python
{ "resource": "" }
q23592
bunkers_storm_motion
train
def bunkers_storm_motion(pressure, u, v, heights):
    r"""Calculate the Bunkers right-mover and left-mover storm motions and sfc-6km mean flow.

    Uses the storm motion calculation from [Bunkers2000]_.

    Parameters
    ----------
    pressure : array-like
        Pressure from sounding
    u : array-like
        U component of the wind
    v : array-like
        V component of the wind
    heights : array-like
        Heights from sounding

    Returns
    -------
    right_mover : `pint.Quantity`
        U and v component of Bunkers RM storm motion
    left_mover : `pint.Quantity`
        U and v component of Bunkers LM storm motion
    wind_mean : `pint.Quantity`
        U and v component of sfc-6km mean flow

    """
    # mean wind from sfc-6km
    wind_mean = concatenate(mean_pressure_weighted(pressure, u, v, heights=heights,
                                                   depth=6000 * units('meter')))

    # mean wind from sfc-500m
    wind_500m = concatenate(mean_pressure_weighted(pressure, u, v, heights=heights,
                                                   depth=500 * units('meter')))

    # mean wind from 5.5-6km
    wind_5500m = concatenate(mean_pressure_weighted(pressure, u, v, heights=heights,
                                                    depth=500 * units('meter'),
                                                    bottom=heights[0]
                                                    + 5500 * units('meter')))

    # Calculate the shear vector from sfc-500m to 5.5-6km
    shear = wind_5500m - wind_500m

    # Take the cross product of the wind shear and k, and divide by the vector magnitude and
    # multiply by the deviation empirically calculated in Bunkers (2000) (7.5 m/s)
    shear_cross = concatenate([shear[1], -shear[0]])
    rdev = shear_cross * (7.5 * units('m/s').to(u.units) / np.hypot(*shear))

    # Add the deviations to the layer average wind to get the RM motion
    right_mover = wind_mean + rdev

    # Subtract the deviations to get the LM motion
    left_mover = wind_mean - rdev

    return right_mover, left_mover, wind_mean
python
{ "resource": "" }
q23593
bulk_shear
train
def bulk_shear(pressure, u, v, heights=None, bottom=None, depth=None):
    r"""Calculate bulk shear through a layer.

    Layer top and bottom specified in meters or pressure.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure profile
    u : `pint.Quantity`
        U-component of wind.
    v : `pint.Quantity`
        V-component of wind.
    heights : `pint.Quantity`, optional
        Heights from sounding
    depth : `pint.Quantity`, optional
        The depth of the layer in meters or hPa. Defaults to 100 hPa.
    bottom : `pint.Quantity`, optional
        The bottom of the layer in height or pressure coordinates.
        If using a height, it must be in the same coordinates as the given
        heights (i.e., don't use meters AGL unless given heights are in meters AGL.)
        Defaults to the highest pressure or lowest height given.

    Returns
    -------
    u_shr : `pint.Quantity`
        u-component of layer bulk shear
    v_shr : `pint.Quantity`
        v-component of layer bulk shear

    """
    _, u_layer, v_layer = get_layer(pressure, u, v, heights=heights,
                                    bottom=bottom, depth=depth)

    u_shr = u_layer[-1] - u_layer[0]
    v_shr = v_layer[-1] - v_layer[0]

    return u_shr, v_shr
python
{ "resource": "" }
q23594
supercell_composite
train
def supercell_composite(mucape, effective_storm_helicity, effective_shear):
    r"""Calculate the supercell composite parameter.

    The supercell composite parameter is designed to identify
    environments favorable for the development of supercells,
    and is calculated using the formula developed by
    [Thompson2004]_:

    .. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} *
               \frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} *
               \frac{\text{Effective Shear}}{20 \text{m/s}}

    The effective_shear term is set to zero below 10 m/s and
    capped at 1 when effective_shear exceeds 20 m/s.

    Parameters
    ----------
    mucape : `pint.Quantity`
        Most-unstable CAPE
    effective_storm_helicity : `pint.Quantity`
        Effective-layer storm-relative helicity
    effective_shear : `pint.Quantity`
        Effective bulk shear

    Returns
    -------
    array-like
        supercell composite

    """
    effective_shear = np.clip(atleast_1d(effective_shear), None, 20 * units('m/s'))
    effective_shear[effective_shear < 10 * units('m/s')] = 0 * units('m/s')
    effective_shear = effective_shear / (20 * units('m/s'))

    return ((mucape / (1000 * units('J/kg')))
            * (effective_storm_helicity / (50 * units('m^2/s^2')))
            * effective_shear).to('dimensionless')
python
{ "resource": "" }
q23595
critical_angle
train
def critical_angle(pressure, u, v, heights, stormu, stormv):
    r"""Calculate the critical angle.

    The critical angle is the angle between the 10m storm-relative inflow vector
    and the 10m-500m shear vector. A critical angle near 90 degrees indicates
    that a storm in this environment on the indicated storm motion vector
    is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
    showed that significantly tornadic supercells tend to occur in environments
    with critical angles near 90 degrees.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Pressures from sounding.
    u : `pint.Quantity`
        U-component of sounding winds.
    v : `pint.Quantity`
        V-component of sounding winds.
    heights : `pint.Quantity`
        Heights from sounding.
    stormu : `pint.Quantity`
        U-component of storm motion.
    stormv : `pint.Quantity`
        V-component of storm motion.

    Returns
    -------
    `pint.Quantity`
        critical angle in degrees

    """
    # Convert everything to m/s
    u = u.to('m/s')
    v = v.to('m/s')
    stormu = stormu.to('m/s')
    stormv = stormv.to('m/s')

    sort_inds = np.argsort(pressure[::-1])
    pressure = pressure[sort_inds]
    heights = heights[sort_inds]
    u = u[sort_inds]
    v = v[sort_inds]

    # Calculate sfc-500m shear vector
    shr5 = bulk_shear(pressure, u, v, heights=heights, depth=500 * units('meter'))

    # Make everything relative to the sfc wind orientation
    umn = stormu - u[0]
    vmn = stormv - v[0]

    vshr = np.asarray([shr5[0].magnitude, shr5[1].magnitude])
    vsm = np.asarray([umn.magnitude, vmn.magnitude])
    angle_c = np.dot(vshr, vsm) / (np.linalg.norm(vshr) * np.linalg.norm(vsm))
    critical_angle = np.arccos(angle_c) * units('radian')
    return critical_angle.to('degrees')
python
{ "resource": "" }
q23596
broadcast_indices
train
def broadcast_indices(x, minv, ndim, axis):
    """Calculate index values to properly broadcast index array within data array.

    See usage in interp.
    """
    ret = []
    for dim in range(ndim):
        if dim == axis:
            ret.append(minv)
        else:
            broadcast_slice = [np.newaxis] * ndim
            broadcast_slice[dim] = slice(None)
            dim_inds = np.arange(x.shape[dim])
            ret.append(dim_inds[tuple(broadcast_slice)])
    return tuple(ret)
python
{ "resource": "" }
q23597
Registry.register
train
def register(self, name):
    """Register a callable with the registry under a particular name.

    Parameters
    ----------
    name : str
        The name under which to register a function

    Returns
    -------
    dec : callable
        A decorator that takes a function and will register it under the name.

    """
    def dec(func):
        self._registry[name] = func
        return func
    return dec
python
{ "resource": "" }
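register is meant to be used as a parameterized decorator. A minimal sketch (the handler name and body are hypothetical, and it is assumed that Registry() initializes the internal _registry mapping that register writes into):

registry = Registry()

@registry.register('wind_barbs')
def plot_wind_barbs(data):
    """Hypothetical plotting handler."""

# The callable is now stored under the given name in the internal mapping.
assert registry._registry['wind_barbs'] is plot_wind_barbs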
q23598
wind_speed
train
def wind_speed(u, v):
    r"""Compute the wind speed from u and v-components.

    Parameters
    ----------
    u : array_like
        Wind component in the X (East-West) direction
    v : array_like
        Wind component in the Y (North-South) direction

    Returns
    -------
    wind speed : array_like
        The speed of the wind

    See Also
    --------
    wind_components

    """
    speed = np.sqrt(u * u + v * v)
    return speed
python
{ "resource": "" }
q23599
wind_direction
train
def wind_direction(u, v):
    r"""Compute the wind direction from u and v-components.

    Parameters
    ----------
    u : array_like
        Wind component in the X (East-West) direction
    v : array_like
        Wind component in the Y (North-South) direction

    Returns
    -------
    direction : `pint.Quantity`
        The direction of the wind in interval [0, 360] degrees, specified as the direction
        from which it is blowing, with 360 being North.

    See Also
    --------
    wind_components

    Notes
    -----
    In the case of calm winds (where `u` and `v` are zero), this function returns a
    direction of 0.

    """
    wdir = 90. * units.deg - np.arctan2(-v, -u)
    origshape = wdir.shape
    wdir = atleast_1d(wdir)
    wdir[wdir <= 0] += 360. * units.deg
    # Need to be able to handle array-like u and v (with or without units)
    # np.any check required for legacy numpy which treats 0-d False boolean index as zero
    calm_mask = (np.asarray(u) == 0.) & (np.asarray(v) == 0.)
    if np.any(calm_mask):
        wdir[calm_mask] = 0. * units.deg
    return wdir.reshape(origshape).to('degrees')
python
{ "resource": "" }
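A short end-to-end sketch of the two wind functions above. Direction follows the meteorological convention of the direction the wind blows from, and the calm case maps to 0:

import numpy as np
from metpy.units import units

u = np.array([0., 10., 0.]) * units('m/s')
v = np.array([10., 0., 0.]) * units('m/s')
wind_speed(u, v)       # [10., 10., 0.] m/s
wind_direction(u, v)   # [180., 270., 0.] deg -- southerly, westerly, calm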