| signature (string, lengths 8 to 3.44k) | body (string, lengths 0 to 1.41M) | docstring (string, lengths 1 to 122k) | id (string, lengths 5 to 17) |
|---|---|---|---|
def parse_cf(self, varname=None, coordinates=None):
|
from .plots.mapping import CFProjection<EOL>if varname is None:<EOL><INDENT>return self._dataset.apply(lambda da: self.parse_cf(da.name,<EOL>coordinates=coordinates))<EOL><DEDENT>var = self._dataset[varname]<EOL>if '<STR_LIT>' in var.attrs:<EOL><INDENT>proj_name = var.attrs['<STR_LIT>']<EOL>try:<EOL><INDENT>proj_var = self._dataset.variables[proj_name]<EOL><DEDENT>except KeyError:<EOL><INDENT>log.warning(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(proj_name))<EOL><DEDENT>else:<EOL><INDENT>var.coords['<STR_LIT>'] = CFProjection(proj_var.attrs)<EOL><DEDENT><DEDENT>self._fixup_coords(var)<EOL>if not self.check_axis(var, '<STR_LIT>', '<STR_LIT>') and '<STR_LIT>' not in var.coords:<EOL><INDENT>has_lat = has_lon = False<EOL>for coord_var in var.coords.values():<EOL><INDENT>has_lat = has_lat or self.check_axis(coord_var, '<STR_LIT>')<EOL>has_lon = has_lon or self.check_axis(coord_var, '<STR_LIT>')<EOL><DEDENT>if has_lat and has_lon:<EOL><INDENT>var.coords['<STR_LIT>'] = CFProjection({'<STR_LIT>': '<STR_LIT>'})<EOL><DEDENT><DEDENT>if coordinates is None:<EOL><INDENT>coordinates = self._generate_coordinate_map(var.coords.values())<EOL><DEDENT>else:<EOL><INDENT>self._fixup_coordinate_map(coordinates, var)<EOL><DEDENT>self._assign_axes(coordinates, var)<EOL>return var<EOL>
|
Parse Climate and Forecast (CF) convention metadata.
|
f8456:c1:m1
|
@classmethod<EOL><INDENT>def check_axis(cls, var, *axes):<DEDENT>
|
for axis in axes:<EOL><INDENT>for criterion in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>if (var.attrs.get(criterion, '<STR_LIT>') in<EOL>cls.criteria[criterion].get(axis, set())):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>if (axis in cls.criteria['<STR_LIT>'] and (<EOL>(<EOL>cls.criteria['<STR_LIT>'][axis]['<STR_LIT>'] == '<STR_LIT>'<EOL>and (units.get_dimensionality(var.attrs.get('<STR_LIT>'))<EOL>== units.get_dimensionality(cls.criteria['<STR_LIT>'][axis]['<STR_LIT>']))<EOL>) or (<EOL>cls.criteria['<STR_LIT>'][axis]['<STR_LIT>'] == '<STR_LIT:name>'<EOL>and var.attrs.get('<STR_LIT>') in cls.criteria['<STR_LIT>'][axis]['<STR_LIT>']<EOL>))):<EOL><INDENT>return True<EOL><DEDENT>if re.match(cls.criteria['<STR_LIT>'][axis], var.name.lower()):<EOL><INDENT>return True<EOL><DEDENT><DEDENT>
|
Check if var satisfies the criteria for any of the given axes.
|
f8456:c1:m2
|
def _fixup_coords(self, var):
|
for coord_name, data_array in var.coords.items():<EOL><INDENT>if (self.check_axis(data_array, '<STR_LIT:x>', '<STR_LIT:y>')<EOL>and not self.check_axis(data_array, '<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>try:<EOL><INDENT>var.coords[coord_name].metpy.convert_units('<STR_LIT>')<EOL><DEDENT>except DimensionalityError: <EOL><INDENT>if '<STR_LIT>' in var.coords:<EOL><INDENT>new_data_array = data_array.copy()<EOL>height = var.coords['<STR_LIT>'].item()['<STR_LIT>']<EOL>scaled_vals = new_data_array.metpy.unit_array * (height * units.meters)<EOL>new_data_array.metpy.unit_array = scaled_vals.to('<STR_LIT>')<EOL>var.coords[coord_name] = new_data_array<EOL><DEDENT><DEDENT><DEDENT><DEDENT>
|
Clean up the units on the coordinate variables.
|
f8456:c1:m3
|
def _generate_coordinate_map(self, coords):
|
<EOL>coord_lists = {'<STR_LIT:T>': [], '<STR_LIT>': [], '<STR_LIT:Y>': [], '<STR_LIT:X>': []}<EOL>for coord_var in coords:<EOL><INDENT>axes_to_check = {<EOL>'<STR_LIT:T>': ('<STR_LIT:time>',),<EOL>'<STR_LIT>': ('<STR_LIT>',),<EOL>'<STR_LIT:Y>': ('<STR_LIT:y>', '<STR_LIT>'),<EOL>'<STR_LIT:X>': ('<STR_LIT:x>', '<STR_LIT>')<EOL>}<EOL>for axis_cf, axes_readable in axes_to_check.items():<EOL><INDENT>if self.check_axis(coord_var, *axes_readable):<EOL><INDENT>coord_lists[axis_cf].append(coord_var)<EOL><DEDENT><DEDENT><DEDENT>axis_conflicts = [axis for axis in coord_lists if len(coord_lists[axis]) > <NUM_LIT:1>]<EOL>for axis in axis_conflicts:<EOL><INDENT>self._resolve_axis_conflict(axis, coord_lists)<EOL><DEDENT>return {axis: (coord_lists[axis][<NUM_LIT:0>] if len(coord_lists[axis]) > <NUM_LIT:0> else None)<EOL>for axis in coord_lists}<EOL>
|
Generate a coordinate map via CF conventions and other methods.
|
f8456:c1:m4
|
@staticmethod<EOL><INDENT>def _fixup_coordinate_map(coord_map, var):<DEDENT>
|
for axis in coord_map:<EOL><INDENT>if not isinstance(coord_map[axis], xr.DataArray):<EOL><INDENT>coord_map[axis] = var[coord_map[axis]]<EOL><DEDENT><DEDENT>
|
Ensure we have coordinate variables in the map, not coordinate names.
|
f8456:c1:m5
|
@staticmethod<EOL><INDENT>def _assign_axes(coord_map, var):<DEDENT>
|
for axis in coord_map:<EOL><INDENT>if coord_map[axis] is not None:<EOL><INDENT>coord_map[axis].attrs['<STR_LIT>'] = axis<EOL><DEDENT><DEDENT>
|
Assign axis attribute to coordinates in var according to coord_map.
|
f8456:c1:m6
|
def _resolve_axis_conflict(self, axis, coord_lists):
|
if axis in ('<STR_LIT:Y>', '<STR_LIT:X>'):<EOL><INDENT>projection_coords = [coord_var for coord_var in coord_lists[axis] if<EOL>self.check_axis(coord_var, '<STR_LIT:x>', '<STR_LIT:y>')]<EOL>if len(projection_coords) == <NUM_LIT:1>:<EOL><INDENT>coord_lists[axis] = projection_coords<EOL>return<EOL><DEDENT><DEDENT>dimension_coords = [coord_var for coord_var in coord_lists[axis] if<EOL>coord_var.name in coord_var.dims]<EOL>if len(dimension_coords) == <NUM_LIT:1>:<EOL><INDENT>coord_lists[axis] = dimension_coords<EOL>return<EOL><DEDENT>warnings.warn('<STR_LIT>'<EOL>+ cf_to_readable_axes[axis]<EOL>+ '<STR_LIT>')<EOL>coord_lists[axis] = []<EOL>
|
Handle axis conflicts if they arise.
|
f8456:c1:m7
|
@property<EOL><INDENT>def loc(self):<DEDENT>
|
return self._LocIndexer(self._dataset)<EOL>
|
Make the LocIndexer available as a property.
|
f8456:c1:m8
|
def sel(self, indexers=None, method=None, tolerance=None, drop=False, **indexers_kwargs):
|
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, '<STR_LIT>')<EOL>indexers = _reassign_quantity_indexer(self._dataset, indexers)<EOL>return self._dataset.sel(indexers, method=method, tolerance=tolerance, drop=drop)<EOL>
|
Wrap Dataset.sel to handle units.
|
f8456:c1:m9
|
def generate_grid(horiz_dim, bbox):
|
x_steps, y_steps = get_xy_steps(bbox, horiz_dim)<EOL>grid_x = np.linspace(bbox['<STR_LIT>'], bbox['<STR_LIT>'], x_steps)<EOL>grid_y = np.linspace(bbox['<STR_LIT>'], bbox['<STR_LIT>'], y_steps)<EOL>gx, gy = np.meshgrid(grid_x, grid_y)<EOL>return gx, gy<EOL>
|
r"""Generate a meshgrid based on bounding box and x & y resolution.
Parameters
----------
horiz_dim: integer
Horizontal resolution
bbox: dictionary
Dictionary containing coordinates for corners of study area.
Returns
-------
grid_x: (X, Y) ndarray
X dimension meshgrid defined by given bounding box
grid_y: (X, Y) ndarray
Y dimension meshgrid defined by given bounding box
|
f8457:m0
|
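A minimal NumPy sketch of the grid construction described above (not the library code; the bbox keys 'west', 'south', 'east', 'north' follow the layout given in the interpolate_to_grid docstring later in this table):

```python
import numpy as np

def generate_grid_sketch(horiz_dim, bbox):
    """Sketch: build a regular grid over a bounding box with spacing horiz_dim."""
    x_steps = int(np.ceil((bbox['east'] - bbox['west']) / horiz_dim))
    y_steps = int(np.ceil((bbox['north'] - bbox['south']) / horiz_dim))
    grid_x = np.linspace(bbox['west'], bbox['east'], x_steps)
    grid_y = np.linspace(bbox['south'], bbox['north'], y_steps)
    return np.meshgrid(grid_x, grid_y)

# Illustrative 100 km x 50 km domain with 10 km spacing
gx, gy = generate_grid_sketch(10000., {'west': 0., 'south': 0., 'east': 100000., 'north': 50000.})
print(gx.shape, gy.shape)  # (5, 10) (5, 10)
```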
def generate_grid_coords(gx, gy):
|
return np.vstack([gx.ravel(), gy.ravel()]).T<EOL>
|
r"""Calculate x,y coordinates of each grid cell.
Parameters
----------
gx: numeric
x coordinates in meshgrid
gy: numeric
y coordinates in meshgrid
Returns
-------
(X, Y) ndarray
List of coordinates in meshgrid
|
f8457:m1
|
def get_xy_range(bbox):
|
x_range = bbox['<STR_LIT>'] - bbox['<STR_LIT>']<EOL>y_range = bbox['<STR_LIT>'] - bbox['<STR_LIT>']<EOL>return x_range, y_range<EOL>
|
r"""Return x and y ranges in meters based on bounding box.
bbox: dictionary
dictionary containing coordinates for corners of study area
Returns
-------
x_range: float
Range in meters in x dimension.
y_range: float
Range in meters in y dimension.
|
f8457:m2
|
def get_xy_steps(bbox, h_dim):
|
x_range, y_range = get_xy_range(bbox)<EOL>x_steps = np.ceil(x_range / h_dim)<EOL>y_steps = np.ceil(y_range / h_dim)<EOL>return int(x_steps), int(y_steps)<EOL>
|
r"""Return meshgrid spacing based on bounding box.
bbox: dictionary
Dictionary containing coordinates for corners of study area.
h_dim: integer
Horizontal resolution in meters.
Returns
-------
x_steps: int
Number of grids in x dimension.
y_steps: int
Number of grids in y dimension.
|
f8457:m3
|
def get_boundary_coords(x, y, spatial_pad=<NUM_LIT:0>):
|
west = np.min(x) - spatial_pad<EOL>east = np.max(x) + spatial_pad<EOL>north = np.max(y) + spatial_pad<EOL>south = np.min(y) - spatial_pad<EOL>return {'<STR_LIT>': west, '<STR_LIT>': south, '<STR_LIT>': east, '<STR_LIT>': north}<EOL>
|
r"""Return bounding box based on given x and y coordinates assuming northern hemisphere.
x: numeric
x coordinates.
y: numeric
y coordinates.
spatial_pad: numeric
Number of meters to add to the x and y dimensions to reduce
edge effects.
Returns
-------
bbox: dictionary
dictionary containing coordinates for corners of study area
|
f8457:m4
|
@exporter.export<EOL>def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y):
|
<EOL>points_obs = list(zip(xp, yp))<EOL>points_grid = generate_grid_coords(grid_x, grid_y)<EOL>img = natural_neighbor_to_points(points_obs, variable, points_grid)<EOL>return img.reshape(grid_x.shape)<EOL>
|
r"""Generate a natural neighbor interpolation of the given points to a regular grid.
This assigns values to the given grid using the Liang and Hale [Liang2010]_ approach.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations
yp: (N, ) ndarray
y-coordinates of observations
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
IE, variable[i] is a unique observation at (xp[i], yp[i])
grid_x: (M, 2) ndarray
Meshgrid associated with x dimension
grid_y: (M, 2) ndarray
Meshgrid associated with y dimension
Returns
-------
img: (M, N) ndarray
Interpolated values on a 2-dimensional grid
See Also
--------
natural_neighbor_to_points
|
f8457:m5
|
@exporter.export<EOL>@deprecated('<STR_LIT>', addendum='<STR_LIT>',<EOL>pending=False)<EOL>def natural_neighbor(xp, yp, variable, grid_x, grid_y):
|
return natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y)<EOL>
|
Wrap natural_neighbor_to_grid for deprecated natural_neighbor function.
|
f8457:m6
|
@exporter.export<EOL>def inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None,<EOL>min_neighbors=<NUM_LIT:3>, kind='<STR_LIT>'):
|
<EOL>points_obs = list(zip(xp, yp))<EOL>points_grid = generate_grid_coords(grid_x, grid_y)<EOL>img = inverse_distance_to_points(points_obs, variable, points_grid, r, gamma=gamma,<EOL>kappa=kappa, min_neighbors=min_neighbors, kind=kind)<EOL>return img.reshape(grid_x.shape)<EOL>
|
r"""Generate an inverse distance interpolation of the given points to a regular grid.
Values are assigned to the given grid using inverse distance weighting based on either
[Cressman1959]_ or [Barnes1964]_. The Barnes implementation used here is based on [Koch1983]_.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations.
yp: (N, ) ndarray
y-coordinates of observations.
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
IE, variable[i] is a unique observation at (xp[i], yp[i]).
grid_x: (M, 2) ndarray
Meshgrid associated with x dimension.
grid_y: (M, 2) ndarray
Meshgrid associated with y dimension.
r: float
Radius from grid center, within which observations
are considered and weighted.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default None.
kappa: float
Response parameter for barnes interpolation. Default None.
min_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation
for a point. Default is 3.
kind: str
Specify what inverse distance weighting interpolation to use.
Options: 'cressman' or 'barnes'. Default 'cressman'
Returns
-------
img: (M, N) ndarray
Interpolated values on a 2-dimensional grid
See Also
--------
inverse_distance_to_points
|
f8457:m7
|
@exporter.export<EOL>@deprecated('<STR_LIT>', addendum='<STR_LIT>',<EOL>pending=False)<EOL>def inverse_distance(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None,<EOL>min_neighbors=<NUM_LIT:3>, kind='<STR_LIT>'):
|
return inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=gamma,<EOL>kappa=kappa, min_neighbors=min_neighbors, kind=kind)<EOL>
|
Wrap inverse_distance_to_grid for deprecated inverse_distance function.
|
f8457:m8
|
@exporter.export<EOL>def interpolate_to_grid(x, y, z, interp_type='<STR_LIT>', hres=<NUM_LIT>,<EOL>minimum_neighbors=<NUM_LIT:3>, gamma=<NUM_LIT>, kappa_star=<NUM_LIT>,<EOL>search_radius=None, rbf_func='<STR_LIT>', rbf_smooth=<NUM_LIT:0>,<EOL>boundary_coords=None):
|
<EOL>if boundary_coords is None:<EOL><INDENT>boundary_coords = get_boundary_coords(x, y)<EOL><DEDENT>grid_x, grid_y = generate_grid(hres, boundary_coords)<EOL>points_obs = np.array(list(zip(x, y)))<EOL>points_grid = generate_grid_coords(grid_x, grid_y)<EOL>img = interpolate_to_points(points_obs, z, points_grid, interp_type=interp_type,<EOL>minimum_neighbors=minimum_neighbors, gamma=gamma,<EOL>kappa_star=kappa_star, search_radius=search_radius,<EOL>rbf_func=rbf_func, rbf_smooth=rbf_smooth)<EOL>return grid_x, grid_y, img.reshape(grid_x.shape)<EOL>
|
r"""Interpolate given (x,y), observation (z) pairs to a grid based on given parameters.
Parameters
----------
x: array_like
x coordinate
y: array_like
y coordinate
z: array_like
observation value
interp_type: str
What type of interpolation to use. Available options include:
1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`.
2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`.
Default "linear".
hres: float
The horizontal resolution of the generated grid, given in the same units as the
x and y parameters. Default 50000.
minimum_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation for a
point. Default is 3.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default 0.25.
kappa_star: float
Response parameter for barnes interpolation, specified nondimensionally
in terms of the Nyquist. Default 5.052
search_radius: float
A search radius to use for the barnes and cressman interpolation schemes.
If search_radius is not specified, it will default to the average spacing of
observations.
rbf_func: str
Specifies which function to use for Rbf interpolation.
Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic',
'quintic', and 'thin_plate'. Default 'linear'. See `scipy.interpolate.Rbf` for more
information.
rbf_smooth: float
Smoothing value applied to rbf interpolation. Higher values result in more smoothing.
boundary_coords: dictionary
Optional dictionary containing coordinates of the study area boundary. Dictionary
should be in format: {'west': west, 'south': south, 'east': east, 'north': north}
Returns
-------
grid_x: (N, 2) ndarray
Meshgrid for the resulting interpolation in the x dimension
grid_y: (N, 2) ndarray
Meshgrid for the resulting interpolation in the y dimension
img: (M, N) ndarray
2-dimensional array representing the interpolated values for each grid.
Notes
-----
This function acts as a wrapper for `interpolate_to_points` to allow it to generate a regular
grid.
See Also
--------
interpolate_to_points
|
f8457:m9
|
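A hedged usage sketch for interpolate_to_grid; the `metpy.interpolate` import path is inferred from the interp_type description above and the observations are synthetic, so treat the call as illustrative rather than canonical:

```python
import numpy as np
from metpy.interpolate import interpolate_to_grid  # import path assumed from the docstring above

# Synthetic scattered observations on a 500 km x 500 km domain
rng = np.random.default_rng(0)
x = rng.uniform(0, 500000., 200)          # x coordinates (m)
y = rng.uniform(0, 500000., 200)          # y coordinates (m)
z = np.sin(x / 1e5) + np.cos(y / 1e5)     # observation values

# Interpolate to a regular 50 km grid with Cressman weighting
grid_x, grid_y, img = interpolate_to_grid(x, y, z, interp_type='cressman', hres=50000.)
print(grid_x.shape, img.shape)
```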
@exporter.export<EOL>def interpolate_to_isosurface(level_var, interp_var, level, **kwargs):
|
<EOL>bottom_up_search = kwargs.pop('<STR_LIT>', True)<EOL>above, below, good = metpy.calc.find_bounding_indices(level_var, [level], axis=<NUM_LIT:0>,<EOL>from_below=bottom_up_search)<EOL>interp_level = (((level - level_var[above]) / (level_var[below] - level_var[above]))<EOL>* (interp_var[below] - interp_var[above])) + interp_var[above]<EOL>interp_level[~good] = np.nan<EOL>minvar = (np.min(level_var, axis=<NUM_LIT:0>) >= level)<EOL>maxvar = (np.max(level_var, axis=<NUM_LIT:0>) <= level)<EOL>interp_level[<NUM_LIT:0>][minvar] = interp_var[-<NUM_LIT:1>][minvar]<EOL>interp_level[<NUM_LIT:0>][maxvar] = interp_var[<NUM_LIT:0>][maxvar]<EOL>return interp_level.squeeze()<EOL>
|
r"""Linear interpolation of a variable to a given vertical level from given values.
This function assumes that the highest vertical level (lowest pressure) is the zeroth index.
A classic use of this function would be to compute the potential temperature on the
dynamic tropopause (2 PVU surface).
Parameters
----------
level_var: array_like (P, M, N)
Level values in 3D grid on common vertical coordinate (e.g., PV values on
isobaric levels). Assumes height dimension is highest to lowest in atmosphere.
interp_var: array_like (P, M, N)
Variable on 3D grid with same vertical coordinate as level_var to interpolate to
given level (e.g., potential temperature on isobaric levels)
level: int or float
Desired interpolated level (e.g., 2 PVU surface)
Other Parameters
----------------
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Returns
-------
interp_level: (M, N) ndarray
The interpolated variable (e.g., potential temperature) on the desired level (e.g.,
2 PVU surface)
Notes
-----
This function implements a linear interpolation to estimate values on a given surface.
The prototypical example is interpolation of potential temperature to the dynamic
tropopause (e.g., 2 PVU surface)
|
f8457:m10
|
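The interpolation formula in the docstring can be illustrated on a single column with plain NumPy; this sketch assumes the level variable increases monotonically along the column and is not the library routine:

```python
import numpy as np

def interp_to_level_1d(level_var, interp_var, level):
    """Linearly interpolate interp_var to where level_var crosses `level` (single column)."""
    idx = np.searchsorted(level_var, level)   # first index at or above the target level
    if idx == 0 or idx == len(level_var):
        return np.nan                          # target level outside the column
    below, above = idx - 1, idx
    frac = (level - level_var[below]) / (level_var[above] - level_var[below])
    return interp_var[below] + frac * (interp_var[above] - interp_var[below])

pv = np.array([0.5, 1.0, 1.5, 2.5, 4.0])          # e.g. PV on isobaric levels (illustrative)
theta = np.array([300., 310., 320., 340., 380.])  # potential temperature on the same levels
print(interp_to_level_1d(pv, theta, 2.0))          # 330.0 on the 2 PVU surface
```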
@exporter.export<EOL>@deprecated('<STR_LIT>', addendum='<STR_LIT>',<EOL>pending=False)<EOL>def interpolate(x, y, z, interp_type='<STR_LIT>', hres=<NUM_LIT>,<EOL>minimum_neighbors=<NUM_LIT:3>, gamma=<NUM_LIT>, kappa_star=<NUM_LIT>,<EOL>search_radius=None, rbf_func='<STR_LIT>', rbf_smooth=<NUM_LIT:0>,<EOL>boundary_coords=None):
|
return interpolate_to_grid(x, y, z, interp_type=interp_type, hres=hres,<EOL>minimum_neighbors=minimum_neighbors, gamma=gamma,<EOL>kappa_star=kappa_star, search_radius=search_radius,<EOL>rbf_func=rbf_func, rbf_smooth=rbf_smooth,<EOL>boundary_coords=boundary_coords)<EOL>
|
Wrap interpolate_to_grid for deprecated interpolate function.
|
f8457:m11
|
def get_points_within_r(center_points, target_points, r):
|
tree = cKDTree(target_points)<EOL>indices = tree.query_ball_point(center_points, r)<EOL>return tree.data[indices].T<EOL>
|
r"""Get all target_points within a specified radius of a center point.
All data must be in the same coordinate system, or you will get undetermined results.
Parameters
----------
center_points: (X, Y) ndarray
location from which to grab surrounding points within r
target_points: (X, Y) ndarray
points from which to return if they are within r of center_points
r: integer
search radius around center_points to grab target_points
Returns
-------
matches: (X, Y) ndarray
A list of points within r distance of, and in the same
order as, center_points
|
f8464:m0
|
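This helper and the count variant in the next row both lean on scipy.spatial.cKDTree.query_ball_point; a small sketch of that pattern with made-up points:

```python
import numpy as np
from scipy.spatial import cKDTree

targets = np.array([[0., 0.], [1., 0.], [5., 5.], [0.5, 0.5]])
tree = cKDTree(targets)

# Indices of all target points within r = 1.5 of the center point (0, 0)
idx = tree.query_ball_point([0., 0.], r=1.5)
print(tree.data[idx])   # the matching points
print(len(idx))         # their count, as in get_point_count_within_r
```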
def get_point_count_within_r(center_points, target_points, r):
|
tree = cKDTree(target_points)<EOL>indices = tree.query_ball_point(center_points, r)<EOL>return np.array([len(x) for x in indices])<EOL>
|
r"""Get count of target points within a specified radius from center points.
All data must be in the same coordinate system, or you will get undetermined results.
Parameters
----------
center_points: (X, Y) ndarray
locations from which to grab surrounding points within r
target_points: (X, Y) ndarray
points from which to return if they are within r of center_points
r: integer
search radius around center_points to grab target_points
Returns
-------
matches: (N, ) ndarray
A list of point counts within r distance of, and in the same
order as, center_points
|
f8464:m1
|
def triangle_area(pt1, pt2, pt3):
|
a = <NUM_LIT:0.0><EOL>a += pt1[<NUM_LIT:0>] * pt2[<NUM_LIT:1>] - pt2[<NUM_LIT:0>] * pt1[<NUM_LIT:1>]<EOL>a += pt2[<NUM_LIT:0>] * pt3[<NUM_LIT:1>] - pt3[<NUM_LIT:0>] * pt2[<NUM_LIT:1>]<EOL>a += pt3[<NUM_LIT:0>] * pt1[<NUM_LIT:1>] - pt1[<NUM_LIT:0>] * pt3[<NUM_LIT:1>]<EOL>return abs(a) / <NUM_LIT:2><EOL>
|
r"""Return the area of a triangle.
Parameters
----------
pt1: (X,Y) ndarray
Starting vertex of a triangle
pt2: (X,Y) ndarray
Second vertex of a triangle
pt3: (X,Y) ndarray
Ending vertex of a triangle
Returns
-------
area: float
Area of the given triangle.
|
f8464:m2
|
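The triangle area above is the 2-D cross-product (shoelace) form; a quick sanity check as a standalone sketch:

```python
def tri_area(pt1, pt2, pt3):
    # |x1(y2 - y3) + x2(y3 - y1) + x3(y1 - y2)| / 2
    a = (pt1[0] * pt2[1] - pt2[0] * pt1[1]
         + pt2[0] * pt3[1] - pt3[0] * pt2[1]
         + pt3[0] * pt1[1] - pt1[0] * pt3[1])
    return abs(a) / 2.

# A 3-4-5 right triangle has area 6
print(tri_area((0, 0), (3, 0), (0, 4)))  # 6.0
```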
def dist_2(x0, y0, x1, y1):
|
d0 = x1 - x0<EOL>d1 = y1 - y0<EOL>return d0 * d0 + d1 * d1<EOL>
|
r"""Return the squared distance between two points.
This is faster than calculating distance but should
only be used with comparable ratios.
Parameters
----------
x0: float
Starting x coordinate
y0: float
Starting y coordinate
x1: float
Ending x coordinate
y1: float
Ending y coordinate
Returns
-------
d2: float
squared distance
See Also
--------
distance
|
f8464:m3
|
def distance(p0, p1):
|
return math.sqrt(dist_2(p0[<NUM_LIT:0>], p0[<NUM_LIT:1>], p1[<NUM_LIT:0>], p1[<NUM_LIT:1>]))<EOL>
|
r"""Return the distance between two points.
Parameters
----------
p0: (X,Y) ndarray
Starting coordinate
p1: (X,Y) ndarray
Ending coordinate
Returns
-------
d: float
distance
See Also
--------
dist_2
|
f8464:m4
|
def circumcircle_radius_2(pt0, pt1, pt2):
|
a = distance(pt0, pt1)<EOL>b = distance(pt1, pt2)<EOL>c = distance(pt2, pt0)<EOL>t_area = triangle_area(pt0, pt1, pt2)<EOL>prod2 = a * b * c<EOL>if t_area > <NUM_LIT:0>:<EOL><INDENT>radius = prod2 * prod2 / (<NUM_LIT:16> * t_area * t_area)<EOL><DEDENT>else:<EOL><INDENT>radius = np.nan<EOL><DEDENT>return radius<EOL>
|
r"""Calculate and return the squared radius of a given triangle's circumcircle.
This is faster than calculating radius but should only be used with comparable ratios.
Parameters
----------
pt0: (x, y)
Starting vertex of triangle
pt1: (x, y)
Second vertex of triangle
pt2: (x, y)
Final vertex of a triangle
Returns
-------
r: float
circumcircle radius
See Also
--------
circumcenter
|
f8464:m5
|
def circumcircle_radius(pt0, pt1, pt2):
|
a = distance(pt0, pt1)<EOL>b = distance(pt1, pt2)<EOL>c = distance(pt2, pt0)<EOL>t_area = triangle_area(pt0, pt1, pt2)<EOL>if t_area > <NUM_LIT:0>:<EOL><INDENT>radius = (a * b * c) / (<NUM_LIT:4> * t_area)<EOL><DEDENT>else:<EOL><INDENT>radius = np.nan<EOL><DEDENT>return radius<EOL>
|
r"""Calculate and return the radius of a given triangle's circumcircle.
Parameters
----------
pt0: (x, y)
Starting vertex of triangle
pt1: (x, y)
Second vertex of triangle
pt2: (x, y)
Final vertex of a triangle
Returns
-------
r: float
circumcircle radius
See Also
--------
circumcenter
|
f8464:m6
|
def circumcenter(pt0, pt1, pt2):
|
a_x = pt0[<NUM_LIT:0>]<EOL>a_y = pt0[<NUM_LIT:1>]<EOL>b_x = pt1[<NUM_LIT:0>]<EOL>b_y = pt1[<NUM_LIT:1>]<EOL>c_x = pt2[<NUM_LIT:0>]<EOL>c_y = pt2[<NUM_LIT:1>]<EOL>bc_y_diff = b_y - c_y<EOL>ca_y_diff = c_y - a_y<EOL>ab_y_diff = a_y - b_y<EOL>cb_x_diff = c_x - b_x<EOL>ac_x_diff = a_x - c_x<EOL>ba_x_diff = b_x - a_x<EOL>d_div = (a_x * bc_y_diff + b_x * ca_y_diff + c_x * ab_y_diff)<EOL>if d_div == <NUM_LIT:0>:<EOL><INDENT>raise ZeroDivisionError<EOL><DEDENT>d_inv = <NUM_LIT:0.5> / d_div<EOL>a_mag = a_x * a_x + a_y * a_y<EOL>b_mag = b_x * b_x + b_y * b_y<EOL>c_mag = c_x * c_x + c_y * c_y<EOL>cx = (a_mag * bc_y_diff + b_mag * ca_y_diff + c_mag * ab_y_diff) * d_inv<EOL>cy = (a_mag * cb_x_diff + b_mag * ac_x_diff + c_mag * ba_x_diff) * d_inv<EOL>return cx, cy<EOL>
|
r"""Calculate and return the circumcenter of a circumcircle generated by a given triangle.
All three points must be unique or a division by zero error will be raised.
Parameters
----------
pt0: (x, y)
Starting vertex of triangle
pt1: (x, y)
Second vertex of triangle
pt2: (x, y)
Final vertex of a triangle
Returns
-------
cc: (x, y)
circumcenter coordinates
See Also
--------
circumcircle_radius
|
f8464:m7
|
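A standalone sketch of the circumcenter computation described above; the returned point should be equidistant from all three vertices (right-triangle check):

```python
import math

def circumcenter_sketch(pt0, pt1, pt2):
    ax, ay = pt0
    bx, by = pt1
    cx, cy = pt2
    d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    if d == 0:
        raise ZeroDivisionError('degenerate (collinear or repeated) points')
    ux = ((ax**2 + ay**2) * (by - cy) + (bx**2 + by**2) * (cy - ay)
          + (cx**2 + cy**2) * (ay - by)) / d
    uy = ((ax**2 + ay**2) * (cx - bx) + (bx**2 + by**2) * (ax - cx)
          + (cx**2 + cy**2) * (bx - ax)) / d
    return ux, uy

cc = circumcenter_sketch((0, 0), (4, 0), (0, 4))
print(cc)  # (2.0, 2.0), the midpoint of the hypotenuse
print(all(math.isclose(math.hypot(cc[0] - x, cc[1] - y), math.hypot(cc[0], cc[1]))
          for x, y in [(4, 0), (0, 4)]))  # True: equidistant from all vertices
```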
def find_natural_neighbors(tri, grid_points):
|
tree = cKDTree(grid_points)<EOL>in_triangulation = tri.find_simplex(tree.data) >= <NUM_LIT:0><EOL>triangle_info = {}<EOL>members = {key: [] for key in range(len(tree.data))}<EOL>for i, simplices in enumerate(tri.simplices):<EOL><INDENT>ps = tri.points[simplices]<EOL>cc = circumcenter(*ps)<EOL>r = circumcircle_radius(*ps)<EOL>triangle_info[i] = {'<STR_LIT>': cc, '<STR_LIT:r>': r}<EOL>qualifiers = tree.query_ball_point(cc, r)<EOL>for qualifier in qualifiers:<EOL><INDENT>if in_triangulation[qualifier]:<EOL><INDENT>members[qualifier].append(i)<EOL><DEDENT><DEDENT><DEDENT>return members, triangle_info<EOL>
|
r"""Return the natural neighbor triangles for each given grid cell.
These are determined by the properties of the given Delaunay triangulation.
A triangle is a natural neighbor of a grid cell if the grid cell center lies
within the circumradius of that triangle's circumcenter.
Parameters
----------
tri: Object
A Delaunay Triangulation.
grid_points: (X, Y) ndarray
Locations of grids.
Returns
-------
members: dictionary
List of simplex codes for natural neighbor
triangles in 'tri' for each grid cell.
triangle_info: dictionary
Circumcenter and radius information for each
triangle in 'tri'.
|
f8464:m8
|
def find_nn_triangles_point(tri, cur_tri, point):
|
nn = []<EOL>candidates = set(tri.neighbors[cur_tri])<EOL>candidates |= set(tri.neighbors[tri.neighbors[cur_tri]].flat)<EOL>candidates.discard(-<NUM_LIT:1>)<EOL>for neighbor in candidates:<EOL><INDENT>triangle = tri.points[tri.simplices[neighbor]]<EOL>cur_x, cur_y = circumcenter(triangle[<NUM_LIT:0>], triangle[<NUM_LIT:1>], triangle[<NUM_LIT:2>])<EOL>r = circumcircle_radius_2(triangle[<NUM_LIT:0>], triangle[<NUM_LIT:1>], triangle[<NUM_LIT:2>])<EOL>if dist_2(point[<NUM_LIT:0>], point[<NUM_LIT:1>], cur_x, cur_y) < r:<EOL><INDENT>nn.append(neighbor)<EOL><DEDENT><DEDENT>return nn<EOL>
|
r"""Return the natural neighbors of a triangle containing a point.
This is based on the provided Delaunay Triangulation.
Parameters
----------
tri: Object
A Delaunay Triangulation
cur_tri: int
Simplex code for Delaunay Triangulation lookup of
a given triangle that contains 'point'.
point: (x, y)
Coordinates used to calculate distances to
simplexes in 'tri'.
Returns
-------
nn: (N, ) array
List of simplex codes for natural neighbor
triangles in 'tri'.
|
f8464:m9
|
def find_local_boundary(tri, triangles):
|
edges = []<EOL>for triangle in triangles:<EOL><INDENT>for i in range(<NUM_LIT:3>):<EOL><INDENT>pt1 = tri.simplices[triangle][i]<EOL>pt2 = tri.simplices[triangle][(i + <NUM_LIT:1>) % <NUM_LIT:3>]<EOL>if (pt1, pt2) in edges:<EOL><INDENT>edges.remove((pt1, pt2))<EOL><DEDENT>elif (pt2, pt1) in edges:<EOL><INDENT>edges.remove((pt2, pt1))<EOL><DEDENT>else:<EOL><INDENT>edges.append((pt1, pt2))<EOL><DEDENT><DEDENT><DEDENT>return edges<EOL>
|
r"""Find and return the outside edges of a collection of natural neighbor triangles.
There is no guarantee that this boundary is convex, so ConvexHull is not
sufficient in some situations.
Parameters
----------
tri: Object
A Delaunay Triangulation
triangles: (N, ) array
List of natural neighbor triangles.
Returns
-------
edges: (2, N) ndarray
List of vertex codes that form outer edges of
a group of natural neighbor triangles.
|
f8464:m10
|
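The boundary search cancels interior edges shared by two triangles and keeps the rest; a standalone sketch of that edge-cancellation idea using plain vertex-index triples instead of a SciPy Delaunay object:

```python
def local_boundary(simplices):
    """Return the outer edges of a set of triangles given as vertex-index triples."""
    edges = []
    for tri in simplices:
        for i in range(3):
            p1, p2 = tri[i], tri[(i + 1) % 3]
            if (p1, p2) in edges:
                edges.remove((p1, p2))       # edge shared with an earlier triangle: interior
            elif (p2, p1) in edges:
                edges.remove((p2, p1))
            else:
                edges.append((p1, p2))
    return edges

# Two triangles sharing edge (1, 2); only the outer boundary of the quad remains
print(local_boundary([(0, 1, 2), (1, 3, 2)]))
# [(0, 1), (2, 0), (1, 3), (3, 2)]
```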
def area(poly):
|
a = <NUM_LIT:0.0><EOL>n = len(poly)<EOL>for i in range(n):<EOL><INDENT>a += poly[i][<NUM_LIT:0>] * poly[(i + <NUM_LIT:1>) % n][<NUM_LIT:1>] - poly[(i + <NUM_LIT:1>) % n][<NUM_LIT:0>] * poly[i][<NUM_LIT:1>]<EOL><DEDENT>return abs(a) / <NUM_LIT><EOL>
|
r"""Find the area of a given polygon using the shoelace algorithm.
Parameters
----------
poly: (2, N) ndarray
2-dimensional coordinates representing an ordered
traversal around the edge of a polygon.
Returns
-------
area: float
|
f8464:m11
|
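The polygon area above is the shoelace algorithm; a minimal sketch checked against a unit square:

```python
def shoelace_area(poly):
    a = 0.0
    n = len(poly)
    for i in range(n):
        x0, y0 = poly[i]
        x1, y1 = poly[(i + 1) % n]
        a += x0 * y1 - x1 * y0   # signed cross product of consecutive vertices
    return abs(a) / 2.0

print(shoelace_area([(0, 0), (1, 0), (1, 1), (0, 1)]))  # 1.0
```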
def order_edges(edges):
|
edge = edges[<NUM_LIT:0>]<EOL>edges = edges[<NUM_LIT:1>:]<EOL>ordered_edges = [edge]<EOL>num_max = len(edges)<EOL>while len(edges) > <NUM_LIT:0> and num_max > <NUM_LIT:0>:<EOL><INDENT>match = edge[<NUM_LIT:1>]<EOL>for search_edge in edges:<EOL><INDENT>vertex = search_edge[<NUM_LIT:0>]<EOL>if match == vertex:<EOL><INDENT>edge = search_edge<EOL>edges.remove(edge)<EOL>ordered_edges.append(search_edge)<EOL>break<EOL><DEDENT><DEDENT>num_max -= <NUM_LIT:1><EOL><DEDENT>return ordered_edges<EOL>
|
r"""Return an ordered traversal of the edges of a two-dimensional polygon.
Parameters
----------
edges: (2, N) ndarray
List of unordered line segments, where each
line segment is represented by two unique
vertex codes.
Returns
-------
ordered_edges: (2, N) ndarray
|
f8464:m12
|
@exporter.export<EOL>def interpolate_to_slice(data, points, interp_type='<STR_LIT>'):
|
try:<EOL><INDENT>x, y = data.metpy.coordinates('<STR_LIT:x>', '<STR_LIT:y>')<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>data_sliced = data.interp({<EOL>x.name: xr.DataArray(points[:, <NUM_LIT:0>], dims='<STR_LIT:index>', attrs=x.attrs),<EOL>y.name: xr.DataArray(points[:, <NUM_LIT:1>], dims='<STR_LIT:index>', attrs=y.attrs)<EOL>}, method=interp_type)<EOL>data_sliced.coords['<STR_LIT:index>'] = range(len(points))<EOL>return data_sliced<EOL>
|
r"""Obtain an interpolated slice through data using xarray.
Utilizing the interpolation functionality in `xarray`, this function takes a slice of the
given data (currently only regular grids are supported), which is given as an
`xarray.DataArray` so that we can utilize its coordinate metadata.
Parameters
----------
data: `xarray.DataArray` or `xarray.Dataset`
Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
DataArray in the Dataset) must have been parsed by MetPy and include both an x and
y coordinate dimension.
points: (N, 2) array_like
A list of x, y points in the data projection at which to interpolate the data
interp_type: str, optional
The interpolation method, either 'linear' or 'nearest' (see
`xarray.DataArray.interp()` for details). Defaults to 'linear'.
Returns
-------
`xarray.DataArray` or `xarray.Dataset`
The interpolated slice of data, with new index dimension of size N.
See Also
--------
cross_section
|
f8465:m0
|
@exporter.export<EOL>def geodesic(crs, start, end, steps):
|
import cartopy.crs as ccrs<EOL>from pyproj import Geod<EOL>g = Geod(crs.proj4_init)<EOL>geodesic = np.concatenate([<EOL>np.array(start[::-<NUM_LIT:1>])[None],<EOL>np.array(g.npts(start[<NUM_LIT:1>], start[<NUM_LIT:0>], end[<NUM_LIT:1>], end[<NUM_LIT:0>], steps - <NUM_LIT:2>)),<EOL>np.array(end[::-<NUM_LIT:1>])[None]<EOL>]).transpose()<EOL>points = crs.transform_points(ccrs.Geodetic(), *geodesic)[:, :<NUM_LIT:2>]<EOL>return points<EOL>
|
r"""Construct a geodesic path between two points.
This function acts as a wrapper for the geodesic construction available in `pyproj`.
Parameters
----------
crs: `cartopy.crs`
Cartopy Coordinate Reference System to use for the output
start: (2, ) array_like
A latitude-longitude pair designating the start point of the geodesic (units are
degrees north and degrees east).
end: (2, ) array_like
A latitude-longitude pair designating the end point of the geodesic (units are degrees
north and degrees east).
steps: int, optional
The number of points along the geodesic between the start and the end point
(including the end points).
Returns
-------
`numpy.ndarray`
The list of x, y points in the given CRS of length `steps` along the geodesic.
See Also
--------
cross_section
|
f8465:m1
|
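The geodesic construction wraps pyproj.Geod.npts; a hedged, cartopy-free sketch of that call with example endpoints (the WGS84 ellipsoid and the coordinates are illustrative assumptions):

```python
from pyproj import Geod

g = Geod(ellps='WGS84')        # assumed ellipsoid for the sketch
start = (40.0, -105.0)         # (lat, lon)
end = (35.0, -80.0)            # (lat, lon)

# npts takes lon/lat order and returns only the interior points,
# hence the `steps - 2` in the wrapped function above.
steps = 5
interior = g.npts(start[1], start[0], end[1], end[0], steps - 2)
path = [start[::-1]] + interior + [end[::-1]]   # lon/lat pairs, length == steps
print(len(path), path[0], path[-1])
```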
@exporter.export<EOL>def cross_section(data, start, end, steps=<NUM_LIT:100>, interp_type='<STR_LIT>'):
|
if isinstance(data, xr.Dataset):<EOL><INDENT>return data.apply(cross_section, True, (start, end), steps=steps,<EOL>interp_type=interp_type)<EOL><DEDENT>elif data.ndim == <NUM_LIT:0>:<EOL><INDENT>return data<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>crs_data = data.metpy.cartopy_crs<EOL>x = data.metpy.x<EOL><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>points_cross = geodesic(crs_data, start, end, steps)<EOL>if CFConventionHandler.check_axis(x, '<STR_LIT>') and (x > <NUM_LIT>).any():<EOL><INDENT>points_cross[points_cross[:, <NUM_LIT:0>] < <NUM_LIT:0>, <NUM_LIT:0>] += <NUM_LIT><EOL><DEDENT>return interpolate_to_slice(data, points_cross, interp_type=interp_type)<EOL><DEDENT>
|
r"""Obtain an interpolated cross-sectional slice through gridded data.
Utilizing the interpolation functionality in `xarray`, this function takes a vertical
cross-sectional slice along a geodesic through the given data on a regular grid, which is
given as an `xarray.DataArray` so that we can utilize its coordinate and projection
metadata.
Parameters
----------
data: `xarray.DataArray` or `xarray.Dataset`
Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
DataArray in the Dataset) must have been parsed by MetPy and include both an x and
y coordinate dimension and the added `crs` coordinate.
start: (2, ) array_like
A latitude-longitude pair designating the start point of the cross section (units are
degrees north and degrees east).
end: (2, ) array_like
A latitude-longitude pair designating the end point of the cross section (units are
degrees north and degrees east).
steps: int, optional
The number of points along the geodesic between the start and the end point
(including the end points) to use in the cross section. Defaults to 100.
interp_type: str, optional
The interpolation method, either 'linear' or 'nearest' (see
`xarray.DataArray.interp()` for details). Defaults to 'linear'.
Returns
-------
`xarray.DataArray` or `xarray.Dataset`
The interpolated cross section, with new index dimension along the cross-section.
See Also
--------
interpolate_to_slice, geodesic
|
f8465:m2
|
def cressman_point(sq_dist, values, radius):
|
weights = tools.cressman_weights(sq_dist, radius)<EOL>total_weights = np.sum(weights)<EOL>return sum(v * (w / total_weights) for (w, v) in zip(weights, values))<EOL>
|
r"""Generate a Cressman interpolation value for a point.
The calculated value is based on the given distances and search radius.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distance between observations and grid point
values: (N, ) ndarray
Observation values in same order as sq_dist
radius: float
Maximum distance to search for observations to use for
interpolation.
Returns
-------
value: float
Interpolation value for grid point.
|
f8466:m0
|
def barnes_point(sq_dist, values, kappa, gamma=None):
|
if gamma is None:<EOL><INDENT>gamma = <NUM_LIT:1><EOL><DEDENT>weights = tools.barnes_weights(sq_dist, kappa, gamma)<EOL>total_weights = np.sum(weights)<EOL>return sum(v * (w / total_weights) for (w, v) in zip(weights, values))<EOL>
|
r"""Generate a single pass barnes interpolation value for a point.
The calculated value is based on the given distances, kappa and gamma values.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distance between observations and grid point
values: (N, ) ndarray
Observation values in same order as sq_dist
kappa: float
Response parameter for barnes interpolation.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default 1.
Returns
-------
value: float
Interpolation value for grid point.
|
f8466:m1
|
def natural_neighbor_point(xp, yp, variable, grid_loc, tri, neighbors, triangle_info):
|
edges = geometry.find_local_boundary(tri, neighbors)<EOL>edge_vertices = [segment[<NUM_LIT:0>] for segment in geometry.order_edges(edges)]<EOL>num_vertices = len(edge_vertices)<EOL>p1 = edge_vertices[<NUM_LIT:0>]<EOL>p2 = edge_vertices[<NUM_LIT:1>]<EOL>c1 = geometry.circumcenter(grid_loc, tri.points[p1], tri.points[p2])<EOL>polygon = [c1]<EOL>area_list = []<EOL>total_area = <NUM_LIT:0.0><EOL>for i in range(num_vertices):<EOL><INDENT>p3 = edge_vertices[(i + <NUM_LIT:2>) % num_vertices]<EOL>try:<EOL><INDENT>c2 = geometry.circumcenter(grid_loc, tri.points[p3], tri.points[p2])<EOL>polygon.append(c2)<EOL>for check_tri in neighbors:<EOL><INDENT>if p2 in tri.simplices[check_tri]:<EOL><INDENT>polygon.append(triangle_info[check_tri]['<STR_LIT>'])<EOL><DEDENT><DEDENT>pts = [polygon[i] for i in ConvexHull(polygon).vertices]<EOL>value = variable[(tri.points[p2][<NUM_LIT:0>] == xp) & (tri.points[p2][<NUM_LIT:1>] == yp)]<EOL>cur_area = geometry.area(pts)<EOL>total_area += cur_area<EOL>area_list.append(cur_area * value[<NUM_LIT:0>])<EOL><DEDENT>except (ZeroDivisionError, qhull.QhullError) as e:<EOL><INDENT>message = ('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>') + str(e)<EOL>log.warning(message)<EOL>return np.nan<EOL><DEDENT>polygon = [c2]<EOL>p2 = p3<EOL><DEDENT>return sum(x / total_area for x in area_list)<EOL>
|
r"""Generate a natural neighbor interpolation of the observations to the given point.
This uses the Liang and Hale approach [Liang2010]_. The interpolation will fail if
the grid point has no natural neighbors.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations
yp: (N, ) ndarray
y-coordinates of observations
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
IE, variable[i] is a unique observation at (xp[i], yp[i])
grid_loc: (float, float)
Coordinates of the grid point at which to calculate the
interpolation.
tri: object
Delaunay triangulation of the observations.
neighbors: (N, ) ndarray
Simplex codes of the grid point's natural neighbors. The codes
will correspond to codes in the triangulation.
triangle_info: dictionary
Pre-calculated triangle attributes for quick look ups. Requires
items 'cc' (circumcenters) and 'r' (radii) to be associated with
each simplex code key from the delaunay triangulation.
Returns
-------
value: float
Interpolated value for the grid location
|
f8466:m2
|
@exporter.export<EOL>def natural_neighbor_to_points(points, values, xi):
|
tri = Delaunay(points)<EOL>members, triangle_info = geometry.find_natural_neighbors(tri, xi)<EOL>img = np.empty(shape=(xi.shape[<NUM_LIT:0>]), dtype=values.dtype)<EOL>img.fill(np.nan)<EOL>for ind, (grid, neighbors) in enumerate(members.items()):<EOL><INDENT>if len(neighbors) > <NUM_LIT:0>:<EOL><INDENT>points_transposed = np.array(points).transpose()<EOL>img[ind] = natural_neighbor_point(points_transposed[<NUM_LIT:0>], points_transposed[<NUM_LIT:1>],<EOL>values, xi[grid], tri, neighbors, triangle_info)<EOL><DEDENT><DEDENT>return img<EOL>
|
r"""Generate a natural neighbor interpolation to the given points.
This assigns values to the given interpolation points using the Liang and Hale
[Liang2010]_ approach.
Parameters
----------
points: array_like, shape (n, 2)
Coordinates of the data points.
values: array_like, shape (n,)
Values of the data points.
xi: array_like, shape (M, 2)
Points to interpolate the data onto.
Returns
-------
img: (M,) ndarray
Array representing the interpolated values for each input point in `xi`
See Also
--------
natural_neighbor_to_grid
|
f8466:m3
|
@exporter.export<EOL>def inverse_distance_to_points(points, values, xi, r, gamma=None, kappa=None, min_neighbors=<NUM_LIT:3>,<EOL>kind='<STR_LIT>'):
|
obs_tree = cKDTree(points)<EOL>indices = obs_tree.query_ball_point(xi, r=r)<EOL>img = np.empty(shape=(xi.shape[<NUM_LIT:0>]), dtype=values.dtype)<EOL>img.fill(np.nan)<EOL>for idx, (matches, grid) in enumerate(zip(indices, xi)):<EOL><INDENT>if len(matches) >= min_neighbors:<EOL><INDENT>x1, y1 = obs_tree.data[matches].T<EOL>values_subset = values[matches]<EOL>dists = geometry.dist_2(grid[<NUM_LIT:0>], grid[<NUM_LIT:1>], x1, y1)<EOL>if kind == '<STR_LIT>':<EOL><INDENT>img[idx] = cressman_point(dists, values_subset, r)<EOL><DEDENT>elif kind == '<STR_LIT>':<EOL><INDENT>img[idx] = barnes_point(dists, values_subset, kappa, gamma)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(str(kind) + '<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>return img<EOL>
|
r"""Generate an inverse distance weighting interpolation to the given points.
Values are assigned to the given interpolation points based on either [Cressman1959]_ or
[Barnes1964]_. The Barnes implementation used here is based on [Koch1983]_.
Parameters
----------
points: array_like, shape (n, 2)
Coordinates of the data points.
values: array_like, shape (n,)
Values of the data points.
xi: array_like, shape (M, 2)
Points to interpolate the data onto.
r: float
Radius from grid center, within which observations
are considered and weighted.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default None.
kappa: float
Response parameter for barnes interpolation. Default None.
min_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation
for a point. Default is 3.
kind: str
Specify what inverse distance weighting interpolation to use.
Options: 'cressman' or 'barnes'. Default 'cressman'
Returns
-------
img: (M,) ndarray
Array representing the interpolated values for each input point in `xi`
See Also
--------
inverse_distance_to_grid
|
f8466:m4
|
@exporter.export<EOL>def interpolate_to_points(points, values, xi, interp_type='<STR_LIT>', minimum_neighbors=<NUM_LIT:3>,<EOL>gamma=<NUM_LIT>, kappa_star=<NUM_LIT>, search_radius=None, rbf_func='<STR_LIT>',<EOL>rbf_smooth=<NUM_LIT:0>):
|
<EOL>if interp_type in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>return griddata(points, values, xi, method=interp_type)<EOL><DEDENT>elif interp_type == '<STR_LIT>':<EOL><INDENT>return natural_neighbor_to_points(points, values, xi)<EOL><DEDENT>elif interp_type in ['<STR_LIT>', '<STR_LIT>']:<EOL><INDENT>ave_spacing = cdist(points, points).mean()<EOL>if search_radius is None:<EOL><INDENT>search_radius = ave_spacing<EOL><DEDENT>if interp_type == '<STR_LIT>':<EOL><INDENT>return inverse_distance_to_points(points, values, xi, search_radius,<EOL>min_neighbors=minimum_neighbors,<EOL>kind=interp_type)<EOL><DEDENT>else:<EOL><INDENT>kappa = tools.calc_kappa(ave_spacing, kappa_star)<EOL>return inverse_distance_to_points(points, values, xi, search_radius, gamma, kappa,<EOL>min_neighbors=minimum_neighbors,<EOL>kind=interp_type)<EOL><DEDENT><DEDENT>elif interp_type == '<STR_LIT>':<EOL><INDENT>points_transposed = np.array(points).transpose()<EOL>xi_transposed = np.array(xi).transpose()<EOL>rbfi = Rbf(points_transposed[<NUM_LIT:0>], points_transposed[<NUM_LIT:1>], values, function=rbf_func,<EOL>smooth=rbf_smooth)<EOL>return rbfi(xi_transposed[<NUM_LIT:0>], xi_transposed[<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT>
|
r"""Interpolate unstructured point data to the given points.
This function interpolates the given `values` valid at `points` to the points `xi`. This is
modeled after `scipy.interpolate.griddata`, but acts as a generalization of it by including
the following types of interpolation:
- Linear
- Nearest Neighbor
- Cubic
- Radial Basis Function
- Natural Neighbor (2D Only)
- Barnes (2D Only)
- Cressman (2D Only)
Parameters
----------
points: array_like, shape (n, D)
Coordinates of the data points.
values: array_like, shape (n,)
Values of the data points.
xi: array_like, shape (M, D)
Points to interpolate the data onto.
interp_type: str
What type of interpolation to use. Available options include:
1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`.
2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`.
Default "linear".
minimum_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation for a
point. Default is 3.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default 0.25.
kappa_star: float
Response parameter for barnes interpolation, specified nondimensionally
in terms of the Nyquist. Default 5.052
search_radius: float
A search radius to use for the barnes and cressman interpolation schemes.
If search_radius is not specified, it will default to the average spacing of
observations.
rbf_func: str
Specifies which function to use for Rbf interpolation.
Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic',
'quintic', and 'thin_plate'. Default 'linear'. See `scipy.interpolate.Rbf` for more
information.
rbf_smooth: float
Smoothing value applied to rbf interpolation. Higher values result in more smoothing.
Returns
-------
values_interpolated: (M,) ndarray
Array representing the interpolated values for each input point in `xi`.
Notes
-----
This function primarily acts as a wrapper for the individual interpolation routines. The
individual functions are also available for direct use.
See Also
--------
interpolate_to_grid
|
f8466:m5
|
@exporter.export<EOL>@preprocess_xarray<EOL>def interpolate_nans_1d(x, y, kind='<STR_LIT>'):
|
x_sort_args = np.argsort(x)<EOL>x = x[x_sort_args]<EOL>y = y[x_sort_args]<EOL>nans = np.isnan(y)<EOL>if kind == '<STR_LIT>':<EOL><INDENT>y[nans] = np.interp(x[nans], x[~nans], y[~nans])<EOL><DEDENT>elif kind == '<STR_LIT>':<EOL><INDENT>y[nans] = np.interp(np.log(x[nans]), np.log(x[~nans]), y[~nans])<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(str(kind)))<EOL><DEDENT>return y[x_sort_args]<EOL>
|
Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation for the x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated.
|
f8468:m0
|
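A sketch of the NaN-filling pattern described above, using numpy.interp directly for the linear case:

```python
import numpy as np

y = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
x = np.arange(len(y), dtype=float)

nans = np.isnan(y)
y_filled = y.copy()
# Interpolate only at the NaN locations, using the valid points as the table
y_filled[nans] = np.interp(x[nans], x[~nans], y[~nans])
print(y_filled)  # [1. 2. 3. 4. 5.]
```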
@exporter.export<EOL>@preprocess_xarray<EOL>@units.wraps(None, ('<STR_LIT>', '<STR_LIT>'))<EOL>def interpolate_1d(x, xp, *args, **kwargs):
|
<EOL>fill_value = kwargs.pop('<STR_LIT>', np.nan)<EOL>axis = kwargs.pop('<STR_LIT>', <NUM_LIT:0>)<EOL>x = np.asanyarray(x).reshape(-<NUM_LIT:1>)<EOL>ndim = xp.ndim<EOL>sort_args = np.argsort(xp, axis=axis)<EOL>sort_x = np.argsort(x)<EOL>sorter = broadcast_indices(xp, sort_args, ndim, axis)<EOL>xp = xp[sorter]<EOL>variables = [arr[sorter] for arr in args]<EOL>x_array = x[sort_x]<EOL>expand = [np.newaxis] * ndim<EOL>expand[axis] = slice(None)<EOL>x_array = x_array[tuple(expand)]<EOL>minv = np.apply_along_axis(np.searchsorted, axis, xp, x[sort_x])<EOL>minv2 = np.copy(minv)<EOL>if ((np.max(minv) == xp.shape[axis]) or (np.min(minv) == <NUM_LIT:0>)) and fill_value is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if np.max(minv) == xp.shape[axis]:<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL>minv2[minv == xp.shape[axis]] = xp.shape[axis] - <NUM_LIT:1><EOL><DEDENT>if np.min(minv) == <NUM_LIT:0>:<EOL><INDENT>minv2[minv == <NUM_LIT:0>] = <NUM_LIT:1><EOL><DEDENT>above = broadcast_indices(xp, minv2, ndim, axis)<EOL>below = broadcast_indices(xp, minv2 - <NUM_LIT:1>, ndim, axis)<EOL>if np.any(x_array < xp[below]):<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL><DEDENT>ret = []<EOL>for var in variables:<EOL><INDENT>var_interp = var[below] + (var[above] - var[below]) * ((x_array - xp[below])<EOL>/ (xp[above] - xp[below]))<EOL>var_interp[minv == xp.shape[axis]] = fill_value<EOL>var_interp[x_array < xp[below]] = fill_value<EOL>if x[<NUM_LIT:0>] > x[-<NUM_LIT:1>]:<EOL><INDENT>var_interp = np.swapaxes(np.swapaxes(var_interp, <NUM_LIT:0>, axis)[::-<NUM_LIT:1>], <NUM_LIT:0>, axis)<EOL><DEDENT>ret.append(var_interp)<EOL><DEDENT>if len(ret) == <NUM_LIT:1>:<EOL><INDENT>return ret[<NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>return ret<EOL><DEDENT>
|
r"""Interpolates data with any shape over a specified axis.
Interpolation over a specified axis for arrays of any shape.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, a
ValueError is raised if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x = np.array([1., 2., 3., 4.])
>>> y = np.array([1., 2., 3., 4.])
>>> x_interp = np.array([2.5, 3.5])
>>> metpy.calc.interp(x_interp, x, y)
array([2.5, 3.5])
Notes
-----
xp and args must be the same shape.
|
f8468:m1
|
@exporter.export<EOL>@preprocess_xarray<EOL>@units.wraps(None, ('<STR_LIT>', '<STR_LIT>'))<EOL>def log_interpolate_1d(x, xp, *args, **kwargs):
|
<EOL>fill_value = kwargs.pop('<STR_LIT>', np.nan)<EOL>axis = kwargs.pop('<STR_LIT>', <NUM_LIT:0>)<EOL>log_x = np.log(x)<EOL>log_xp = np.log(xp)<EOL>return interpolate_1d(log_x, log_xp, *args, axis=axis, fill_value=fill_value)<EOL>
|
r"""Interpolates data with logarithmic x-scale over a specified axis.
Interpolation on a logarithmic x-scale for interpolation values in pressure coordinates.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, a
ValueError is raised if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x_log = np.array([1e3, 1e4, 1e5, 1e6])
>>> y_log = np.log(x_log) * 2 + 3
>>> x_interp = np.array([5e3, 5e4, 5e5])
>>> metpy.calc.log_interp(x_interp, x_log, y_log)
array([20.03438638, 24.63955657, 29.24472675])
Notes
-----
xp and args must be the same shape.
|
f8468:m2
|
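The log-scale variant simply hands log(x) and log(xp) to the linear routine; a one-dimensional numpy.interp sketch reproducing the example values from the docstring above:

```python
import numpy as np

x_log = np.array([1e3, 1e4, 1e5, 1e6])
y_log = np.log(x_log) * 2 + 3
x_interp = np.array([5e3, 5e4, 5e5])

# Interpolate linearly in log(x) space, the same idea as log_interpolate_1d
result = np.interp(np.log(x_interp), np.log(x_log), y_log)
print(result)  # approx [20.034, 24.640, 29.245]
```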
def calc_kappa(spacing, kappa_star=<NUM_LIT>):
|
return kappa_star * (<NUM_LIT> * spacing / np.pi)**<NUM_LIT:2><EOL>
|
r"""Calculate the kappa parameter for barnes interpolation.
Parameters
----------
spacing: float
Average spacing between observations
kappa_star: float
Non-dimensional response parameter. Default 5.052.
Returns
-------
kappa: float
|
f8469:m0
|
@exporter.export<EOL>def remove_observations_below_value(x, y, z, val=<NUM_LIT:0>):
|
x_ = x[z >= val]<EOL>y_ = y[z >= val]<EOL>z_ = z[z >= val]<EOL>return x_, y_, z_<EOL>
|
r"""Remove all x, y, and z where z is less than val.
Will not destroy original values.
Parameters
----------
x: array_like
x coordinate.
y: array_like
y coordinate.
z: array_like
Observation value.
val: float
Value at which to threshold z.
Returns
-------
x, y, z
List of coordinate observation pairs without
observation values less than val.
|
f8469:m1
|
@exporter.export<EOL>def remove_nan_observations(x, y, z):
|
x_ = x[~np.isnan(z)]<EOL>y_ = y[~np.isnan(z)]<EOL>z_ = z[~np.isnan(z)]<EOL>return x_, y_, z_<EOL>
|
r"""Remove all x, y, and z where z is nan.
Will not destroy original values.
Parameters
----------
x: array_like
x coordinate
y: array_like
y coordinate
z: array_like
observation value
Returns
-------
x, y, z
List of coordinate observation pairs without
nan valued observations.
|
f8469:m2
|
@exporter.export<EOL>def remove_repeat_coordinates(x, y, z):
|
coords = []<EOL>variable = []<EOL>for (x_, y_, t_) in zip(x, y, z):<EOL><INDENT>if (x_, y_) not in coords:<EOL><INDENT>coords.append((x_, y_))<EOL>variable.append(t_)<EOL><DEDENT><DEDENT>coords = np.array(coords)<EOL>x_ = coords[:, <NUM_LIT:0>]<EOL>y_ = coords[:, <NUM_LIT:1>]<EOL>z_ = np.array(variable)<EOL>return x_, y_, z_<EOL>
|
r"""Remove all x, y, and z where (x,y) is repeated and keep the first occurrence only.
Will not destroy original values.
Parameters
----------
x: array_like
x coordinate
y: array_like
y coordinate
z: array_like
observation value
Returns
-------
x, y, z
List of coordinate observation pairs without
repeated coordinates.
|
f8469:m3
|
def barnes_weights(sq_dist, kappa, gamma):
|
return np.exp(-<NUM_LIT:1.0> * sq_dist / (kappa * gamma))<EOL>
|
r"""Calculate the Barnes weights from squared distance values.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distances from interpolation point
associated with each observation in meters.
kappa: float
Response parameter for barnes interpolation. Default None.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default None.
Returns
-------
weights: (N, ) ndarray
Calculated weights for the given observations determined by their distance
to the interpolation point.
|
f8469:m4
|
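The Barnes weight is a Gaussian in squared distance, w = exp(-d^2 / (kappa * gamma)); a small sketch with illustrative parameter values:

```python
import numpy as np

def barnes_weights_sketch(sq_dist, kappa, gamma):
    # w_i = exp(-d_i**2 / (kappa * gamma))
    return np.exp(-sq_dist / (kappa * gamma))

sq_dist = np.array([0.0, 1.0e8, 4.0e8])   # squared distances in m**2 (illustrative)
kappa, gamma = 1.0e8, 0.25                 # response / smoothing parameters (illustrative)
w = barnes_weights_sketch(sq_dist, kappa, gamma)
print(w / w.sum())   # normalized weights, as used in barnes_point above
```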
def cressman_weights(sq_dist, r):
|
return (r * r - sq_dist) / (r * r + sq_dist)<EOL>
|
r"""Calculate the Cressman weights from squared distance values.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distances from interpolation point
associated with each observation in meters.
r: float
Maximum distance an observation can be from an
interpolation point to be considered in the
interpolation calculation.
Returns
-------
weights: (N, ) ndarray
Calculated weights for the given observations determined by their distance
to the interpolation point.
|
f8469:m5
|
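The Cressman weight is (r^2 - d^2) / (r^2 + d^2), falling to zero at the search radius; a small sketch:

```python
import numpy as np

def cressman_weights_sketch(sq_dist, r):
    return (r * r - sq_dist) / (r * r + sq_dist)

r = 100.0                                    # search radius (illustrative units)
sq_dist = np.array([0.0, 2500.0, 10000.0])   # d = 0, 50, 100
print(cressman_weights_sketch(sq_dist, r))   # [1.0, 0.6, 0.0]
```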
def unit_calc(temp, press, dens, mixing, unitless_const):
|
pass<EOL>
|
r"""Stub calculation for testing unit checking.
|
f8472:m8
|
@classmethod<EOL><INDENT>def dontuse(cls):<DEDENT>
|
deprecation.warn_deprecated('<STR_LIT>', pending=True)<EOL>return False<EOL>
|
Don't use.
|
f8474:c0:m0
|
@classmethod<EOL><INDENT>@deprecation.deprecated('<STR_LIT>')<EOL>def really_dontuse(cls):<DEDENT>
|
return False<EOL>
|
Really, don't use.
|
f8474:c0:m1
|
def _is_x_first_dim(dim_order):
|
if dim_order is None:<EOL><INDENT>dim_order = '<STR_LIT>'<EOL><DEDENT>return dim_order == '<STR_LIT>'<EOL>
|
Determine whether x is the first dimension based on the value of dim_order.
|
f8475:m1
|
def _check_and_flip(arr):
|
if hasattr(arr, '<STR_LIT>'):<EOL><INDENT>if arr.ndim >= <NUM_LIT:2>:<EOL><INDENT>return arr.T<EOL><DEDENT>else:<EOL><INDENT>return arr<EOL><DEDENT><DEDENT>elif not is_string_like(arr) and iterable(arr):<EOL><INDENT>return tuple(_check_and_flip(a) for a in arr)<EOL><DEDENT>else:<EOL><INDENT>return arr<EOL><DEDENT>
|
Transpose array or list of arrays if they are 2D.
|
f8475:m2
|
def ensure_yx_order(func):
|
@functools.wraps(func)<EOL>def wrapper(*args, **kwargs):<EOL><INDENT>dim_order = kwargs.pop('<STR_LIT>', None)<EOL>x_first = _is_x_first_dim(dim_order)<EOL>if x_first:<EOL><INDENT>args = tuple(_check_and_flip(arr) for arr in args)<EOL>for k, v in kwargs:<EOL><INDENT>kwargs[k] = _check_and_flip(v)<EOL><DEDENT><DEDENT>ret = func(*args, **kwargs)<EOL>if x_first:<EOL><INDENT>return _check_and_flip(ret)<EOL><DEDENT>else:<EOL><INDENT>return ret<EOL><DEDENT><DEDENT>dim_order_doc = """<STR_LIT>"""<EOL>params = wrapper.__doc__.find('<STR_LIT>')<EOL>blank = wrapper.__doc__.find('<STR_LIT>', params)<EOL>wrapper.__doc__ = wrapper.__doc__[:blank] + dim_order_doc + wrapper.__doc__[blank:]<EOL>return wrapper<EOL>
|
Wrap a function to ensure all array arguments are y, x ordered, based on kwarg.
|
f8475:m3
|
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def vorticity(u, v, dx, dy):
|
dudy = first_derivative(u, delta=dy, axis=-<NUM_LIT:2>)<EOL>dvdx = first_derivative(v, delta=dx, axis=-<NUM_LIT:1>)<EOL>return dvdx - dudy<EOL>
|
r"""Calculate the vertical vorticity of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
vertical vorticity
See Also
--------
divergence
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m4
|
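Vorticity here is the finite-difference form of dv/dx - du/dy; a plain-NumPy sketch on a uniform grid, using numpy.gradient in place of MetPy's first_derivative (a solid-body rotation field gives 2*omega everywhere):

```python
import numpy as np

dx = dy = 1000.0   # grid spacing in meters (illustrative)
y, x = np.meshgrid(np.arange(0, 10000., dy), np.arange(0, 10000., dx), indexing='ij')

# Solid-body rotation: u = -omega*y, v = omega*x  ->  vorticity = 2*omega
omega = 1e-4
u = -omega * y
v = omega * x

dudy = np.gradient(u, dy, axis=0)   # arrays are (y, x) ordered
dvdx = np.gradient(v, dx, axis=1)
zeta = dvdx - dudy
print(zeta.mean())                  # ~2e-4, i.e. 2*omega
```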
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def divergence(u, v, dx, dy):
|
dudx = first_derivative(u, delta=dx, axis=-<NUM_LIT:1>)<EOL>dvdy = first_derivative(v, delta=dy, axis=-<NUM_LIT:2>)<EOL>return dudx + dvdy<EOL>
|
r"""Calculate the horizontal divergence of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
The horizontal divergence
See Also
--------
vorticity
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m5
|
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def shearing_deformation(u, v, dx, dy):
|
dudy = first_derivative(u, delta=dy, axis=-<NUM_LIT:2>)<EOL>dvdx = first_derivative(v, delta=dx, axis=-<NUM_LIT:1>)<EOL>return dvdx + dudy<EOL>
|
r"""Calculate the shearing deformation of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
Shearing Deformation
See Also
--------
stretching_deformation, total_deformation
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m6
|
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def stretching_deformation(u, v, dx, dy):
|
dudx = first_derivative(u, delta=dx, axis=-<NUM_LIT:1>)<EOL>dvdy = first_derivative(v, delta=dy, axis=-<NUM_LIT:2>)<EOL>return dudx - dvdy<EOL>
|
r"""Calculate the stretching deformation of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
Stretching Deformation
See Also
--------
shearing_deformation, total_deformation
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m7
|
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def total_deformation(u, v, dx, dy):
|
dudy, dudx = gradient(u, deltas=(dy, dx), axes=(-<NUM_LIT:2>, -<NUM_LIT:1>))<EOL>dvdy, dvdx = gradient(v, deltas=(dy, dx), axes=(-<NUM_LIT:2>, -<NUM_LIT:1>))<EOL>return np.sqrt((dvdx + dudy)**<NUM_LIT:2> + (dudx - dvdy)**<NUM_LIT:2>)<EOL>
|
r"""Calculate the horizontal total deformation of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
Total Deformation
See Also
--------
shearing_deformation, stretching_deformation
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m8
|
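As a rough consistency check tying together the three deformation entries above, the sketch below (hypothetical grid, same metpy.calc import assumption) verifies that total deformation equals the square root of the sum of squares of shearing and stretching deformation:

    import numpy as np
    import metpy.calc as mpcalc
    from metpy.units import units

    u = np.random.rand(5, 5) * units('m/s')
    v = np.random.rand(5, 5) * units('m/s')
    dx = dy = 10.0 * units.km

    shear = mpcalc.shearing_deformation(u, v, dx, dy, dim_order='yx')
    stretch = mpcalc.stretching_deformation(u, v, dx, dy, dim_order='yx')
    total = mpcalc.total_deformation(u, v, dx, dy, dim_order='yx')

    # D = sqrt(shearing**2 + stretching**2)
    np.testing.assert_allclose(total.magnitude,
                               np.sqrt(shear**2 + stretch**2).magnitude)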
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def advection(scalar, wind, deltas):
|
<EOL>wind = _stack(wind)<EOL>if wind.ndim > scalar.ndim:<EOL><INDENT>wind = wind[::-<NUM_LIT:1>]<EOL><DEDENT>grad = _stack(gradient(scalar, deltas=deltas[::-<NUM_LIT:1>]))<EOL>grad, wind = atleast_2d(grad, wind)<EOL>return (-grad * wind).sum(axis=<NUM_LIT:0>)<EOL>
|
r"""Calculate the advection of a scalar field by the wind.
The order of the dimensions of the arrays must match the order in which
the wind components are given. For example, if the winds are given [u, v],
then the scalar and wind arrays must be indexed as x,y (which puts x as the
rows, not columns).
Parameters
----------
scalar : N-dimensional array
Array (with N-dimensions) with the quantity to be advected.
wind : sequence of arrays
Length M sequence of N-dimensional arrays. Represents the flow,
with a component of the wind in each dimension. For example, for
horizontal advection, this could be a list: [u, v], where u and v
are each a 2-dimensional array.
deltas : sequence of float or ndarray
A (length M) sequence containing the grid spacing(s) in each dimension. If using
arrays, in each array there should be one item less than the size of `scalar` along the
applicable axis.
Returns
-------
N-dimensional array
An N-dimensional array containing the advection at all grid points.
|
f8475:m9
|
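A hedged sketch of horizontal temperature advection using the entry above; the [u, v] and (dx, dy) ordering shown assumes (y, x) arrays with dim_order='yx', so check it against the docstring before relying on it:

    import numpy as np
    import metpy.calc as mpcalc
    from metpy.units import units

    temperature = np.linspace(280.0, 290.0, 25).reshape(5, 5) * units.kelvin
    u = np.full((5, 5), 10.0) * units('m/s')
    v = np.zeros((5, 5)) * units('m/s')
    dx = dy = 10.0 * units.km

    # Advection of temperature by the horizontal wind, in temperature units per second
    adv = mpcalc.advection(temperature, [u, v], (dx, dy), dim_order='yx')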
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def frontogenesis(thta, u, v, dx, dy, dim_order='<STR_LIT>'):
|
<EOL>ddy_thta = first_derivative(thta, delta=dy, axis=-<NUM_LIT:2>)<EOL>ddx_thta = first_derivative(thta, delta=dx, axis=-<NUM_LIT:1>)<EOL>mag_thta = np.sqrt(ddx_thta**<NUM_LIT:2> + ddy_thta**<NUM_LIT:2>)<EOL>shrd = shearing_deformation(u, v, dx, dy, dim_order=dim_order)<EOL>strd = stretching_deformation(u, v, dx, dy, dim_order=dim_order)<EOL>tdef = total_deformation(u, v, dx, dy, dim_order=dim_order)<EOL>div = divergence(u, v, dx, dy, dim_order=dim_order)<EOL>psi = <NUM_LIT:0.5> * np.arctan2(shrd, strd)<EOL>beta = np.arcsin((-ddx_thta * np.cos(psi) - ddy_thta * np.sin(psi)) / mag_thta)<EOL>return <NUM_LIT:0.5> * mag_thta * (tdef * np.cos(<NUM_LIT:2> * beta) - div)<EOL>
|
r"""Calculate the 2D kinematic frontogenesis of a temperature field.
The implementation is a form of the Petterssen Frontogenesis and uses the formula
outlined in [Bluestein1993]_ pg.248-253.
.. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D cos(2\beta)-\delta]
* :math:`F` is 2D kinematic frontogenesis
* :math:`\theta` is potential temperature
* :math:`D` is the total deformation
* :math:`\beta` is the angle between the axis of dilatation and the isentropes
* :math:`\delta` is the divergence
Parameters
----------
thta : (M, N) ndarray
Potential temperature
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
2D Frontogenesis in [temperature units]/m/s
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
Conversion factor to go from [temperature units]/m/s to [temperature units]/100km/3h:
:math:`1.08e4*1.e5`
|
f8475:m10
|
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def geostrophic_wind(heights, f, dx, dy):
|
if heights.dimensionality['<STR_LIT>'] == <NUM_LIT>:<EOL><INDENT>norm_factor = <NUM_LIT:1.> / f<EOL><DEDENT>else:<EOL><INDENT>norm_factor = mpconsts.g / f<EOL><DEDENT>dhdy = first_derivative(heights, delta=dy, axis=-<NUM_LIT:2>)<EOL>dhdx = first_derivative(heights, delta=dx, axis=-<NUM_LIT:1>)<EOL>return -norm_factor * dhdy, norm_factor * dhdx<EOL>
|
r"""Calculate the geostrophic wind given from the heights or geopotential.
Parameters
----------
heights : (M, N) ndarray
The height field, with either leading dimensions of (x, y) or trailing dimensions
of (y, x), depending on the value of ``dim_order``.
f : array_like
The coriolis parameter. This can be a scalar to be applied
everywhere or an array of values.
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `heights` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `heights` along the applicable axis.
Returns
-------
A 2-item tuple of arrays
A tuple of the u-component and v-component of the geostrophic wind.
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m11
|
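A short sketch of the geostrophic wind entry above, using a hypothetical height field and a mid-latitude Coriolis parameter (coriolis_parameter is assumed to be exported by the same module, as its use in the bodies above suggests):

    import numpy as np
    import metpy.calc as mpcalc
    from metpy.units import units

    # Hypothetical 500-hPa geopotential height analysis, arrays ordered (y, x)
    heights = (5700.0 + 10.0 * np.random.rand(5, 5)) * units.meter
    f = mpcalc.coriolis_parameter(45.0 * units.degrees)
    dx = dy = 100.0 * units.km

    ug, vg = mpcalc.geostrophic_wind(heights, f, dx, dy, dim_order='yx')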
@exporter.export<EOL>@preprocess_xarray<EOL>@ensure_yx_order<EOL>def ageostrophic_wind(heights, f, dx, dy, u, v, dim_order='<STR_LIT>'):
|
u_geostrophic, v_geostrophic = geostrophic_wind(heights, f, dx, dy, dim_order=dim_order)<EOL>return u - u_geostrophic, v - v_geostrophic<EOL>
|
r"""Calculate the ageostrophic wind given from the heights or geopotential.
Parameters
----------
heights : (M, N) ndarray
The height field.
f : array_like
The coriolis parameter. This can be a scalar to be applied
everywhere or an array of values.
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `heights` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `heights` along the applicable axis.
u : (M, N) ndarray
The u wind field.
v : (M, N) ndarray
The v wind field.
Returns
-------
A 2-item tuple of arrays
A tuple of the u-component and v-component of the ageostrophic wind.
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m12
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def montgomery_streamfunction(height, temperature):
|
return (mpconsts.g * height) + (mpconsts.Cp_d * temperature)<EOL>
|
r"""Compute the Montgomery Streamfunction on isentropic surfaces.
The Montgomery Streamfunction is the streamfunction of the geostrophic wind on an
isentropic surface. This quantity is proportional to the geostrophic wind in isentropic
coordinates, and its gradient can be interpreted similarly to the pressure gradient in
isobaric coordinates.
Parameters
----------
height : `pint.Quantity`
Array of geopotential height of isentropic surfaces
temperature : `pint.Quantity`
Array of temperature on isentropic surfaces
Returns
-------
stream_func : `pint.Quantity`
Notes
-----
The formula used is that from [Lackmann2011]_ p. 69.
.. math:: \Psi = gZ + C_pT
* :math:`\Psi` is Montgomery Streamfunction
* :math:`g` is avg. gravitational acceleration on Earth
* :math:`Z` is geopotential height of the isentropic surface
* :math:`C_p` is specific heat at constant pressure for dry air
* :math:`T` is temperature of the isentropic surface
See Also
--------
get_isentropic_pressure
|
f8475:m13
|
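A one-line sketch of the Montgomery streamfunction entry above, with hypothetical values on an isentropic surface:

    import metpy.calc as mpcalc
    from metpy.units import units

    height = 9100.0 * units.meter        # geopotential height of the surface
    temperature = 223.0 * units.kelvin   # temperature on the surface

    psi = mpcalc.montgomery_streamfunction(height, temperature)  # ~ m**2 / s**2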
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>')<EOL>def storm_relative_helicity(u, v, heights, depth, bottom=<NUM_LIT:0> * units.m,<EOL>storm_u=<NUM_LIT:0> * units('<STR_LIT>'), storm_v=<NUM_LIT:0> * units('<STR_LIT>')):<EOL>
|
_, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)<EOL>storm_relative_u = u - storm_u<EOL>storm_relative_v = v - storm_v<EOL>int_layers = (storm_relative_u[<NUM_LIT:1>:] * storm_relative_v[:-<NUM_LIT:1>]<EOL>- storm_relative_u[:-<NUM_LIT:1>] * storm_relative_v[<NUM_LIT:1>:])<EOL>positive_srh = int_layers[int_layers.magnitude > <NUM_LIT:0.>].sum()<EOL>if np.ma.is_masked(positive_srh):<EOL><INDENT>positive_srh = <NUM_LIT:0.0> * units('<STR_LIT>')<EOL><DEDENT>negative_srh = int_layers[int_layers.magnitude < <NUM_LIT:0.>].sum()<EOL>if np.ma.is_masked(negative_srh):<EOL><INDENT>negative_srh = <NUM_LIT:0.0> * units('<STR_LIT>')<EOL><DEDENT>return (positive_srh.to('<STR_LIT>'),<EOL>negative_srh.to('<STR_LIT>'),<EOL>(positive_srh + negative_srh).to('<STR_LIT>'))<EOL>
|
r"""Calculate storm relative helicity.
Calculates storm relative helicity following [Markowski2010]_ pg. 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
|
f8475:m14
|
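A sketch of the storm-relative helicity entry above for a hypothetical sounding; the wind, height, and storm-motion values are illustrative only:

    import numpy as np
    import metpy.calc as mpcalc
    from metpy.units import units

    heights = np.array([0.0, 250.0, 500.0, 1000.0, 2000.0, 3000.0]) * units.meter
    u = np.array([5.0, 10.0, 12.0, 15.0, 20.0, 25.0]) * units('m/s')
    v = np.array([2.0, 5.0, 8.0, 10.0, 12.0, 14.0]) * units('m/s')

    pos_srh, neg_srh, total_srh = mpcalc.storm_relative_helicity(
        u, v, heights, depth=3.0 * units.km,
        storm_u=7.0 * units('m/s'), storm_v=7.0 * units('m/s'))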
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def absolute_vorticity(u, v, dx, dy, lats, dim_order='<STR_LIT>'):
|
f = coriolis_parameter(lats)<EOL>relative_vorticity = vorticity(u, v, dx, dy, dim_order=dim_order)<EOL>return relative_vorticity + f<EOL>
|
r"""Calculate the absolute vorticity of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
latitudes of the wind data in radians or with appropriate unit information attached
Returns
-------
(M, N) ndarray
absolute vorticity
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m15
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def potential_vorticity_baroclinic(potential_temperature, pressure, u, v, dx, dy, lats):
|
if ((np.shape(potential_temperature)[-<NUM_LIT:3>] < <NUM_LIT:3>) or (np.shape(pressure)[-<NUM_LIT:3>] < <NUM_LIT:3>)<EOL>or (np.shape(potential_temperature)[-<NUM_LIT:3>] != (np.shape(pressure)[-<NUM_LIT:3>]))):<EOL><INDENT>raise ValueError('<STR_LIT>'<EOL>'<STR_LIT>'.format(-<NUM_LIT:3>))<EOL><DEDENT>avor = absolute_vorticity(u, v, dx, dy, lats, dim_order='<STR_LIT>')<EOL>dthtadp = first_derivative(potential_temperature, x=pressure, axis=-<NUM_LIT:3>)<EOL>if ((np.shape(potential_temperature)[-<NUM_LIT:2>] == <NUM_LIT:1>)<EOL>and (np.shape(potential_temperature)[-<NUM_LIT:1>] == <NUM_LIT:1>)):<EOL><INDENT>dthtady = <NUM_LIT:0> * units.K / units.m <EOL>dthtadx = <NUM_LIT:0> * units.K / units.m <EOL><DEDENT>else:<EOL><INDENT>dthtady = first_derivative(potential_temperature, delta=dy, axis=-<NUM_LIT:2>)<EOL>dthtadx = first_derivative(potential_temperature, delta=dx, axis=-<NUM_LIT:1>)<EOL><DEDENT>dudp = first_derivative(u, x=pressure, axis=-<NUM_LIT:3>)<EOL>dvdp = first_derivative(v, x=pressure, axis=-<NUM_LIT:3>)<EOL>return (-mpconsts.g * (dudp * dthtady - dvdp * dthtadx<EOL>+ avor * dthtadp)).to(units.kelvin * units.meter**<NUM_LIT:2><EOL>/ (units.second * units.kilogram))<EOL>
|
r"""Calculate the baroclinic potential vorticity.
.. math:: PV = -g \left(\frac{\partial u}{\partial p}\frac{\partial \theta}{\partial y}
- \frac{\partial v}{\partial p}\frac{\partial \theta}{\partial x}
+ \frac{\partial \theta}{\partial p}(\zeta + f) \right)
This formula is based on equation 4.5.93 [Bluestein1993]_.
Parameters
----------
potential_temperature : (P, M, N) ndarray
potential temperature
pressure : (P, M, N) ndarray
vertical pressures
u : (P, M, N) ndarray
x component of the wind
v : (P, M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
latitudes of the wind data in radians or with appropriate unit information attached
axis : int, optional
The axis corresponding to the vertical dimension in the potential temperature
and pressure arrays, defaults to 0, the first dimension.
Returns
-------
(P, M, N) ndarray
baroclinic potential vorticity
Notes
-----
This function will only work with data that is in (P, Y, X) format. If your data
is in a different order you will need to re-order your data in order to get correct
results from this function.
The same function can be used for isobaric and isentropic PV analysis. Provide winds
for vorticity calculations on the desired isobaric or isentropic surface. At least three
layers of pressure/potential temperature are required in order to calculate the vertical
derivative (one above and below the desired surface). The first two terms will be zero if
isentropic level data is used, because the gradient of theta in both the x- and y-directions
is zero on an isentropic surface.
This function expects pressure/isentropic level to increase with increasing array element
(e.g., from higher in the atmosphere to closer to the surface). If the pressure array is
one-dimensional, ``p[:, None, None]`` can be used to make it appear multi-dimensional.
|
f8475:m16
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def potential_vorticity_barotropic(heights, u, v, dx, dy, lats, dim_order='<STR_LIT>'):
|
avor = absolute_vorticity(u, v, dx, dy, lats, dim_order=dim_order)<EOL>return (avor / heights).to('<STR_LIT>')<EOL>
|
r"""Calculate the barotropic (Rossby) potential vorticity.
.. math:: PV = \frac{f + \zeta}{H}
This formula is based on equation 7.27 [Hobbs2006]_.
Parameters
----------
heights : (M, N) ndarray
atmospheric heights
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
latitudes of the wind data in radians or with appropriate unit information attached
Returns
-------
(M, N) ndarray
barotropic potential vorticity
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m17
|
@exporter.export<EOL>@preprocess_xarray<EOL>def inertial_advective_wind(u, v, u_geostrophic, v_geostrophic, dx, dy, lats):
|
f = coriolis_parameter(lats)<EOL>dugdy, dugdx = gradient(u_geostrophic, deltas=(dy, dx), axes=(-<NUM_LIT:2>, -<NUM_LIT:1>))<EOL>dvgdy, dvgdx = gradient(v_geostrophic, deltas=(dy, dx), axes=(-<NUM_LIT:2>, -<NUM_LIT:1>))<EOL>u_component = -(u * dvgdx + v * dvgdy) / f<EOL>v_component = (u * dugdx + v * dugdy) / f<EOL>return u_component, v_component<EOL>
|
r"""Calculate the inertial advective wind.
.. math:: \frac{\hat k}{f} \times (\vec V \cdot \nabla)\hat V_g
.. math:: \frac{\hat k}{f} \times \left[ \left( u \frac{\partial u_g}{\partial x} + v
\frac{\partial u_g}{\partial y} \right) \hat i + \left( u \frac{\partial v_g}
{\partial x} + v \frac{\partial v_g}{\partial y} \right) \hat j \right]
.. math:: \left[ -\frac{1}{f}\left(u \frac{\partial v_g}{\partial x} + v
\frac{\partial v_g}{\partial y} \right) \right] \hat i + \left[ \frac{1}{f}
\left( u \frac{\partial u_g}{\partial x} + v \frac{\partial u_g}{\partial y}
\right) \right] \hat j
This formula is based on equation 27 of [Rochette2006]_.
Parameters
----------
u : (M, N) ndarray
x component of the advecting wind
v : (M, N) ndarray
y component of the advecting wind
u_geostrophic : (M, N) ndarray
x component of the geostrophic (advected) wind
v_geostrophic : (M, N) ndarray
y component of the geostrophic (advected) wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
latitudes of the wind data in radians or with appropriate unit information attached
Returns
-------
(M, N) ndarray
x component of inertial advective wind
(M, N) ndarray
y component of inertial advective wind
Notes
-----
Many forms of the inertial advective wind assume the advecting and advected
wind to both be the geostrophic wind. To do so, pass the x and y components
of the geostrophic wind for u and u_geostrophic/v and v_geostrophic.
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m18
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def q_vector(u, v, temperature, pressure, dx, dy, static_stability=<NUM_LIT:1>):
|
dudy, dudx = gradient(u, deltas=(dy, dx), axes=(-<NUM_LIT:2>, -<NUM_LIT:1>))<EOL>dvdy, dvdx = gradient(v, deltas=(dy, dx), axes=(-<NUM_LIT:2>, -<NUM_LIT:1>))<EOL>dtempdy, dtempdx = gradient(temperature, deltas=(dy, dx), axes=(-<NUM_LIT:2>, -<NUM_LIT:1>))<EOL>q1 = -mpconsts.Rd / (pressure * static_stability) * (dudx * dtempdx + dvdx * dtempdy)<EOL>q2 = -mpconsts.Rd / (pressure * static_stability) * (dudy * dtempdx + dvdy * dtempdy)<EOL>return q1.to_base_units(), q2.to_base_units()<EOL>
|
r"""Calculate Q-vector at a given pressure level using the u, v winds and temperature.
.. math:: \vec{Q} = (Q_1, Q_2)
= - \frac{R}{\sigma p}\left(
\frac{\partial \vec{v}_g}{\partial x} \cdot \nabla_p T,
\frac{\partial \vec{v}_g}{\partial y} \cdot \nabla_p T
\right)
This formula follows equation 5.7.55 from [Bluestein1992]_, and can be used with the
the below form of the quasigeostrophic omega equation to assess vertical motion
([Bluestein1992]_ equation 5.7.54):
.. math:: \left( \nabla_p^2 + \frac{f_0^2}{\sigma} \frac{\partial^2}{\partial p^2}
\right) \omega =
- 2 \nabla_p \cdot \vec{Q} -
\frac{R}{\sigma p} \beta \frac{\partial T}{\partial x}.
Parameters
----------
u : (M, N) ndarray
x component of the wind (geostrophic in QG-theory)
v : (M, N) ndarray
y component of the wind (geostrophic in QG-theory)
temperature : (M, N) ndarray
Array of temperature at pressure level
pressure : `pint.Quantity`
Pressure at level
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
static_stability : `pint.Quantity`, optional
The static stability at the pressure level. Defaults to 1 if not given to calculate
the Q-vector without factoring in static stability.
Returns
-------
tuple of (M, N) ndarrays
The components of the Q-vector in the u- and v-directions respectively
See Also
--------
static_stability
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
|
f8475:m19
|
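A sketch of the Q-vector entry above on a single hypothetical pressure level (wind, temperature, and spacing values are made up; static stability is left at its default of 1):

    import numpy as np
    import metpy.calc as mpcalc
    from metpy.units import units

    # Hypothetical 700-hPa geostrophic wind and temperature analysis, arrays ordered (y, x)
    u = np.random.rand(5, 5) * units('m/s')
    v = np.random.rand(5, 5) * units('m/s')
    temperature = (270.0 + np.random.rand(5, 5)) * units.kelvin
    pressure = 700.0 * units.hPa
    dx = dy = 100.0 * units.km

    q1, q2 = mpcalc.q_vector(u, v, temperature, pressure, dx, dy)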
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def relative_humidity_from_dewpoint(temperature, dewpt):
|
e = saturation_vapor_pressure(dewpt)<EOL>e_s = saturation_vapor_pressure(temperature)<EOL>return (e / e_s)<EOL>
|
r"""Calculate the relative humidity.
Uses temperature and dewpoint in celsius to calculate relative
humidity using the ratio of vapor pressure to saturation vapor pressures.
Parameters
----------
temperature : `pint.Quantity`
The temperature
dewpt : `pint.Quantity`
The dew point temperature
Returns
-------
`pint.Quantity`
The relative humidity
See Also
--------
saturation_vapor_pressure
|
f8476:m0
|
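A quick sketch of the relative humidity entry above with hypothetical surface values:

    import metpy.calc as mpcalc
    from metpy.units import units

    rh = mpcalc.relative_humidity_from_dewpoint(25.0 * units.degC, 15.0 * units.degC)
    # rh is a dimensionless ratio, roughly 0.54 for these values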
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def exner_function(pressure, reference_pressure=mpconsts.P0):
|
return (pressure / reference_pressure).to('<STR_LIT>')**mpconsts.kappa<EOL>
|
r"""Calculate the Exner function.
.. math:: \Pi = \left( \frac{p}{p_0} \right)^\kappa
This can be used to calculate potential temperature from temperature (and vice versa),
since
.. math:: \Pi = \frac{T}{\theta}
Parameters
----------
pressure : `pint.Quantity`
The total atmospheric pressure
reference_pressure : `pint.Quantity`, optional
The reference pressure against which to calculate the Exner function, defaults to P0
Returns
-------
`pint.Quantity`
The value of the Exner function at the given pressure
See Also
--------
potential_temperature
temperature_from_potential_temperature
|
f8476:m1
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def potential_temperature(pressure, temperature):
|
return temperature / exner_function(pressure)<EOL>
|
r"""Calculate the potential temperature.
Uses the Poisson equation to calculate the potential temperature
given `pressure` and `temperature`.
Parameters
----------
pressure : `pint.Quantity`
The total atmospheric pressure
temperature : `pint.Quantity`
The temperature
Returns
-------
`pint.Quantity`
The potential temperature corresponding to the temperature and
pressure.
See Also
--------
dry_lapse
Notes
-----
Formula:
.. math:: \Theta = T (P_0 / P)^\kappa
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.potential_temperature(800. * units.mbar, 273. * units.kelvin)
<Quantity(290.96653180346203, 'kelvin')>
|
f8476:m2
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def temperature_from_potential_temperature(pressure, theta):
|
return theta * exner_function(pressure)<EOL>
|
r"""Calculate the temperature from a given potential temperature.
Uses the inverse of the Poisson equation to calculate the temperature from a
given potential temperature at a specific pressure level.
Parameters
----------
pressure : `pint.Quantity`
The total atmospheric pressure
theta : `pint.Quantity`
The potential temperature
Returns
-------
`pint.Quantity`
The temperature corresponding to the potential temperature and pressure.
See Also
--------
dry_lapse
potential_temperature
Notes
-----
Formula:
.. math:: T = \Theta (P / P_0)^\kappa
Examples
--------
>>> from metpy.units import units
>>> from metpy.calc import temperature_from_potential_temperature
>>> # potential temperature
>>> theta = np.array([ 286.12859679, 288.22362587]) * units.kelvin
>>> p = 850 * units.mbar
>>> T = temperature_from_potential_temperature(p,theta)
|
f8476:m3
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def dry_lapse(pressure, temperature, ref_pressure=None):
|
if ref_pressure is None:<EOL><INDENT>ref_pressure = pressure[<NUM_LIT:0>]<EOL><DEDENT>return temperature * (pressure / ref_pressure)**mpconsts.kappa<EOL>
|
r"""Calculate the temperature at a level assuming only dry processes.
This function lifts a parcel starting at `temperature`, conserving
potential temperature. The starting pressure can be given by `ref_pressure`.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
ref_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The resulting parcel temperature at levels given by `pressure`
See Also
--------
moist_lapse : Calculate parcel temperature assuming liquid saturation
processes
parcel_profile : Calculate complete parcel profile
potential_temperature
|
f8476:m4
|
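A sketch of the dry_lapse entry above, lifting a parcel dry adiabatically from the first listed pressure level (values hypothetical):

    import numpy as np
    import metpy.calc as mpcalc
    from metpy.units import units

    pressure = np.array([1000.0, 900.0, 800.0, 700.0]) * units.hPa
    parcel_t = mpcalc.dry_lapse(pressure, 298.0 * units.kelvin)
    # parcel_t[0] is 298 K; later values cool along the dry adiabat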
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def moist_lapse(pressure, temperature, ref_pressure=None):
|
def dt(t, p):<EOL><INDENT>t = units.Quantity(t, temperature.units)<EOL>p = units.Quantity(p, pressure.units)<EOL>rs = saturation_mixing_ratio(p, t)<EOL>frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)<EOL>/ (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon<EOL>/ (mpconsts.Rd * t * t)))).to('<STR_LIT>')<EOL>return frac / p<EOL><DEDENT>if ref_pressure is None:<EOL><INDENT>ref_pressure = pressure[<NUM_LIT:0>]<EOL><DEDENT>pressure = pressure.to('<STR_LIT>')<EOL>ref_pressure = ref_pressure.to('<STR_LIT>')<EOL>temperature = atleast_1d(temperature)<EOL>side = '<STR_LIT:left>'<EOL>pres_decreasing = (pressure[<NUM_LIT:0>] > pressure[-<NUM_LIT:1>])<EOL>if pres_decreasing:<EOL><INDENT>pressure = pressure[::-<NUM_LIT:1>]<EOL>side = '<STR_LIT:right>'<EOL><DEDENT>ref_pres_idx = np.searchsorted(pressure.m, ref_pressure.m, side=side)<EOL>ret_temperatures = np.empty((<NUM_LIT:0>, temperature.shape[<NUM_LIT:0>]))<EOL>if ref_pressure > pressure.min():<EOL><INDENT>pres_down = np.append(ref_pressure, pressure[(ref_pres_idx - <NUM_LIT:1>)::-<NUM_LIT:1>])<EOL>trace_down = si.odeint(dt, temperature.squeeze(), pres_down.squeeze())<EOL>ret_temperatures = np.concatenate((ret_temperatures, trace_down[:<NUM_LIT:0>:-<NUM_LIT:1>]))<EOL><DEDENT>if ref_pressure < pressure.max():<EOL><INDENT>pres_up = np.append(ref_pressure, pressure[ref_pres_idx:])<EOL>trace_up = si.odeint(dt, temperature.squeeze(), pres_up.squeeze())<EOL>ret_temperatures = np.concatenate((ret_temperatures, trace_up[<NUM_LIT:1>:]))<EOL><DEDENT>if pres_decreasing:<EOL><INDENT>ret_temperatures = ret_temperatures[::-<NUM_LIT:1>]<EOL><DEDENT>return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)<EOL>
|
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `ref_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
The starting temperature
ref_pressure : `pint.Quantity`, optional
The reference pressure. If not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The temperature corresponding to the starting temperature and
pressure levels.
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
|
f8476:m5
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def lcl(pressure, temperature, dewpt, max_iters=<NUM_LIT:50>, eps=<NUM_LIT>):
|
def _lcl_iter(p, p0, w, t):<EOL><INDENT>td = dewpoint(vapor_pressure(units.Quantity(p, pressure.units), w))<EOL>return (p0 * (td / t) ** (<NUM_LIT:1.> / mpconsts.kappa)).m<EOL><DEDENT>w = mixing_ratio(saturation_vapor_pressure(dewpt), pressure)<EOL>fp = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),<EOL>xtol=eps, maxiter=max_iters)<EOL>lcl_p = fp * pressure.units<EOL>return lcl_p, dewpoint(vapor_pressure(lcl_p, w))<EOL>
|
r"""Calculate the lifted condensation level (LCL) using from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpt`,
and `pressure`.
Parameters
----------
pressure : `pint.Quantity`
The starting atmospheric pressure
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dew point
Returns
-------
`(pint.Quantity, pint.Quantity)`
The LCL pressure and temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dew point from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
|
f8476:m6
|
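A sketch of the lcl entry above for a hypothetical surface parcel:

    import metpy.calc as mpcalc
    from metpy.units import units

    lcl_pressure, lcl_temperature = mpcalc.lcl(
        1000.0 * units.hPa, 25.0 * units.degC, 20.0 * units.degC)
    # roughly 930 hPa and about 19 degC for these starting values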
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def lfc(pressure, temperature, dewpt, parcel_temperature_profile=None, dewpt_start=None):
|
<EOL>if parcel_temperature_profile is None:<EOL><INDENT>new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)<EOL>pressure, temperature, _, parcel_temperature_profile = new_stuff<EOL>temperature = temperature.to('<STR_LIT>')<EOL>parcel_temperature_profile = parcel_temperature_profile.to('<STR_LIT>')<EOL><DEDENT>if dewpt_start is None:<EOL><INDENT>dewpt_start = dewpt[<NUM_LIT:0>]<EOL><DEDENT>if np.isclose(parcel_temperature_profile[<NUM_LIT:0>].m, temperature[<NUM_LIT:0>].m):<EOL><INDENT>x, y = find_intersections(pressure[<NUM_LIT:1>:], parcel_temperature_profile[<NUM_LIT:1>:],<EOL>temperature[<NUM_LIT:1>:], direction='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>x, y = find_intersections(pressure, parcel_temperature_profile,<EOL>temperature, direction='<STR_LIT>')<EOL><DEDENT>this_lcl = lcl(pressure[<NUM_LIT:0>], parcel_temperature_profile[<NUM_LIT:0>], dewpt_start)<EOL>if len(x) == <NUM_LIT:0>:<EOL><INDENT>mask = pressure < this_lcl[<NUM_LIT:0>]<EOL>if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):<EOL><INDENT>return np.nan * pressure.units, np.nan * temperature.units<EOL><DEDENT>else: <EOL><INDENT>x, y = this_lcl<EOL>return x, y<EOL><DEDENT><DEDENT>else:<EOL><INDENT>idx = x < this_lcl[<NUM_LIT:0>]<EOL>if not any(idx):<EOL><INDENT>x, y = this_lcl<EOL>return x, y<EOL><DEDENT>else:<EOL><INDENT>x = x[idx]<EOL>y = y[idx]<EOL>return x[<NUM_LIT:0>], y[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>
|
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpt_start: `pint.Quantity`, optional
The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
Returns
-------
`pint.Quantity`
The LFC pressure and temperature
See Also
--------
parcel_profile
|
f8476:m7
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def el(pressure, temperature, dewpt, parcel_temperature_profile=None):
|
<EOL>if parcel_temperature_profile is None:<EOL><INDENT>new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)<EOL>pressure, temperature, _, parcel_temperature_profile = new_stuff<EOL>temperature = temperature.to('<STR_LIT>')<EOL>parcel_temperature_profile = parcel_temperature_profile.to('<STR_LIT>')<EOL><DEDENT>if parcel_temperature_profile[-<NUM_LIT:1>] > temperature[-<NUM_LIT:1>]:<EOL><INDENT>return np.nan * pressure.units, np.nan * temperature.units<EOL><DEDENT>x, y = find_intersections(pressure[<NUM_LIT:1>:], parcel_temperature_profile[<NUM_LIT:1>:], temperature[<NUM_LIT:1>:])<EOL>if len(x) > <NUM_LIT:0>:<EOL><INDENT>return x[-<NUM_LIT:1>], y[-<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>return np.nan * pressure.units, np.nan * temperature.units<EOL><DEDENT>
|
r"""Calculate the equilibrium level.
This works by finding the last intersection of the ideal parcel path and
the measured environmental temperature. If there is one or fewer intersections, there is
no equilibrium level.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the EL. Defaults to the
surface parcel profile.
Returns
-------
`pint.Quantity, pint.Quantity`
The EL pressure and temperature
See Also
--------
parcel_profile
|
f8476:m8
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def parcel_profile(pressure, temperature, dewpt):
|
_, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpt)<EOL>return concatenate((t_l, t_u))<EOL>
|
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpt`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. The first entry should be the starting
point pressure.
temperature : `pint.Quantity`
The starting temperature
dewpt : `pint.Quantity`
The starting dew point
Returns
-------
`pint.Quantity`
The parcel temperatures at the specified pressure levels.
See Also
--------
lcl, moist_lapse, dry_lapse
|
f8476:m9
|
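A sketch of the parcel_profile entry above, with a hypothetical pressure array whose first element is the starting level (as the docstring requires):

    import numpy as np
    import metpy.calc as mpcalc
    from metpy.units import units

    pressure = np.array([1000.0, 950.0, 900.0, 850.0, 800.0, 750.0]) * units.hPa
    temperature = 25.0 * units.degC   # starting (surface) temperature
    dewpoint = 20.0 * units.degC      # starting (surface) dew point

    profile = mpcalc.parcel_profile(pressure, temperature, dewpoint)
    # one parcel temperature (in kelvin) per pressure level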
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def parcel_profile_with_lcl(pressure, temperature, dewpt):
|
p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[<NUM_LIT:0>],<EOL>dewpt[<NUM_LIT:0>])<EOL>new_press = concatenate((p_l, p_lcl, p_u))<EOL>prof_temp = concatenate((t_l, t_lcl, t_u))<EOL>new_temp = _insert_lcl_level(pressure, temperature, p_lcl)<EOL>new_dewp = _insert_lcl_level(pressure, dewpt, p_lcl)<EOL>return new_press, new_temp, new_dewp, prof_temp<EOL>
|
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpt`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile. This function returns
a profile that includes the LCL.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. The first entry should be the starting
point pressure.
temperature : `pint.Quantity`
The atmospheric temperature at the levels in `pressure`. The first entry should be the
starting point temperature.
dewpt : `pint.Quantity`
The atmospheric dew point at the levels in `pressure`. The first entry should be the
starting dew point.
Returns
-------
pressure : `pint.Quantity`
The parcel profile pressures, which includes the specified levels and the LCL
ambient_temperature : `pint.Quantity`
The atmospheric temperature values, including the value interpolated to the LCL level
ambient_dew_point : `pint.Quantity`
The atmospheric dew point values, including the value interpolated to the LCL level
profile_temperature : `pint.Quantity`
The parcel profile temperatures at all of the levels in the returned pressures array,
including the LCL.
See Also
--------
lcl, moist_lapse, dry_lapse, parcel_profile
|
f8476:m10
|
def _parcel_profile_helper(pressure, temperature, dewpt):
|
<EOL>press_lcl, temp_lcl = lcl(pressure[<NUM_LIT:0>], temperature, dewpt)<EOL>press_lcl = press_lcl.to(pressure.units)<EOL>press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))<EOL>temp_lower = dry_lapse(press_lower, temperature)<EOL>if _greater_or_close(np.nanmin(pressure), press_lcl.m):<EOL><INDENT>return (press_lower[:-<NUM_LIT:1>], press_lcl, np.array([]) * press_lower.units,<EOL>temp_lower[:-<NUM_LIT:1>], temp_lcl, np.array([]) * temp_lower.units)<EOL><DEDENT>press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))<EOL>temp_upper = moist_lapse(press_upper, temp_lower[-<NUM_LIT:1>]).to(temp_lower.units)<EOL>return (press_lower[:-<NUM_LIT:1>], press_lcl, press_upper[<NUM_LIT:1>:],<EOL>temp_lower[:-<NUM_LIT:1>], temp_lcl, temp_upper[<NUM_LIT:1>:])<EOL>
|
Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
|
f8476:m11
|
def _insert_lcl_level(pressure, temperature, lcl_pressure):
|
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)<EOL>loc = pressure.size - pressure[::-<NUM_LIT:1>].searchsorted(lcl_pressure)<EOL>return np.insert(temperature.m, loc, interp_temp.m) * temperature.units<EOL>
|
Insert the LCL pressure into the profile.
|
f8476:m12
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def vapor_pressure(pressure, mixing):
|
return pressure * mixing / (mpconsts.epsilon + mixing)<EOL>
|
r"""Calculate water vapor (partial) pressure.
Given total `pressure` and water vapor `mixing` ratio, calculates the
partial pressure of water vapor.
Parameters
----------
pressure : `pint.Quantity`
total atmospheric pressure
mixing : `pint.Quantity`
dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
The ambient water vapor (partial) pressure in the same units as
`pressure`.
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.71:
.. math:: e = p \frac{r}{r + \epsilon}
See Also
--------
saturation_vapor_pressure, dewpoint
|
f8476:m13
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>')<EOL>def saturation_vapor_pressure(temperature):
|
<EOL>return sat_pressure_0c * np.exp(<NUM_LIT> * (temperature - <NUM_LIT> * units.kelvin)<EOL>/ (temperature - <NUM_LIT> * units.kelvin))<EOL>
|
r"""Calculate the saturation water vapor (partial) pressure.
Parameters
----------
temperature : `pint.Quantity`
The temperature
Returns
-------
`pint.Quantity`
The saturation water vapor (partial) pressure
See Also
--------
vapor_pressure, dewpoint
Notes
-----
Instead of temperature, dewpoint may be used in order to calculate
the actual (ambient) water vapor (partial) pressure.
The formula used is that from [Bolton1980]_ for T in degrees Celsius:
.. math:: 6.112 e^\frac{17.67T}{T + 243.5}
|
f8476:m14
|
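A one-line check of the saturation vapor pressure entry above; by the Bolton form quoted in the docstring, the value at 0 degC should come out to 6.112 hPa:

    import metpy.calc as mpcalc
    from metpy.units import units

    es = mpcalc.saturation_vapor_pressure(0.0 * units.degC)
    # es equals the sat_pressure_0c constant (6.112 hPa) at exactly 0 degC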
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def dewpoint_rh(temperature, rh):
|
if np.any(rh > <NUM_LIT>):<EOL><INDENT>warnings.warn('<STR_LIT>')<EOL><DEDENT>return dewpoint(rh * saturation_vapor_pressure(temperature))<EOL>
|
r"""Calculate the ambient dewpoint given air temperature and relative humidity.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
rh : `pint.Quantity`
Relative humidity expressed as a ratio in the range 0 < rh <= 1
Returns
-------
`pint.Quantity`
The dew point temperature
See Also
--------
dewpoint, saturation_vapor_pressure
|
f8476:m15
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>')<EOL>def dewpoint(e):
|
val = np.log(e / sat_pressure_0c)<EOL>return <NUM_LIT:0.> * units.degC + <NUM_LIT> * units.delta_degC * val / (<NUM_LIT> - val)<EOL>
|
r"""Calculate the ambient dewpoint given the vapor pressure.
Parameters
----------
e : `pint.Quantity`
Water vapor partial pressure
Returns
-------
`pint.Quantity`
Dew point temperature
See Also
--------
dewpoint_rh, saturation_vapor_pressure, vapor_pressure
Notes
-----
This function inverts the [Bolton1980]_ formula for saturation vapor
pressure to instead calculate the temperature. This yields the following
formula for dewpoint in degrees Celsius:
.. math:: T = \frac{243.5 log(e / 6.112)}{17.67 - log(e / 6.112)}
|
f8476:m16
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def mixing_ratio(part_press, tot_press, molecular_weight_ratio=mpconsts.epsilon):
|
return (molecular_weight_ratio * part_press<EOL>/ (tot_press - part_press)).to('<STR_LIT>')<EOL>
|
r"""Calculate the mixing ratio of a gas.
This calculates mixing ratio given its partial pressure and the total pressure of
the air. There are no required units for the input arrays, other than that
they have the same units.
Parameters
----------
part_press : `pint.Quantity`
Partial pressure of the constituent gas
tot_press : `pint.Quantity`
Total air pressure
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The (mass) mixing ratio, dimensionless (e.g. kg/kg or g/g)
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.73:
.. math:: r = \epsilon \frac{e}{p - e}
See Also
--------
saturation_mixing_ratio, vapor_pressure
|
f8476:m17
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def saturation_mixing_ratio(tot_press, temperature):
|
return mixing_ratio(saturation_vapor_pressure(temperature), tot_press)<EOL>
|
r"""Calculate the saturation mixing ratio of water vapor.
This calculation is given total pressure and the temperature. The implementation
uses the formula outlined in [Hobbs1977]_ pg.73.
Parameters
----------
tot_press: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
The temperature
Returns
-------
`pint.Quantity`
The saturation mixing ratio, dimensionless
|
f8476:m18
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def equivalent_potential_temperature(pressure, temperature, dewpoint):
|
t = temperature.to('<STR_LIT>').magnitude<EOL>td = dewpoint.to('<STR_LIT>').magnitude<EOL>p = pressure.to('<STR_LIT>').magnitude<EOL>e = saturation_vapor_pressure(dewpoint).to('<STR_LIT>').magnitude<EOL>r = saturation_mixing_ratio(pressure, dewpoint).magnitude<EOL>t_l = <NUM_LIT> + <NUM_LIT:1.> / (<NUM_LIT:1.> / (td - <NUM_LIT>) + np.log(t / td) / <NUM_LIT>)<EOL>th_l = t * (<NUM_LIT:1000> / (p - e)) ** mpconsts.kappa * (t / t_l) ** (<NUM_LIT> * r)<EOL>th_e = th_l * np.exp((<NUM_LIT> / t_l - <NUM_LIT>) * r * (<NUM_LIT:1> + <NUM_LIT> * r))<EOL>return th_e * units.kelvin<EOL>
|
r"""Calculate equivalent potential temperature.
This calculation must be given an air parcel's pressure, temperature, and dewpoint.
The implementation uses the formula outlined in [Bolton1980]_:
First, the LCL temperature is calculated:
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
Which is then used to calculate the potential temperature at the LCL:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{L}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
dewpoint: `pint.Quantity`
Dewpoint of parcel
Returns
-------
`pint.Quantity`
The equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used, since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
|
f8476:m19
|
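A sketch of the equivalent potential temperature entry above with hypothetical 850-hPa parcel values:

    import metpy.calc as mpcalc
    from metpy.units import units

    theta_e = mpcalc.equivalent_potential_temperature(
        850.0 * units.hPa, 20.0 * units.degC, 18.0 * units.degC)
    # a pint.Quantity in kelvin, several tens of K above the dry potential temperature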
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>')<EOL>def saturation_equivalent_potential_temperature(pressure, temperature):
|
t = temperature.to('<STR_LIT>').magnitude<EOL>p = pressure.to('<STR_LIT>').magnitude<EOL>e = saturation_vapor_pressure(temperature).to('<STR_LIT>').magnitude<EOL>r = saturation_mixing_ratio(pressure, temperature).magnitude<EOL>th_l = t * (<NUM_LIT:1000> / (p - e)) ** mpconsts.kappa<EOL>th_es = th_l * np.exp((<NUM_LIT> / t - <NUM_LIT>) * r * (<NUM_LIT:1> + <NUM_LIT> * r))<EOL>return th_es * units.kelvin<EOL>
|
r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because
.. math:: T_{L} = T_{K}
it follows that
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
The saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
|
f8476:m20
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def virtual_temperature(temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
|
return temperature * ((mixing + molecular_weight_ratio)<EOL>/ (molecular_weight_ratio * (<NUM_LIT:1> + mixing)))<EOL>
|
r"""Calculate virtual temperature.
This calculation must be given an air parcel's temperature and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.80.
Parameters
----------
temperature: `pint.Quantity`
The temperature
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual temperature of the parcel
Notes
-----
.. math:: T_v = T \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
|
f8476:m21
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def virtual_potential_temperature(pressure, temperature, mixing,<EOL>molecular_weight_ratio=mpconsts.epsilon):
|
pottemp = potential_temperature(pressure, temperature)<EOL>return virtual_temperature(pottemp, mixing, molecular_weight_ratio)<EOL>
|
r"""Calculate virtual potential temperature.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Markowski2010]_ pg.13.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
The temperature
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding virtual potential temperature of the parcel
Notes
-----
.. math:: \Theta_v = \Theta \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
|
f8476:m22
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def density(pressure, temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
|
virttemp = virtual_temperature(temperature, mixing, molecular_weight_ratio)<EOL>return (pressure / (mpconsts.Rd * virttemp)).to(units.kilogram / units.meter ** <NUM_LIT:3>)<EOL>
|
r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
temperature: `pint.Quantity`
The temperature
pressure: `pint.Quantity`
Total atmospheric pressure
mixing : `pint.Quantity`
dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
|
f8476:m23
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def relative_humidity_wet_psychrometric(dry_bulb_temperature, web_bulb_temperature,<EOL>pressure, **kwargs):
|
return (psychrometric_vapor_pressure_wet(dry_bulb_temperature, web_bulb_temperature,<EOL>pressure, **kwargs)<EOL>/ saturation_vapor_pressure(dry_bulb_temperature))<EOL>
|
r"""Calculate the relative humidity with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
web_bulb_temperature: `pint.Quantity`
Wet bulb temperature
pressure: `pint.Quantity`
Total atmospheric pressure
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
.. math:: RH = \frac{e}{e_s}
* :math:`RH` is relative humidity as a unitless ratio
* :math:`e` is vapor pressure from the wet psychrometric calculation
* :math:`e_s` is the saturation vapor pressure
See Also
--------
psychrometric_vapor_pressure_wet, saturation_vapor_pressure
|
f8476:m24
|
@exporter.export<EOL>@preprocess_xarray<EOL>@check_units('<STR_LIT>', '<STR_LIT>', '<STR_LIT>')<EOL>def psychrometric_vapor_pressure_wet(dry_bulb_temperature, wet_bulb_temperature, pressure,<EOL>psychrometer_coefficient=<NUM_LIT> / units.kelvin):
|
return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient<EOL>* pressure * (dry_bulb_temperature - wet_bulb_temperature).to('<STR_LIT>'))<EOL>
|
r"""Calculate the vapor pressure with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8-2014]_, with
coefficients from [Fan1987]_.
Parameters
----------
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
pressure: `pint.Quantity`
Total atmospheric pressure
psychrometer_coefficient: `pint.Quantity`, optional
Psychrometer coefficient. Defaults to 6.21e-4 K^-1.
Returns
-------
`pint.Quantity`
Vapor pressure
Notes
-----
.. math:: e' = e'_w(T_w) - A p (T - T_w)
* :math:`e'` is vapor pressure
* :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature
:math:`T_w`
* :math:`p` is the pressure of the wet bulb
* :math:`T` is the temperature of the dry bulb
* :math:`T_w` is the temperature of the wet bulb
* :math:`A` is the psychrometer coefficient
Psychrometer coefficient depends on the specific instrument being used and the ventilation
of the instrument.
See Also
--------
saturation_vapor_pressure
|
f8476:m25
|