_id
stringlengths 2
7
| title
stringlengths 1
88
| partition
stringclasses 3
values | text
stringlengths 75
19.8k
| language
stringclasses 1
value | meta_information
dict |
|---|---|---|---|---|---|
q23600
|
heat_index
|
train
|
def heat_index(temperature, rh, mask_undefined=True):
    r"""Calculate the Heat Index from the current temperature and relative humidity.
    The implementation uses the formula outlined in [Rothfusz1990]_. This equation is a
    multi-variable least-squares regression of the values obtained in [Steadman1979]_.
    Parameters
    ----------
    temperature : `pint.Quantity`
        Air temperature
    rh : array_like
        The relative humidity expressed as a unitless ratio in the range [0, 1].
        Can also pass a percentage if proper units are attached.
    Returns
    -------
    `pint.Quantity`
        The corresponding Heat Index value(s)
    Other Parameters
    ----------------
    mask_undefined : bool, optional
        A flag indicating whether a masked array should be returned with
        values where heat index is undefined masked. These are values where
        the temperature < 80F or relative humidity < 40 percent. Defaults
        to `True`.
    See Also
    --------
    windchill
    """
    # Subtracting 0 degF converts the absolute temperature into a delta
    # quantity in degF so the polynomial's unit bookkeeping works out.
    delta = temperature.to(units.degF) - 0. * units.degF
    rh2 = rh * rh
    delta2 = delta * delta
    # Calculate the Heat Index -- constants converted for RH in [0, 1]
    hi = (-42.379 * units.degF
          + 2.04901523 * delta
          + 1014.333127 * units.delta_degF * rh
          - 22.475541 * delta * rh
          - 6.83783e-3 / units.delta_degF * delta2
          - 5.481717e2 * units.delta_degF * rh2
          + 1.22874e-1 / units.delta_degF * delta2 * rh
          + 8.5282 * delta * rh2
          - 1.99e-2 / units.delta_degF * delta2 * rh2)
    # See if we need to mask any undefined values: the regression is only
    # valid for temperature >= 80F and relative humidity >= 40%.
    if mask_undefined:
        mask = np.array((temperature < 80. * units.degF) | (rh < 40 * units.percent))
        if mask.any():
            hi = masked_array(hi, mask=mask)
    return hi
|
python
|
{
"resource": ""
}
|
q23601
|
apparent_temperature
|
train
|
def apparent_temperature(temperature, rh, speed, face_level_winds=False):
    r"""Calculate the current apparent temperature.
    Calculates the current apparent temperature based on the wind chill or heat index
    as appropriate for the current conditions. Follows [NWS10201]_.
    Parameters
    ----------
    temperature : `pint.Quantity`
        The air temperature
    rh : `pint.Quantity`
        The relative humidity expressed as a unitless ratio in the range [0, 1].
        Can also pass a percentage if proper units are attached.
    speed : `pint.Quantity`
        The wind speed at 10m. If instead the winds are at face level,
        `face_level_winds` should be set to `True` and the 1.5 multiplicative
        correction will be applied automatically.
    face_level_winds : bool, optional
        A flag indicating whether the wind speeds were measured at facial
        level instead of 10m, thus requiring a correction. Defaults to
        `False`.
    Returns
    -------
    `pint.Quantity`
        The corresponding apparent temperature value(s)
    See Also
    --------
    heat_index, windchill
    """
    # Remember whether the caller passed a scalar, so we can return one.
    is_not_scalar = isinstance(temperature.m, (list, tuple, np.ndarray))
    # Promote everything to at least 1-D so the masking logic below is uniform.
    temperature = atleast_1d(temperature)
    rh = atleast_1d(rh)
    speed = atleast_1d(speed)
    # Each result is masked wherever its formula does not apply.
    wind_chill_temperature = windchill(temperature, speed, face_level_winds=face_level_winds,
                                       mask_undefined=True).to(temperature.units)
    heat_index_temperature = heat_index(temperature, rh,
                                        mask_undefined=True).to(temperature.units)
    # Combine the heat index and wind chill arrays (no point has a value in both)
    app_temperature = np.ma.where(masked_array(wind_chill_temperature).mask,
                                  heat_index_temperature,
                                  wind_chill_temperature)
    if is_not_scalar:
        # Fill in missing areas where neither wind chill or heat index are applicable with the
        # ambient temperature.
        app_temperature[app_temperature.mask] = temperature[app_temperature.mask]
        return np.array(app_temperature) * temperature.units
    else:
        # Scalar case: fall back to the ambient temperature if masked, and
        # unwrap the 1-element array back to a scalar quantity.
        if app_temperature.mask:
            app_temperature = temperature.m
        return atleast_1d(app_temperature)[0] * temperature.units
|
python
|
{
"resource": ""
}
|
q23602
|
pressure_to_height_std
|
train
|
def pressure_to_height_std(pressure):
    r"""Convert pressure data to heights using the U.S. standard atmosphere.

    The implementation uses the formula outlined in [Hobbs1977]_ pg.60-61.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure

    Returns
    -------
    `pint.Quantity`
        The corresponding height value(s)

    Notes
    -----
    .. math:: Z = \frac{T_0}{\Gamma}[1-\frac{p}{p_0}^\frac{R\Gamma}{g}]
    """
    # U.S. standard atmosphere: sea-level temperature, lapse rate, and
    # sea-level pressure.
    sea_level_temperature = 288. * units.kelvin
    lapse_rate = 6.5 * units('K/km')
    sea_level_pressure = 1013.25 * units.mbar
    pressure_ratio = (pressure / sea_level_pressure).to('dimensionless')
    exponent = mpconsts.Rd * lapse_rate / mpconsts.g
    return (sea_level_temperature / lapse_rate) * (1 - pressure_ratio ** exponent)
|
python
|
{
"resource": ""
}
|
q23603
|
height_to_geopotential
|
train
|
def height_to_geopotential(height):
    r"""Compute geopotential for a given height.

    Parameters
    ----------
    height : `pint.Quantity`
        Height above sea level (array_like)

    Returns
    -------
    `pint.Quantity`
        The corresponding geopotential value(s)

    Examples
    --------
    >>> from metpy.constants import g, G, me, Re
    >>> import metpy.calc
    >>> from metpy.units import units
    >>> height = np.linspace(0,10000, num = 11) * units.m
    >>> geopot = metpy.calc.height_to_geopotential(height)
    >>> geopot
    <Quantity([ 0. 9817.46806283 19631.85526579 29443.16305888
    39251.39289118 49056.54621087 58858.62446525 68657.62910064
    78453.56156253 88246.42329545 98036.21574306], 'meter ** 2 / second ** 2')>

    Notes
    -----
    Derived from definition of geopotential in [Hobbs2006]_ pg.14 Eq.1.8.
    """
    # Geopotential is the work against gravity from the surface (radius Re)
    # up to radius Re + height, using Newtonian gravity G * me / r**2.
    inv_surface_radius = 1 / mpconsts.Re
    inv_radius_at_height = 1 / (mpconsts.Re + height)
    return mpconsts.G * mpconsts.me * (inv_surface_radius - inv_radius_at_height)
|
python
|
{
"resource": ""
}
|
q23604
|
geopotential_to_height
|
train
|
def geopotential_to_height(geopot):
    r"""Compute height from a given geopotential.
    Parameters
    ----------
    geopot : `pint.Quantity`
        Geopotential (array_like)
    Returns
    -------
    `pint.Quantity`
        The corresponding height value(s)
    Examples
    --------
    >>> from metpy.constants import g, G, me, Re
    >>> import metpy.calc
    >>> from metpy.units import units
    >>> height = np.linspace(0,10000, num = 11) * units.m
    >>> geopot = metpy.calc.height_to_geopotential(height)
    >>> geopot
    <Quantity([ 0. 9817.46806283 19631.85526579 29443.16305888
    39251.39289118 49056.54621087 58858.62446525 68657.62910064
    78453.56156253 88246.42329545 98036.21574306], 'meter ** 2 / second ** 2')>
    >>> height = metpy.calc.geopotential_to_height(geopot)
    >>> height
    <Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
    9000. 10000.], 'meter')>
    Notes
    -----
    Derived from definition of geopotential in [Hobbs2006]_ pg.14 Eq.1.8.
    """
    # Invert geopot = G * me * (1 / Re - 1 / (Re + z)) to solve for height z.
    height = (((1 / mpconsts.Re) - (geopot / (mpconsts.G * mpconsts.me))) ** -1) - mpconsts.Re
    return height
|
python
|
{
"resource": ""
}
|
q23605
|
height_to_pressure_std
|
train
|
def height_to_pressure_std(height):
    r"""Convert height data to pressures using the U.S. standard atmosphere.

    The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.

    Parameters
    ----------
    height : `pint.Quantity`
        Atmospheric height

    Returns
    -------
    `pint.Quantity`
        The corresponding pressure value(s)

    Notes
    -----
    .. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})}
    """
    # U.S. standard atmosphere: sea-level temperature, lapse rate, and
    # sea-level pressure.
    sea_level_temperature = 288. * units.kelvin
    lapse_rate = 6.5 * units('K/km')
    sea_level_pressure = 1013.25 * units.mbar
    exponent = mpconsts.g / (mpconsts.Rd * lapse_rate)
    return sea_level_pressure * (1 - (lapse_rate / sea_level_temperature) * height) ** exponent
|
python
|
{
"resource": ""
}
|
q23606
|
coriolis_parameter
|
train
|
def coriolis_parameter(latitude):
    r"""Calculate the coriolis parameter at each point.

    The implementation uses the formula outlined in [Hobbs1977]_ pg.370-371.

    Parameters
    ----------
    latitude : array_like
        Latitude at each point

    Returns
    -------
    `pint.Quantity`
        The corresponding coriolis force at each point
    """
    # Warn (and strip any units) if the input looks like degrees rather
    # than radians; latitude magnitudes cannot exceed pi/2 radians.
    lat_rad = _check_radians(latitude, max_radians=np.pi / 2)
    # f = 2 * Omega * sin(phi)
    return (2. * mpconsts.omega * np.sin(lat_rad)).to('1/s')
|
python
|
{
"resource": ""
}
|
q23607
|
sigma_to_pressure
|
train
|
def sigma_to_pressure(sigma, psfc, ptop):
    r"""Calculate pressure from sigma values.

    Parameters
    ----------
    sigma : ndarray
        The sigma levels to be converted to pressure levels.
    psfc : `pint.Quantity`
        The surface pressure value.
    ptop : `pint.Quantity`
        The pressure value at the top of the model domain.

    Returns
    -------
    `pint.Quantity`
        The pressure values at the given sigma levels.

    Raises
    ------
    ValueError
        If any sigma value falls outside [0, 1], or any pressure is negative.

    Notes
    -----
    Sigma definition adapted from [Philips1957]_.
    .. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
    * :math:`p` is pressure at a given `\sigma` level
    * :math:`\sigma` is non-dimensional, scaled pressure
    * :math:`p_{sfc}` is pressure at the surface or model floor
    * :math:`p_{top}` is pressure at the top of the model domain
    """
    if np.any(sigma < 0) or np.any(sigma > 1):
        raise ValueError('Sigma values should be bounded by 0 and 1')
    # Wrap in np.any so array-valued surface/top pressures are validated
    # correctly instead of raising an ambiguous-truth-value error.
    if np.any(psfc.magnitude < 0) or np.any(ptop.magnitude < 0):
        raise ValueError('Pressure input should be non-negative')
    return sigma * (psfc - ptop) + ptop
|
python
|
{
"resource": ""
}
|
q23608
|
_check_radians
|
train
|
def _check_radians(value, max_radians=2 * np.pi):
"""Input validation of values that could be in degrees instead of radians.
Parameters
----------
value : `pint.Quantity`
The input value to check.
max_radians : float
Maximum absolute value of radians before warning.
Returns
-------
`pint.Quantity`
The input value
"""
try:
value = value.to('radians').m
except AttributeError:
pass
if np.greater(np.nanmax(np.abs(value)), max_radians):
warnings.warn('Input over {} radians. '
'Ensure proper units are given.'.format(max_radians))
return value
|
python
|
{
"resource": ""
}
|
q23609
|
remove_observations_below_value
|
train
|
def remove_observations_below_value(x, y, z, val=0):
    r"""Remove all x, y, and z where z is less than val.

    Will not destroy original values.

    Parameters
    ----------
    x: array_like
        x coordinate.
    y: array_like
        y coordinate.
    z: array_like
        Observation value.
    val: float
        Value at which to threshold z.

    Returns
    -------
    x, y, z
        List of coordinate observation pairs without
        observation values less than val.
    """
    # Compute the boolean mask once instead of three times.
    keep = z >= val
    return x[keep], y[keep], z[keep]
|
python
|
{
"resource": ""
}
|
q23610
|
remove_nan_observations
|
train
|
def remove_nan_observations(x, y, z):
    r"""Remove all x, y, and z where z is nan.

    Will not destroy original values.

    Parameters
    ----------
    x: array_like
        x coordinate
    y: array_like
        y coordinate
    z: array_like
        observation value

    Returns
    -------
    x, y, z
        List of coordinate observation pairs without
        nan valued observations.
    """
    # Compute the validity mask once instead of three times.
    good = ~np.isnan(z)
    return x[good], y[good], z[good]
|
python
|
{
"resource": ""
}
|
q23611
|
SkewXTick.gridOn
|
train
|
def gridOn(self):  # noqa: N802
    """Control whether the gridline is drawn for this tick.

    The gridline is drawn only when it is enabled AND the tick either sits
    at its default location or its location falls within the current view
    interval — presumably to keep gridlines from ticks pushed outside the
    axes (e.g. by skewing) from being drawn; confirm against the SkewT use.
    """
    return (self._gridOn and (self._has_default_loc()
            or transforms.interval_contains(self.get_view_interval(), self.get_loc())))
|
python
|
{
"resource": ""
}
|
q23612
|
SkewXAxes._set_lim_and_transforms
|
train
|
def _set_lim_and_transforms(self):
    """Set limits and transforms.

    This is called once when the plot is created to set up all the
    transforms for the data, text and grids.
    """
    # Get the standard transform setup from the Axes base class
    Axes._set_lim_and_transforms(self)
    # Need to put the skew in the middle, after the scale and limits,
    # but before the transAxes. This way, the skew is done in Axes
    # coordinates thus performing the transform around the proper origin
    # We keep the pre-transAxes transform around for other users, like the
    # spines for finding bounds
    self.transDataToAxes = (self.transScale
                            + (self.transLimits
                               + transforms.Affine2D().skew_deg(self.rot, 0)))
    # Create the full transform from Data to Pixels
    self.transData = self.transDataToAxes + self.transAxes
    # Blended transforms like this need to have the skewing applied using
    # both axes, in axes coords like before.
    # (self.rot is the skew angle in degrees, set up elsewhere on the axes.)
    self._xaxis_transform = (
        transforms.blended_transform_factory(self.transScale + self.transLimits,
                                             transforms.IdentityTransform())
        + transforms.Affine2D().skew_deg(self.rot, 0)) + self.transAxes
|
python
|
{
"resource": ""
}
|
q23613
|
bzip_blocks_decompress_all
|
train
|
def bzip_blocks_decompress_all(data):
    """Decompress all of the bzip2-ed blocks.

    The input is a sequence of blocks, each prefixed by a 4-byte big-endian
    signed length (the sign bit may be set as a flag, so the absolute value
    is used). Returns the decompressed data as a `bytearray`.

    Raises
    ------
    ValueError
        If the very first block is not a valid bz2 stream.
    """
    frames = bytearray()
    offset = 0
    # Hoisted out of the loop: compile the 4-byte size header format once.
    size_struct = Struct('>l')
    while offset < len(data):
        size_bytes = data[offset:offset + 4]
        offset += 4
        block_cmp_bytes = abs(size_struct.unpack(size_bytes)[0])
        try:
            frames.extend(bz2.decompress(data[offset:offset + block_cmp_bytes]))
            offset += block_cmp_bytes
        except IOError:
            # If we've decompressed any frames, this is an error mid-stream, so warn, stop
            # trying to decompress and let processing proceed
            if frames:
                logging.warning('Error decompressing bz2 block stream at offset: %d',
                                offset - 4)
                break
            else:  # Otherwise, this isn't a bzip2 stream, so bail
                raise ValueError('Not a bz2 stream.')
    return frames
|
python
|
{
"resource": ""
}
|
q23614
|
nexrad_to_datetime
|
train
|
def nexrad_to_datetime(julian_date, ms_midnight):
    """Convert NEXRAD date time format to python `datetime.datetime`.

    NEXRAD encodes time as a day count since the epoch plus milliseconds
    since midnight. ``day`` and ``milli`` are module-level scale factors
    (presumably seconds per day and seconds per millisecond — verify at
    their definition) used to build a POSIX timestamp.
    """
    # Subtracting one from julian_date is because epoch date is 1
    return datetime.datetime.utcfromtimestamp((julian_date - 1) * day + ms_midnight * milli)
|
python
|
{
"resource": ""
}
|
q23615
|
remap_status
|
train
|
def remap_status(val):
    """Convert status integer value to appropriate bitmask."""
    # The high nibble being set flags the data as bad.
    bad = BAD_DATA if val & 0xF0 else 0
    # The low nibble selects the scan-status flags; unknown codes map to 0.
    lookup = {0: START_ELEVATION,
              1: 0,
              2: END_ELEVATION,
              3: START_ELEVATION | START_VOLUME,
              4: END_ELEVATION | END_VOLUME,
              5: START_ELEVATION | LAST_ELEVATION}
    return lookup.get(val & 0x0F, 0) | bad
|
python
|
{
"resource": ""
}
|
q23616
|
reduce_lists
|
train
|
def reduce_lists(d):
    """Replace single item lists in a dictionary with the single item.

    Modifies ``d`` in place; multi-item lists are left untouched.
    """
    for key, value in d.items():
        if len(value) == 1:
            d[key] = value[0]
|
python
|
{
"resource": ""
}
|
q23617
|
float16
|
train
|
def float16(val):
    """Convert a 16-bit floating point value to a standard Python float.

    Bit layout: sign in the MSB, exponent in the middle 5 bits (bias such
    that the scale is 2**(exp - 16)), fraction in the 10 LSBs.
    """
    mantissa = val & 0x03ff
    exponent = (val >> 10) & 0x1F
    negative = val >> 15
    if exponent:
        # Normalized: implicit leading 1 on the fraction.
        result = 2 ** (exponent - 16) * (1 + float(mantissa) / 2**10)
    else:
        # Denormalized: no implicit bit, fixed 2**-9 scale.
        result = float(mantissa) / 2**9
    return -result if negative else result
|
python
|
{
"resource": ""
}
|
q23618
|
date_elem
|
train
|
def date_elem(ind_days, ind_minutes):
    """Create a function to parse a datetime from the product-specific blocks.

    ``ind_days`` and ``ind_minutes`` index the day count and the
    minutes-since-midnight fields within a product block sequence.
    """
    def parse(seq):
        # Convert minutes since midnight to milliseconds for the helper.
        return nexrad_to_datetime(seq[ind_days], seq[ind_minutes] * 60 * 1000)
    return parse
|
python
|
{
"resource": ""
}
|
q23619
|
combine_elem
|
train
|
def combine_elem(ind1, ind2):
    """Create a function to combine two specified product-specific blocks into a single int.

    The returned function treats ``seq[ind1]`` as the high 16 bits and
    ``seq[ind2]`` as the low 16 bits. Note that it writes the
    sign-corrected (unsigned) values back into ``seq``.
    """
    def combine(seq):
        # Undo two's-complement sign so each half reads as unsigned 16-bit.
        for ind in (ind1, ind2):
            if seq[ind] < 0:
                seq[ind] += 2**16
        return (seq[ind1] << 16) | seq[ind2]
    return combine
|
python
|
{
"resource": ""
}
|
q23620
|
relative_humidity_from_dewpoint
|
train
|
def relative_humidity_from_dewpoint(temperature, dewpt):
    r"""Calculate the relative humidity.

    Computes relative humidity as the ratio of the actual vapor pressure
    (saturation vapor pressure at the dewpoint) to the saturation vapor
    pressure at the air temperature.

    Parameters
    ----------
    temperature : `pint.Quantity`
        The temperature
    dewpt : `pint.Quantity`
        The dew point temperature

    Returns
    -------
    `pint.Quantity`
        The relative humidity

    See Also
    --------
    saturation_vapor_pressure
    """
    actual_vapor_pressure = saturation_vapor_pressure(dewpt)
    sat_vapor_pressure = saturation_vapor_pressure(temperature)
    return actual_vapor_pressure / sat_vapor_pressure
|
python
|
{
"resource": ""
}
|
q23621
|
exner_function
|
train
|
def exner_function(pressure, reference_pressure=mpconsts.P0):
    r"""Calculate the Exner function.

    .. math:: \Pi = \left( \frac{p}{p_0} \right)^\kappa

    This can be used to calculate potential temperature from temperature (and visa-versa),
    since

    .. math:: \Pi = \frac{T}{\theta}

    Parameters
    ----------
    pressure : `pint.Quantity`
        The total atmospheric pressure
    reference_pressure : `pint.Quantity`, optional
        The reference pressure against which to calculate the Exner function, defaults to P0

    Returns
    -------
    `pint.Quantity`
        The value of the Exner function at the given pressure

    See Also
    --------
    potential_temperature
    temperature_from_potential_temperature
    """
    pressure_ratio = (pressure / reference_pressure).to('dimensionless')
    return pressure_ratio ** mpconsts.kappa
|
python
|
{
"resource": ""
}
|
q23622
|
dry_lapse
|
train
|
def dry_lapse(pressure, temperature, ref_pressure=None):
    r"""Calculate the temperature at a level assuming only dry processes.

    Lifts a parcel starting at `temperature`, conserving potential
    temperature. The starting pressure can be given by `ref_pressure`.

    Parameters
    ----------
    pressure : `pint.Quantity`
        The atmospheric pressure level(s) of interest
    temperature : `pint.Quantity`
        The starting temperature
    ref_pressure : `pint.Quantity`, optional
        The reference pressure. If not given, it defaults to the first element of the
        pressure array.

    Returns
    -------
    `pint.Quantity`
        The resulting parcel temperature at levels given by `pressure`

    See Also
    --------
    moist_lapse : Calculate parcel temperature assuming liquid saturation
                  processes
    parcel_profile : Calculate complete parcel profile
    potential_temperature
    """
    # Default the starting level to the first pressure in the profile.
    start_pressure = pressure[0] if ref_pressure is None else ref_pressure
    # Poisson's equation: T = T_ref * (p / p_ref) ** kappa
    return temperature * (pressure / start_pressure) ** mpconsts.kappa
|
python
|
{
"resource": ""
}
|
q23623
|
moist_lapse
|
train
|
def moist_lapse(pressure, temperature, ref_pressure=None):
    r"""Calculate the temperature at a level assuming liquid saturation processes.
    This function lifts a parcel starting at `temperature`. The starting pressure can
    be given by `ref_pressure`. Essentially, this function is calculating moist
    pseudo-adiabats.
    Parameters
    ----------
    pressure : `pint.Quantity`
        The atmospheric pressure level(s) of interest
    temperature : `pint.Quantity`
        The starting temperature
    ref_pressure : `pint.Quantity`, optional
        The reference pressure. If not given, it defaults to the first element of the
        pressure array.
    Returns
    -------
    `pint.Quantity`
        The temperature corresponding to the starting temperature and
        pressure levels.
    See Also
    --------
    dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
    parcel_profile : Calculate complete parcel profile
    Notes
    -----
    This function is implemented by integrating the following differential
    equation:
    .. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
    {C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
    This equation comes from [Bakhshaii2013]_.
    """
    # Right-hand side of the pseudoadiabatic ODE dT/dP, evaluated with
    # bare floats re-wrapped in the callers' units for the unit-aware math.
    def dt(t, p):
        t = units.Quantity(t, temperature.units)
        p = units.Quantity(p, pressure.units)
        rs = saturation_mixing_ratio(p, t)
        frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)
                / (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon
                                    / (mpconsts.Rd * t * t)))).to('kelvin')
        return frac / p
    if ref_pressure is None:
        ref_pressure = pressure[0]
    pressure = pressure.to('mbar')
    ref_pressure = ref_pressure.to('mbar')
    temperature = atleast_1d(temperature)
    side = 'left'
    pres_decreasing = (pressure[0] > pressure[-1])
    if pres_decreasing:
        # Everything is easier if pressures are in increasing order
        pressure = pressure[::-1]
        side = 'right'
    # Index where the reference pressure splits the (increasing) profile.
    ref_pres_idx = np.searchsorted(pressure.m, ref_pressure.m, side=side)
    ret_temperatures = np.empty((0, temperature.shape[0]))
    if ref_pressure > pressure.min():
        # Integrate downward in pressure
        pres_down = np.append(ref_pressure, pressure[(ref_pres_idx - 1)::-1])
        trace_down = si.odeint(dt, temperature.squeeze(), pres_down.squeeze())
        # Drop the starting point and flip back to increasing-pressure order.
        ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
    if ref_pressure < pressure.max():
        # Integrate upward in pressure
        pres_up = np.append(ref_pressure, pressure[ref_pres_idx:])
        trace_up = si.odeint(dt, temperature.squeeze(), pres_up.squeeze())
        # Drop the starting point, which duplicates the reference level.
        ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
    if pres_decreasing:
        # Restore the caller's original (decreasing) pressure ordering.
        ret_temperatures = ret_temperatures[::-1]
    return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
|
python
|
{
"resource": ""
}
|
q23624
|
el
|
train
|
def el(pressure, temperature, dewpt, parcel_temperature_profile=None):
    r"""Calculate the equilibrium level.
    This works by finding the last intersection of the ideal parcel path and
    the measured environmental temperature. If there is one or fewer intersections, there is
    no equilibrium level.
    Parameters
    ----------
    pressure : `pint.Quantity`
        The atmospheric pressure
    temperature : `pint.Quantity`
        The temperature at the levels given by `pressure`
    dewpt : `pint.Quantity`
        The dew point at the levels given by `pressure`
    parcel_temperature_profile: `pint.Quantity`, optional
        The parcel temperature profile from which to calculate the EL. Defaults to the
        surface parcel profile.
    Returns
    -------
    `pint.Quantity, pint.Quantity`
        The EL pressure and temperature
    See Also
    --------
    parcel_profile
    """
    # Default to surface parcel if no profile or starting pressure level is given
    if parcel_temperature_profile is None:
        new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
        pressure, temperature, _, parcel_temperature_profile = new_stuff
        temperature = temperature.to('degC')
        parcel_temperature_profile = parcel_temperature_profile.to('degC')
    # If the top of the sounding parcel is warmer than the environment, there is no EL
    if parcel_temperature_profile[-1] > temperature[-1]:
        return np.nan * pressure.units, np.nan * temperature.units
    # Otherwise the last intersection (as long as there is one) is the EL
    # (the surface level is excluded so a surface match doesn't count).
    x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:])
    if len(x) > 0:
        return x[-1], y[-1]
    else:
        # No crossing found: report the EL as NaN with proper units.
        return np.nan * pressure.units, np.nan * temperature.units
|
python
|
{
"resource": ""
}
|
q23625
|
_parcel_profile_helper
|
train
|
def _parcel_profile_helper(pressure, temperature, dewpt):
    """Help calculate parcel profiles.
    Returns the temperature and pressure, above, below, and including the LCL. The
    other calculation functions decide what to do with the pieces.
    """
    # Find the LCL
    press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpt)
    press_lcl = press_lcl.to(pressure.units)
    # Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
    # LCL is included in the levels. It's slightly redundant in that case, but simplifies
    # the logic for removing it later.
    press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
    temp_lower = dry_lapse(press_lower, temperature)
    # If the pressure profile doesn't make it to the lcl, we can stop here
    # (return empty arrays for the upper/moist segment).
    if _greater_or_close(np.nanmin(pressure), press_lcl.m):
        return (press_lower[:-1], press_lcl, np.array([]) * press_lower.units,
                temp_lower[:-1], temp_lcl, np.array([]) * temp_lower.units)
    # Find moist pseudo-adiabatic profile starting at the LCL
    press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
    temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
    # Return profile pieces; the appended/prepended LCL entries are sliced
    # off so each segment excludes the LCL point itself.
    return (press_lower[:-1], press_lcl, press_upper[1:],
            temp_lower[:-1], temp_lcl, temp_upper[1:])
|
python
|
{
"resource": ""
}
|
q23626
|
_insert_lcl_level
|
train
|
def _insert_lcl_level(pressure, temperature, lcl_pressure):
    """Insert the LCL pressure into the profile.

    Returns the temperature profile with the interpolated LCL temperature
    inserted at the position matching ``lcl_pressure``.
    """
    # Temperature at the LCL, interpolated from the surrounding levels.
    interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
    # Pressure needs to be increasing for searchsorted, so flip it and then convert
    # the index back to the original array
    loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
    return np.insert(temperature.m, loc, interp_temp.m) * temperature.units
|
python
|
{
"resource": ""
}
|
q23627
|
dewpoint_rh
|
train
|
def dewpoint_rh(temperature, rh):
    r"""Calculate the ambient dewpoint given air temperature and relative humidity.

    Parameters
    ----------
    temperature : `pint.Quantity`
        Air temperature
    rh : `pint.Quantity`
        Relative humidity expressed as a ratio in the range 0 < rh <= 1

    Returns
    -------
    `pint.Quantity`
        The dew point temperature

    See Also
    --------
    dewpoint, saturation_vapor_pressure
    """
    # A ratio well above 1 suggests the caller passed a bare percentage.
    if np.any(rh > 1.2):
        warnings.warn('Relative humidity >120%, ensure proper units.')
    # Actual vapor pressure, then invert the saturation relation for T_d.
    vapor_pressure = rh * saturation_vapor_pressure(temperature)
    return dewpoint(vapor_pressure)
|
python
|
{
"resource": ""
}
|
q23628
|
dewpoint
|
train
|
def dewpoint(e):
    r"""Calculate the ambient dewpoint given the vapor pressure.

    Parameters
    ----------
    e : `pint.Quantity`
        Water vapor partial pressure

    Returns
    -------
    `pint.Quantity`
        Dew point temperature

    See Also
    --------
    dewpoint_rh, saturation_vapor_pressure, vapor_pressure

    Notes
    -----
    This function inverts the [Bolton1980]_ formula for saturation vapor
    pressure to instead calculate the temperature. This yield the following
    formula for dewpoint in degrees Celsius:

    .. math:: T = \frac{243.5 log(e / 6.112)}{17.67 - log(e / 6.112)}
    """
    log_ratio = np.log(e / sat_pressure_0c)
    # Build the result as an offset from 0 degC so the units work out.
    return 0. * units.degC + 243.5 * units.delta_degC * log_ratio / (17.67 - log_ratio)
|
python
|
{
"resource": ""
}
|
q23629
|
mixing_ratio
|
train
|
def mixing_ratio(part_press, tot_press, molecular_weight_ratio=mpconsts.epsilon):
    r"""Calculate the mixing ratio of a gas.

    This calculates mixing ratio given its partial pressure and the total pressure of
    the air. There are no required units for the input arrays, other than that
    they have the same units.

    Parameters
    ----------
    part_press : `pint.Quantity`
        Partial pressure of the constituent gas
    tot_press : `pint.Quantity`
        Total air pressure
    molecular_weight_ratio : `pint.Quantity` or float, optional
        The ratio of the molecular weight of the constituent gas to that assumed
        for air. Defaults to the ratio for water vapor to dry air
        (:math:`\epsilon\approx0.622`).

    Returns
    -------
    `pint.Quantity`
        The (mass) mixing ratio, dimensionless (e.g. Kg/Kg or g/g)

    Notes
    -----
    This function is a straightforward implementation of the equation given in many places,
    such as [Hobbs1977]_ pg.73:

    .. math:: r = \epsilon \frac{e}{p - e}

    See Also
    --------
    saturation_mixing_ratio, vapor_pressure
    """
    # r = epsilon * e / (p - e), forced to a dimensionless quantity.
    ratio = molecular_weight_ratio * part_press / (tot_press - part_press)
    return ratio.to('dimensionless')
|
python
|
{
"resource": ""
}
|
q23630
|
equivalent_potential_temperature
|
train
|
def equivalent_potential_temperature(pressure, temperature, dewpoint):
    r"""Calculate equivalent potential temperature.

    This calculation must be given an air parcel's pressure, temperature, and dewpoint.
    The implementation uses the formula outlined in [Bolton1980]_:
    First, the LCL temperature is calculated:

    .. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56

    Which is then used to calculate the potential temperature at the LCL:

    .. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
              \left(\frac{T_{K}}{T_{L}}\right)^{.28r}

    Both of these are used to calculate the final equivalent potential temperature:

    .. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{L}}
                                              -1.78\right)*r(1+.448r)\right]

    Parameters
    ----------
    pressure: `pint.Quantity`
        Total atmospheric pressure
    temperature: `pint.Quantity`
        Temperature of parcel
    dewpoint: `pint.Quantity`
        Dewpoint of parcel

    Returns
    -------
    `pint.Quantity`
        The equivalent potential temperature of the parcel

    Notes
    -----
    [Bolton1980]_ formula for Theta-e is used, since according to
    [DaviesJones2009]_ it is the most accurate non-iterative formulation
    available.
    """
    # Work with bare magnitudes in fixed units for the regression constants.
    temp_k = temperature.to('kelvin').magnitude
    dew_k = dewpoint.to('kelvin').magnitude
    press_hpa = pressure.to('hPa').magnitude
    vapor_press = saturation_vapor_pressure(dewpoint).to('hPa').magnitude
    mix_ratio = saturation_mixing_ratio(pressure, dewpoint).magnitude
    # LCL temperature (Bolton eq. 15)
    t_lcl = 56 + 1. / (1. / (dew_k - 56) + np.log(temp_k / dew_k) / 800.)
    # Potential temperature at the LCL (Bolton eq. 24)
    theta_lcl = (temp_k * (1000 / (press_hpa - vapor_press)) ** mpconsts.kappa
                 * (temp_k / t_lcl) ** (0.28 * mix_ratio))
    # Equivalent potential temperature (Bolton eq. 39)
    theta_e = theta_lcl * np.exp((3036. / t_lcl - 1.78)
                                 * mix_ratio * (1 + 0.448 * mix_ratio))
    return theta_e * units.kelvin
|
python
|
{
"resource": ""
}
|
q23631
|
saturation_equivalent_potential_temperature
|
train
|
def saturation_equivalent_potential_temperature(pressure, temperature):
    r"""Calculate saturation equivalent potential temperature.

    This calculation must be given an air parcel's pressure and temperature.
    The implementation uses the formula outlined in [Bolton1980]_ for the
    equivalent potential temperature, and assumes a saturated process.

    Because the process is saturated, the LCL temperature equals the current
    temperature (:math:`T_{L} = T_{K}`), so the potential temperature at the
    LCL reduces to

    .. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k

    and the final saturation equivalent potential temperature is

    .. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
                                              -1.78\right)*r(1+.448r)\right]

    Parameters
    ----------
    pressure: `pint.Quantity`
        Total atmospheric pressure
    temperature: `pint.Quantity`
        Temperature of parcel

    Returns
    -------
    `pint.Quantity`
        The saturation equivalent potential temperature of the parcel

    Notes
    -----
    [Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
    [DaviesJones2009]_ it is the most accurate non-iterative formulation
    available.
    """
    # Work with bare magnitudes in fixed units for the regression constants.
    temp_k = temperature.to('kelvin').magnitude
    press_hpa = pressure.to('hPa').magnitude
    vapor_press = saturation_vapor_pressure(temperature).to('hPa').magnitude
    mix_ratio = saturation_mixing_ratio(pressure, temperature).magnitude
    # Potential temperature at the (saturated) LCL, i.e. at T itself.
    theta_lcl = temp_k * (1000 / (press_hpa - vapor_press)) ** mpconsts.kappa
    # Saturation equivalent potential temperature (Bolton eq. 39 with T_L = T).
    theta_es = theta_lcl * np.exp((3036. / temp_k - 1.78)
                                  * mix_ratio * (1 + 0.448 * mix_ratio))
    return theta_es * units.kelvin
|
python
|
{
"resource": ""
}
|
q23632
|
virtual_temperature
|
train
|
def virtual_temperature(temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
    r"""Calculate virtual temperature.

    This calculation must be given an air parcel's temperature and mixing ratio.
    The implementation uses the formula outlined in [Hobbs2006]_ pg.80.

    Parameters
    ----------
    temperature: `pint.Quantity`
        The temperature
    mixing : `pint.Quantity`
        dimensionless mass mixing ratio
    molecular_weight_ratio : `pint.Quantity` or float, optional
        The ratio of the molecular weight of the constituent gas to that assumed
        for air. Defaults to the ratio for water vapor to dry air.
        (:math:`\epsilon\approx0.622`).

    Returns
    -------
    `pint.Quantity`
        The corresponding virtual temperature of the parcel

    Notes
    -----
    .. math:: T_v = T \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
    """
    # Dimensionless moisture correction factor applied to the temperature.
    correction = ((mixing + molecular_weight_ratio)
                  / (molecular_weight_ratio * (1 + mixing)))
    return temperature * correction
|
python
|
{
"resource": ""
}
|
q23633
|
virtual_potential_temperature
|
train
|
def virtual_potential_temperature(pressure, temperature, mixing,
                                  molecular_weight_ratio=mpconsts.epsilon):
    r"""Calculate virtual potential temperature.

    This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
    The implementation uses the formula outlined in [Markowski2010]_ pg.13.

    Parameters
    ----------
    pressure: `pint.Quantity`
        Total atmospheric pressure
    temperature: `pint.Quantity`
        The temperature
    mixing : `pint.Quantity`
        dimensionless mass mixing ratio
    molecular_weight_ratio : `pint.Quantity` or float, optional
        The ratio of the molecular weight of the constituent gas to that assumed
        for air. Defaults to the ratio for water vapor to dry air.
        (:math:`\epsilon\approx0.622`).

    Returns
    -------
    `pint.Quantity`
        The corresponding virtual potential temperature of the parcel

    Notes
    -----
    .. math:: \Theta_v = \Theta \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
    """
    # Apply the virtual-temperature moisture correction to theta.
    theta = potential_temperature(pressure, temperature)
    return virtual_temperature(theta, mixing, molecular_weight_ratio)
|
python
|
{
"resource": ""
}
|
q23634
|
density
|
train
|
def density(pressure, temperature, mixing, molecular_weight_ratio=mpconsts.epsilon):
    r"""Calculate density.

    Requires the parcel's pressure, temperature, and mass mixing ratio; follows
    the formula in [Hobbs2006]_ pg. 67.

    Parameters
    ----------
    temperature: `pint.Quantity`
        The temperature
    pressure: `pint.Quantity`
        Total atmospheric pressure
    mixing : `pint.Quantity`
        dimensionless mass mixing ratio
    molecular_weight_ratio : `pint.Quantity` or float, optional
        The ratio of the molecular weight of the constituent gas to that assumed
        for air. Defaults to the ratio for water vapor to dry air
        (:math:`\epsilon\approx0.622`).

    Returns
    -------
    `pint.Quantity`
        The corresponding density of the parcel

    Notes
    -----
    .. math:: \rho = \frac{p}{R_dT_v}

    """
    # Ideal gas law with moisture folded into the virtual temperature
    tv = virtual_temperature(temperature, mixing, molecular_weight_ratio)
    rho = pressure / (mpconsts.Rd * tv)
    return rho.to(units.kilogram / units.meter ** 3)
|
python
|
{
"resource": ""
}
|
q23635
|
relative_humidity_wet_psychrometric
|
train
|
def relative_humidity_wet_psychrometric(dry_bulb_temperature, web_bulb_temperature,
                                        pressure, **kwargs):
    r"""Calculate the relative humidity with wet bulb and dry bulb temperatures.

    Uses the psychrometric relationship from [WMO8-2014]_ with coefficients
    from [Fan1987]_.

    Parameters
    ----------
    dry_bulb_temperature: `pint.Quantity`
        Dry bulb temperature
    web_bulb_temperature: `pint.Quantity`
        Wet bulb temperature (parameter name kept as-is for API compatibility)
    pressure: `pint.Quantity`
        Total atmospheric pressure

    Returns
    -------
    `pint.Quantity`
        Relative humidity

    Notes
    -----
    .. math:: RH = \frac{e}{e_s}

    * :math:`RH` is relative humidity as a unitless ratio
    * :math:`e` is vapor pressure from the wet psychrometric calculation
    * :math:`e_s` is the saturation vapor pressure

    See Also
    --------
    psychrometric_vapor_pressure_wet, saturation_vapor_pressure

    """
    # Actual vapor pressure from the psychrometric relation, normalized by saturation
    vapor_pres = psychrometric_vapor_pressure_wet(dry_bulb_temperature,
                                                  web_bulb_temperature, pressure, **kwargs)
    return vapor_pres / saturation_vapor_pressure(dry_bulb_temperature)
|
python
|
{
"resource": ""
}
|
q23636
|
psychrometric_vapor_pressure_wet
|
train
|
def psychrometric_vapor_pressure_wet(dry_bulb_temperature, wet_bulb_temperature, pressure,
                                     psychrometer_coefficient=6.21e-4 / units.kelvin):
    r"""Calculate the vapor pressure with wet bulb and dry bulb temperatures.

    Uses the psychrometric relationship from [WMO8-2014]_ with coefficients
    from [Fan1987]_.

    Parameters
    ----------
    dry_bulb_temperature: `pint.Quantity`
        Dry bulb temperature
    wet_bulb_temperature: `pint.Quantity`
        Wet bulb temperature
    pressure: `pint.Quantity`
        Total atmospheric pressure
    psychrometer_coefficient: `pint.Quantity`, optional
        Psychrometer coefficient. Defaults to 6.21e-4 K^-1.

    Returns
    -------
    `pint.Quantity`
        Vapor pressure

    Notes
    -----
    .. math:: e' = e'_w(T_w) - A p (T - T_w)

    * :math:`e'` is vapor pressure
    * :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at
      temperature :math:`T_w`
    * :math:`p` is the pressure of the wet bulb
    * :math:`T` is the temperature of the dry bulb
    * :math:`T_w` is the temperature of the wet bulb
    * :math:`A` is the psychrometer coefficient

    The psychrometer coefficient depends on the specific instrument and its
    ventilation.

    See Also
    --------
    saturation_vapor_pressure

    """
    # Saturation vapor pressure at the wet bulb, reduced by the psychrometric term
    sat_vp_wet = saturation_vapor_pressure(wet_bulb_temperature)
    depression = (dry_bulb_temperature - wet_bulb_temperature).to('kelvin')
    return sat_vp_wet - psychrometer_coefficient * pressure * depression
|
python
|
{
"resource": ""
}
|
q23637
|
cape_cin
|
train
|
def cape_cin(pressure, temperature, dewpt, parcel_profile):
    r"""Calculate CAPE and CIN.
    Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
    of a given upper air profile and parcel path. CIN is integrated between the surface and
    LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points of
    the measured temperature profile and parcel profile are linearly interpolated.
    Parameters
    ----------
    pressure : `pint.Quantity`
        The atmospheric pressure level(s) of interest. The first entry should be the starting
        point pressure.
    temperature : `pint.Quantity`
        The atmospheric temperature corresponding to pressure.
    dewpt : `pint.Quantity`
        The atmospheric dew point corresponding to pressure.
    parcel_profile : `pint.Quantity`
        The temperature profile of the parcel
    Returns
    -------
    `pint.Quantity`
        Convective available potential energy (CAPE).
    `pint.Quantity`
        Convective inhibition (CIN).
    Notes
    -----
    Formula adopted from [Hobbs1977]_.
    .. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
    .. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
    * :math:`CAPE` Convective available potential energy
    * :math:`CIN` Convective inhibition
    * :math:`LFC` Pressure of the level of free convection
    * :math:`EL` Pressure of the equilibrium level
    * :math:`SFC` Level of the surface or beginning of parcel path
    * :math:`R_d` Gas constant
    * :math:`g` Gravitational acceleration
    * :math:`T_{parcel}` Parcel temperature
    * :math:`T_{env}` Environment temperature
    * :math:`p` Atmospheric pressure
    See Also
    --------
    lfc, el
    """
    # Calculate LFC limit of integration
    lfc_pressure, _ = lfc(pressure, temperature, dewpt,
                          parcel_temperature_profile=parcel_profile)
    # If there is no LFC, no need to proceed.
    if np.isnan(lfc_pressure):
        return 0 * units('J/kg'), 0 * units('J/kg')
    else:
        # Work with bare magnitudes from here on; the masks below compare plain floats
        lfc_pressure = lfc_pressure.magnitude
    # Calculate the EL limit of integration
    el_pressure, _ = el(pressure, temperature, dewpt,
                        parcel_temperature_profile=parcel_profile)
    # No EL and we use the top reading of the sounding.
    if np.isnan(el_pressure):
        el_pressure = pressure[-1].magnitude
    else:
        el_pressure = el_pressure.magnitude
    # Difference between the parcel path and measured temperature profiles
    y = (parcel_profile - temperature).to(units.degK)
    # Estimate zero crossings so the integration limits fall exactly on sign changes
    x, y = _find_append_zero_crossings(np.copy(pressure), y)
    # CAPE
    # Only use data between the LFC and EL for calculation
    # (pressure decreases with height, so LFC >= x >= EL)
    p_mask = _less_or_close(x, lfc_pressure) & _greater_or_close(x, el_pressure)
    x_clipped = x[p_mask]
    y_clipped = y[p_mask]
    # Integrating in ln(p) makes the trapezoid sum match the CAPE formula above
    cape = (mpconsts.Rd
            * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
    # CIN
    # Only use data between the surface and LFC for calculation
    p_mask = _greater_or_close(x, lfc_pressure)
    x_clipped = x[p_mask]
    y_clipped = y[p_mask]
    cin = (mpconsts.Rd
           * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))
    return cape, cin
|
python
|
{
"resource": ""
}
|
q23638
|
_find_append_zero_crossings
|
train
|
def _find_append_zero_crossings(x, y):
    r"""
    Find and interpolate zero crossings.
    Estimate the zero crossings of an x,y series and add estimated crossings to series,
    returning a sorted array with no duplicate values.
    Parameters
    ----------
    x : `pint.Quantity`
        x values of data
    y : `pint.Quantity`
        y values of data
    Returns
    -------
    x : `pint.Quantity`
        x values of data
    y : `pint.Quantity`
        y values of data
    """
    # Find and append crossings to the data
    # NOTE(review): the first point is excluded from the intersection search
    # (x[1:], y[1:]) — presumably so the starting point of the profile is not
    # itself treated as a crossing; confirm against find_intersections' contract.
    crossings = find_intersections(x[1:], y[1:], np.zeros_like(y[1:]) * y.units)
    x = concatenate((x, crossings[0]))
    y = concatenate((y, crossings[1]))
    # Resort so that data are in order
    sort_idx = np.argsort(x)
    x = x[sort_idx]
    y = y[sort_idx]
    # Remove duplicate data points if there are any
    # (ediff1d with to_end=1 keeps the final element; strictly-positive diffs
    # drop exact duplicates introduced by the appended crossings)
    keep_idx = np.ediff1d(x, to_end=[1]) > 0
    x = x[keep_idx]
    y = y[keep_idx]
    return x, y
|
python
|
{
"resource": ""
}
|
q23639
|
most_unstable_parcel
|
train
|
def most_unstable_parcel(pressure, temperature, dewpoint, heights=None,
                         bottom=None, depth=300 * units.hPa):
    """
    Determine the most unstable parcel in a layer.

    Finds the parcel whose equivalent potential temperature is the maximum
    within the specified layer.

    Parameters
    ----------
    pressure: `pint.Quantity`
        Atmospheric pressure profile
    temperature: `pint.Quantity`
        Atmospheric temperature profile
    dewpoint: `pint.Quantity`
        Atmospheric dewpoint profile
    heights: `pint.Quantity`, optional
        Atmospheric height profile. Standard atmosphere assumed when None (the default).
    bottom: `pint.Quantity`, optional
        Bottom of the layer to consider for the calculation in pressure or height.
        Defaults to using the bottom pressure or height.
    depth: `pint.Quantity`, optional
        Depth of the layer to consider for the calculation in pressure or height. Defaults
        to 300 hPa.

    Returns
    -------
    `pint.Quantity`
        Pressure, temperature, and dew point of most unstable parcel in the profile.
    integer
        Index of the most unstable parcel in the given profile

    See Also
    --------
    get_layer

    """
    press_layer, temp_layer, dewp_layer = get_layer(pressure, temperature, dewpoint,
                                                    bottom=bottom, depth=depth,
                                                    heights=heights, interpolate=False)
    # The most unstable parcel maximizes equivalent potential temperature
    idx = np.argmax(equivalent_potential_temperature(press_layer, temp_layer, dewp_layer))
    return press_layer[idx], temp_layer[idx], dewp_layer[idx], idx
|
python
|
{
"resource": ""
}
|
q23640
|
surface_based_cape_cin
|
train
|
def surface_based_cape_cin(pressure, temperature, dewpoint):
    r"""Calculate surface-based CAPE and CIN.

    Computes the convective available potential energy (CAPE) and convective
    inhibition (CIN) for a parcel lifted from the surface. CIN is integrated
    between the surface and LFC, CAPE between the LFC and EL (or top of
    sounding); crossings of the environment and parcel profiles are linearly
    interpolated.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure profile. The first entry should be the starting
        (surface) observation.
    temperature : `pint.Quantity`
        Temperature profile
    dewpoint : `pint.Quantity`
        Dewpoint profile

    Returns
    -------
    `pint.Quantity`
        Surface based Convective Available Potential Energy (CAPE).
    `pint.Quantity`
        Surface based Convective INhibition (CIN).

    See Also
    --------
    cape_cin, parcel_profile

    """
    # Build the parcel path (including the LCL point) and hand off to cape_cin
    press, temp, dewp, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
    return cape_cin(press, temp, dewp, profile)
|
python
|
{
"resource": ""
}
|
q23641
|
mixed_parcel
|
train
|
def mixed_parcel(p, temperature, dewpt, parcel_start_pressure=None,
                 heights=None, bottom=None, depth=100 * units.hPa, interpolate=True):
    r"""Calculate the properties of a parcel mixed from a layer.
    Determines the properties of an air parcel that is the result of complete mixing of a
    given atmospheric layer.
    Parameters
    ----------
    p : `pint.Quantity`
        Atmospheric pressure profile
    temperature : `pint.Quantity`
        Atmospheric temperature profile
    dewpt : `pint.Quantity`
        Atmospheric dewpoint profile
    parcel_start_pressure : `pint.Quantity`, optional
        Pressure at which the mixed parcel should begin (default None)
    heights: `pint.Quantity`, optional
        Atmospheric heights corresponding to the given pressures (default None)
    bottom : `pint.Quantity`, optional
        The bottom of the layer as a pressure or height above the surface pressure
        (default None)
    depth : `pint.Quantity`, optional
        The thickness of the layer as a pressure or height above the bottom of the layer
        (default 100 hPa)
    interpolate : bool, optional
        Interpolate the top and bottom points if they are not in the given data
    Returns
    -------
    `pint.Quantity, pint.Quantity, pint.Quantity`
        The pressure, temperature, and dewpoint of the mixed parcel.
    """
    # If a parcel starting pressure is not provided, use the surface
    if not parcel_start_pressure:
        parcel_start_pressure = p[0]
    # Calculate the potential temperature and mixing ratio over the layer
    theta = potential_temperature(p, temperature)
    # The actual mixing ratio equals the saturation mixing ratio evaluated at the dewpoint
    mixing_ratio = saturation_mixing_ratio(p, dewpt)
    # Mix the variables over the layer
    mean_theta, mean_mixing_ratio = mixed_layer(p, theta, mixing_ratio, bottom=bottom,
                                                heights=heights, depth=depth,
                                                interpolate=interpolate)
    # Convert back to temperature
    # Dividing by theta(p_start, 1 K) inverts the Exner scaling; multiplying by
    # 1 K restores proper temperature units after the dimensionless ratio
    mean_temperature = (mean_theta / potential_temperature(parcel_start_pressure,
                                                           1 * units.kelvin)) * units.kelvin
    # Convert back to dewpoint
    mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)
    mean_dewpoint = dewpoint(mean_vapor_pressure)
    return (parcel_start_pressure, mean_temperature.to(temperature.units),
            mean_dewpoint.to(dewpt.units))
|
python
|
{
"resource": ""
}
|
q23642
|
dry_static_energy
|
train
|
def dry_static_energy(heights, temperature):
    r"""Calculate the dry static energy of parcels.

    Computes the first two terms of equation 3.72 in [Hobbs2006]_.

    Notes
    -----
    .. math:: \text{dry static energy} = c_{pd} T + gz

    * :math:`T` is temperature
    * :math:`z` is height

    Parameters
    ----------
    heights : array-like
        Atmospheric height
    temperature : array-like
        Atmospheric temperature

    Returns
    -------
    `pint.Quantity`
        The dry static energy

    """
    # Sensible heat term plus geopotential term, expressed in kJ/kg
    return (mpconsts.Cp_d * temperature + mpconsts.g * heights).to('kJ/kg')
|
python
|
{
"resource": ""
}
|
q23643
|
moist_static_energy
|
train
|
def moist_static_energy(heights, temperature, specific_humidity):
    r"""Calculate the moist static energy of parcels.

    Follows equation 3.72 in [Hobbs2006]_.

    Notes
    -----
    .. math:: \text{moist static energy} = c_{pd} T + gz + L_v q

    * :math:`T` is temperature
    * :math:`z` is height
    * :math:`q` is specific humidity

    Parameters
    ----------
    heights : array-like
        Atmospheric height
    temperature : array-like
        Atmospheric temperature
    specific_humidity : array-like
        Atmospheric specific humidity

    Returns
    -------
    `pint.Quantity`
        The moist static energy

    """
    # Dry static energy plus the latent heat carried by the water vapor
    latent = mpconsts.Lv * specific_humidity.to('dimensionless')
    return (dry_static_energy(heights, temperature) + latent).to('kJ/kg')
|
python
|
{
"resource": ""
}
|
q23644
|
thickness_hydrostatic
|
train
|
def thickness_hydrostatic(pressure, temperature, **kwargs):
    r"""Calculate the thickness of a layer via the hypsometric equation.
    This thickness calculation uses the pressure and temperature profiles (and optionally
    mixing ratio) via the hypsometric equation with virtual temperature adjustment
    .. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
    which is based off of Equation 3.24 in [Hobbs2006]_.
    This assumes a hydrostatic atmosphere.
    Layer bottom and depth specified in pressure.
    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure profile
    temperature : `pint.Quantity`
        Atmospheric temperature profile
    mixing : `pint.Quantity`, optional
        Profile of dimensionless mass mixing ratio. If none is given, virtual temperature
        is simply set to be the given temperature.
    molecular_weight_ratio : `pint.Quantity` or float, optional
        The ratio of the molecular weight of the constituent gas to that assumed
        for air. Defaults to the ratio for water vapor to dry air.
        (:math:`\epsilon\approx0.622`).
    bottom : `pint.Quantity`, optional
        The bottom of the layer in pressure. Defaults to the first observation.
    depth : `pint.Quantity`, optional
        The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
        and 100 hPa if bottom is given.
    Returns
    -------
    `pint.Quantity`
        The thickness of the layer in meters.
    See Also
    --------
    thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature
    """
    # Optional arguments pulled from **kwargs (keyword-only emulation)
    mixing = kwargs.pop('mixing', None)
    molecular_weight_ratio = kwargs.pop('molecular_weight_ratio', mpconsts.epsilon)
    bottom = kwargs.pop('bottom', None)
    depth = kwargs.pop('depth', None)
    # Get the data for the layer, conditional upon bottom/depth being specified and mixing
    # ratio being given
    if bottom is None and depth is None:
        # Full profile: no slicing required
        if mixing is None:
            layer_p, layer_virttemp = pressure, temperature
        else:
            layer_p = pressure
            layer_virttemp = virtual_temperature(temperature, mixing, molecular_weight_ratio)
    else:
        # Subset to the requested layer before any virtual temperature adjustment
        if mixing is None:
            layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,
                                                depth=depth)
        else:
            layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing,
                                                     bottom=bottom, depth=depth)
            layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)
    # Take the integral (with unit handling) and return the result in meters
    # (trapezoidal rule in ln(p), per the hypsometric equation above)
    return (- mpconsts.Rd / mpconsts.g * np.trapz(
        layer_virttemp.to('K'), x=np.log(layer_p / units.hPa)) * units.K).to('m')
|
python
|
{
"resource": ""
}
|
q23645
|
thickness_hydrostatic_from_relative_humidity
|
train
|
def thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,
                                                 **kwargs):
    r"""Calculate the thickness of a layer given pressure, temperature and relative humidity.

    Like ``thickness_hydrostatic``, this evaluates the hypsometric equation with
    virtual temperature adjustment,

    .. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,

    based off of Equation 3.24 in [Hobbs2006]_, with virtual temperature derived
    from the temperature and relative humidity profiles. A hydrostatic
    atmosphere is assumed; layer bottom and depth are specified in pressure.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressure profile
    temperature : `pint.Quantity`
        Atmospheric temperature profile
    relative_humidity : `pint.Quantity`
        Atmospheric relative humidity profile. The relative humidity is expressed as a
        unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are
        attached.
    bottom : `pint.Quantity`, optional
        The bottom of the layer in pressure. Defaults to the first observation.
    depth : `pint.Quantity`, optional
        The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
        and 100 hPa if bottom is given.

    Returns
    -------
    `pint.Quantity`
        The thickness of the layer in meters.

    See Also
    --------
    thickness_hydrostatic, pressure_to_height_std, virtual_temperature,
    mixing_ratio_from_relative_humidity

    """
    bottom = kwargs.pop('bottom', None)
    depth = kwargs.pop('depth', None)
    # Convert the humidity profile to mixing ratio, then defer to thickness_hydrostatic
    w = mixing_ratio_from_relative_humidity(relative_humidity, temperature, pressure)
    return thickness_hydrostatic(pressure, temperature, mixing=w, bottom=bottom,
                                 depth=depth)
|
python
|
{
"resource": ""
}
|
q23646
|
brunt_vaisala_frequency_squared
|
train
|
def brunt_vaisala_frequency_squared(heights, potential_temperature, axis=0):
    r"""Calculate the square of the Brunt-Vaisala frequency.

    Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by

    .. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}

    based off of Equations 3.75 and 3.77 in [Hobbs2006]_.

    Parameters
    ----------
    heights : array-like
        One-dimensional profile of atmospheric height
    potential_temperature : array-like
        Atmospheric potential temperature
    axis : int, optional
        The axis corresponding to vertical in the potential temperature array, defaults to 0.

    Returns
    -------
    array-like
        The square of the Brunt-Vaisala frequency.

    See Also
    --------
    brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature

    """
    # Work in kelvin so g/theta carries consistent units
    theta = potential_temperature.to('K')
    dtheta_dz = first_derivative(theta, x=heights, axis=axis)
    return mpconsts.g / theta * dtheta_dz
|
python
|
{
"resource": ""
}
|
q23647
|
brunt_vaisala_frequency
|
train
|
def brunt_vaisala_frequency(heights, potential_temperature, axis=0):
    r"""Calculate the Brunt-Vaisala frequency.

    Computes

    .. math:: N = \left( \frac{g}{\theta} \frac{d\theta}{dz} \right)^\frac{1}{2}

    based off of Equations 3.75 and 3.77 in [Hobbs2006]_. This wraps
    `brunt_vaisala_frequency_squared`, filtering out negative (unstable)
    quantities before taking the square root.

    Parameters
    ----------
    heights : array-like
        One-dimensional profile of atmospheric height
    potential_temperature : array-like
        Atmospheric potential temperature
    axis : int, optional
        The axis corresponding to vertical in the potential temperature array, defaults to 0.

    Returns
    -------
    array-like
        Brunt-Vaisala frequency.

    See Also
    --------
    brunt_vaisala_frequency_squared, brunt_vaisala_period, potential_temperature

    """
    n_squared = brunt_vaisala_frequency_squared(heights, potential_temperature, axis=axis)
    # Negative N^2 marks statically unstable air, where the frequency is undefined
    n_squared[n_squared.magnitude < 0] = np.nan
    return np.sqrt(n_squared)
|
python
|
{
"resource": ""
}
|
q23648
|
brunt_vaisala_period
|
train
|
def brunt_vaisala_period(heights, potential_temperature, axis=0):
    r"""Calculate the Brunt-Vaisala period.
    This function is a helper function for `brunt_vaisala_frequency` that calculates the
    period of oscilation as in Exercise 3.13 of [Hobbs2006]_:
    .. math:: \tau = \frac{2\pi}{N}
    Returns `NaN` when :math:`N^2 \leq 0` (statically unstable or neutral air).
    Parameters
    ----------
    heights : array-like
        One-dimensional profile of atmospheric height
    potential_temperature : array-like
        Atmospheric potential temperature
    axis : int, optional
        The axis corresponding to vertical in the potential temperature array, defaults to 0.
    Returns
    -------
    array-like
        Brunt-Vaisala period.
    See Also
    --------
    brunt_vaisala_frequency, brunt_vaisala_frequency_squared, potential_temperature
    """
    bv_freq_squared = brunt_vaisala_frequency_squared(heights, potential_temperature,
                                                      axis=axis)
    # N^2 <= 0 (including zero, which would divide by zero below) has no finite period
    bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan
    return 2 * np.pi / np.sqrt(bv_freq_squared)
|
python
|
{
"resource": ""
}
|
q23649
|
wet_bulb_temperature
|
train
|
def wet_bulb_temperature(pressure, temperature, dewpoint):
    """Calculate the wet-bulb temperature using Normand's rule.
    This function calculates the wet-bulb temperature using the Normand method. The LCL is
    computed, and that parcel brought down to the starting pressure along a moist adiabat.
    The Normand method (and others) are described and compared by [Knox2017]_.
    Parameters
    ----------
    pressure : `pint.Quantity`
        Initial atmospheric pressure
    temperature : `pint.Quantity`
        Initial atmospheric temperature
    dewpoint : `pint.Quantity`
        Initial atmospheric dewpoint
    Returns
    -------
    array-like
        Wet-bulb temperature
    See Also
    --------
    lcl, moist_lapse
    """
    # Promote scalars to 1-d arrays so nditer below can broadcast over them
    if not hasattr(pressure, 'shape'):
        pressure = atleast_1d(pressure)
        temperature = atleast_1d(temperature)
        dewpoint = atleast_1d(dewpoint)
    # Iterate element-wise; the fourth (None) operand is allocated by nditer
    # to collect the results
    it = np.nditer([pressure, temperature, dewpoint, None],
                   op_dtypes=['float', 'float', 'float', 'float'],
                   flags=['buffered'])
    for press, temp, dewp, ret in it:
        # nditer yields bare floats; reattach units before the physics calls
        press = press * pressure.units
        temp = temp * temperature.units
        dewp = dewp * dewpoint.units
        lcl_pressure, lcl_temperature = lcl(press, temp, dewp)
        # Descend moist-adiabatically from the LCL back to the starting pressure
        moist_adiabat_temperatures = moist_lapse(concatenate([lcl_pressure, press]),
                                                 lcl_temperature)
        ret[...] = moist_adiabat_temperatures[-1]
    # If we started with a scalar, return a scalar
    if it.operands[3].size == 1:
        return it.operands[3][0] * moist_adiabat_temperatures.units
    return it.operands[3] * moist_adiabat_temperatures.units
|
python
|
{
"resource": ""
}
|
q23650
|
static_stability
|
train
|
def static_stability(pressure, temperature, axis=0):
    r"""Calculate the static stability within a vertical profile.

    .. math:: \sigma = -\frac{RT}{p} \frac{\partial \ln \theta}{\partial p}

    This formula is based on equation 4.3.6 in [Bluestein1992]_.

    Parameters
    ----------
    pressure : array-like
        Profile of atmospheric pressure
    temperature : array-like
        Profile of temperature
    axis : int, optional
        The axis corresponding to vertical in the pressure and temperature arrays, defaults
        to 0.

    Returns
    -------
    array-like
        The profile of static stability.

    """
    theta = potential_temperature(pressure, temperature)
    # d(ln theta)/dp along the vertical axis of the profile
    dlog_theta_dp = first_derivative(np.log(theta / units.K), x=pressure, axis=axis)
    return - mpconsts.Rd * temperature / pressure * dlog_theta_dp
|
python
|
{
"resource": ""
}
|
q23651
|
dewpoint_from_specific_humidity
|
train
|
def dewpoint_from_specific_humidity(specific_humidity, temperature, pressure):
    r"""Calculate the dewpoint from specific humidity, temperature, and pressure.

    Parameters
    ----------
    specific_humidity: `pint.Quantity`
        Specific humidity of air
    temperature: `pint.Quantity`
        Air temperature
    pressure: `pint.Quantity`
        Total atmospheric pressure

    Returns
    -------
    `pint.Quantity`
        Dewpoint temperature

    See Also
    --------
    relative_humidity_from_mixing_ratio, dewpoint_rh

    """
    # Go through relative humidity, then invert to a dewpoint
    rh = relative_humidity_from_specific_humidity(specific_humidity, temperature, pressure)
    return dewpoint_rh(temperature, rh)
|
python
|
{
"resource": ""
}
|
q23652
|
vertical_velocity_pressure
|
train
|
def vertical_velocity_pressure(w, pressure, temperature, mixing=0):
    r"""Calculate omega from w assuming hydrostatic conditions.

    Converts vertical velocity with respect to height
    :math:`\left(w = \frac{Dz}{Dt}\right)` to that with respect to pressure
    :math:`\left(\omega = \frac{Dp}{Dt}\right)` assuming hydrostatic conditions
    on the synoptic scale. By Equation 7.33 in [Hobbs2006]_,

    .. math: \omega \simeq -\rho g w

    Density (:math:`\rho`) is computed via :func:`density` from the given
    pressure and temperature. If `mixing` is given, the virtual temperature
    correction is used; otherwise dry air is assumed.

    Parameters
    ----------
    w: `pint.Quantity`
        Vertical velocity in terms of height
    pressure: `pint.Quantity`
        Total atmospheric pressure
    temperature: `pint.Quantity`
        Air temperature
    mixing: `pint.Quantity`, optional
        Mixing ratio of air

    Returns
    -------
    `pint.Quantity`
        Vertical velocity in terms of pressure (in Pascals / second)

    See Also
    --------
    density, vertical_velocity

    """
    air_density = density(pressure, temperature, mixing)
    omega = - mpconsts.g * air_density * w
    return omega.to('Pa/s')
|
python
|
{
"resource": ""
}
|
q23653
|
vertical_velocity
|
train
|
def vertical_velocity(omega, pressure, temperature, mixing=0):
    r"""Calculate w from omega assuming hydrostatic conditions.

    Converts vertical velocity with respect to pressure
    :math:`\left(\omega = \frac{Dp}{Dt}\right)` to that with respect to height
    :math:`\left(w = \frac{Dz}{Dt}\right)` assuming hydrostatic conditions on
    the synoptic scale. By Equation 7.33 in [Hobbs2006]_,

    .. math: \omega \simeq -\rho g w

    so that

    .. math w \simeq \frac{- \omega}{\rho g}

    Density (:math:`\rho`) is computed via :func:`density` from the given
    pressure and temperature. If `mixing` is given, the virtual temperature
    correction is used; otherwise dry air is assumed.

    Parameters
    ----------
    omega: `pint.Quantity`
        Vertical velocity in terms of pressure
    pressure: `pint.Quantity`
        Total atmospheric pressure
    temperature: `pint.Quantity`
        Air temperature
    mixing: `pint.Quantity`, optional
        Mixing ratio of air

    Returns
    -------
    `pint.Quantity`
        Vertical velocity in terms of height (in meters / second)

    See Also
    --------
    density, vertical_velocity_pressure

    """
    air_density = density(pressure, temperature, mixing)
    w = omega / (- mpconsts.g * air_density)
    return w.to('m/s')
|
python
|
{
"resource": ""
}
|
q23654
|
make_geo
|
train
|
def make_geo(attrs_dict, globe):
    """Handle geostationary projection.

    Maps CF grid-mapping attributes onto `cartopy.crs.Geostationary` keyword
    arguments and constructs the projection on the given globe.
    """
    attr_mapping = [('satellite_height', 'perspective_point_height'),
                    ('sweep_axis', 'sweep_angle_axis')]
    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)
    # CartoPy can't handle central latitude for Geostationary (nor should it)
    # Just remove it if it's 0.
    if not kwargs.get('central_latitude'):
        kwargs.pop('central_latitude', None)
    # If sweep_angle_axis is not present, we should look for fixed_angle_axis and adjust
    # NOTE(review): this indexes attrs_dict['fixed_angle_axis'] directly, so metadata
    # with neither sweep_angle_axis nor fixed_angle_axis raises KeyError here — confirm
    # whether a default should apply instead.
    if 'sweep_axis' not in kwargs:
        kwargs['sweep_axis'] = 'x' if attrs_dict['fixed_angle_axis'] == 'y' else 'y'
    return ccrs.Geostationary(globe=globe, **kwargs)
|
python
|
{
"resource": ""
}
|
q23655
|
make_lcc
|
train
|
def make_lcc(attrs_dict, globe):
    """Handle Lambert conformal conic projection.

    Maps CF grid-mapping attributes onto `cartopy.crs.LambertConformal` keyword
    arguments and constructs the projection on the given globe.
    """
    attr_mapping = [('central_longitude', 'longitude_of_central_meridian'),
                    ('standard_parallels', 'standard_parallel')]
    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)
    # CartoPy wants a sequence of standard parallels; wrap a lone scalar in a list
    if 'standard_parallels' in kwargs:
        parallels = kwargs['standard_parallels']
        try:
            len(parallels)
        except TypeError:
            kwargs['standard_parallels'] = [parallels]
    return ccrs.LambertConformal(globe=globe, **kwargs)
|
python
|
{
"resource": ""
}
|
q23656
|
make_mercator
|
train
|
def make_mercator(attrs_dict, globe):
    """Handle Mercator projection.

    Maps CF grid-mapping attributes onto `cartopy.crs.Mercator` keyword
    arguments and constructs the projection on the given globe.
    """
    attr_mapping = [('latitude_true_scale', 'standard_parallel'),
                    ('scale_factor', 'scale_factor_at_projection_origin')]
    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)
    # Work around CartoPy <= 0.16 not handling easting/northing for Mercator:
    # drop the offsets when they are absent or zero
    for offset in ('false_easting', 'false_northing'):
        if not kwargs.get(offset):
            kwargs.pop(offset, None)
    return ccrs.Mercator(globe=globe, **kwargs)
|
python
|
{
"resource": ""
}
|
q23657
|
make_stereo
|
train
|
def make_stereo(attrs_dict, globe):
    """Handle generic stereographic projection.

    Maps CF grid-mapping attributes onto `cartopy.crs.Stereographic` keyword
    arguments and constructs the projection on the given globe.
    """
    proj_kwargs = CFProjection.build_projection_kwargs(
        attrs_dict, [('scale_factor', 'scale_factor_at_projection_origin')])
    return ccrs.Stereographic(globe=globe, **proj_kwargs)
|
python
|
{
"resource": ""
}
|
q23658
|
CFProjection.build_projection_kwargs
|
train
|
def build_projection_kwargs(cls, source, mapping):
    """Handle mapping a dictionary of metadata to keyword arguments."""
    # Project-wide default mappings always apply; caller-specific ones extend them
    full_mapping = cls._default_attr_mapping + mapping
    return cls._map_arg_names(source, full_mapping)
|
python
|
{
"resource": ""
}
|
q23659
|
CFProjection._map_arg_names
|
train
|
def _map_arg_names(source, mapping):
"""Map one set of keys to another."""
return {cartopy_name: source[cf_name] for cartopy_name, cf_name in mapping
if cf_name in source}
|
python
|
{
"resource": ""
}
|
q23660
|
CFProjection.cartopy_globe
|
train
|
def cartopy_globe(self):
    """Initialize a `cartopy.crs.Globe` from the metadata."""
    attrs = self._attrs
    if 'earth_radius' in attrs:
        # A bare earth_radius attribute means a spherical globe of that radius
        kwargs = {'ellipse': 'sphere',
                  'semimajor_axis': attrs['earth_radius'],
                  'semiminor_axis': attrs['earth_radius']}
    else:
        mapping = [('semimajor_axis', 'semi_major_axis'),
                   ('semiminor_axis', 'semi_minor_axis'),
                   ('inverse_flattening', 'inverse_flattening')]
        kwargs = self._map_arg_names(attrs, mapping)
        # WGS84 with semi_major==semi_minor is NOT the same as spherical Earth,
        # so only fall back to a sphere when no spheroid info was given at all
        kwargs['ellipse'] = None if kwargs else 'sphere'
    return ccrs.Globe(**kwargs)
|
python
|
{
"resource": ""
}
|
q23661
|
CFProjection.to_cartopy
|
train
|
def to_cartopy(self):
    """Convert to a CartoPy projection."""
    globe = self.cartopy_globe
    proj_name = self._attrs['grid_mapping_name']
    try:
        make_proj = self.projection_registry[proj_name]
    except KeyError:
        # Surface an actionable error naming the unsupported grid mapping
        raise ValueError('Unhandled projection: {}'.format(proj_name))
    return make_proj(self._attrs, globe)
|
python
|
{
"resource": ""
}
|
q23662
|
add_timestamp
|
train
|
def add_timestamp(ax, time=None, x=0.99, y=-0.04, ha='right', high_contrast=False,
                  pretext='Created: ', time_format='%Y-%m-%dT%H:%M:%SZ', **kwargs):
    """Add a timestamp to a plot.

    Draws a timestamp on the axes, defaulting to the time of plot creation in
    ISO format.

    Parameters
    ----------
    ax : `matplotlib.axes.Axes`
        The `Axes` instance used for plotting
    time : `datetime.datetime`
        Specific time to be plotted - datetime.utcnow will be use if not specified
    x : float
        Relative x position on the axes of the timestamp
    y : float
        Relative y position on the axes of the timestamp
    ha : str
        Horizontal alignment of the time stamp string
    high_contrast : bool
        Outline text for increased contrast
    pretext : str
        Text to appear before the timestamp, optional. Defaults to 'Created: '
    time_format : str
        Display format of time, optional. Defaults to ISO format.

    Returns
    -------
    `matplotlib.text.Text`
        The `matplotlib.text.Text` instance created

    """
    # White text with a black outline stands out over both light and dark fields
    if high_contrast:
        text_kwargs = {
            'color': 'white',
            'path_effects': [mpatheffects.withStroke(linewidth=2, foreground='black')],
        }
    else:
        text_kwargs = {}
    text_kwargs.update(**kwargs)
    # Default to the current (UTC) time when no time is given
    if not time:
        time = datetime.utcnow()
    return ax.text(x, y, pretext + time.strftime(time_format), ha=ha,
                   transform=ax.transAxes, **text_kwargs)
|
python
|
{
"resource": ""
}
|
q23663
|
_add_logo
|
train
|
def _add_logo(fig, x=10, y=25, zorder=100, which='metpy', size='small', **kwargs):
"""Add the MetPy or Unidata logo to a figure.
Adds an image to the figure.
Parameters
----------
fig : `matplotlib.figure`
The `figure` instance used for plotting
x : int
x position padding in pixels
y : float
y position padding in pixels
zorder : int
The zorder of the logo
which : str
Which logo to plot 'metpy' or 'unidata'
size : str
Size of logo to be used. Can be 'small' for 75 px square or 'large' for
150 px square.
Returns
-------
`matplotlib.image.FigureImage`
The `matplotlib.image.FigureImage` instance created
"""
fname_suffix = {'small': '_75x75.png',
'large': '_150x150.png'}
fname_prefix = {'unidata': 'unidata',
'metpy': 'metpy'}
try:
fname = fname_prefix[which] + fname_suffix[size]
fpath = posixpath.join('_static', fname)
except KeyError:
raise ValueError('Unknown logo size or selection')
logo = imread(pkg_resources.resource_stream('metpy.plots', fpath))
return fig.figimage(logo, x, y, zorder=zorder, **kwargs)
|
python
|
{
"resource": ""
}
|
q23664
|
add_metpy_logo
|
train
|
def add_metpy_logo(fig, x=10, y=25, zorder=100, size='small', **kwargs):
    """Add the MetPy logo to a figure.

    Adds an image of the MetPy logo to the figure.

    Parameters
    ----------
    fig : `matplotlib.figure`
       The `figure` instance used for plotting
    x : int
       x position padding in pixels
    y : float
       y position padding in pixels
    zorder : int
       The zorder of the logo
    size : str
       Size of logo to be used. Can be 'small' for 75 px square or 'large' for
       150 px square.

    Returns
    -------
    `matplotlib.image.FigureImage`
                 The `matplotlib.image.FigureImage` instance created

    """
    # Thin convenience wrapper: delegate to the generic logo helper with
    # the MetPy logo pre-selected.
    return _add_logo(fig, which='metpy', x=x, y=y, zorder=zorder, size=size, **kwargs)
|
python
|
{
"resource": ""
}
|
q23665
|
colored_line
|
train
|
def colored_line(x, y, c, **kwargs):
    """Create a multi-colored line.

    Takes a set of points and turns them into a collection of lines colored by another array.

    Parameters
    ----------
    x : array-like
        x-axis coordinates
    y : array-like
        y-axis coordinates
    c : array-like
        values used for color-mapping
    kwargs : dict
        Other keyword arguments passed to :class:`matplotlib.collections.LineCollection`

    Returns
    -------
    The created :class:`matplotlib.collections.LineCollection` instance.
    """
    # Mask out any NaN values
    nan_mask = ~(np.isnan(x) | np.isnan(y) | np.isnan(c))
    x = x[nan_mask]
    y = y[nan_mask]
    c = c[nan_mask]
    # Paste values end to end ('concatenate' here is the unit-aware helper
    # defined in this module, so x and y may carry units)
    points = concatenate([x, y])
    # Exploit numpy's strides to present a view of these points without copying.
    # Dimensions are (segment, start/end, x/y). Since x and y are concatenated
    # back to back, moving between segments or from a segment's start point to
    # its end point advances a single item, while switching between the x and y
    # components jumps from one half of the array to the other (num_pts items).
    num_pts = points.size // 2
    final_shape = (num_pts - 1, 2, 2)
    final_strides = (points.itemsize, points.itemsize, num_pts * points.itemsize)
    segments = np.lib.stride_tricks.as_strided(points, shape=final_shape,
                                               strides=final_strides)
    # Create a LineCollection from the segments and set it to colormap based on c
    lc = LineCollection(segments, **kwargs)
    lc.set_array(c)
    return lc
|
python
|
{
"resource": ""
}
|
q23666
|
convert_gempak_color
|
train
|
def convert_gempak_color(c, style='psc'):
    """Convert GEMPAK color numbers into corresponding Matplotlib colors.

    Takes a sequence of GEMPAK color numbers and turns them into
    equivalent Matplotlib colors. Various GEMPAK quirks are respected,
    such as treating negative values as equivalent to 0.

    Parameters
    ----------
    c : int or sequence of ints
        GEMPAK color number(s)
    style : str, optional
        The GEMPAK 'device' to use to interpret color numbers. May be 'psc'
        (the default; best for a white background) or 'xw' (best for a black background).

    Returns
    -------
    List of strings of Matplotlib colors, or a single string if only one color requested.
    """
    def to_index(value):
        """Map a GEMPAK color number to an index 0-31, honoring GEMPAK quirks."""
        value = int(value)
        # Negative numbers and the special value 101 both mean color 0;
        # everything else wraps modulo 32.
        return 0 if value < 0 or value == 101 else value % 32

    # GEMPAK color table (Matplotlib doesn't appear to like numbered variants)
    cols = ['white',  # 0/32
            'black',  # 1
            'red',  # 2
            'green',  # 3
            'blue',  # 4
            'yellow',  # 5
            'cyan',  # 6
            'magenta',  # 7
            '#CD6839',  # 8 (sienna3)
            '#FF8247',  # 9 (sienna1)
            '#FFA54F',  # 10 (tan1)
            '#FFAEB9',  # 11 (LightPink1)
            '#FF6A6A',  # 12 (IndianRed1)
            '#EE2C2C',  # 13 (firebrick2)
            '#8B0000',  # 14 (red4)
            '#CD0000',  # 15 (red3)
            '#EE4000',  # 16 (OrangeRed2)
            '#FF7F00',  # 17 (DarkOrange1)
            '#CD8500',  # 18 (orange3)
            'gold',  # 19
            '#EEEE00',  # 20 (yellow2)
            'chartreuse',  # 21
            '#00CD00',  # 22 (green3)
            '#008B00',  # 23 (green4)
            '#104E8B',  # 24 (DodgerBlue4)
            'DodgerBlue',  # 25
            '#00B2EE',  # 26 (DeepSkyBlue2)
            '#00EEEE',  # 27 (cyan2)
            '#8968CD',  # 28 (MediumPurple3)
            '#912CEE',  # 29 (purple2)
            '#8B008B',  # 30 (magenta4)
            'bisque']  # 31

    # The 'xw' device swaps black/white and bisque for a dark background
    if style == 'xw':
        cols[0] = 'black'
        cols[1] = 'bisque'
        cols[31] = 'white'
    elif style != 'psc':
        raise ValueError('Unknown style parameter')

    try:
        return [cols[to_index(item)] for item in list(c)]
    except TypeError:
        # A non-iterable scalar was given: return a single color string
        return cols[to_index(c)]
|
python
|
{
"resource": ""
}
|
q23667
|
process_msg3
|
train
|
def process_msg3(fname):
    """Handle information for message type 3.

    Parse a space-delimited description file in which each line lists a
    field's name, description, type, and units, with the final column
    giving the field's halfword range. Returns a list of dicts with
    'name', 'desc', and 'fmt' (struct format string) entries.
    """
    with open(fname, 'r') as infile:
        info = []
        for lineno, line in enumerate(infile):
            parts = line.split(' ')
            try:
                var_name, desc, typ, units = parts[:4]
                size_hw = parts[-1]
                # A halfword range 'a-b' spans (b - a + 1) halfwords of
                # 2 bytes each; a single halfword is 2 bytes.
                if '-' in size_hw:
                    start, end = map(int, size_hw.split('-'))
                    size = (end - start + 1) * 2
                else:
                    size = 2
                assert size >= 2
                fmt = fix_type(typ, size)
                var_name = fix_var_name(var_name)
                full_desc = fix_desc(desc, units)
                info.append({'name': var_name, 'desc': full_desc, 'fmt': fmt})
                # Warn if a field is being ignored that wasn't explicitly marked spare
                if ignored_item(info[-1]) and var_name != 'Spare':
                    warnings.warn('{} has type {}. Setting as Spare'.format(var_name, typ))
            except (ValueError, AssertionError):
                # Report the offending line number/content, then re-raise so
                # malformed input fails loudly instead of being skipped.
                warnings.warn('{} > {}'.format(lineno + 1, ':'.join(parts)))
                raise
    return info
|
python
|
{
"resource": ""
}
|
q23668
|
process_msg18
|
train
|
def process_msg18(fname):
    """Handle information for message type 18.

    Parse a space-delimited description file in which each line lists a
    field's name, description, type, units, range, precision, and byte
    range. Returns a list of dicts with 'name', 'desc', and 'fmt'
    (struct format string) entries.
    """
    with open(fname, 'r') as infile:
        info = []
        for lineno, line in enumerate(infile):
            parts = line.split(' ')
            try:
                if len(parts) == 8:
                    # Re-join a column that was split by the delimiter so the
                    # line unpacks into exactly seven fields
                    parts = parts[:6] + [parts[6] + parts[7]]
                var_name, desc, typ, units, rng, prec, byte_range = parts
                start, end = map(int, byte_range.split('-'))
                size = end - start + 1
                assert size >= 4
                fmt = fix_type(typ, size,
                               additional=[('See Note (5)', ('{size}s', 1172))])
                if ' ' in var_name:
                    warnings.warn('Space in {}'.format(var_name))
                if not desc:
                    warnings.warn('null description for {}'.format(var_name))
                var_name = fix_var_name(var_name)
                full_desc = fix_desc(desc, units)
                info.append({'name': var_name, 'desc': full_desc, 'fmt': fmt})
                # Warn if a field is being ignored that wasn't explicitly marked spare
                if (ignored_item(info[-1]) and var_name != 'SPARE'
                        and 'SPARE' not in full_desc):
                    warnings.warn('{} has type {}. Setting as SPARE'.format(var_name, typ))
            except (ValueError, AssertionError):
                # Report the offending line number/content, then re-raise so
                # malformed input fails loudly instead of being skipped.
                warnings.warn('{} > {}'.format(lineno + 1, ':'.join(parts)))
                raise
    return info
|
python
|
{
"resource": ""
}
|
q23669
|
fix_type
|
train
|
def fix_type(typ, size, additional=None):
    """Map a column type string to a struct format, validating the byte size.

    Searches the module-level ``types`` table (plus any ``additional``
    (matcher, info) pairs). Matchers and info entries may be plain values
    or callables. Raises ValueError if no entry matches ``typ``.
    """
    candidates = types if additional is None else types + additional
    for matcher, info in candidates:
        # A matcher may be a predicate or a literal type string
        matched = matcher(typ) if callable(matcher) else matcher == typ
        if not matched:
            continue
        # Info may compute the format/size from the given size, or be fixed
        fmt_str, true_size = info(size) if callable(info) else info
        assert size == true_size, ('{}: Got size {} instead of {}'.format(typ, size,
                                                                          true_size))
        return fmt_str.format(size=size)
    raise ValueError('No type match! ({})'.format(typ))
|
python
|
{
"resource": ""
}
|
q23670
|
fix_var_name
|
train
|
def fix_var_name(var_name):
    """Clean up and apply standard formatting to variable names."""
    # Replace separator/punctuation characters with underscores in one pass
    punct_to_underscore = str.maketrans({ch: '_' for ch in '(). /#,'})
    name = var_name.strip().translate(punct_to_underscore)
    # Signs get spelled out so the result is a valid identifier
    name = name.replace('+', 'pos_').replace('-', 'neg_')
    # Drop a single trailing underscore left by the replacements above
    return name[:-1] if name.endswith('_') else name
|
python
|
{
"resource": ""
}
|
q23671
|
fix_desc
|
train
|
def fix_desc(desc, units=None):
    """Clean up description column, appending units when meaningful."""
    text = desc.strip()
    # Missing or placeholder units leave the description unchanged
    if not units or units == 'N/A':
        return text
    # With an empty description, the units stand alone
    return text + ' (' + units + ')' if text else units
|
python
|
{
"resource": ""
}
|
q23672
|
write_file
|
train
|
def write_file(fname, info):
    """Write out the generated Python code.

    ``info`` is a list of dicts with 'name', 'desc', and 'fmt' keys (as
    produced by the process_msg* parsers). Emits a module defining a
    ``descriptions`` dict and a ``fields`` list of (name, format) pairs.
    """
    with open(fname, 'w') as outfile:
        # File header
        outfile.write('# Copyright (c) 2018 MetPy Developers.\n')
        outfile.write('# Distributed under the terms of the BSD 3-Clause License.\n')
        outfile.write('# SPDX-License-Identifier: BSD-3-Clause\n\n')
        outfile.write('# flake8: noqa\n')
        outfile.write('# Generated file -- do not modify\n')
        # Variable descriptions -- only for fields that need one (need_desc)
        outfile.write('descriptions = {')
        outdata = ',\n '.join('"{name}": "{desc}"'.format(
            **i) for i in info if need_desc(i))
        outfile.write(outdata)
        outfile.write('}\n\n')
        # Now the struct format -- every field, spares included
        outfile.write('fields = [')
        outdata = ',\n '.join('({fname}, "{fmt}")'.format(
            fname=field_name(i), **i) for i in info)
        outfile.write(outdata)
        outfile.write(']\n')
|
python
|
{
"resource": ""
}
|
q23673
|
pandas_dataframe_to_unit_arrays
|
train
|
def pandas_dataframe_to_unit_arrays(df, column_units=None):
    """Attach units to data in pandas dataframes and return united arrays.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Data in pandas dataframe.
    column_units : dict
        Dictionary of units to attach to columns of the dataframe. Overrides
        the units attribute if it is attached to the dataframe.

    Returns
    -------
    Dictionary containing united arrays with keys corresponding to the dataframe
    column names.
    """
    if not column_units:
        # Fall back to units metadata attached to the dataframe itself
        column_units = getattr(df, 'units', None)
        if column_units is None:
            raise ValueError('No units attribute attached to pandas '
                             'dataframe and col_units not given.')
    # Attach units where we have them; leave other columns as plain arrays
    out = {}
    for name in df:
        unit_name = column_units.get(name)
        if unit_name:
            out[name] = df[name].values * units(unit_name)
        else:
            out[name] = df[name].values
    return out
|
python
|
{
"resource": ""
}
|
q23674
|
concatenate
|
train
|
def concatenate(arrs, axis=0):
    r"""Concatenate multiple values into a new unitized object.

    This is essentially a unit-aware version of `numpy.concatenate`. All items
    must be able to be converted to the same units. If an item has no units, it will be given
    those of the rest of the collection, without conversion. The first units found in the
    arguments is used as the final output units.

    Parameters
    ----------
    arrs : Sequence of arrays
        The items to be joined together
    axis : integer, optional
        The array axis along which to join the arrays. Defaults to 0 (the first dimension)

    Returns
    -------
    `pint.Quantity`
        New container with the value passed in and units corresponding to the first item.
    """
    # Output units come from the first item that carries units
    dest = next((a.units for a in arrs if hasattr(a, 'units')), 'dimensionless')
    # Convert everything to those units and strip down to raw magnitudes
    pieces = [np.atleast_1d(a.to(dest).magnitude if hasattr(a, 'to') else a)
              for a in arrs]
    # Use masked array concatenate to ensure masks are preserved, but convert to an
    # array if there are no masked values.
    data = np.ma.concatenate(pieces, axis=axis)
    if not np.any(data.mask):
        data = np.asarray(data)
    return units.Quantity(data, dest)
|
python
|
{
"resource": ""
}
|
q23675
|
diff
|
train
|
def diff(x, **kwargs):
    """Calculate the n-th discrete difference along given axis.

    Wraps :func:`numpy.diff` to handle units.

    Parameters
    ----------
    x : array-like
        Input data
    n : int, optional
        The number of times values are differenced.
    axis : int, optional
        The axis along which the difference is taken, default is the last axis.

    Returns
    -------
    diff : ndarray
        The n-th differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`. The
        type of the output is the same as that of the input.

    See Also
    --------
    numpy.diff
    """
    result = np.diff(x, **kwargs)
    if hasattr(x, 'units'):
        # Can't just reuse x's units: for offset units like temperature,
        # subtraction yields delta units, so derive the proper units from
        # an actual difference of two elements.
        flat = x.flat
        result = result * (next(flat) - next(flat)).units
    return result
|
python
|
{
"resource": ""
}
|
q23676
|
atleast_1d
|
train
|
def atleast_1d(*arrs):
    r"""Convert inputs to arrays with at least one dimension.

    Scalars are converted to 1-dimensional arrays, whilst other
    higher-dimensional inputs are preserved. This is a thin wrapper
    around `numpy.atleast_1d` to preserve units.

    Parameters
    ----------
    arrs : arbitrary positional arguments
        Input arrays to be converted if necessary

    Returns
    -------
    `pint.Quantity`
        A single quantity or a list of quantities, matching the number of inputs.
    """
    # Strip units off so numpy can do its work, remembering them for later
    stripped = [getattr(a, 'magnitude', a) for a in arrs]
    unit_list = [getattr(a, 'units', None) for a in arrs]
    arrays = np.atleast_1d(*stripped)
    # numpy returns a bare array for one input, a list for several
    if len(stripped) == 1:
        first_units = unit_list[0]
        return units.Quantity(arrays, first_units) if first_units is not None else arrays
    return [units.Quantity(arr, u) if u is not None else arr
            for arr, u in zip(arrays, unit_list)]
|
python
|
{
"resource": ""
}
|
q23677
|
_check_argument_units
|
train
|
def _check_argument_units(args, dimensionality):
"""Yield arguments with improper dimensionality."""
for arg, val in args.items():
# Get the needed dimensionality (for printing) as well as cached, parsed version
# for this argument.
try:
need, parsed = dimensionality[arg]
except KeyError:
# Argument did not have units specified in decorator
continue
# See if the value passed in is appropriate
try:
if val.dimensionality != parsed:
yield arg, val.units, need
# No dimensionality
except AttributeError:
# If this argument is dimensionless, don't worry
if parsed != '':
yield arg, 'none', need
|
python
|
{
"resource": ""
}
|
q23678
|
interpolate_to_slice
|
train
|
def interpolate_to_slice(data, points, interp_type='linear'):
    r"""Obtain an interpolated slice through data using xarray.

    Utilizing the interpolation functionality in `xarray`, this function takes a slice the
    given data (currently only regular grids are supported), which is given as an
    `xarray.DataArray` so that we can utilize its coordinate metadata.

    Parameters
    ----------
    data: `xarray.DataArray` or `xarray.Dataset`
        Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
        DataArray in the Dataset) must have been parsed by MetPy and include both an x and
        y coordinate dimension.
    points: (N, 2) array_like
        A list of x, y points in the data projection at which to interpolate the data
    interp_type: str, optional
        The interpolation method, either 'linear' or 'nearest' (see
        `xarray.DataArray.interp()` for details). Defaults to 'linear'.

    Returns
    -------
    `xarray.DataArray` or `xarray.Dataset`
        The interpolated slice of data, with new index dimension of size N.

    Raises
    ------
    ValueError
        If the data lack MetPy-parsed x/y coordinate metadata.

    See Also
    --------
    cross_section
    """
    try:
        x, y = data.metpy.coordinates('x', 'y')
    except AttributeError:
        raise ValueError('Required coordinate information not available. Verify that '
                         'your data has been parsed by MetPy with proper x and y '
                         'dimension coordinates.')
    # Interpolate at the same point index along both coordinates so that the
    # result collapses the x/y dimensions into a single 'index' dimension.
    data_sliced = data.interp({
        x.name: xr.DataArray(points[:, 0], dims='index', attrs=x.attrs),
        y.name: xr.DataArray(points[:, 1], dims='index', attrs=y.attrs)
    }, method=interp_type)
    data_sliced.coords['index'] = range(len(points))
    return data_sliced
|
python
|
{
"resource": ""
}
|
q23679
|
geodesic
|
train
|
def geodesic(crs, start, end, steps):
    r"""Construct a geodesic path between two points.

    This function acts as a wrapper for the geodesic construction available in `pyproj`.

    Parameters
    ----------
    crs: `cartopy.crs`
        Cartopy Coordinate Reference System to use for the output
    start: (2, ) array_like
        A latitude-longitude pair designating the start point of the geodesic (units are
        degrees north and degrees east).
    end: (2, ) array_like
        A latitude-longitude pair designating the end point of the geodesic (units are degrees
        north and degrees east).
    steps: int
        The number of points along the geodesic between the start and the end point
        (including the end points).

    Returns
    -------
    `numpy.ndarray`
        The list of x, y points in the given CRS of length `steps` along the geodesic.

    See Also
    --------
    cross_section
    """
    import cartopy.crs as ccrs
    from pyproj import Geod

    # Geod.npts only gives points *in between* the start and end, and we want to include
    # the endpoints. Note that Geod works in lon/lat order, while start/end are
    # lat/lon -- hence the [::-1] reversals and swapped index arguments below.
    g = Geod(crs.proj4_init)
    geodesic = np.concatenate([
        np.array(start[::-1])[None],
        np.array(g.npts(start[1], start[0], end[1], end[0], steps - 2)),
        np.array(end[::-1])[None]
    ]).transpose()
    # Project from geodetic lon/lat into the target CRS, dropping the z column
    points = crs.transform_points(ccrs.Geodetic(), *geodesic)[:, :2]

    return points
|
python
|
{
"resource": ""
}
|
q23680
|
cross_section
|
train
|
def cross_section(data, start, end, steps=100, interp_type='linear'):
    r"""Obtain an interpolated cross-sectional slice through gridded data.

    Utilizing the interpolation functionality in `xarray`, this function takes a vertical
    cross-sectional slice along a geodesic through the given data on a regular grid, which is
    given as an `xarray.DataArray` so that we can utilize its coordinate and projection
    metadata.

    Parameters
    ----------
    data: `xarray.DataArray` or `xarray.Dataset`
        Three- (or higher) dimensional field(s) to interpolate. The DataArray (or each
        DataArray in the Dataset) must have been parsed by MetPy and include both an x and
        y coordinate dimension and the added `crs` coordinate.
    start: (2, ) array_like
        A latitude-longitude pair designating the start point of the cross section (units are
        degrees north and degrees east).
    end: (2, ) array_like
        A latitude-longitude pair designating the end point of the cross section (units are
        degrees north and degrees east).
    steps: int, optional
        The number of points along the geodesic between the start and the end point
        (including the end points) to use in the cross section. Defaults to 100.
    interp_type: str, optional
        The interpolation method, either 'linear' or 'nearest' (see
        `xarray.DataArray.interp()` for details). Defaults to 'linear'.

    Returns
    -------
    `xarray.DataArray` or `xarray.Dataset`
        The interpolated cross section, with new index dimension along the cross-section.

    See Also
    --------
    interpolate_to_slice, geodesic
    """
    if isinstance(data, xr.Dataset):
        # Recursively apply to each variable in the dataset (True -> keep_attrs).
        # NOTE(review): Dataset.apply is deprecated in favor of Dataset.map in
        # newer xarray -- confirm the supported xarray versions.
        return data.apply(cross_section, True, (start, end), steps=steps,
                          interp_type=interp_type)
    elif data.ndim == 0:
        # This has no dimensions, so it is likely a projection variable. In any case, there
        # are no data here to take the cross section with. Therefore, do nothing.
        return data
    else:
        # Get the projection and coordinates
        try:
            crs_data = data.metpy.cartopy_crs
            x = data.metpy.x
        except AttributeError:
            raise ValueError('Data missing required coordinate information. Verify that '
                             'your data have been parsed by MetPy with proper x and y '
                             'dimension coordinates and added crs coordinate of the '
                             'correct projection for each variable.')
        # Get the geodesic
        points_cross = geodesic(crs_data, start, end, steps)
        # Patch points_cross to match given longitude range, whether [0, 360) or (-180, 180]
        if CFConventionHandler.check_axis(x, 'lon') and (x > 180).any():
            points_cross[points_cross[:, 0] < 0, 0] += 360.
        # Return the interpolated data
        return interpolate_to_slice(data, points_cross, interp_type=interp_type)
|
python
|
{
"resource": ""
}
|
q23681
|
preprocess_xarray
|
train
|
def preprocess_xarray(func):
    """Decorate a function to convert all DataArray arguments to pint.Quantities.

    This uses the metpy xarray accessors to do the actual conversion.
    """
    def _convert(value):
        """Swap a DataArray for its unit-carrying array; pass anything else through."""
        return value.metpy.unit_array if isinstance(value, xr.DataArray) else value

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*tuple(_convert(a) for a in args),
                    **{name: _convert(v) for name, v in kwargs.items()})
    return wrapper
|
python
|
{
"resource": ""
}
|
q23682
|
check_matching_coordinates
|
train
|
def check_matching_coordinates(func):
    """Decorate a function to make sure all given DataArrays have matching coordinates."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        arrays = [a for a in (*args, *kwargs.values()) if isinstance(a, xr.DataArray)]
        if len(arrays) > 1:
            # Compare everything against the first DataArray found
            reference = arrays[0]
            if any(not reference.metpy.coordinates_identical(other)
                   for other in arrays[1:]):
                raise ValueError('Input DataArray arguments must be on same coordinates.')
        return func(*args, **kwargs)
    return wrapper
|
python
|
{
"resource": ""
}
|
q23683
|
_reassign_quantity_indexer
|
train
|
def _reassign_quantity_indexer(data, indexers):
    """Reassign a units.Quantity indexer to units of relevant coordinate.

    Each indexer value (scalar, Quantity, or slice thereof) is converted to a
    plain magnitude in the units of its coordinate. Axis-type keys (e.g.
    'vertical') on DataArrays are renamed to the actual coordinate name.
    The ``indexers`` dict is modified in place and returned.
    """
    def _to_magnitude(val, unit):
        try:
            return val.to(unit).m
        except AttributeError:
            # Not a Quantity (or None for an open slice end): leave as-is
            return val

    # Iterate over a snapshot of the keys: the axis-handling below renames
    # keys (adds the coordinate name, deletes the axis name), and mutating a
    # dict while iterating it directly is undefined behavior (entries may be
    # skipped/repeated, or a RuntimeError raised).
    for coord_name in list(indexers):
        # Handle axis types for DataArrays
        if (isinstance(data, xr.DataArray) and coord_name not in data.dims
                and coord_name in readable_to_cf_axes):
            axis = coord_name
            coord_name = next(data.metpy.coordinates(axis)).name
            indexers[coord_name] = indexers[axis]
            del indexers[axis]

        # Handle slices of quantities
        if isinstance(indexers[coord_name], slice):
            start = _to_magnitude(indexers[coord_name].start, data[coord_name].metpy.units)
            stop = _to_magnitude(indexers[coord_name].stop, data[coord_name].metpy.units)
            step = _to_magnitude(indexers[coord_name].step, data[coord_name].metpy.units)
            indexers[coord_name] = slice(start, stop, step)

        # Handle quantities
        indexers[coord_name] = _to_magnitude(indexers[coord_name],
                                             data[coord_name].metpy.units)

    return indexers
|
python
|
{
"resource": ""
}
|
q23684
|
resample_nn_1d
|
train
|
def resample_nn_1d(a, centers):
    """Return one-dimensional nearest-neighbor indexes based on user-specified centers.

    Parameters
    ----------
    a : array-like
        1-dimensional array of numeric values from which to
        extract indexes of nearest-neighbors
    centers : array-like
        1-dimensional array of numeric values representing a subset of values to approximate

    Returns
    -------
    An array of indexes representing values closest to given array values
    """
    # Collect the nearest index for each center, dropping duplicates while
    # preserving the order in which they were found
    out = []
    for nearest in (np.abs(a - target).argmin() for target in centers):
        if nearest not in out:
            out.append(nearest)
    return out
|
python
|
{
"resource": ""
}
|
q23685
|
nearest_intersection_idx
|
train
|
def nearest_intersection_idx(a, b):
    """Determine the index of the point just before two lines with common x values.

    Parameters
    ----------
    a : array-like
        1-dimensional array of y-values for line 1
    b : array-like
        1-dimensional array of y-values for line 2

    Returns
    -------
    An array of indexes representing the index of the values
    just before the intersection(s) of the two lines.
    """
    # The lines cross wherever the sign of (a - b) changes between
    # consecutive points; np.diff of the signs is nonzero exactly there.
    # Multiple crossings yield multiple indexes.
    gap_sign = np.sign(a - b)
    crossings, = np.nonzero(np.diff(gap_sign))
    return crossings
|
python
|
{
"resource": ""
}
|
q23686
|
find_intersections
|
train
|
def find_intersections(x, a, b, direction='all'):
    """Calculate the best estimate of intersection.

    Calculates the best estimates of the intersection of two y-value
    data sets that share a common x-value set.

    Parameters
    ----------
    x : array-like
        1-dimensional array of numeric x-values
    a : array-like
        1-dimensional array of y-values for line 1
    b : array-like
        1-dimensional array of y-values for line 2
    direction : string, optional
        specifies direction of crossing. 'all', 'increasing' (a becoming greater than b),
        or 'decreasing' (b becoming greater than a). Defaults to 'all'.

    Returns
    -------
    A tuple (x, y) of array-like with the x and y coordinates of the
    intersections of the lines.

    Raises
    ------
    ValueError
        If ``direction`` is not one of 'all', 'increasing', or 'decreasing'.
    """
    # Find the index of the points just before the intersection(s)
    nearest_idx = nearest_intersection_idx(a, b)
    next_idx = nearest_idx + 1
    # Determine the sign of the change (positive means a rises above b)
    sign_change = np.sign(a[next_idx] - b[next_idx])
    # x-values around each intersection (skipping masked points if present)
    _, x0 = _next_non_masked_element(x, nearest_idx)
    _, x1 = _next_non_masked_element(x, next_idx)
    # y-values around each intersection for the first line
    _, a0 = _next_non_masked_element(a, nearest_idx)
    _, a1 = _next_non_masked_element(a, next_idx)
    # y-values around each intersection for the second line
    _, b0 = _next_non_masked_element(b, nearest_idx)
    _, b1 = _next_non_masked_element(b, next_idx)
    # Calculate the x-intersection. This comes from finding the equations of the two lines,
    # one through (x0, a0) and (x1, a1) and the other through (x0, b0) and (x1, b1),
    # finding their intersection, and reducing with a bunch of algebra.
    delta_y0 = a0 - b0
    delta_y1 = a1 - b1
    intersect_x = (delta_y1 * x0 - delta_y0 * x1) / (delta_y1 - delta_y0)
    # Calculate the y-intersection of the lines. Just plug the x above into the equation
    # for the line through the a points. One could solve for y like x above, but this
    # causes weirder unit behavior and seems a little less good numerically.
    intersect_y = ((intersect_x - x0) / (x1 - x0)) * (a1 - a0) + a0
    # If there's no intersections, return
    if len(intersect_x) == 0:
        return intersect_x, intersect_y
    # Check for duplicates: ediff1d with to_end=1 marks repeated consecutive
    # x's as 0 (dropped) while always keeping the final point
    duplicate_mask = (np.ediff1d(intersect_x, to_end=1) != 0)
    # Make a mask based on the direction of sign change desired
    if direction == 'increasing':
        mask = sign_change > 0
    elif direction == 'decreasing':
        mask = sign_change < 0
    elif direction == 'all':
        return intersect_x[duplicate_mask], intersect_y[duplicate_mask]
    else:
        raise ValueError('Unknown option for direction: {0}'.format(str(direction)))
    return intersect_x[mask & duplicate_mask], intersect_y[mask & duplicate_mask]
|
python
|
{
"resource": ""
}
|
q23687
|
_next_non_masked_element
|
train
|
def _next_non_masked_element(a, idx):
"""Return the next non masked element of a masked array.
If an array is masked, return the next non-masked element (if the given index is masked).
If no other unmasked points are after the given masked point, returns none.
Parameters
----------
a : array-like
1-dimensional array of numeric values
idx : integer
index of requested element
Returns
-------
Index of next non-masked element and next non-masked element
"""
try:
next_idx = idx + a[idx:].mask.argmin()
if ma.is_masked(a[next_idx]):
return None, None
else:
return next_idx, a[next_idx]
except (AttributeError, TypeError, IndexError):
return idx, a[idx]
|
python
|
{
"resource": ""
}
|
q23688
|
_delete_masked_points
|
train
|
def _delete_masked_points(*arrs):
"""Delete masked points from arrays.
Takes arrays and removes masked points to help with calculations and plotting.
Parameters
----------
arrs : one or more array-like
source arrays
Returns
-------
arrs : one or more array-like
arrays with masked elements removed
"""
if any(hasattr(a, 'mask') for a in arrs):
keep = ~functools.reduce(np.logical_or, (np.ma.getmaskarray(a) for a in arrs))
return tuple(ma.asarray(a[keep]) for a in arrs)
else:
return arrs
|
python
|
{
"resource": ""
}
|
q23689
|
reduce_point_density
|
train
|
def reduce_point_density(points, radius, priority=None):
    r"""Return a mask to reduce the density of points in irregularly-spaced data.

    This function is used to down-sample a collection of scattered points (e.g. surface
    data), returning a mask that can be used to select the points from one or more arrays
    (e.g. arrays of temperature and dew point). The points selected can be controlled by
    providing an array of ``priority`` values (e.g. rainfall totals to ensure that
    stations with higher precipitation remain in the mask).

    Parameters
    ----------
    points : (N, K) array-like
        N locations of the points in K dimensional space
    radius : float
        minimum radius allowed between points
    priority : (N,) array-like, optional
        If given, this should have one value per point; these values will
        be used to control selection priority for points.

    Returns
    -------
    (N,) array-like of boolean values indicating whether points should be kept. This
    can be used directly to index numpy arrays to return only the desired points.

    Examples
    --------
    >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.)
    array([ True, False, True])
    >>> metpy.calc.reduce_point_density(np.array([1, 2, 3]), 1.,
    ... priority=np.array([0.1, 0.9, 0.3]))
    array([False, True, False])

    """
    # Handle 1D input
    if points.ndim < 2:
        points = points.reshape(-1, 1)

    # Make a kd-tree to speed searching of data.
    tree = cKDTree(points)

    # Need to use sorted indices rather than sorting the position
    # so that the keep mask matches *original* order.
    if priority is not None:
        # Need to sort the locations in decreasing priority.
        sorted_indices = np.argsort(priority)[::-1]
    else:
        # Take advantage of iterator nature of range here to avoid making big lists
        sorted_indices = range(len(points))

    # Keep all points initially. Use the builtin bool for the dtype: np.bool
    # was a deprecated alias for it that was removed in NumPy 1.24.
    keep = np.ones(len(points), dtype=bool)

    # Loop over all the potential points
    for ind in sorted_indices:
        # Only proceed if we haven't already excluded this point
        if keep[ind]:
            # Find the neighbors and eliminate them
            neighbors = tree.query_ball_point(points[ind], radius)
            keep[neighbors] = False

            # We just removed ourselves, so undo that
            keep[ind] = True

    return keep
|
python
|
{
"resource": ""
}
|
q23690
|
_get_bound_pressure_height
|
train
|
def _get_bound_pressure_height(pressure, bound, heights=None, interpolate=True):
    """Calculate the bounding pressure and height in a layer.

    Given pressure, optional heights, and a bound, return either the closest pressure/height
    or interpolated pressure/height. If no heights are provided, a standard atmosphere is
    assumed.

    Parameters
    ----------
    pressure : `pint.Quantity`
        Atmospheric pressures
    bound : `pint.Quantity`
        Bound to retrieve (in pressure or height)
    heights : `pint.Quantity`, optional
        Atmospheric heights associated with the pressure levels. Defaults to using
        heights calculated from ``pressure`` assuming a standard atmosphere.
    interpolate : boolean, optional
        Interpolate the bound or return the nearest. Defaults to True.

    Returns
    -------
    `pint.Quantity`
        The bound pressure and height.

    Raises
    ------
    ValueError
        If ``bound`` has units of neither pressure nor length, or if the
        bound falls outside the range of the given data.
    """
    # Make sure pressure is monotonically decreasing
    sort_inds = np.argsort(pressure)[::-1]
    pressure = pressure[sort_inds]
    if heights is not None:
        heights = heights[sort_inds]
    # Bound is given in pressure (dimensionality mass / (length * time^2))
    if bound.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
        # If the bound is in the pressure data, we know the pressure bound exactly
        if bound in pressure:
            bound_pressure = bound
            # If we have heights, we know the exact height value, otherwise return standard
            # atmosphere height for the pressure
            if heights is not None:
                bound_height = heights[pressure == bound_pressure]
            else:
                bound_height = pressure_to_height_std(bound_pressure)
        # If bound is not in the data, return the nearest or interpolated values
        else:
            if interpolate:
                bound_pressure = bound  # Use the user specified bound
                if heights is not None:  # Interpolate heights from the height data
                    # Heights vary roughly linearly with log(pressure)
                    bound_height = log_interpolate_1d(bound_pressure, pressure, heights)
                else:  # If not heights given, use the standard atmosphere
                    bound_height = pressure_to_height_std(bound_pressure)
            else:  # No interpolation, find the closest values
                idx = (np.abs(pressure - bound)).argmin()
                bound_pressure = pressure[idx]
                if heights is not None:
                    bound_height = heights[idx]
                else:
                    bound_height = pressure_to_height_std(bound_pressure)
    # Bound is given in height
    elif bound.dimensionality == {'[length]': 1.0}:
        # If there is height data, see if we have the bound or need to interpolate/find nearest
        if heights is not None:
            if bound in heights:  # Bound is in the height data
                bound_height = bound
                bound_pressure = pressure[heights == bound]
            else:  # Bound is not in the data
                if interpolate:
                    bound_height = bound
                    # Need to cast back to the input type since interp (up to at least numpy
                    # 1.13 always returns float64. This can cause upstream users problems,
                    # resulting in something like np.append() to upcast.
                    bound_pressure = np.interp(np.atleast_1d(bound), heights,
                                               pressure).astype(bound.dtype) * pressure.units
                else:
                    idx = (np.abs(heights - bound)).argmin()
                    bound_pressure = pressure[idx]
                    bound_height = heights[idx]
        else:  # Don't have heights, so assume a standard atmosphere
            bound_height = bound
            bound_pressure = height_to_pressure_std(bound)
            # If interpolation is on, this is all we need, if not, we need to go back and
            # find the pressure closest to this and refigure the bounds
            if not interpolate:
                idx = (np.abs(pressure - bound_pressure)).argmin()
                bound_pressure = pressure[idx]
                bound_height = pressure_to_height_std(bound_pressure)
    # Bound has invalid units
    else:
        raise ValueError('Bound must be specified in units of length or pressure.')
    # If the bound is out of the range of the data, we shouldn't extrapolate
    if not (_greater_or_close(bound_pressure, np.nanmin(pressure) * pressure.units)
            and _less_or_close(bound_pressure, np.nanmax(pressure) * pressure.units)):
        raise ValueError('Specified bound is outside pressure range.')
    if heights is not None:
        if not (_less_or_close(bound_height, np.nanmax(heights) * heights.units)
                and _greater_or_close(bound_height, np.nanmin(heights) * heights.units)):
            raise ValueError('Specified bound is outside height range.')
    return bound_pressure, bound_height
|
python
|
{
"resource": ""
}
|
q23691
|
get_layer_heights
|
train
|
def get_layer_heights(heights, depth, *args, **kwargs):
    """Return an atmospheric layer from upper air data with the requested bottom and depth.

    This function subsets an upper air dataset to contain only the specified layer,
    working purely in terms of heights.

    Parameters
    ----------
    heights : array-like
        Atmospheric heights
    depth : `pint.Quantity`
        The thickness of the layer
    *args : array-like
        Atmospheric variable(s) measured at the given pressures
    bottom : `pint.Quantity`, optional
        The bottom of the layer
    interpolate : bool, optional
        Interpolate the top and bottom points if they are not in the given data. Defaults
        to True.
    with_agl : bool, optional
        Returns the heights as above ground level by subtracting the minimum height in the
        provided heights. Defaults to False.

    Returns
    -------
    `pint.Quantity, pint.Quantity`
        The height and data variables of the layer

    """
    bottom = kwargs.pop('bottom', None)
    interpolate = kwargs.pop('interpolate', True)
    with_agl = kwargs.pop('with_agl', False)

    # Every data variable must align one-to-one with the height profile
    for var in args:
        if len(heights) != len(var):
            raise ValueError('Height and data variables must have the same length.')

    # Optionally shift heights so the lowest point becomes zero (above ground level)
    if with_agl:
        heights = heights - np.min(heights)

    # Default the bottom of the layer to the first height value
    if bottom is None:
        bottom = heights[0]

    # Work in base units throughout
    heights = heights.to_base_units()
    bottom = bottom.to_base_units()

    # The top of the layer sits one depth above the bottom
    top = bottom + depth

    layer_data = []  # returned data variables in layer

    # Sort the profile so heights increase monotonically
    order = np.argsort(heights)
    heights = heights[order]

    # Select the points falling within [bottom, top]
    in_layer = _greater_or_close(heights, bottom) & _less_or_close(heights, top)
    heights_interp = heights[in_layer]

    # Append the exact bounds when interpolating and they are not already present
    if interpolate:
        if top not in heights_interp:
            heights_interp = np.sort(np.append(heights_interp, top)) * heights.units
        if bottom not in heights_interp:
            heights_interp = np.sort(np.append(heights_interp, bottom)) * heights.units

    layer_data.append(heights_interp)

    for var in args:
        # Reorder the data variable to match the sorted heights
        var = var[order]

        if interpolate:
            # Fill in values at the (possibly newly appended) bound heights
            var = interpolate_1d(heights_interp, heights, var)
        else:
            var = var[in_layer]

        layer_data.append(var)
    return layer_data
|
python
|
{
"resource": ""
}
|
q23692
|
get_layer
|
train
|
def get_layer(pressure, *args, **kwargs):
    r"""Return an atmospheric layer from upper air data with the requested bottom and depth.

    This function subsets an upper air dataset to contain only the specified layer. The
    bottom of the layer can be specified with a pressure or height above the surface
    pressure. The bottom defaults to the surface pressure. The depth of the layer can be
    specified in terms of pressure or height above the bottom of the layer. If the top and
    bottom of the layer are not in the data, they are interpolated by default.

    Parameters
    ----------
    pressure : array-like
        Atmospheric pressure profile
    *args : array-like
        Atmospheric variable(s) measured at the given pressures
    heights: array-like, optional
        Atmospheric heights corresponding to the given pressures. Defaults to using
        heights calculated from ``p`` assuming a standard atmosphere.
    bottom : `pint.Quantity`, optional
        The bottom of the layer as a pressure or height above the surface pressure. Defaults
        to the highest pressure or lowest height given.
    depth : `pint.Quantity`, optional
        The thickness of the layer as a pressure or height above the bottom of the layer.
        Defaults to 100 hPa.
    interpolate : bool, optional
        Interpolate the top and bottom points if they are not in the given data. Defaults
        to True.

    Returns
    -------
    `pint.Quantity, pint.Quantity`
        The pressure and data variables of the layer

    """
    # Pop off keyword arguments
    heights = kwargs.pop('heights', None)
    bottom = kwargs.pop('bottom', None)
    depth = kwargs.pop('depth', 100 * units.hPa)
    interpolate = kwargs.pop('interpolate', True)

    # An explicit depth of None also falls back to the 100 hPa default
    if depth is None:
        depth = 100 * units.hPa

    # Every data variable must align one-to-one with the pressure profile
    for var in args:
        if len(pressure) != len(var):
            raise ValueError('Pressure and data variables must have the same length.')

    # Default the bottom of the layer to the surface (largest pressure)
    if bottom is None:
        bottom = np.nanmax(pressure) * pressure.units

    bottom_pressure, bottom_height = _get_bound_pressure_height(pressure, bottom,
                                                               heights=heights,
                                                               interpolate=interpolate)

    # Interpret depth as either a pressure thickness or a height thickness
    if depth.dimensionality == {'[length]': -1.0, '[mass]': 1.0, '[time]': -2.0}:
        top = bottom_pressure - depth
    elif depth.dimensionality == {'[length]': 1}:
        top = bottom_height + depth
    else:
        raise ValueError('Depth must be specified in units of length or pressure')

    top_pressure, _ = _get_bound_pressure_height(pressure, top, heights=heights,
                                                 interpolate=interpolate)

    layer_data = []  # returned data variables in layer

    # Sort the profile so pressures increase monotonically
    order = np.argsort(pressure)
    pressure = pressure[order]

    # Select the points bounded by the top and bottom pressures
    in_layer = (_less_or_close(pressure, bottom_pressure)
                & _greater_or_close(pressure, top_pressure))
    p_interp = pressure[in_layer]

    # Append the exact bound pressures when interpolating and they are not already present
    if interpolate:
        if not np.any(np.isclose(top_pressure, p_interp)):
            p_interp = np.sort(np.append(p_interp, top_pressure)) * pressure.units
        if not np.any(np.isclose(bottom_pressure, p_interp)):
            p_interp = np.sort(np.append(p_interp, bottom_pressure)) * pressure.units

    # Output runs from the surface upward, i.e. descending pressure
    layer_data.append(p_interp[::-1])

    for var in args:
        # Reorder the data variable to match the sorted pressures
        var = var[order]

        if interpolate:
            # Fill in values at the (possibly newly appended) bound pressures
            var = log_interpolate_1d(p_interp, pressure, var)
        else:
            var = var[in_layer]

        layer_data.append(var[::-1])
    return layer_data
|
python
|
{
"resource": ""
}
|
q23693
|
interp
|
train
|
def interp(x, xp, *args, **kwargs):
    """Deprecated name for `interpolate_1d`; delegates all arguments unchanged."""
    result = interpolate_1d(x, xp, *args, **kwargs)
    return result
|
python
|
{
"resource": ""
}
|
q23694
|
find_bounding_indices
|
train
|
def find_bounding_indices(arr, values, axis, from_below=True):
    """Find the indices surrounding the values within arr along axis.

    Returns a set of above, below, good. Above and below are lists of arrays of indices.
    These lists are formulated such that they can be used directly to index into a numpy
    array and get the expected results (no extra slices or ellipsis necessary). `good` is
    a boolean array indicating the "columns" that actually had values to bound the desired
    value(s).

    Parameters
    ----------
    arr : array-like
        Array to search for values
    values: array-like
        One or more values to search for in `arr`
    axis : int
        The dimension of `arr` along which to search.
    from_below : bool, optional
        Whether to search from "below" (i.e. low indices to high indices). If `False`,
        the search will instead proceed from high indices to low indices. Defaults to `True`.

    Returns
    -------
    above : list of arrays
        List of broadcasted indices to the location above the desired value
    below : list of arrays
        List of broadcasted indices to the location below the desired value
    good : array
        Boolean array indicating where the search found proper bounds for the desired value

    """
    # The shape of generated indices is the same as the input, but with the axis of interest
    # replaced by the number of values to search for.
    indices_shape = list(arr.shape)
    indices_shape[axis] = len(values)

    # Storage for the found indices and the mask for good locations.
    # Use the builtin int/bool dtypes here: the np.int and np.bool aliases were
    # deprecated in NumPy 1.20 and removed in NumPy 1.24, so they raise
    # AttributeError on modern NumPy.
    indices = np.empty(indices_shape, dtype=int)
    good = np.empty(indices_shape, dtype=bool)

    # Used to put the output in the proper location
    store_slice = [slice(None)] * arr.ndim

    # Loop over all of the values and for each, see where the value would be found from a
    # linear search
    for level_index, value in enumerate(values):
        # Look for changes in the value of the test for <= value in consecutive points
        # Taking abs() because we only care if there is a flip, not which direction.
        switches = np.abs(np.diff((arr <= value).astype(int), axis=axis))

        # Good points are those where it's not just 0's along the whole axis
        good_search = np.any(switches, axis=axis)

        if from_below:
            # Look for the first switch; need to add 1 to the index since argmax is giving the
            # index within the difference array, which is one smaller.
            index = switches.argmax(axis=axis) + 1
        else:
            # Generate a list of slices to reverse the axis of interest so that searching from
            # 0 to N is starting at the "top" of the axis.
            arr_slice = [slice(None)] * arr.ndim
            arr_slice[axis] = slice(None, None, -1)

            # Same as above, but we use the slice to come from the end; then adjust those
            # indices to measure from the front.
            index = arr.shape[axis] - 1 - switches[tuple(arr_slice)].argmax(axis=axis)

        # Set all indices where the results are not good to 0
        index[~good_search] = 0

        # Put the results in the proper slice
        store_slice[axis] = level_index
        indices[tuple(store_slice)] = index
        good[tuple(store_slice)] = good_search

    # Create index values for broadcasting arrays
    above = broadcast_indices(arr, indices, arr.ndim, axis)
    below = broadcast_indices(arr, indices - 1, arr.ndim, axis)

    return above, below, good
|
python
|
{
"resource": ""
}
|
q23695
|
log_interp
|
train
|
def log_interp(x, xp, *args, **kwargs):
    """Deprecated name for `log_interpolate_1d`; delegates all arguments unchanged."""
    result = log_interpolate_1d(x, xp, *args, **kwargs)
    return result
|
python
|
{
"resource": ""
}
|
q23696
|
_greater_or_close
|
train
|
def _greater_or_close(a, value, **kwargs):
r"""Compare values for greater or close to boolean masks.
Returns a boolean mask for values greater than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are greater than or nearly equal to value.
"""
return (a > value) | np.isclose(a, value, **kwargs)
|
python
|
{
"resource": ""
}
|
q23697
|
_less_or_close
|
train
|
def _less_or_close(a, value, **kwargs):
r"""Compare values for less or close to boolean masks.
Returns a boolean mask for values less than or equal to a target within a specified
absolute or relative tolerance (as in :func:`numpy.isclose`).
Parameters
----------
a : array-like
Array of values to be compared
value : float
Comparison value
Returns
-------
array-like
Boolean array where values are less than or nearly equal to value.
"""
return (a < value) | np.isclose(a, value, **kwargs)
|
python
|
{
"resource": ""
}
|
q23698
|
grid_deltas_from_dataarray
|
train
|
def grid_deltas_from_dataarray(f):
    """Calculate the horizontal deltas between grid points of a DataArray.

    Calculate the signed delta distance between grid points of a DataArray in the horizontal
    directions, whether the grid is lat/lon or x/y.

    Parameters
    ----------
    f : `xarray.DataArray`
        Parsed DataArray on a latitude/longitude grid, in (..., lat, lon) or (..., y, x)
        dimension order

    Returns
    -------
    dx, dy:
        arrays of signed deltas between grid points in the x and y directions with dimensions
        matching those of `f`.

    See Also
    --------
    lat_lon_grid_deltas

    """
    if f.metpy.crs['grid_mapping_name'] == 'latitude_longitude':
        # Geographic grid: compute deltas from the lat/lon coordinates and projection
        dx, dy = lat_lon_grid_deltas(f.metpy.x, f.metpy.y,
                                     initstring=f.metpy.cartopy_crs.proj4_init)
        # Both deltas are 2D (y, x); pad all leading dims with new axes
        slc_x = slc_y = (np.newaxis,) * (f.ndim - 2) + (slice(None), slice(None))
    else:
        # Projected grid: simple coordinate differences, converted to meters
        dx = np.diff(f.metpy.x.metpy.unit_array.to('m').magnitude) * units('m')
        dy = np.diff(f.metpy.y.metpy.unit_array.to('m').magnitude) * units('m')
        # Each delta is 1D; pad leading dims and keep it along its own axis
        lead = (np.newaxis,) * (f.ndim - 2)
        slc_x = lead + (np.newaxis, slice(None))
        slc_y = lead + (slice(None), np.newaxis)
    return dx[slc_x], dy[slc_y]
|
python
|
{
"resource": ""
}
|
q23699
|
xarray_derivative_wrap
|
train
|
def xarray_derivative_wrap(func):
    """Decorate the derivative functions to make them work nicely with DataArrays.

    This will automatically determine if the coordinates can be pulled directly from the
    DataArray, or if a call to lat_lon_grid_deltas is needed.

    Parameters
    ----------
    func : callable
        Derivative function expecting data plus an ``x`` (coordinate values) or ``delta``
        (grid spacing) keyword argument along the requested ``axis``.

    Returns
    -------
    callable
        Wrapped function accepting either plain arrays (with explicit ``x``/``delta``)
        or an `xarray.DataArray` from which coordinate information is inferred.
    """
    @functools.wraps(func)
    def wrapper(f, **kwargs):
        # Explicit coordinates/spacing take precedence over any DataArray inference
        if 'x' in kwargs or 'delta' in kwargs:
            # Use the usual DataArray to pint.Quantity preprocessing wrapper
            return preprocess_xarray(func)(f, **kwargs)
        elif isinstance(f, xr.DataArray):
            # Get axis argument, defaulting to first dimension
            axis = f.metpy.find_axis_name(kwargs.get('axis', 0))

            # Initialize new kwargs with the axis number
            new_kwargs = {'axis': f.get_axis_num(axis)}

            if f[axis].attrs.get('_metpy_axis') == 'T':
                # Time coordinate, need to convert to seconds from datetimes
                new_kwargs['x'] = f[axis].metpy.as_timestamp().metpy.unit_array
            elif CFConventionHandler.check_axis(f[axis], 'lon'):
                # Longitude coordinate, need to get grid deltas
                new_kwargs['delta'], _ = grid_deltas_from_dataarray(f)
            elif CFConventionHandler.check_axis(f[axis], 'lat'):
                # Latitude coordinate, need to get grid deltas
                _, new_kwargs['delta'] = grid_deltas_from_dataarray(f)
            else:
                # General coordinate, use as is
                new_kwargs['x'] = f[axis].metpy.unit_array

            # Calculate and return result as a DataArray, re-attaching the original
            # coords/dims and recording the result's units as a string attribute
            result = func(f.metpy.unit_array, **new_kwargs)
            return xr.DataArray(result.magnitude,
                                coords=f.coords,
                                dims=f.dims,
                                attrs={'units': str(result.units)})
        else:
            # Error: without a DataArray there is no way to infer positions
            raise ValueError('Must specify either "x" or "delta" for value positions when "f" '
                             'is not a DataArray.')
    return wrapper
|
python
|
{
"resource": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.