| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 75-19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q4900
|
add_uniform_time_weights
|
train
|
def add_uniform_time_weights(ds):
"""Append uniform time weights to a Dataset.
All DataArrays with a time coordinate require a time weights coordinate.
For Datasets read in without a time bounds coordinate or explicit
time weights built in, aospy adds uniform time weights at each point
in the time coordinate.
Parameters
----------
ds : Dataset
Input data
Returns
-------
Dataset
"""
time = ds[TIME_STR]
unit_interval = time.attrs['units'].split('since')[0].strip()
time_weights = xr.ones_like(time)
time_weights.attrs['units'] = unit_interval
del time_weights.attrs['calendar']
ds[TIME_WEIGHTS_STR] = time_weights
return ds
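
# A minimal usage sketch (assumptions: this module defines TIME_STR = 'time'
# and TIME_WEIGHTS_STR = 'time_weights', and imports numpy as np and
# xarray as xr; the example Dataset mimics a raw, not-yet-decoded time axis):
example_time = xr.DataArray(np.arange(3), dims=[TIME_STR], name=TIME_STR,
                            attrs={'units': 'days since 2000-01-01',
                                   'calendar': 'noleap'})
example_ds = xr.Dataset(coords={TIME_STR: example_time})
example_ds = add_uniform_time_weights(example_ds)
# example_ds[TIME_WEIGHTS_STR] is now all ones, with units 'days'.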
|
python
|
{
"resource": ""
}
|
q4901
|
_assert_has_data_for_time
|
train
|
def _assert_has_data_for_time(da, start_date, end_date):
"""Check to make sure data is in Dataset for the given time range.
Parameters
----------
da : DataArray
DataArray with a time variable
start_date : datetime-like object or str
start date
end_date : datetime-like object or str
end date
Raises
------
AssertionError
If the time range is not within the time range of the DataArray
"""
if isinstance(start_date, str) and isinstance(end_date, str):
logging.warning(
'When using strings to specify start and end dates, the check '
'to determine if data exists for the full extent of the desired '
'interval is not implemented. Therefore it is possible that '
'you are doing a calculation for a lesser interval than you '
'specified. If you would like this check to occur, use explicit '
'datetime-like objects for bounds instead.')
return
if RAW_START_DATE_STR in da.coords:
with warnings.catch_warnings(record=True):
da_start = da[RAW_START_DATE_STR].values
da_end = da[RAW_END_DATE_STR].values
else:
times = da.time.isel(**{TIME_STR: [0, -1]})
da_start, da_end = times.values
message = ('Data does not exist for requested time range: {0} to {1};'
' found data from time range: {2} to {3}.')
# Add tolerance of one second, due to precision of cftime.datetimes
tol = datetime.timedelta(seconds=1)
if isinstance(da_start, np.datetime64):
tol = np.timedelta64(tol, 'ns')
range_exists = ((da_start - tol) <= start_date and
(da_end + tol) >= end_date)
assert (range_exists), message.format(start_date, end_date,
da_start, da_end)
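
# A minimal usage sketch (assumptions: the internal-name constants used above
# are defined, with TIME_STR == 'time', and no raw-date coordinates are
# present, so the check falls back to the first and last time values;
# numpy, pandas, and xarray imported as np, pd, xr):
example_da = xr.DataArray(
    np.zeros(12),
    coords=[pd.date_range('2000-01-01', periods=12, freq='MS')],
    dims=[TIME_STR])
_assert_has_data_for_time(example_da,
                          np.datetime64('2000-02-01'),
                          np.datetime64('2000-11-01'))  # passes silently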
|
python
|
{
"resource": ""
}
|
q4902
|
sel_time
|
train
|
def sel_time(da, start_date, end_date):
"""Subset a DataArray or Dataset for a given date range.
Ensures that data are present for full extent of requested range.
Appends start and end date of the subset to the DataArray.
Parameters
----------
da : DataArray or Dataset
data to subset
start_date : np.datetime64
start of date interval
end_date : np.datetime64
end of date interval
Returns
-------
da : DataArray or Dataset
subsetted data
Raises
------
AssertionError
if data for requested range do not exist for part or all of
requested range
"""
_assert_has_data_for_time(da, start_date, end_date)
da[SUBSET_START_DATE_STR] = xr.DataArray(start_date)
da[SUBSET_END_DATE_STR] = xr.DataArray(end_date)
return da.sel(**{TIME_STR: slice(start_date, end_date)})
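
# A minimal usage sketch (assumptions: the helper and internal-name constants
# referenced above are defined, with TIME_STR == 'time'; numpy, pandas, and
# xarray imported as np, pd, xr):
example_da = xr.DataArray(
    np.arange(12.),
    coords=[pd.date_range('2000-01-01', periods=12, freq='MS')],
    dims=[TIME_STR])
example_subset = sel_time(example_da,
                          np.datetime64('2000-03-01'),
                          np.datetime64('2000-06-01'))
# example_subset covers March through June 2000 and carries the requested
# start and end dates as scalar coordinates.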
|
python
|
{
"resource": ""
}
|
q4903
|
assert_matching_time_coord
|
train
|
def assert_matching_time_coord(arr1, arr2):
"""Check to see if two DataArrays have the same time coordinate.
Parameters
----------
arr1 : DataArray or Dataset
First DataArray or Dataset
arr2 : DataArray or Dataset
Second DataArray or Dataset
Raises
------
ValueError
If the time coordinates are not identical between the two Datasets
"""
message = ('Time weights not indexed by the same time coordinate as'
' computed data. This will lead to an improperly computed'
' time weighted average. Exiting.\n'
'arr1: {}\narr2: {}')
if not (arr1[TIME_STR].identical(arr2[TIME_STR])):
raise ValueError(message.format(arr1[TIME_STR], arr2[TIME_STR]))
|
python
|
{
"resource": ""
}
|
q4904
|
ensure_time_as_index
|
train
|
def ensure_time_as_index(ds):
"""Ensures that time is an indexed coordinate on relevant quantites.
Sometimes when the data we load from disk has only one timestep, the
indexing of time-defined quantities in the resulting xarray.Dataset gets
messed up, in that the time bounds array and data variables don't get
indexed by time, even though they should. Therefore, we need this helper
function to (possibly) correct this.
Note that this must be applied before CF-conventions are decoded; otherwise
it casts ``np.datetime64[ns]`` as ``int`` values.
Parameters
----------
ds : Dataset
Dataset with a time coordinate
Returns
-------
Dataset
"""
time_indexed_coords = {TIME_WEIGHTS_STR, TIME_BOUNDS_STR}
time_indexed_vars = set(ds.data_vars).union(time_indexed_coords)
time_indexed_vars = time_indexed_vars.intersection(ds.variables)
variables_to_replace = {}
for name in time_indexed_vars:
if TIME_STR not in ds[name].indexes:
da = ds[name]
if TIME_STR not in da.dims:
da = ds[name].expand_dims(TIME_STR)
da = da.assign_coords(**{TIME_STR: ds[TIME_STR]})
variables_to_replace[name] = da
return ds.assign(**variables_to_replace)
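
# A minimal usage sketch (assumptions: TIME_STR == 'time' and
# TIME_BOUNDS_STR == 'time_bounds'; applied to raw, not-yet-decoded data with
# a single timestep; numpy and xarray imported as np and xr):
example_ds = xr.Dataset(
    {'temp': ((), 288.0),
     'time_bounds': (('nv',), np.array([0.0, 31.0]))},
    coords={'time': ('time', [15.5])})
example_ds = ensure_time_as_index(example_ds)
# 'temp' and 'time_bounds' now carry a length-1 'time' dimension indexed by
# the 'time' coordinate.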
|
python
|
{
"resource": ""
}
|
q4905
|
infer_year
|
train
|
def infer_year(date):
"""Given a datetime-like object or string infer the year.
Parameters
----------
date : datetime-like object or str
Input date
Returns
-------
int
Examples
--------
>>> infer_year('2000')
2000
>>> infer_year('2000-01')
2000
>>> infer_year('2000-01-31')
2000
>>> infer_year(datetime.datetime(2000, 1, 1))
2000
>>> infer_year(np.datetime64('2000-01-01'))
2000
>>> infer_year(DatetimeNoLeap(2000, 1, 1))
2000
>>>
"""
if isinstance(date, str):
# Look for a string that begins with four numbers; the first four
# numbers found are the year.
pattern = r'(?P<year>\d{4})'
result = re.match(pattern, date)
if result:
return int(result.groupdict()['year'])
else:
raise ValueError('Invalid date string provided: {}'.format(date))
elif isinstance(date, np.datetime64):
return date.item().year
else:
return date.year
|
python
|
{
"resource": ""
}
|
q4906
|
maybe_convert_to_index_date_type
|
train
|
def maybe_convert_to_index_date_type(index, date):
"""Convert a datetime-like object to the index's date type.
Datetime indexing in xarray can be done using either a pandas
DatetimeIndex or a CFTimeIndex. Both support partial-datetime string
indexing regardless of the calendar type of the underlying data;
therefore if a string is passed as a date, we return it unchanged. If a
datetime-like object is provided, it will be converted to the underlying
date type of the index. For a DatetimeIndex that is np.datetime64; for a
CFTimeIndex that is an object of type cftime.datetime specific to the
calendar used.
Parameters
----------
index : pd.Index
Input time index
date : datetime-like object or str
Input datetime
Returns
-------
date of the type appropriate for the time index of the Dataset
"""
if isinstance(date, str):
return date
if isinstance(index, pd.DatetimeIndex):
if isinstance(date, np.datetime64):
return date
else:
return np.datetime64(str(date))
else:
date_type = index.date_type
if isinstance(date, date_type):
return date
else:
if isinstance(date, np.datetime64):
# Convert to datetime.date or datetime.datetime object
date = date.item()
if isinstance(date, datetime.date):
# Convert to a datetime.datetime object
date = datetime.datetime.combine(
date, datetime.datetime.min.time())
return date_type(date.year, date.month, date.day, date.hour,
date.minute, date.second, date.microsecond)
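
# A minimal usage sketch (assumptions: datetime, numpy, and pandas are
# imported as in this module). With a pandas DatetimeIndex the date is cast
# to np.datetime64; strings always pass through unchanged:
example_index = pd.DatetimeIndex(['2000-01-01', '2000-02-01'])
maybe_convert_to_index_date_type(example_index,
                                 datetime.datetime(2000, 1, 15))
# -> np.datetime64('2000-01-15T00:00:00')
maybe_convert_to_index_date_type(example_index, '2000-01')
# -> '2000-01' (unchanged)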
|
python
|
{
"resource": ""
}
|
q4907
|
Region._make_mask
|
train
|
def _make_mask(self, data, lon_str=LON_STR, lat_str=LAT_STR):
"""Construct the mask that defines a region on a given data's grid."""
mask = False
for west, east, south, north in self.mask_bounds:
if west < east:
mask_lon = (data[lon_str] > west) & (data[lon_str] < east)
else:
mask_lon = (data[lon_str] < west) | (data[lon_str] > east)
mask_lat = (data[lat_str] > south) & (data[lat_str] < north)
mask |= mask_lon & mask_lat
return mask
|
python
|
{
"resource": ""
}
|
q4908
|
Region.mask_var
|
train
|
def mask_var(self, data, lon_cyclic=True, lon_str=LON_STR,
lat_str=LAT_STR):
"""Mask the given data outside this region.
Parameters
----------
data : xarray.DataArray
The array to be regionally masked.
lon_cyclic : bool, optional (default True)
Whether or not the longitudes of ``data`` span the whole globe,
meaning that they should be wrapped around as necessary to cover
the Region's full width.
lon_str, lat_str : str, optional
The names of the longitude and latitude dimensions, respectively,
in the data to be masked. Defaults are
``aospy.internal_names.LON_STR`` and
            ``aospy.internal_names.LAT_STR``, respectively.
Returns
-------
xarray.DataArray
The original array with points outside of the region masked.
"""
# TODO: is this still necessary?
if not lon_cyclic:
if self.west_bound > self.east_bound:
raise ValueError("Longitudes of data to be masked are "
"specified as non-cyclic, but Region's "
"definition requires wraparound longitudes.")
masked = data.where(self._make_mask(data, lon_str=lon_str,
lat_str=lat_str))
return masked
|
python
|
{
"resource": ""
}
|
q4909
|
Region.ts
|
train
|
def ts(self, data, lon_cyclic=True, lon_str=LON_STR, lat_str=LAT_STR,
land_mask_str=LAND_MASK_STR, sfc_area_str=SFC_AREA_STR):
"""Create yearly time-series of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to create the regional timeseries of
lon_cyclic : { None, True, False }, optional (default True)
Whether or not the longitudes of ``data`` span the whole globe,
meaning that they should be wrapped around as necessary to cover
the Region's full width.
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
xarray.DataArray
The timeseries of values averaged within the region and within each
year, one value per year.
"""
data_masked = self.mask_var(data, lon_cyclic=lon_cyclic,
lon_str=lon_str, lat_str=lat_str)
sfc_area = data[sfc_area_str]
sfc_area_masked = self.mask_var(sfc_area, lon_cyclic=lon_cyclic,
lon_str=lon_str, lat_str=lat_str)
land_mask = _get_land_mask(data, self.do_land_mask,
land_mask_str=land_mask_str)
weights = sfc_area_masked * land_mask
# Mask weights where data values are initially invalid in addition
# to applying the region mask.
weights = weights.where(np.isfinite(data))
weights_reg_sum = weights.sum(lon_str).sum(lat_str)
data_reg_sum = (data_masked * sfc_area_masked *
land_mask).sum(lat_str).sum(lon_str)
return data_reg_sum / weights_reg_sum
|
python
|
{
"resource": ""
}
|
q4910
|
Region.av
|
train
|
def av(self, data, lon_str=LON_STR, lat_str=LAT_STR,
land_mask_str=LAND_MASK_STR, sfc_area_str=SFC_AREA_STR):
"""Time-average of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to compute the regional time-average of
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
xarray.DataArray
The region-averaged and time-averaged data.
"""
ts = self.ts(data, lon_str=lon_str, lat_str=lat_str,
land_mask_str=land_mask_str, sfc_area_str=sfc_area_str)
if YEAR_STR not in ts.coords:
return ts
else:
return ts.mean(YEAR_STR)
|
python
|
{
"resource": ""
}
|
q4911
|
_rename_coords
|
train
|
def _rename_coords(ds, attrs):
"""Rename coordinates to aospy's internal names."""
for name_int, names_ext in attrs.items():
# Check if coord is in dataset already.
ds_coord_name = set(names_ext).intersection(set(ds.coords))
if ds_coord_name:
# Rename to the aospy internal name.
try:
ds = ds.rename({list(ds_coord_name)[0]: name_int})
logging.debug("Rename coord from `{0}` to `{1}` for "
"Dataset `{2}`".format(ds_coord_name,
name_int, ds))
# xarray throws a ValueError if the name already exists
except ValueError:
ds = ds
return ds
|
python
|
{
"resource": ""
}
|
q4912
|
_bounds_from_array
|
train
|
def _bounds_from_array(arr, dim_name, bounds_name):
"""Get the bounds of an array given its center values.
E.g. if lat-lon grid center lat/lon values are known, but not the
bounds of each grid box. The algorithm assumes that the bounds
are simply halfway between each pair of center values.
"""
# TODO: don't assume needed dimension is in axis=0
# TODO: refactor to get rid of repetitive code
spacing = arr.diff(dim_name).values
lower = xr.DataArray(np.empty_like(arr), dims=arr.dims,
coords=arr.coords)
lower.values[:-1] = arr.values[:-1] - 0.5*spacing
lower.values[-1] = arr.values[-1] - 0.5*spacing[-1]
upper = xr.DataArray(np.empty_like(arr), dims=arr.dims,
coords=arr.coords)
upper.values[:-1] = arr.values[:-1] + 0.5*spacing
upper.values[-1] = arr.values[-1] + 0.5*spacing[-1]
bounds = xr.concat([lower, upper], dim='bounds')
return bounds.T
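
# A minimal usage sketch (assumptions: numpy and xarray imported as np and
# xr). For evenly spaced latitude centers the inferred bounds sit halfway
# between neighbors, with the end cells reusing the nearest spacing:
example_lat = xr.DataArray([-60., -30., 0., 30., 60.], dims=['lat'],
                           name='lat')
example_bounds = _bounds_from_array(example_lat, 'lat', 'lat_bounds')
# example_bounds.isel(bounds=0).values -> [-75., -45., -15., 15., 45.]
# example_bounds.isel(bounds=1).values -> [-45., -15., 15., 45., 75.]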
|
python
|
{
"resource": ""
}
|
q4913
|
_diff_bounds
|
train
|
def _diff_bounds(bounds, coord):
"""Get grid spacing by subtracting upper and lower bounds."""
try:
return bounds[:, 1] - bounds[:, 0]
except IndexError:
diff = np.diff(bounds, axis=0)
return xr.DataArray(diff, dims=coord.dims, coords=coord.coords)
|
python
|
{
"resource": ""
}
|
q4914
|
_grid_sfc_area
|
train
|
def _grid_sfc_area(lon, lat, lon_bounds=None, lat_bounds=None):
"""Calculate surface area of each grid cell in a lon-lat grid."""
# Compute the bounds if not given.
if lon_bounds is None:
lon_bounds = _bounds_from_array(
lon, internal_names.LON_STR, internal_names.LON_BOUNDS_STR)
if lat_bounds is None:
lat_bounds = _bounds_from_array(
lat, internal_names.LAT_STR, internal_names.LAT_BOUNDS_STR)
# Compute the surface area.
dlon = _diff_bounds(utils.vertcoord.to_radians(lon_bounds, is_delta=True),
lon)
sinlat_bounds = np.sin(utils.vertcoord.to_radians(lat_bounds,
is_delta=True))
dsinlat = np.abs(_diff_bounds(sinlat_bounds, lat))
sfc_area = dlon*dsinlat*(RADIUS_EARTH**2)
# Rename the coordinates such that they match the actual lat / lon.
try:
sfc_area = sfc_area.rename(
{internal_names.LAT_BOUNDS_STR: internal_names.LAT_STR,
internal_names.LON_BOUNDS_STR: internal_names.LON_STR})
except ValueError:
pass
# Clean up: correct names and dimension order.
sfc_area = sfc_area.rename(internal_names.SFC_AREA_STR)
sfc_area[internal_names.LAT_STR] = lat
sfc_area[internal_names.LON_STR] = lon
return sfc_area.transpose()
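
# A minimal sanity-check sketch (assumptions: numpy/xarray imported as np/xr
# and RADIUS_EARTH given in meters). On a uniform 10-degree global grid the
# cell areas should sum to the area of the sphere, 4 * pi * RADIUS_EARTH**2:
example_lon = xr.DataArray(np.arange(5., 360., 10.),
                           dims=[internal_names.LON_STR])
example_lat = xr.DataArray(np.arange(-85., 90., 10.),
                           dims=[internal_names.LAT_STR])
example_area = _grid_sfc_area(example_lon, example_lat)
# float(example_area.sum()) is approximately 4 * np.pi * RADIUS_EARTH ** 2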
|
python
|
{
"resource": ""
}
|
q4915
|
Model._get_grid_files
|
train
|
def _get_grid_files(self):
"""Get the files holding grid data for an aospy object."""
grid_file_paths = self.grid_file_paths
datasets = []
if isinstance(grid_file_paths, str):
grid_file_paths = [grid_file_paths]
for path in grid_file_paths:
try:
ds = xr.open_dataset(path, decode_times=False)
except (TypeError, AttributeError):
ds = xr.open_mfdataset(path, decode_times=False).load()
except (RuntimeError, OSError) as e:
msg = str(e) + ': {}'.format(path)
raise RuntimeError(msg)
datasets.append(ds)
return tuple(datasets)
|
python
|
{
"resource": ""
}
|
q4916
|
Model._set_mult_grid_attr
|
train
|
def _set_mult_grid_attr(self):
"""
Set multiple attrs from grid file given their names in the grid file.
"""
grid_objs = self._get_grid_files()
if self.grid_attrs is None:
self.grid_attrs = {}
# Override GRID_ATTRS with entries in grid_attrs
attrs = internal_names.GRID_ATTRS.copy()
for k, v in self.grid_attrs.items():
if k not in attrs:
raise ValueError(
'Unrecognized internal name, {!r}, specified for a '
'custom grid attribute name. See the full list of '
'valid internal names below:\n\n{}'.format(
k, list(internal_names.GRID_ATTRS.keys())))
attrs[k] = (v, )
for name_int, names_ext in attrs.items():
for name in names_ext:
grid_attr = _get_grid_attr(grid_objs, name)
if grid_attr is not None:
TIME_STR = internal_names.TIME_STR
renamed_attr = _rename_coords(grid_attr, attrs)
if ((TIME_STR not in renamed_attr.dims) and
(TIME_STR in renamed_attr.coords)):
renamed_attr = renamed_attr.drop(TIME_STR)
setattr(self, name_int, renamed_attr)
break
|
python
|
{
"resource": ""
}
|
q4917
|
Model.set_grid_data
|
train
|
def set_grid_data(self):
"""Populate the attrs that hold grid data."""
if self._grid_data_is_set:
return
self._set_mult_grid_attr()
if not np.any(getattr(self, 'sfc_area', None)):
try:
sfc_area = _grid_sfc_area(self.lon, self.lat, self.lon_bounds,
self.lat_bounds)
except AttributeError:
sfc_area = _grid_sfc_area(self.lon, self.lat)
self.sfc_area = sfc_area
try:
self.levs_thick = utils.vertcoord.level_thickness(self.level)
except AttributeError:
self.level = None
self.levs_thick = None
self._grid_data_is_set = True
|
python
|
{
"resource": ""
}
|
q4918
|
_other_to_lon
|
train
|
def _other_to_lon(func):
"""Wrapper for casting Longitude operator arguments to Longitude"""
def func_other_to_lon(obj, other):
return func(obj, _maybe_cast_to_lon(other))
return func_other_to_lon
|
python
|
{
"resource": ""
}
|
q4919
|
_get_attr_by_tag
|
train
|
def _get_attr_by_tag(obj, tag, attr_name):
"""Get attribute from an object via a string tag.
Parameters
----------
obj : object from which to get the attribute
attr_name : str
Unmodified name of the attribute to be found. The actual attribute
        that is returned may be modified by 'tag'.
tag : str
Tag specifying how to modify 'attr_name' by pre-pending it with 'tag'.
Must be a key of the _TAG_ATTR_MODIFIERS dict.
Returns
-------
the specified attribute of obj
"""
attr_name = _TAG_ATTR_MODIFIERS[tag] + attr_name
return getattr(obj, attr_name)
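
# A minimal sketch (assumption: _TAG_ATTR_MODIFIERS maps tags to attribute
# name prefixes, e.g. {'all': '', 'default': 'default_'} as in aospy). With a
# stand-in object carrying both attributes, the tag picks between them:
class _ExampleProj(object):
    models = ['model_a', 'model_b']
    default_models = ['model_a']

_get_attr_by_tag(_ExampleProj, 'all', 'models')      # -> ['model_a', 'model_b']
_get_attr_by_tag(_ExampleProj, 'default', 'models')  # -> ['model_a']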
|
python
|
{
"resource": ""
}
|
q4920
|
_get_all_objs_of_type
|
train
|
def _get_all_objs_of_type(type_, parent):
"""Get all attributes of the given type from the given object.
Parameters
----------
type_ : The desired type
parent : The object from which to get the attributes with type matching
'type_'
Returns
-------
    A set (possibly empty) of attributes from 'parent'
"""
return set([obj for obj in parent.__dict__.values()
if isinstance(obj, type_)])
|
python
|
{
"resource": ""
}
|
q4921
|
_prune_invalid_time_reductions
|
train
|
def _prune_invalid_time_reductions(spec):
"""Prune time reductions of spec with no time dimension."""
valid_reductions = []
if not spec['var'].def_time and spec['dtype_out_time'] is not None:
for reduction in spec['dtype_out_time']:
if reduction not in _TIME_DEFINED_REDUCTIONS:
valid_reductions.append(reduction)
else:
msg = ("Var {0} has no time dimension "
"for the given time reduction "
"{1} so this calculation will "
"be skipped".format(spec['var'].name, reduction))
logging.info(msg)
else:
valid_reductions = spec['dtype_out_time']
return valid_reductions
|
python
|
{
"resource": ""
}
|
q4922
|
_compute_or_skip_on_error
|
train
|
def _compute_or_skip_on_error(calc, compute_kwargs):
"""Execute the Calc, catching and logging exceptions, but don't re-raise.
Prevents one failed calculation from stopping a larger requested set
of calculations.
"""
try:
return calc.compute(**compute_kwargs)
except Exception:
msg = ("Skipping aospy calculation `{0}` due to error with the "
"following traceback: \n{1}")
logging.warning(msg.format(calc, traceback.format_exc()))
return None
|
python
|
{
"resource": ""
}
|
q4923
|
_submit_calcs_on_client
|
train
|
def _submit_calcs_on_client(calcs, client, func):
"""Submit calculations via dask.bag and a distributed client"""
logging.info('Connected to client: {}'.format(client))
if LooseVersion(dask.__version__) < '0.18':
dask_option_setter = dask.set_options
else:
dask_option_setter = dask.config.set
with dask_option_setter(get=client.get):
return db.from_sequence(calcs).map(func).compute()
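
# A standalone sketch of the dask.bag pattern used above, detached from any
# distributed client (assumes only that dask.bag is importable, as it already
# is here via `db`): build a bag from a sequence, map a function over it, and
# compute the results with whatever scheduler is configured.
example_results = db.from_sequence([1, 2, 3]).map(lambda x: x ** 2).compute()
# example_results == [1, 4, 9]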
|
python
|
{
"resource": ""
}
|
q4924
|
_exec_calcs
|
train
|
def _exec_calcs(calcs, parallelize=False, client=None, **compute_kwargs):
"""Execute the given calculations.
Parameters
----------
calcs : Sequence of ``aospy.Calc`` objects
parallelize : bool, default False
Whether to submit the calculations in parallel or not
client : distributed.Client or None
The distributed Client used if parallelize is set to True; if None
a distributed LocalCluster is used.
compute_kwargs : dict of keyword arguments passed to ``Calc.compute``
Returns
-------
A list of the values returned by each Calc object that was executed.
"""
if parallelize:
def func(calc):
"""Wrap _compute_or_skip_on_error to require only the calc
argument"""
if 'write_to_tar' in compute_kwargs:
compute_kwargs['write_to_tar'] = False
return _compute_or_skip_on_error(calc, compute_kwargs)
if client is None:
n_workers = _n_workers_for_local_cluster(calcs)
with distributed.LocalCluster(n_workers=n_workers) as cluster:
with distributed.Client(cluster) as client:
result = _submit_calcs_on_client(calcs, client, func)
else:
result = _submit_calcs_on_client(calcs, client, func)
if compute_kwargs['write_to_tar']:
_serial_write_to_tar(calcs)
return result
else:
return [_compute_or_skip_on_error(calc, compute_kwargs)
for calc in calcs]
|
python
|
{
"resource": ""
}
|
q4925
|
submit_mult_calcs
|
train
|
def submit_mult_calcs(calc_suite_specs, exec_options=None):
"""Generate and execute all specified computations.
Once the calculations are prepped and submitted for execution, any
calculation that triggers any exception or error is skipped, and the rest
of the calculations proceed unaffected. This prevents an error in a single
calculation from crashing a large suite of calculations.
Parameters
----------
calc_suite_specs : dict
The specifications describing the full set of calculations to be
generated and potentially executed. Accepted keys and their values:
library : module or package comprising an aospy object library
The aospy object library for these calculations.
projects : list of aospy.Proj objects
The projects to permute over.
models : 'all', 'default', or list of aospy.Model objects
The models to permute over. If 'all', use all models in the
``models`` attribute of each ``Proj``. If 'default', use all
models in the ``default_models`` attribute of each ``Proj``.
runs : 'all', 'default', or list of aospy.Run objects
The runs to permute over. If 'all', use all runs in the
``runs`` attribute of each ``Model``. If 'default', use all
runs in the ``default_runs`` attribute of each ``Model``.
variables : list of aospy.Var objects
The variables to be calculated.
regions : 'all' or list of aospy.Region objects
The region(s) over which any regional reductions will be performed.
If 'all', use all regions in the ``regions`` attribute of each
``Proj``.
date_ranges : 'default' or a list of tuples
The range of dates (inclusive) over which to perform calculations.
If 'default', use the ``default_start_date`` and
``default_end_date`` attribute of each ``Run``. Else provide a
list of tuples, each containing a pair of start and end dates,
such as ``date_ranges=[(start, end)]`` where ``start`` and
``end`` are each ``datetime.datetime`` objects, partial
datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
``cftime.datetime`` objects.
output_time_intervals : {'ann', season-string, month-integer}
The sub-annual time interval over which to aggregate.
- 'ann' : Annual mean
- season-string : E.g. 'JJA' for June-July-August
- month-integer : 1 for January, 2 for February, etc. Each one is
a separate reduction, e.g. [1, 2] would produce averages (or
other specified time reduction) over all Januaries, and
separately over all Februaries.
output_time_regional_reductions : list of reduction string identifiers
Unlike most other keys, these are not permuted over when creating
the :py:class:`aospy.Calc` objects that execute the calculations;
each :py:class:`aospy.Calc` performs all of the specified
reductions. Accepted string identifiers are:
- Gridpoint-by-gridpoint output:
- 'av' : Gridpoint-by-gridpoint time-average
- 'std' : Gridpoint-by-gridpoint temporal standard deviation
- 'ts' : Gridpoint-by-gridpoint time-series
- Averages over each region specified via `region`:
- 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts'
output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
How to reduce the data vertically:
- None : no vertical reduction
- 'vert_av' : mass-weighted vertical average
- 'vert_int' : mass-weighted vertical integral
input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
A string specifying the time resolution of the input data. In
'#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
sub-daily output. These are the suggested specifiers, but others
may be used if they are also used by the DataLoaders for the given
Runs.
input_time_datatypes : {'inst', 'ts', 'av'}
What the time axis of the input data represents:
- 'inst' : Timeseries of instantaneous values
- 'ts' : Timeseries of averages over the period of each time-index
- 'av' : A single value averaged over a date range
input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
The vertical coordinate system used by the input data:
- False : not defined vertically
- 'pressure' : pressure coordinates
- 'sigma' : hybrid sigma-pressure coordinates
input_time_offsets : {None, dict}, optional
How to offset input data in time to correct for metadata errors
- None : no time offset applied
- dict : e.g. ``{'hours': -3}`` to offset times by -3 hours
See :py:meth:`aospy.utils.times.apply_time_offset`.
exec_options : dict or None (default None)
Options regarding how the calculations are reported, submitted, and
saved. If None, default settings are used for all options. Currently
supported options (each should be either `True` or `False`):
- prompt_verify : (default False) If True, print summary of
calculations to be performed and prompt user to confirm before
submitting for execution.
- parallelize : (default False) If True, submit calculations in
parallel.
- client : distributed.Client or None (default None) The
dask.distributed Client used to schedule computations. If None
and parallelize is True, a LocalCluster will be started.
- write_to_tar : (default True) If True, write results of calculations
to .tar files, one for each :py:class:`aospy.Run` object.
        These tar files have a directory structure identical to that of
        the standard output, relative to their root directory, which is
specified via the `tar_direc_out` argument of each Proj
object's instantiation.
Returns
-------
A list of the return values from each :py:meth:`aospy.Calc.compute` call
If a calculation ran without error, this value is the
:py:class:`aospy.Calc` object itself, with the results of its
calculations saved in its ``data_out`` attribute. ``data_out`` is a
dictionary, with the keys being the temporal-regional reduction
identifiers (e.g. 'reg.av'), and the values being the corresponding
result.
If any error occurred during a calculation, the return value is None.
Raises
------
AospyException
If the ``prompt_verify`` option is set to True and the user does not
respond affirmatively to the prompt.
"""
if exec_options is None:
exec_options = dict()
if exec_options.pop('prompt_verify', False):
print(_print_suite_summary(calc_suite_specs))
_user_verify()
calc_suite = CalcSuite(calc_suite_specs)
calcs = calc_suite.create_calcs()
if not calcs:
raise AospyException(
"The specified combination of parameters yielded zero "
"calculations. Most likely, one of the parameters is "
"inadvertently empty."
)
return _exec_calcs(calcs, **exec_options)
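
# A minimal, hypothetical specification sketch (shown as comments because it
# needs a real aospy object library to run; `my_obj_lib` and its attributes
# are placeholders, not part of aospy):
#
#     import my_obj_lib
#     calc_suite_specs = dict(
#         library=my_obj_lib,
#         projects=[my_obj_lib.example_proj],
#         models='default',
#         runs='default',
#         variables=[my_obj_lib.temp],
#         regions='all',
#         date_ranges='default',
#         output_time_intervals=['ann'],
#         output_time_regional_reductions=['av', 'reg.av'],
#         output_vertical_reductions=[None],
#         input_time_intervals=['monthly'],
#         input_time_datatypes=['ts'],
#         input_vertical_datatypes=[False],
#         input_time_offsets=[None],
#     )
#     submit_mult_calcs(calc_suite_specs,
#                       exec_options=dict(prompt_verify=False,
#                                         parallelize=False,
#                                         write_to_tar=True))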
|
python
|
{
"resource": ""
}
|
q4926
|
CalcSuite._get_requested_spec
|
train
|
def _get_requested_spec(self, obj, spec_name):
"""Helper to translate user specifications to needed objects."""
requested = self._specs_in[spec_name]
if isinstance(requested, str):
return _get_attr_by_tag(obj, requested, spec_name)
else:
return requested
|
python
|
{
"resource": ""
}
|
q4927
|
CalcSuite._permute_core_specs
|
train
|
def _permute_core_specs(self):
"""Generate all requested combinations of the core objects."""
obj_trees = []
projects = self._get_requested_spec(self._obj_lib, _PROJECTS_STR)
for project in projects:
models = self._get_requested_spec(project, _MODELS_STR)
for model in models:
runs = self._get_requested_spec(model, _RUNS_STR)
for run in runs:
obj_trees.append({
self._NAMES_SUITE_TO_CALC[_PROJECTS_STR]: project,
self._NAMES_SUITE_TO_CALC[_MODELS_STR]: model,
self._NAMES_SUITE_TO_CALC[_RUNS_STR]: run,
})
return obj_trees
|
python
|
{
"resource": ""
}
|
q4928
|
CalcSuite._get_regions
|
train
|
def _get_regions(self):
"""Get the requested regions."""
if self._specs_in[_REGIONS_STR] == 'all':
return [_get_all_objs_of_type(
Region, getattr(self._obj_lib, 'regions', self._obj_lib)
)]
else:
return [set(self._specs_in[_REGIONS_STR])]
|
python
|
{
"resource": ""
}
|
q4929
|
CalcSuite._get_variables
|
train
|
def _get_variables(self):
"""Get the requested variables."""
if self._specs_in[_VARIABLES_STR] == 'all':
return _get_all_objs_of_type(
Var, getattr(self._obj_lib, 'variables', self._obj_lib)
)
else:
return set(self._specs_in[_VARIABLES_STR])
|
python
|
{
"resource": ""
}
|
q4930
|
CalcSuite._get_aux_specs
|
train
|
def _get_aux_specs(self):
"""Get and pre-process all of the non-core specifications."""
# Drop the "core" specifications, which are handled separately.
specs = self._specs_in.copy()
[specs.pop(core) for core in self._CORE_SPEC_NAMES]
specs[_REGIONS_STR] = self._get_regions()
specs[_VARIABLES_STR] = self._get_variables()
specs['date_ranges'] = self._get_date_ranges()
specs['output_time_regional_reductions'] = self._get_time_reg_reducts()
return specs
|
python
|
{
"resource": ""
}
|
q4931
|
CalcSuite._permute_aux_specs
|
train
|
def _permute_aux_specs(self):
"""Generate all permutations of the non-core specifications."""
# Convert to attr names that Calc is expecting.
calc_aux_mapping = self._NAMES_SUITE_TO_CALC.copy()
# Special case: manually add 'library' to mapping
calc_aux_mapping[_OBJ_LIB_STR] = None
[calc_aux_mapping.pop(core) for core in self._CORE_SPEC_NAMES]
specs = self._get_aux_specs()
for suite_name, calc_name in calc_aux_mapping.items():
specs[calc_name] = specs.pop(suite_name)
return _permuted_dicts_of_specs(specs)
|
python
|
{
"resource": ""
}
|
q4932
|
CalcSuite._combine_core_aux_specs
|
train
|
def _combine_core_aux_specs(self):
"""Combine permutations over core and auxilliary Calc specs."""
all_specs = []
for core_dict in self._permute_core_specs():
for aux_dict in self._permute_aux_specs():
all_specs.append(_merge_dicts(core_dict, aux_dict))
return all_specs
|
python
|
{
"resource": ""
}
|
q4933
|
CalcSuite.create_calcs
|
train
|
def create_calcs(self):
"""Generate a Calc object for each requested parameter combination."""
specs = self._combine_core_aux_specs()
for spec in specs:
spec['dtype_out_time'] = _prune_invalid_time_reductions(spec)
return [Calc(**sp) for sp in specs]
|
python
|
{
"resource": ""
}
|
q4934
|
data_in_label
|
train
|
def data_in_label(intvl_in, dtype_in_time, dtype_in_vert=False):
"""Create string label specifying the input data of a calculation."""
intvl_lbl = intvl_in
time_lbl = dtype_in_time
lbl = '_'.join(['from', intvl_lbl, time_lbl]).replace('__', '_')
vert_lbl = dtype_in_vert if dtype_in_vert else False
if vert_lbl:
lbl = '_'.join([lbl, vert_lbl]).replace('__', '_')
return lbl
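
# A few example labels, following directly from the rules above (the vertical
# suffix is appended only when dtype_in_vert is truthy):
assert data_in_label('monthly', 'ts') == 'from_monthly_ts'
assert data_in_label('monthly', 'ts', 'sigma') == 'from_monthly_ts_sigma'
assert data_in_label('3hr', 'inst') == 'from_3hr_inst'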
|
python
|
{
"resource": ""
}
|
q4935
|
data_name_gfdl
|
train
|
def data_name_gfdl(name, domain, data_type, intvl_type, data_yr,
intvl, data_in_start_yr, data_in_dur):
"""Determine the filename of GFDL model data output."""
# Determine starting year of netCDF file to be accessed.
extra_yrs = (data_yr - data_in_start_yr) % data_in_dur
data_in_yr = data_yr - extra_yrs
# Determine file name. Two cases: time series (ts) or time-averaged (av).
if data_type in ('ts', 'inst'):
if intvl_type == 'annual':
if data_in_dur == 1:
filename = '.'.join([domain, '{:04d}'.format(data_in_yr),
name, 'nc'])
else:
filename = '.'.join([domain, '{:04d}-{:04d}'.format(
data_in_yr, data_in_yr + data_in_dur - 1
), name, 'nc'])
elif intvl_type == 'monthly':
filename = (domain + '.{:04d}'.format(data_in_yr) + '01-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'12.' + name + '.nc')
elif intvl_type == 'daily':
filename = (domain + '.{:04d}'.format(data_in_yr) + '0101-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'1231.' + name + '.nc')
elif 'hr' in intvl_type:
filename = '.'.join(
[domain, '{:04d}010100-{:04d}123123'.format(
data_in_yr, data_in_yr + data_in_dur - 1), name, 'nc']
)
elif data_type == 'av':
if intvl_type in ['annual', 'ann']:
label = 'ann'
elif intvl_type in ['seasonal', 'seas']:
label = intvl.upper()
elif intvl_type in ['monthly', 'mon']:
label, val = time_label(intvl)
if data_in_dur == 1:
filename = (domain + '.{:04d}'.format(data_in_yr) +
'.' + label + '.nc')
else:
filename = (domain + '.{:04d}'.format(data_in_yr) + '-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'.' + label + '.nc')
elif data_type == 'av_ts':
filename = (domain + '.{:04d}'.format(data_in_yr) + '-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'.01-12.nc')
return filename
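
# A worked example of the monthly time-series branch above: with files that
# each span 5 years starting in 1983, a request for year 1984 resolves to the
# 1983-1987 file (the `intvl` argument is unused on this branch):
assert data_name_gfdl('t_surf', 'atmos', 'ts', 'monthly',
                      1984, 1, 1983, 5) == 'atmos.198301-198712.t_surf.nc'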
|
python
|
{
"resource": ""
}
|
q4936
|
dmget
|
train
|
def dmget(files_list):
"""Call GFDL command 'dmget' to access archived files."""
if isinstance(files_list, str):
files_list = [files_list]
archive_files = []
for f in files_list:
if f.startswith('/archive'):
archive_files.append(f)
try:
subprocess.call(['dmget'] + archive_files)
except OSError:
logging.debug('dmget command not found in this machine')
|
python
|
{
"resource": ""
}
|
q4937
|
_replace_pressure
|
train
|
def _replace_pressure(arguments, dtype_in_vert):
"""Replace p and dp Vars with appropriate Var objects specific to
the dtype_in_vert."""
arguments_out = []
for arg in arguments:
if isinstance(arg, Var):
if arg.name == 'p':
arguments_out.append(_P_VARS[dtype_in_vert])
elif arg.name == 'dp':
arguments_out.append(_DP_VARS[dtype_in_vert])
else:
arguments_out.append(arg)
else:
arguments_out.append(arg)
return arguments_out
|
python
|
{
"resource": ""
}
|
q4938
|
_add_metadata_as_attrs
|
train
|
def _add_metadata_as_attrs(data, units, description, dtype_out_vert):
"""Add metadata attributes to Dataset or DataArray"""
if isinstance(data, xr.DataArray):
return _add_metadata_as_attrs_da(data, units, description,
dtype_out_vert)
else:
for name, arr in data.data_vars.items():
_add_metadata_as_attrs_da(arr, units, description,
dtype_out_vert)
return data
|
python
|
{
"resource": ""
}
|
q4939
|
_add_metadata_as_attrs_da
|
train
|
def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):
"""Add metadata attributes to DataArray"""
if dtype_out_vert == 'vert_int':
if units != '':
units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)
else:
units = '(vertical integral of quantity with unspecified units)'
data.attrs['units'] = units
data.attrs['description'] = description
return data
|
python
|
{
"resource": ""
}
|
q4940
|
Calc._dir_out
|
train
|
def _dir_out(self):
"""Create string of the data directory to save individual .nc files."""
return os.path.join(self.proj.direc_out, self.proj.name,
self.model.name, self.run.name, self.name)
|
python
|
{
"resource": ""
}
|
q4941
|
Calc._dir_tar_out
|
train
|
def _dir_tar_out(self):
"""Create string of the data directory to store a tar file."""
return os.path.join(self.proj.tar_direc_out, self.proj.name,
self.model.name, self.run.name)
|
python
|
{
"resource": ""
}
|
q4942
|
Calc._file_name
|
train
|
def _file_name(self, dtype_out_time, extension='nc'):
"""Create the name of the aospy file."""
if dtype_out_time is None:
dtype_out_time = ''
out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,
dtype_vert=self.dtype_out_vert)
in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,
self.dtype_in_vert)
start_year = utils.times.infer_year(self.start_date)
end_year = utils.times.infer_year(self.end_date)
yr_lbl = utils.io.yr_label((start_year, end_year))
return '.'.join(
[self.name, out_lbl, in_lbl, self.model.name,
self.run.name, yr_lbl, extension]
).replace('..', '.')
|
python
|
{
"resource": ""
}
|
q4943
|
Calc._print_verbose
|
train
|
def _print_verbose(*args):
"""Print diagnostic message."""
try:
return '{0} {1} ({2})'.format(args[0], args[1], ctime())
except IndexError:
return '{0} ({1})'.format(args[0], ctime())
|
python
|
{
"resource": ""
}
|
q4944
|
Calc._to_desired_dates
|
train
|
def _to_desired_dates(self, arr):
"""Restrict the xarray DataArray or Dataset to the desired months."""
times = utils.times.extract_months(
arr[internal_names.TIME_STR], self.months
)
return arr.sel(time=times)
|
python
|
{
"resource": ""
}
|
q4945
|
Calc._add_grid_attributes
|
train
|
def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and (model_attr is not None):
# Force coords to have desired name.
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = ("Values for '{0}' are nearly (but not exactly) "
"the same in the Run {1} and the Model {2}. "
"Therefore replacing Run's values with the "
"model's.".format(name_int, self.run,
self.model))
logging.info(msg)
ds[name_int].values = model_attr.values
else:
msg = ("Model coordinates for '{0}' do not match those"
" in Run: {1} vs. {2}"
"".format(name_int, ds[name_int], model_attr))
logging.info(msg)
else:
# Bring in coord from model object if it exists.
ds = ds.load()
if model_attr is not None:
ds[name_int] = model_attr
ds = ds.set_coords(name_int)
if (self.dtype_in_vert == 'pressure' and
internal_names.PLEVEL_STR in ds.coords):
self.pressure = ds.level
return ds
|
python
|
{
"resource": ""
}
|
q4946
|
Calc._get_input_data
|
train
|
def _get_input_data(self, var, start_date, end_date):
"""Get the data for a single variable over the desired date range."""
logging.info(self._print_verbose("Getting input data:", var))
if isinstance(var, (float, int)):
return var
else:
cond_pfull = ((not hasattr(self, internal_names.PFULL_STR))
and var.def_vert and
self.dtype_in_vert == internal_names.ETA_STR)
data = self.data_loader.recursively_compute_variable(
var, start_date, end_date, self.time_offset, self.model,
**self.data_loader_attrs)
name = data.name
data = self._add_grid_attributes(data.to_dataset(name=data.name))
data = data[name]
if cond_pfull:
try:
self.pfull_coord = data[internal_names.PFULL_STR]
except KeyError:
pass
# Force all data to be at full pressure levels, not half levels.
bool_to_pfull = (self.dtype_in_vert == internal_names.ETA_STR and
var.def_vert == internal_names.PHALF_STR)
if bool_to_pfull:
data = utils.vertcoord.to_pfull_from_phalf(data,
self.pfull_coord)
if var.def_time:
# Restrict to the desired dates within each year.
if self.dtype_in_time != 'av':
return self._to_desired_dates(data)
else:
return data
|
python
|
{
"resource": ""
}
|
q4947
|
Calc._get_all_data
|
train
|
def _get_all_data(self, start_date, end_date):
"""Get the needed data from all of the vars in the calculation."""
return [self._get_input_data(var, start_date, end_date)
for var in _replace_pressure(self.variables,
self.dtype_in_vert)]
|
python
|
{
"resource": ""
}
|
q4948
|
Calc._compute
|
train
|
def _compute(self, data):
"""Perform the calculation."""
local_ts = self._local_ts(*data)
dt = local_ts[internal_names.TIME_WEIGHTS_STR]
# Convert dt to units of days to prevent overflow
dt = dt / np.timedelta64(1, 'D')
return local_ts, dt
|
python
|
{
"resource": ""
}
|
q4949
|
Calc._compute_full_ts
|
train
|
def _compute_full_ts(self, data):
"""Perform calculation and create yearly timeseries at each point."""
# Get results at each desired timestep and spatial point.
full_ts, dt = self._compute(data)
# Vertically integrate.
vert_types = ('vert_int', 'vert_av')
if self.dtype_out_vert in vert_types and self.var.def_vert:
dp = self._get_input_data(_DP_VARS[self.dtype_in_vert],
self.start_date, self.end_date)
full_ts = utils.vertcoord.int_dp_g(full_ts, dp)
if self.dtype_out_vert == 'vert_av':
ps = self._get_input_data(utils.vertcoord.ps,
self.start_date, self.end_date)
full_ts *= (GRAV_EARTH / ps)
return full_ts, dt
|
python
|
{
"resource": ""
}
|
q4950
|
Calc._full_to_yearly_ts
|
train
|
def _full_to_yearly_ts(self, arr, dt):
"""Average the full timeseries within each year."""
time_defined = self.def_time and not ('av' in self.dtype_in_time)
if time_defined:
arr = utils.times.yearly_average(arr, dt)
return arr
|
python
|
{
"resource": ""
}
|
q4951
|
Calc._time_reduce
|
train
|
def _time_reduce(self, arr, reduction):
"""Perform the specified time reduction on a local time-series."""
if self.dtype_in_time == 'av' or not self.def_time:
return arr
reductions = {
'ts': lambda xarr: xarr,
'av': lambda xarr: xarr.mean(internal_names.YEAR_STR),
'std': lambda xarr: xarr.std(internal_names.YEAR_STR),
}
try:
return reductions[reduction](arr)
except KeyError:
raise ValueError("Specified time-reduction method '{}' is not "
"supported".format(reduction))
|
python
|
{
"resource": ""
}
|
q4952
|
Calc.region_calcs
|
train
|
def region_calcs(self, arr, func):
"""Perform a calculation for all regions."""
# Get pressure values for data output on hybrid vertical coordinates.
bool_pfull = (self.def_vert and self.dtype_in_vert ==
internal_names.ETA_STR and self.dtype_out_vert is False)
if bool_pfull:
pfull_data = self._get_input_data(_P_VARS[self.dtype_in_vert],
self.start_date,
self.end_date)
pfull = self._full_to_yearly_ts(
pfull_data, arr[internal_names.TIME_WEIGHTS_STR]
).rename('pressure')
# Loop over the regions, performing the calculation.
reg_dat = {}
for reg in self.region:
# Just pass along the data if averaged already.
if 'av' in self.dtype_in_time:
data_out = reg.ts(arr)
# Otherwise perform the calculation.
else:
method = getattr(reg, func)
data_out = method(arr)
if bool_pfull:
# Don't apply e.g. standard deviation to coordinates.
if func not in ['av', 'ts']:
method = reg.ts
# Convert Pa to hPa
coord = method(pfull) * 1e-2
data_out = data_out.assign_coords(
**{reg.name + '_pressure': coord}
)
reg_dat.update(**{reg.name: data_out})
return xr.Dataset(reg_dat)
|
python
|
{
"resource": ""
}
|
q4953
|
Calc._apply_all_time_reductions
|
train
|
def _apply_all_time_reductions(self, data):
"""Apply all requested time reductions to the data."""
logging.info(self._print_verbose("Applying desired time-"
"reduction methods."))
reduc_specs = [r.split('.') for r in self.dtype_out_time]
reduced = {}
for reduc, specs in zip(self.dtype_out_time, reduc_specs):
func = specs[-1]
if 'reg' in specs:
reduced.update({reduc: self.region_calcs(data, func)})
else:
reduced.update({reduc: self._time_reduce(data, func)})
return OrderedDict(sorted(reduced.items(), key=lambda t: t[0]))
|
python
|
{
"resource": ""
}
|
q4954
|
Calc.compute
|
train
|
def compute(self, write_to_tar=True):
"""Perform all desired calculations on the data and save externally."""
data = self._get_all_data(self.start_date, self.end_date)
logging.info('Computing timeseries for {0} -- '
'{1}.'.format(self.start_date, self.end_date))
full, full_dt = self._compute_full_ts(data)
full_out = self._full_to_yearly_ts(full, full_dt)
reduced = self._apply_all_time_reductions(full_out)
logging.info("Writing desired gridded outputs to disk.")
for dtype_time, data in reduced.items():
data = _add_metadata_as_attrs(data, self.var.units,
self.var.description,
self.dtype_out_vert)
self.save(data, dtype_time, dtype_out_vert=self.dtype_out_vert,
save_files=True, write_to_tar=write_to_tar)
return self
|
python
|
{
"resource": ""
}
|
q4955
|
Calc._save_files
|
train
|
def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
data_out = reg_data
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out})
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT')
|
python
|
{
"resource": ""
}
|
q4956
|
Calc._write_to_tar
|
train
|
def _write_to_tar(self, dtype_out_time):
"""Add the data to the tar file in tar_out_direc."""
# When submitted in parallel and the directory does not exist yet
# multiple processes may try to create a new directory; this leads
# to an OSError for all processes that tried to make the
# directory, but were later than the first.
try:
os.makedirs(self.dir_tar_out)
except OSError:
pass
# tarfile 'append' mode won't overwrite the old file, which we want.
# So open in 'read' mode, extract the file, and then delete it.
# But 'read' mode throws OSError if file doesn't exist: make it first.
utils.io.dmget([self.path_tar_out])
with tarfile.open(self.path_tar_out, 'a') as tar:
pass
with tarfile.open(self.path_tar_out, 'r') as tar:
old_data_path = os.path.join(self.dir_tar_out,
self.file_name[dtype_out_time])
try:
tar.extract(self.file_name[dtype_out_time],
path=old_data_path)
except KeyError:
pass
else:
# The os module treats files on archive as non-empty
# directories, so can't use os.remove or os.rmdir.
shutil.rmtree(old_data_path)
retcode = subprocess.call([
"tar", "--delete", "--file={}".format(self.path_tar_out),
self.file_name[dtype_out_time]
])
if retcode:
msg = ("The 'tar' command to save your aospy output "
"exited with an error. Most likely, this is due "
"to using an old version of 'tar' (especially if "
"you are on a Mac). Consider installing a newer "
"version of 'tar' or disabling tar output by "
"setting `write_to_tar=False` in the "
"`calc_exec_options` argument of "
"`submit_mult_calcs`.")
logging.warn(msg)
with tarfile.open(self.path_tar_out, 'a') as tar:
tar.add(self.path_out[dtype_out_time],
arcname=self.file_name[dtype_out_time])
|
python
|
{
"resource": ""
}
|
q4957
|
Calc._update_data_out
|
train
|
def _update_data_out(self, data, dtype):
"""Append the data of the given dtype_out to the data_out attr."""
try:
self.data_out.update({dtype: data})
except AttributeError:
self.data_out = {dtype: data}
|
python
|
{
"resource": ""
}
|
q4958
|
Calc.save
|
train
|
def save(self, data, dtype_out_time, dtype_out_vert=False,
save_files=True, write_to_tar=False):
"""Save aospy data to data_out attr and to an external file."""
self._update_data_out(data, dtype_out_time)
if save_files:
self._save_files(data, dtype_out_time)
if write_to_tar and self.proj.tar_direc_out:
self._write_to_tar(dtype_out_time)
logging.info('\t{}'.format(self.path_out[dtype_out_time]))
|
python
|
{
"resource": ""
}
|
q4959
|
Calc._load_from_disk
|
train
|
def _load_from_disk(self, dtype_out_time, dtype_out_vert=False,
region=False):
"""Load aospy data saved as netcdf files on the file system."""
ds = xr.open_dataset(self.path_out[dtype_out_time])
if region:
arr = ds[region.name]
# Use region-specific pressure values if available.
if (self.dtype_in_vert == internal_names.ETA_STR
and not dtype_out_vert):
reg_pfull_str = region.name + '_pressure'
arr = arr.drop([r for r in arr.coords.iterkeys()
if r not in (internal_names.PFULL_STR,
reg_pfull_str)])
# Rename pfull to pfull_ref always.
arr = arr.rename({internal_names.PFULL_STR:
internal_names.PFULL_STR + '_ref'})
# Rename region_pfull to pfull if its there.
if hasattr(arr, reg_pfull_str):
return arr.rename({reg_pfull_str:
internal_names.PFULL_STR})
return arr
return arr
return ds[self.name]
|
python
|
{
"resource": ""
}
|
q4960
|
Calc._load_from_tar
|
train
|
def _load_from_tar(self, dtype_out_time, dtype_out_vert=False):
"""Load data save in tarball form on the file system."""
path = os.path.join(self.dir_tar_out, 'data.tar')
utils.io.dmget([path])
with tarfile.open(path, 'r') as data_tar:
ds = xr.open_dataset(
data_tar.extractfile(self.file_name[dtype_out_time])
)
return ds[self.name]
|
python
|
{
"resource": ""
}
|
q4961
|
Calc.load
|
train
|
def load(self, dtype_out_time, dtype_out_vert=False, region=False,
plot_units=False, mask_unphysical=False):
"""Load the data from the object if possible or from disk."""
msg = ("Loading data from disk for object={0}, dtype_out_time={1}, "
"dtype_out_vert={2}, and region="
"{3}".format(self, dtype_out_time, dtype_out_vert, region))
logging.info(msg + ' ({})'.format(ctime()))
# Grab from the object if its there.
try:
data = self.data_out[dtype_out_time]
except (AttributeError, KeyError):
# Otherwise get from disk. Try scratch first, then archive.
try:
data = self._load_from_disk(dtype_out_time, dtype_out_vert,
region=region)
except IOError:
data = self._load_from_tar(dtype_out_time, dtype_out_vert)
# Copy the array to self.data_out for ease of future access.
self._update_data_out(data, dtype_out_time)
# Apply desired plotting/cleanup methods.
if mask_unphysical:
data = self.var.mask_unphysical(data)
if plot_units:
data = self.var.to_plot_units(data, dtype_vert=dtype_out_vert)
return data
|
python
|
{
"resource": ""
}
|
q4962
|
conv_precip_frac
|
train
|
def conv_precip_frac(precip_largescale, precip_convective):
"""Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray
"""
total = total_precip(precip_largescale, precip_convective)
# Mask using xarray's `where` method to prevent divide-by-zero.
return precip_convective / total.where(total)
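
# A minimal sketch of the divide-by-zero guard (assumptions: total_precip
# simply sums its two arguments, and numpy/xarray are imported as np/xr):
# where the total is zero, `total.where(total)` becomes NaN, so the fraction
# is NaN rather than infinite.
example_ls = xr.DataArray(np.array([1.0, 0.0, 2.0]), dims=['x'])
example_conv = xr.DataArray(np.array([1.0, 0.0, 0.0]), dims=['x'])
conv_precip_frac(example_ls, example_conv)
# -> [0.5, nan, 0.0]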
|
python
|
{
"resource": ""
}
|
q4963
|
dumps
|
train
|
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:
encode_func[float] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
return b''.join(r)
|
python
|
{
"resource": ""
}
|
q4964
|
Brain.stop_batch_learning
|
train
|
def stop_batch_learning(self):
"""Finish a series of batch learn operations."""
self._learning = False
self.graph.commit()
self.graph.cursor().execute("PRAGMA journal_mode=truncate")
self.graph.ensure_indexes()
|
python
|
{
"resource": ""
}
|
q4965
|
Brain.learn
|
train
|
def learn(self, text):
"""Learn a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
text = text.decode("utf-8", "ignore")
tokens = self.tokenizer.split(text)
trace("Brain.learn_input_token_count", len(tokens))
self._learn_tokens(tokens)
|
python
|
{
"resource": ""
}
|
q4966
|
Brain._to_graph
|
train
|
def _to_graph(self, contexts):
"""This is an iterator that returns each edge of our graph
with its two nodes"""
prev = None
for context in contexts:
if prev is None:
prev = context
continue
yield prev[0], context[1], context[0]
prev = context
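
# A standalone sketch of the sliding-window pattern above, treating each
# context as a hypothetical (node_id, has_space) pair: consecutive contexts
# become triples of (previous node, current has_space, current node).
example_contexts = [(1, False), (2, True), (3, False)]
list(_to_graph(None, example_contexts))
# -> [(1, True, 2), (2, False, 3)]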
|
python
|
{
"resource": ""
}
|
q4967
|
Brain.reply
|
train
|
def reply(self, text, loop_ms=500, max_len=None):
"""Reply to a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
text = text.decode("utf-8", "ignore")
tokens = self.tokenizer.split(text)
input_ids = map(self.graph.get_token_by_text, tokens)
# filter out unknown words and non-words from the potential pivots
pivot_set = self._filter_pivots(input_ids)
# Conflate the known ids with the stems of their words
if self.stemmer is not None:
self._conflate_stems(pivot_set, tokens)
# If we didn't recognize any word tokens in the input, pick
# something random from the database and babble.
if len(pivot_set) == 0:
pivot_set = self._babble()
score_cache = {}
best_score = -1.0
best_reply = None
# Loop for approximately loop_ms milliseconds. This can either
# take more (if the first reply takes a long time to generate)
# or less (if the _generate_replies search ends early) time,
# but it should stay roughly accurate.
start = time.time()
end = start + loop_ms * 0.001
count = 0
all_replies = []
_start = time.time()
for edges, pivot_node in self._generate_replies(pivot_set):
reply = Reply(self.graph, tokens, input_ids, pivot_node, edges)
if max_len and self._too_long(max_len, reply):
continue
key = reply.edge_ids
if key not in score_cache:
with trace_us("Brain.evaluate_reply_us"):
score = self.scorer.score(reply)
score_cache[key] = score
else:
# skip scoring, we've already seen this reply
score = -1
if score > best_score:
best_reply = reply
best_score = score
# dump all replies to the console if debugging is enabled
if log.isEnabledFor(logging.DEBUG):
all_replies.append((score, reply))
count += 1
if time.time() > end:
break
if best_reply is None:
# we couldn't find any pivot words in _babble(), so we're
# working with an essentially empty brain. Use the classic
# MegaHAL reply:
return "I don't know enough to answer you yet!"
_time = time.time() - _start
self.scorer.end(best_reply)
if log.isEnabledFor(logging.DEBUG):
replies = [(score, reply.to_text())
for score, reply in all_replies]
replies.sort()
for score, text in replies:
log.debug("%f %s", score, text)
trace("Brain.reply_input_token_count", len(tokens))
trace("Brain.known_word_token_count", len(pivot_set))
trace("Brain.reply_us", _time)
trace("Brain.reply_count", count, _time)
trace("Brain.best_reply_score", int(best_score * 1000))
trace("Brain.best_reply_length", len(best_reply.edge_ids))
log.debug("made %d replies (%d unique) in %f seconds"
% (count, len(score_cache), _time))
if len(text) > 60:
msg = text[0:60] + "..."
else:
msg = text
log.info("[%s] %d %f", msg, count, best_score)
# look up the words for these tokens
with trace_us("Brain.reply_words_lookup_us"):
text = best_reply.to_text()
return text
|
python
|
{
"resource": ""
}
|
q4968
|
Brain.init
|
train
|
def init(filename, order=3, tokenizer=None):
"""Initialize a brain. This brain's file must not already exist.
Keyword arguments:
order -- Order of the forward/reverse Markov chains (integer)
tokenizer -- One of Cobe, MegaHAL (default Cobe). See documentation
for cobe.tokenizers for details. (string)"""
log.info("Initializing a cobe brain: %s" % filename)
if tokenizer is None:
tokenizer = "Cobe"
if tokenizer not in ("Cobe", "MegaHAL"):
log.info("Unknown tokenizer: %s. Using CobeTokenizer", tokenizer)
tokenizer = "Cobe"
graph = Graph(sqlite3.connect(filename))
with trace_us("Brain.init_time_us"):
graph.init(order, tokenizer)
|
python
|
{
"resource": ""
}
|
q4969
|
PulseExtStreamRestoreInfo.struct_from_value
|
train
|
def struct_from_value( cls, name, volume,
channel_list=None, mute=False, device=None ):
'Same arguments as with class instance init.'
chan_map = c.PA_CHANNEL_MAP()
if not channel_list: c.pa.channel_map_init_mono(chan_map)
else:
if not is_str(channel_list):
channel_list = b','.join(map(c.force_bytes, channel_list))
c.pa.channel_map_parse(chan_map, channel_list)
if not isinstance(volume, PulseVolumeInfo):
volume = PulseVolumeInfo(volume, chan_map.channels)
struct = c.PA_EXT_STREAM_RESTORE_INFO(
name=c.force_bytes(name),
mute=int(bool(mute)), device=c.force_bytes(device),
channel_map=chan_map, volume=volume.to_struct() )
return struct
|
python
|
{
"resource": ""
}
|
q4970
|
Pulse.connect
|
train
|
def connect(self, autospawn=False, wait=False):
'''Connect to pulseaudio server.
"autospawn" option will start new pulse daemon, if necessary.
Specifying "wait" option will make function block until pulseaudio server appears.'''
if self._loop_closed:
raise PulseError('Eventloop object was already'
' destroyed and cannot be reused from this instance.')
if self.connected is not None: self._ctx_init()
flags, self.connected = 0, None
if not autospawn: flags |= c.PA_CONTEXT_NOAUTOSPAWN
if wait: flags |= c.PA_CONTEXT_NOFAIL
try: c.pa.context_connect(self._ctx, self.server, flags, None)
except c.pa.CallError: self.connected = False
while self.connected is None: self._pulse_iterate()
if self.connected is False: raise PulseError('Failed to connect to pulseaudio server')
|
python
|
{
"resource": ""
}
|
q4971
|
Pulse.stream_restore_delete
|
train
|
def stream_restore_delete(obj_name_or_list):
'''Can be passed string name,
PulseExtStreamRestoreInfo object or a list of any of these.'''
if is_str(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
name_list = list((obj.name if isinstance( obj,
PulseExtStreamRestoreInfo ) else obj) for obj in obj_name_or_list)
name_struct = (c.c_char_p * len(name_list))()
name_struct[:] = list(map(c.force_bytes, name_list))
return [name_struct]
|
python
|
{
"resource": ""
}
|
q4972
|
Pulse.default_set
|
train
|
def default_set(self, obj):
'Set passed sink or source to be used as default one by pulseaudio server.'
assert_pulse_object(obj)
method = {
PulseSinkInfo: self.sink_default_set,
PulseSourceInfo: self.source_default_set }.get(type(obj))
if not method: raise NotImplementedError(type(obj))
method(obj)
|
python
|
{
"resource": ""
}
|
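A short sketch of picking a sink and making it the default via the dispatcher above; choosing the first sink in the list is just for illustration.

# Hedged sketch; assumes at least one sink is present.
import pulsectl

with pulsectl.Pulse('set-default') as pulse:
    sinks = pulse.sink_list()
    if sinks:
        pulse.default_set(sinks[0])  # dispatches to sink_default_set()
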
q4973
|
MLink.update
|
train
|
def update(uid, post_data):
'''
Update the link.
'''
entry = TabLink.update(
name=post_data['name'],
link=post_data['link'],
order=post_data['order'],
logo=post_data['logo'] if 'logo' in post_data else '',
).where(TabLink.uid == uid)
try:
entry.execute()
return True
except:
return False
|
python
|
{
"resource": ""
}
|
q4974
|
MLink.create_link
|
train
|
def create_link(id_link, post_data):
'''
Add a record for the link.
'''
if MLink.get_by_uid(id_link):
return False
try:
the_order = int(post_data['order'])
except:
the_order = 999
TabLink.create(name=post_data['name'],
link=post_data['link'],
order=the_order,
logo=post_data['logo'] if 'logo' in post_data else '',
uid=id_link)
return id_link
|
python
|
{
"resource": ""
}
|
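A hypothetical call sequence for the two MLink records above, based only on the signatures shown; the uid, field values and import path are invented, and the surrounding TorCMS database must already be initialised.

# Hypothetical sketch; 'link0001' and all field values are made up.
# from torcms.model.link_model import MLink   # assumed location of the model
link_data = {'name': 'Example', 'link': 'https://example.com', 'order': '1'}

if MLink.create_link('link0001', link_data):   # returns the uid, or False if it already exists
    link_data['order'] = '2'
    ok = MLink.update('link0001', link_data)   # returns True on success, False on error
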
q4975
|
WikiHandler.recent
|
train
|
def recent(self):
'''
List recent wiki pages.
'''
kwd = {
'pager': '',
'title': 'Recent Pages',
}
self.render('wiki_page/wiki_list.html',
view=MWiki.query_recent(),
format_date=tools.format_date,
kwd=kwd,
userinfo=self.userinfo)
|
python
|
{
"resource": ""
}
|
q4976
|
WikiHandler.view_or_add
|
train
|
def view_or_add(self, title):
'''
Check whether a post with the given title exists.
If it does, show it; otherwise, add it.
'''
postinfo = MWiki.get_by_wiki(title)
if postinfo:
if postinfo.kind == self.kind:
self.view(postinfo)
else:
return False
else:
self.to_add(title)
|
python
|
{
"resource": ""
}
|
q4977
|
WikiHandler.update
|
train
|
def update(self, uid):
'''
Update the wiki.
'''
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MWikiHist.create_wiki_history(postinfo)
MWiki.update(uid, post_data)
# cele_gen_whoosh.delay()
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title'])))
|
python
|
{
"resource": ""
}
|
q4978
|
WikiHandler.view
|
train
|
def view(self, view):
'''
View the wiki.
'''
kwd = {
'pager': '',
'editable': self.editable(),
}
self.render('wiki_page/wiki_view.html',
postinfo=view,
kwd=kwd,
userinfo=self.userinfo)
|
python
|
{
"resource": ""
}
|
q4979
|
FilterHandler.echo_html
|
train
|
def echo_html(self, url_str):
'''
Render the HTML for the filter query (post list or pager).
'''
logger.info('info echo html: {0}'.format(url_str))
condition = self.gen_redis_kw()
url_arr = self.parse_url(url_str)
sig = url_arr[0]
num = (len(url_arr) - 2) // 2
catinfo = MCategory.get_by_uid(sig)
if catinfo.pid == '0000':
condition['def_cat_pid'] = sig
else:
condition['def_cat_uid'] = sig
fenye_num = 1
for idx in range(num):
ckey = url_arr[idx * 2 + 2]
tval = url_arr[idx * 2 + 3]
if tval == '0':
continue
if ckey == 'fenye':
# pagination parameter; handled separately
fenye_num = int(tval)
continue
else:
cval = tval
ckey = 'tag_' + ckey
condition[ckey] = cval
if url_arr[1] == 'con':
infos = MPost.query_list_pager(condition, fenye_num, kind=catinfo.kind)
self.echo_html_list_str(sig, infos)
elif url_arr[1] == 'num':
allinfos = MPost.query_under_condition(condition, kind=catinfo.kind)
self.write(
tornado.escape.xhtml_unescape(
echo_html_fenye_str(
allinfos.count(),
fenye_num
)
)
)
|
python
|
{
"resource": ""
}
|
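A standalone mirror of the condition-building loop above, to show how the URL segments turn into a query condition. How parse_url splits the path is an assumption; the segment list and key names below are illustrative only.

# Hypothetical mirror of the loop above; the input segment list is an example.
def build_condition(url_arr, cat_is_parent=True):
    condition = {}
    condition['def_cat_pid' if cat_is_parent else 'def_cat_uid'] = url_arr[0]
    fenye_num = 1
    num = (len(url_arr) - 2) // 2
    for idx in range(num):
        ckey = url_arr[idx * 2 + 2]
        tval = url_arr[idx * 2 + 3]
        if tval == '0':
            continue                      # '0' means no filter on this key
        if ckey == 'fenye':
            fenye_num = int(tval)         # pagination parameter, handled separately
            continue
        condition['tag_' + ckey] = tval
    return condition, fenye_num

print(build_condition(['0100', 'con', 'fenye', '2', 'brand', 'acme']))
# -> ({'def_cat_pid': '0100', 'tag_brand': 'acme'}, 2)
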
q4980
|
gen_array_crud
|
train
|
def gen_array_crud():
'''
Return the switcher dictionaries built from the XLSX file.
If the value of a column in a row is `1`, that column's switcher is added to the array.
'''
if WORK_BOOK:
pass
else:
return False
papa_id = 0
switch_dics = {}
kind_dics = {}
for work_sheet in WORK_BOOK:
kind_sig = str(work_sheet['A1'].value).strip()
# the number of categories on a website won't be greater than 1000.
for row_num in range(3, 1000):
# parent category, column A
a_cell_value = work_sheet['A{0}'.format(row_num)].value
# child category, column B
b_cell_val = work_sheet['B{0}'.format(row_num)].value
if a_cell_value or b_cell_val:
pass
else:
break
if a_cell_value and a_cell_value != '':
papa_id = a_cell_value.strip()[1:]
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}00'.format(papa_id)] = u_dic
kind_dics['kind_{0}00'.format(papa_id)] = kind_sig
if b_cell_val and b_cell_val != '':
sun_id = b_cell_val.strip()[1:]
if len(sun_id) == 4:
app_uid = sun_id
else:
app_uid = '{0}{1}'.format(papa_id, sun_id)
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}'.format(app_uid)] = u_dic
kind_dics['kind_{0}'.format(app_uid)] = kind_sig
return (switch_dics, kind_dics)
|
python
|
{
"resource": ""
}
|
q4981
|
__get_switch_arr
|
train
|
def __get_switch_arr(work_sheet, row_num):
'''
If the value of a column in the row is `1`, that column's switcher is added to the array.
'''
u_dic = []
for col_idx in FILTER_COLUMNS:
cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value
if cell_val in [1, '1']:
# Appending the slug name of the switcher.
u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0])
return u_dic
|
python
|
{
"resource": ""
}
|
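A self-contained sketch of the column scan above on an in-memory worksheet. FILTER_COLUMNS and the 'slug,label' header convention in row 1 are assumptions inferred from the code, not taken from the real spreadsheet.

# Hedged sketch; requires openpyxl. Column letters and header text are assumptions.
from openpyxl import Workbook

FILTER_COLUMNS = ['C', 'D']

wb = Workbook()
ws = wb.active
ws['C1'], ws['D1'] = 'tag_price,Price', 'tag_brand,Brand'   # row 1: slug,label headers
ws['C3'], ws['D3'] = 1, 0                                   # row 3: only 'price' enabled

def get_switch_arr(work_sheet, row_num):
    u_dic = []
    for col_idx in FILTER_COLUMNS:
        if work_sheet['{0}{1}'.format(col_idx, row_num)].value in [1, '1']:
            u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0])
    return u_dic

print(get_switch_arr(ws, 3))   # -> ['tag_price']
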
q4982
|
MUsage.add_or_update
|
train
|
def add_or_update(user_id, post_id, kind):
'''
Create the record if new, else update it.
'''
rec = MUsage.query_by_signature(user_id, post_id)
cate_rec = MInfor2Catalog.get_first_category(post_id)
if cate_rec:
cat_id = cate_rec.tag_id
else:
return False
if rec.count() > 0:
logger.info('Usage update: {uid}'.format(uid=post_id))
rec = rec.get()
query = TabUsage.update(kind=kind).where(TabUsage.uid == rec.uid)
query.execute()
MUsage.count_increate(rec.uid, cat_id, rec.count)
else:
logger.info('Usage create: {uid}'.format(uid=post_id))
TabUsage.create(
uid=tools.get_uuid(),
post_id=post_id,
user_id=user_id,
count=1,
tag_id=cat_id,
timestamp=int(time.time()),
kind=kind,
)
|
python
|
{
"resource": ""
}
|
q4983
|
run_send_all
|
train
|
def run_send_all(*args):
'''
Send email to all user.
'''
for user_rec in MUser.query_all():
email_add = user_rec.user_email
send_mail([email_add],
"{0}|{1}".format(SMTP_CFG['name'], email_cfg['title']),
email_cfg['content'])
|
python
|
{
"resource": ""
}
|
q4984
|
run_send_nologin
|
train
|
def run_send_nologin(*args):
'''
Send email to users who have not logged in recently.
'''
for user_rec in MUser.query_nologin():
email_add = user_rec.user_email
print(email_add)
send_mail([email_add],
"{0}|{1}".format(SMTP_CFG['name'], email_cfg['title']),
email_cfg['content'])
MUser.set_sendemail_time(user_rec.uid)
|
python
|
{
"resource": ""
}
|
q4985
|
gen_xlsx_category
|
train
|
def gen_xlsx_category():
'''
Generate categories from the XLSX file.
'''
if os.path.exists(XLSX_FILE):
pass
else:
return
# ordering index within the categories
order_index = 1
all_cate_arr = []
for sheet_ranges in load_workbook(filename=XLSX_FILE):
kind_sig = str(sheet_ranges['A1'].value).strip()
for row_num in range(3, 10000):
# parent category
a_cell_val = sheet_ranges['A{0}'.format(row_num)].value
b_cell_val = sheet_ranges['B{0}'.format(row_num)].value
c_cell_val = sheet_ranges['C{0}'.format(row_num)].value
if a_cell_val or b_cell_val or c_cell_val:
pass
else:
break
if a_cell_val and a_cell_val != '':
cell_arr = a_cell_val.strip()
p_uid = cell_arr[1:]  # all ids start with 't', so strip the first character
t_slug = sheet_ranges['C{0}'.format(row_num)].value.strip()
t_title = sheet_ranges['D{0}'.format(row_num)].value.strip()
u_uid = p_uid + (4 - len(p_uid)) * '0'
pp_uid = '0000'
elif b_cell_val and b_cell_val != '':
cell_arr = b_cell_val
c_iud = cell_arr[1:]
t_slug = sheet_ranges['C{0}'.format(row_num)].value.strip()
t_title = sheet_ranges['D{0}'.format(row_num)].value.strip()
if len(c_iud) == 4:
u_uid = c_iud
else:
u_uid = '{0}{1}'.format(p_uid, c_iud)
pp_uid = p_uid + (4 - len(p_uid)) * '0'
else:
continue
post_data = {
'name': t_title,
'slug': t_slug,
'order': order_index,
'uid': u_uid,
'pid': pp_uid,
'kind': kind_sig,
}
all_cate_arr.append(post_data)
MCategory.add_or_update(u_uid, post_data)
order_index += 1
return all_cate_arr
|
python
|
{
"resource": ""
}
|
q4986
|
gen_category
|
train
|
def gen_category(yaml_file, sig):
'''
Generate categories from a YAML file.
'''
out_dic = yaml.load(open(yaml_file))
for key in out_dic:
if key.endswith('00'):
uid = key[1:]
cur_dic = out_dic[key]
porder = cur_dic['order']
cat_dic = {
'uid': uid,
'slug': cur_dic['slug'],
'name': cur_dic['name'],
'count': 0,
'tmpl': 1,
'pid': '0000',
'order': porder * 100,
'kind': '{0}'.format(sig),
}
MCategory.add_or_update(uid, cat_dic)
else:
sub_arr = out_dic[key]
pid = key[1:3]
for sub_dic in sub_arr:
porder = out_dic['z' + pid + '00']['order']
for key2 in sub_dic:
uid = key2[1:]
cur_dic = sub_dic[key2]
sorder = cur_dic['order']
cat_dic = {
'uid': uid,
'slug': cur_dic['slug'],
'name': cur_dic['name'],
'count': 0,
'tmpl': 1,
'pid': pid + '00',
'order': porder * 100 + sorder,
'kind': '{0}'.format(sig),
}
MCategory.add_or_update(pid + uid, cat_dic)
|
python
|
{
"resource": ""
}
|
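A hedged sketch of the YAML shape gen_category appears to expect, written as a Python string so it stays runnable. Key names, slugs and titles are inferred from the parsing logic only and may not match the project's real meta files.

# Hedged sketch; the document structure is inferred from the loop above.
import yaml

sample = """
z0100:                # parent category -> uid '0100'
  order: 1
  slug: tech
  name: Technology
z01:                  # children of parent '01'
  - z01:              # sub id appended to the parent id -> uid '0101'
      order: 1
      slug: python
      name: Python
"""
out_dic = yaml.safe_load(sample)
print(sorted(out_dic))            # ['z01', 'z0100']
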
q4987
|
gen_yaml_category
|
train
|
def gen_yaml_category():
'''
Find YAML files and generate categories from each of them.
'''
for wroot, _, wfiles in os.walk('./database/meta'):
for wfile in wfiles:
if wfile.endswith('.yaml'):
gen_category(os.path.join(wroot, wfile), wfile[0])
|
python
|
{
"resource": ""
}
|
q4988
|
get_cfg
|
train
|
def get_cfg():
'''
Get the configuration values.
'''
cfg_var = dir(cfg)
if 'DB_CFG' in cfg_var:
db_cfg = cfg.DB_CFG
else:
db_cfg = ConfigDefault.DB_CFG
if 'SMTP_CFG' in cfg_var:
smtp_cfg = cfg.SMTP_CFG
else:
smtp_cfg = ConfigDefault.SMTP_CFG
if 'SITE_CFG' in cfg_var:
site_cfg = cfg.SITE_CFG
else:
site_cfg = ConfigDefault.SITE_CFG
if 'ROLE_CFG' in cfg_var:
role_cfg = cfg.ROLE_CFG
else:
role_cfg = ConfigDefault.ROLE_CFG
role_cfg['view'] = role_cfg.get('view', '')
role_cfg['add'] = role_cfg.get('add', '1000')
role_cfg['edit'] = role_cfg.get('edit', '2000')
role_cfg['delete'] = role_cfg.get('delete', '3000')
role_cfg['admin'] = role_cfg.get('admin', '0300')
###################################################################
site_url = site_cfg['site_url'].strip('/')
site_cfg['site_url'] = site_url
infor = site_url.split(':')
if len(infor) == 1:
site_cfg['PORT'] = 8888
else:
site_cfg['PORT'] = infor[-1]
if 'DEBUG' in site_cfg:
pass
else:
site_cfg['DEBUG'] = False
db_con = PostgresqlExtDatabase(
db_cfg['db'],
user=db_cfg.get('user', db_cfg['db']),
password=db_cfg['pass'],
host='127.0.0.1',
port=db_cfg.get('port', '5432'),
autocommit=True,
autorollback=True)
return (db_con, smtp_cfg, site_cfg, role_cfg)
|
python
|
{
"resource": ""
}
|
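A hypothetical minimal cfg module that get_cfg() would pick up. Every value is a placeholder; any dict left out falls back to ConfigDefault, and missing ROLE_CFG keys get the per-action defaults filled in above.

# Hypothetical cfg.py sketch; all values are placeholders.
DB_CFG = {'db': 'torcms_db', 'user': 'torcms_user', 'pass': 'change-me', 'port': '5432'}
SMTP_CFG = {'name': 'MySite'}                       # only 'name' is used in this section
SITE_CFG = {'site_url': 'http://127.0.0.1:8888', 'DEBUG': True}
ROLE_CFG = {}                                       # defaults: view '', add '1000', edit '2000', ...
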
q4989
|
PageHandler.view_or_add
|
train
|
def view_or_add(self, slug):
'''
When accessed by slug, view the page; add it if there is no record in the database.
'''
rec_page = MWiki.get_by_uid(slug)
if rec_page:
if rec_page.kind == self.kind:
self.view(rec_page)
else:
return False
else:
self.to_add(slug)
|
python
|
{
"resource": ""
}
|
q4990
|
PageHandler.to_add
|
train
|
def to_add(self, citiao):
'''
Render the form for adding a page.
'''
kwd = {
'cats': MCategory.query_all(),
'slug': citiao,
'pager': '',
}
self.render('wiki_page/page_add.html',
kwd=kwd,
userinfo=self.userinfo)
|
python
|
{
"resource": ""
}
|
q4991
|
PageHandler.__could_edit
|
train
|
def __could_edit(self, slug):
'''
Test if the user could edit the page.
'''
page_rec = MWiki.get_by_uid(slug)
if not page_rec:
return False
if self.check_post_role()['EDIT']:
return True
elif page_rec.user_name == self.userinfo.user_name:
return True
else:
return False
|
python
|
{
"resource": ""
}
|
q4992
|
PageHandler.update
|
train
|
def update(self, slug):
'''
Update the page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
pageinfo = MWiki.get_by_uid(slug)
cnt_old = tornado.escape.xhtml_unescape(pageinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MWikiHist.create_wiki_history(MWiki.get_by_uid(slug))
MWiki.update(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(post_data['slug']))
|
python
|
{
"resource": ""
}
|
q4993
|
PageHandler.to_modify
|
train
|
def to_modify(self, uid):
'''
Try to modify the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_edit.html',
postinfo=MWiki.get_by_uid(uid),
kwd=kwd,
cfg=CMS_CFG,
userinfo=self.userinfo)
|
python
|
{
"resource": ""
}
|
q4994
|
PageHandler.view
|
train
|
def view(self, rec):
'''
View the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_view.html',
postinfo=rec,
kwd=kwd,
author=rec.user_name,
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG)
|
python
|
{
"resource": ""
}
|
q4995
|
PageHandler.ajax_count_plus
|
train
|
def ajax_count_plus(self, slug):
'''
Increase the page view count by one via AJAX.
'''
output = {
'status': 1 if MWiki.view_count_plus(slug) else 0,
}
return json.dump(output, self)
|
python
|
{
"resource": ""
}
|
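A standalone illustration of why json.dump(output, self) works in the handler above: json.dump only needs a .write() method on its second argument, which Tornado's RequestHandler provides. The FakeHandler here is a stand-in, not part of the project.

# Hedged illustration with a stand-in handler object.
import json

class FakeHandler:
    def __init__(self):
        self.chunks = []
    def write(self, chunk):            # same duck-typed contract as RequestHandler.write
        self.chunks.append(chunk)

handler = FakeHandler()
json.dump({'status': 1}, handler)
print(''.join(handler.chunks))         # {"status": 1}
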
q4996
|
PageHandler.add_page
|
train
|
def add_page(self, slug):
'''
Add a new page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
if MWiki.get_by_uid(slug):
self.set_status(400)
return False
else:
MWiki.create_page(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(slug))
|
python
|
{
"resource": ""
}
|
q4997
|
MPostHist.update_cnt
|
train
|
def update_cnt(uid, post_data):
'''
Update the content by ID.
'''
entry = TabPostHist.update(
user_name=post_data['user_name'],
cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md']),
time_update=tools.timestamp(),
).where(TabPostHist.uid == uid)
entry.execute()
|
python
|
{
"resource": ""
}
|
q4998
|
MPostHist.query_by_postid
|
train
|
def query_by_postid(postid, limit=5):
'''
Query the history records of a certain post.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
).order_by(
TabPostHist.time_update.desc()
).limit(limit)
return recs
|
python
|
{
"resource": ""
}
|
q4999
|
MPostHist.get_last
|
train
|
def get_last(postid, limit=10):
'''
Get the most recent history record of the post.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
).order_by(TabPostHist.time_update.desc()).limit(limit)
if recs.count():
return recs.get()
return None
|
python
|
{
"resource": ""
}
|
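A hypothetical read path for the two history queries above; the post uid and import path are invented, and the TorCMS database must already contain history rows.

# Hypothetical sketch; 'post-uid-0001' is made up.
# from torcms.model.post_hist_model import MPostHist   # assumed location of the model
for rec in MPostHist.query_by_postid('post-uid-0001', limit=3):
    print(rec.user_name, rec.time_update)

latest = MPostHist.get_last('post-uid-0001')
if latest:
    print(latest.cnt_md[:40])
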