_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def calc_area_extent(self, key):
    """Calculate area extent for a dataset.

    Derives the projection-coordinate extent of the loaded swath from the
    start/end row metadata stored in the file, the channel resolution and
    a fixed full-disk y-range.

    Args:
        key: Dataset ID; ``key.resolution`` selects the full-disk pixel
            count and ``key.name`` selects the NetCDF group to read.

    Returns:
        tuple: Extent ordered as (min_x, y at end line, max_x, y at start
        line). Also stores start/end line/column and the array shape on
        ``self`` as a side effect.
    """
    # Calculate the area extent of the swath based on start line and column
    # information, total number of segments and channel resolution
    # Full-disk pixel count per channel resolution (metres).
    xyres = {500: 22272, 1000: 11136, 2000: 5568}
    chkres = xyres[key.resolution]
    # Get metadata for given dataset
    measured = self.nc['/data/{}/measured'.format(key.name)]
    variable = self.nc['/data/{}/measured/effective_radiance'
                       .format(key.name)]
    # Get start/end line and column of loaded swath.
    self.startline = int(measured['start_position_row'][...])
    self.endline = int(measured['end_position_row'][...])
    self.startcol = int(measured['start_position_column'][...])
    self.endcol = int(measured['end_position_column'][...])
    self.nlines, self.ncols = variable[:].shape
    logger.debug('Channel {} resolution: {}'.format(key.name, chkres))
    logger.debug('Row/Cols: {} / {}'.format(self.nlines, self.ncols))
    logger.debug('Start/End row: {} / {}'.format(self.startline, self.endline))
    logger.debug('Start/End col: {} / {}'.format(self.startcol, self.endcol))
    # total_segments = 70
    # Calculate full globe line extent
    # NOTE(review): the asymmetric min/max y values look intentional but are
    # undocumented -- confirm against the FCI geolocation definition.
    max_y = 5432229.9317116784
    min_y = -5429229.5285458621
    full_y = max_y + abs(min_y)
    # Single swath line extent
    res_y = full_y / chkres  # Extent per pixel resolution
    # Half-pixel shifts put the extent on pixel edges rather than centres.
    startl = min_y + res_y * self.startline - 0.5 * (res_y)
    endl = min_y + res_y * self.endline + 0.5 * (res_y)
    logger.debug('Start / end extent: {} / {}'.format(startl, endl))
    chk_extent = (-5432229.9317116784, endl,
                  5429229.5285458621, startl)
    return(chk_extent)
def calibrate(self, data, key):
    """Return *data* unchanged; calibration is currently disabled.

    The requested calibration (``key.calibration``) is recognised but not
    applied: both the IR and VIS calibration paths are stubbed out.
    """
    logger.warning('Calibration disabled!')
    requested = key.calibration
    if requested == 'brightness_temperature':
        # self._ir_calibrate(data, key)
        pass
    elif requested == 'reflectance':
        # self._vis_calibrate(data, key)
        pass
    return data
def _ir_calibrate(self, data, key):
    """IR channel calibration.

    Converts radiances to brightness temperatures **in place** using the
    radiance-to-BT coefficients stored in the file (inverse Planck
    function with band-correction coefficients a and b).
    """
    # Not sure if Lv is correct, FCI User Guide is a bit unclear
    # Scale the counts/radiance to the units expected by the conversion.
    Lv = data.data * \
        self.nc[
            '/data/{}/measured/radiance_unit_conversion_coefficient'
            .format(key.name)][...]
    # Central wavelength/wavenumber of the channel as stored in the file.
    vc = self.nc['/data/{}/central_wavelength_actual'
                 .format(key.name)][...]
    # Band-correction coefficients; the third value is unused here.
    a, b, dummy = self.nc[
        '/data/{}/measured/radiance_to_bt_conversion_coefficients'
        .format(key.name)][...]
    # Planck constants c1 and c2 for this channel.
    c1, c2 = self.nc[
        '/data/{}/measured/radiance_to_bt_conversion_constants'
        .format(key.name)][...]
    # Inverse Planck: T = c2*vc / (a * ln(1 + c1*vc^3 / Lv)) - b/a
    nom = c2 * vc
    denom = a * np.log(1 + (c1 * vc**3) / Lv)
    data.data[:] = nom / denom - b / a
def _vis_calibrate(self, data, key):
    """VIS channel calibration: convert radiance to reflectance (percent), in place.

    Follows the radiance-to-reflectance approach used in mipp/xrit/MSG.py;
    the FCI User Guide is not explicit about this conversion.
    """
    # Effective solar irradiance for this channel, from file metadata.
    sirr = self.nc[
        '/data/{}/measured/channel_effective_solar_irradiance'
        .format(key.name)][...]
    # reflectance = radiance / sirr * 100
    data.data[:] = data.data[:] / sirr * 100
def get_xritdecompress_cmd():
    """Find a valid binary for the xRITDecompress command.

    The path is taken from the ``XRIT_DECOMPRESS_PATH`` environment
    variable and validated before being returned.

    Returns:
        str: Path to the xRITDecompress executable.

    Raises:
        IOError: If the variable is unset, points to a non-existent path,
            or points to a directory.
    """
    cmd = os.environ.get('XRIT_DECOMPRESS_PATH', None)
    if not cmd:
        raise IOError("XRIT_DECOMPRESS_PATH is not defined (complete path to xRITDecompress)")

    question = ("Did you set the environment variable XRIT_DECOMPRESS_PATH correctly?")
    if not os.path.exists(cmd):
        raise IOError(str(cmd) + " does not exist!\n" + question)
    if os.path.isdir(cmd):
        raise IOError(str(cmd) + " is a directory!\n" + question)
    return cmd
def get_xritdecompress_outfile(stdout):
    """Analyse the output of the xRITDecompress command call and return the file.

    Scans *stdout* (an iterable of ``bytes`` lines) for a
    ``Decompressed file: <name>`` entry.

    Returns:
        bytes: The decompressed file name, or ``b''`` if none was reported.
    """
    for line in stdout:
        parts = line.split(b':', 1)
        if len(parts) != 2:
            # A line without a key/value separator ends the header listing.
            break
        key, value = (part.strip() for part in parts)
        if key == b'Decompressed file':
            return value
    return b''
def decompress(infile, outdir='.'):
    """Decompress an XRIT data file and return the path to the decompressed file.

    It expects to find Eumetsat's xRITDecompress through the environment
    variable XRIT_DECOMPRESS_PATH.

    Args:
        infile (str): Path to the compressed XRIT file.
        outdir (str): Directory in which the decompressed file is written
            (xRITDecompress writes into the current working directory).

    Returns:
        str: Path to the decompressed file inside *outdir*.

    Raises:
        IOError: If the tool exits with a non-zero status or reports no
            output file.
    """
    cmd = get_xritdecompress_cmd()
    infile = os.path.abspath(infile)
    cwd = os.getcwd()
    os.chdir(outdir)
    try:
        p = Popen([cmd, infile], stdout=PIPE)
        stdout = BytesIO(p.communicate()[0])
        status = p.returncode
    finally:
        # Restore the working directory even when Popen/communicate raises,
        # otherwise a failure would leave the whole process running in
        # *outdir* (the original code skipped this on exceptions).
        os.chdir(cwd)

    if status != 0:
        raise IOError("xrit_decompress '%s', failed, status=%d" % (infile, status))

    outfile = get_xritdecompress_outfile(stdout)
    if not outfile:
        raise IOError("xrit_decompress '%s', failed, no output file is generated" % infile)

    return os.path.join(outdir, outfile.decode('utf-8'))
def _get_hd(self, hdr_info):
    """Open the file, read the header records and fill the ``self.mda``
    metadata dictionary.

    Args:
        hdr_info: Tuple of (hdr_map, variable_length_headers, text_headers)
            mapping header record ids to numpy dtypes and describing how
            variable-length and text records are keyed in ``self.mda``.
    """
    hdr_map, variable_length_headers, text_headers = hdr_info

    # The HRIT header is raw binary: open in binary mode so np.fromfile
    # reads bytes directly and fp.tell() returns true byte offsets. In
    # text mode tell() returns an opaque cookie, which would break the
    # `fp.tell() < total_header_length` loop condition below.
    with open(self.filename, 'rb') as fp:
        total_header_length = 16
        while fp.tell() < total_header_length:
            # Every record starts with a common (id, record_length) header.
            hdr_id = np.fromfile(fp, dtype=common_hdr, count=1)[0]
            the_type = hdr_map[hdr_id['hdr_id']]
            if the_type in variable_length_headers:
                # Variable number of fixed-size items; the 3 bytes of the
                # common header are part of record_length.
                field_length = int((hdr_id['record_length'] - 3) /
                                   the_type.itemsize)
                current_hdr = np.fromfile(fp,
                                          dtype=the_type,
                                          count=field_length)
                key = variable_length_headers[the_type]
                if key in self.mda:
                    # Repeated records accumulate into a list.
                    if not isinstance(self.mda[key], list):
                        self.mda[key] = [self.mda[key]]
                    self.mda[key].append(current_hdr)
                else:
                    self.mda[key] = current_hdr
            elif the_type in text_headers:
                # Text record: build a string dtype of the right length
                # from the declared character type.
                field_length = int((hdr_id['record_length'] - 3) /
                                   the_type.itemsize)
                char = list(the_type.fields.values())[0][0].char
                new_type = np.dtype(char + str(field_length))
                current_hdr = np.fromfile(fp,
                                          dtype=new_type,
                                          count=1)[0]
                self.mda[text_headers[the_type]] = current_hdr
            else:
                # Fixed-layout record: merge its named fields into mda.
                current_hdr = np.fromfile(fp,
                                          dtype=the_type,
                                          count=1)[0]
                self.mda.update(
                    dict(zip(current_hdr.dtype.names, current_hdr)))

            # The primary record provides the real total header length.
            total_header_length = self.mda['total_header_length']

    self.mda.setdefault('number_of_bits_per_pixel', 10)

    self.mda['projection_parameters'] = {'a': 6378169.00,
                                         'b': 6356583.80,
                                         'h': 35785831.00,
                                         # FIXME: find a reasonable SSP
                                         'SSP_longitude': 0.0}
    self.mda['navigation_parameters'] = {}
def get_xy_from_linecol(self, line, col, offsets, factors):
    """Get the intermediate coordinates from line & col.

    Intermediate coordinates are actually the instruments scanning angles.

    Args:
        line, col: Image line and column numbers.
        offsets: (line offset, column offset) pair.
        factors: (line factor, column factor) pair.

    Returns:
        tuple: (x, y) intermediate coordinates.
    """
    line_offset, col_offset = offsets
    line_factor, col_factor = factors
    x__ = (col - col_offset) / col_factor * 2**16
    y__ = (line - line_offset) / line_factor * 2**16
    return x__, y__
q26909 | _wl_dist | train | def _wl_dist(wl_a, wl_b):
"""Return the distance between two requested wavelengths."""
if isinstance(wl_a, tuple):
# central wavelength
wl_a = wl_a[1]
if isinstance(wl_b, tuple):
wl_b = wl_b[1]
if wl_a is None or wl_b is None:
return 1000.
return abs(wl_a - wl_b) | python | {
"resource": ""
} |
def get_best_dataset_key(key, choices):
    """Choose the "best" `DatasetID` from `choices` based on `key`.

    The best key is chosen based on the follow criteria:

    1. Central wavelength is nearest to the `key` wavelength if
       specified.
    2. Least modified dataset if `modifiers` is `None` in `key`.
       Otherwise, the modifiers are ignored.
    3. Highest calibration if `calibration` is `None` in `key`.
       Calibration priority is chosen by `satpy.CALIBRATION_ORDER`.
    4. Best resolution (smallest number) if `resolution` is `None`
       in `key`. Otherwise, the resolution is ignored.

    This function assumes `choices` has already been filtered to only
    include datasets that match the provided `key`.

    Args:
        key (DatasetID): Query parameters to sort `choices` by.
        choices (iterable): `DatasetID` objects to sort through to determine
            the best dataset.

    Returns: List of best `DatasetID`s from `choices`. If there is more
        than one element this function could not choose between the
        available datasets.
    """
    # Choose the wavelength closest to the choice
    if key.wavelength is not None and choices:
        # find the dataset with a central wavelength nearest to the
        # requested wavelength
        nearest_wl = min([_wl_dist(key.wavelength, x.wavelength)
                          for x in choices if x.wavelength is not None])
        choices = [c for c in choices
                   if _wl_dist(key.wavelength, c.wavelength) == nearest_wl]
    if key.modifiers is None and choices:
        # prefer the least-modified candidates
        num_modifiers = min(len(x.modifiers or tuple()) for x in choices)
        choices = [c for c in choices if len(
            c.modifiers or tuple()) == num_modifiers]
    if key.calibration is None and choices:
        # lowest CALIBRATION_ORDER value == highest-priority calibration
        best_cal = [x.calibration for x in choices if x.calibration]
        if best_cal:
            best_cal = min(best_cal, key=lambda x: CALIBRATION_ORDER[x])
            choices = [c for c in choices if c.calibration == best_cal]
    if key.resolution is None and choices:
        # finest resolution is the smallest number
        low_res = [x.resolution for x in choices if x.resolution]
        if low_res:
            low_res = min(low_res)
            choices = [c for c in choices if c.resolution == low_res]
    if key.level is None and choices:
        # NOTE: unlike resolution, levels prefer the *largest* value
        low_level = [x.level for x in choices if x.level]
        if low_level:
            low_level = max(low_level)
            choices = [c for c in choices if c.level == low_level]

    return choices
def filter_keys_by_dataset_id(did, key_container):
    """Filer provided key iterable by the provided `DatasetID`.

    Note: The `modifiers` attribute of `did` should be `None` to allow for
    **any** modifier in the results.

    Args:
        did (DatasetID): Query parameters to match in the `key_container`.
        key_container (iterable): Set, list, tuple, or dict of `DatasetID`
            keys.

    Returns (list): List of keys matching the provided parameters in no
        specific order.
    """
    # NOTE(review): `keys` starts as an iterator; if every field of `did`
    # is None no filtering occurs and the iterator itself is returned,
    # which can only be consumed once -- confirm callers iterate just once.
    keys = iter(key_container)

    for key in DATASET_KEYS:
        if getattr(did, key) is not None:
            if key == "wavelength":
                # Wavelengths match on (min, central, max) ranges, not
                # strict equality.
                keys = [k for k in keys
                        if (getattr(k, key) is not None and
                            DatasetID.wavelength_match(getattr(k, key),
                                                       getattr(did, key)))]
            else:
                keys = [k for k in keys
                        if getattr(k, key) is not None and getattr(k, key)
                        == getattr(did, key)]

    return keys
def get_key(key, key_container, num_results=1, best=True,
            resolution=None, calibration=None, polarization=None,
            level=None, modifiers=None):
    """Get the fully-specified key best matching the provided key.

    Only the best match is returned if `best` is `True` (default). See
    `get_best_dataset_key` for more information on how this is determined.

    The `resolution` and other identifier keywords are provided as a
    convenience to filter by multiple parameters at once without having
    to filter by multiple `key` inputs.

    Args:
        key (DatasetID): DatasetID of query parameters to use for
                         searching. Any parameter that is `None`
                         is considered a wild card and any match is
                         accepted.
        key_container (dict or set): Container of DatasetID objects that
                                     uses hashing to quickly access items.
        num_results (int): Number of results to return. Use `0` for all
                           matching results. If `1` then the single matching
                           key is returned instead of a list of length 1.
                           (default: 1)
        best (bool): Sort results to get "best" result first
                     (default: True). See `get_best_dataset_key` for details.
        resolution (float, int, or list): Resolution of the dataset in
                                          dataset units (typically
                                          meters). This can also be a
                                          list of these numbers.
        calibration (str or list): Dataset calibration
                                   (ex.'reflectance'). This can also be a
                                   list of these strings.
        polarization (str or list): Dataset polarization
                                    (ex.'V'). This can also be a
                                    list of these strings.
        level (number or list): Dataset level (ex. 100). This can also be a
                                list of these numbers.
        modifiers (list): Modifiers applied to the dataset. Unlike
                          resolution and calibration this is the exact
                          desired list of modifiers for one dataset, not
                          a list of possible modifiers.

    Returns (list or DatasetID): Matching key(s)

    Raises: KeyError if no matching results or if more than one result is
            found when `num_results` is `1`.
    """
    # Normalize bare numbers/strings into wildcard DatasetID queries.
    if isinstance(key, numbers.Number):
        # we want this ID to act as a query so we set modifiers to None
        # meaning "we don't care how many modifiers it has".
        key = DatasetID(wavelength=key, modifiers=None)
    elif isinstance(key, (str, six.text_type)):
        # ID should act as a query (see wl comment above)
        key = DatasetID(name=key, modifiers=None)
    elif not isinstance(key, DatasetID):
        raise ValueError("Expected 'DatasetID', str, or number dict key, "
                         "not {}".format(str(type(key))))

    res = filter_keys_by_dataset_id(key, key_container)

    # further filter by other parameters
    if resolution is not None:
        if not isinstance(resolution, (list, tuple)):
            resolution = (resolution, )
        res = [k for k in res
               if k.resolution is not None and k.resolution in resolution]
    if polarization is not None:
        if not isinstance(polarization, (list, tuple)):
            polarization = (polarization, )
        res = [k for k in res
               if k.polarization is not None and k.polarization in
               polarization]
    if calibration is not None:
        if not isinstance(calibration, (list, tuple)):
            calibration = (calibration, )
        res = [k for k in res
               if k.calibration is not None and k.calibration in calibration]
    if level is not None:
        if not isinstance(level, (list, tuple)):
            level = (level, )
        res = [k for k in res
               if k.level is not None and k.level in level]
    if modifiers is not None:
        # exact match on the modifier tuple, not membership
        res = [k for k in res
               if k.modifiers is not None and k.modifiers == modifiers]

    if best:
        res = get_best_dataset_key(key, res)

    if num_results == 1 and not res:
        raise KeyError("No dataset matching '{}' found".format(str(key)))
    elif num_results == 1 and len(res) != 1:
        raise TooManyResults("No unique dataset matching {}".format(str(key)))
    elif num_results == 1:
        return res[0]
    elif num_results == 0:
        return res
    else:
        return res[:num_results]
def group_files(files_to_sort, reader=None, time_threshold=10,
                group_keys=None, ppp_config_dir=None, reader_kwargs=None):
    """Group series of files by file pattern information.

    By default this will group files by their filename ``start_time``
    assuming it exists in the pattern. By passing the individual
    dictionaries returned by this function to the Scene classes'
    ``filenames``, a series `Scene` objects can be easily created.

    .. versionadded:: 0.12

    Args:
        files_to_sort (iterable): File paths to sort in to group
        reader (str): Reader whose file patterns should be used to sort
            files.
        time_threshold (int): Number of seconds used to consider time elements
            in a group as being equal. For example, if the 'start_time' item
            is used to group files then any time within `time_threshold`
            seconds of the first file's 'start_time' will be seen as occurring
            at the same time.
        group_keys (list or tuple): File pattern information to use to group
            files. Keys are sorted in order and only the first key is used when
            comparing datetime elements with `time_threshold` (see above). This
            means it is recommended that datetime values should only come from
            the first key in ``group_keys``. Otherwise, there is a good chance
            that files will not be grouped properly (datetimes being barely
            unequal). Defaults to a reader's ``group_keys`` configuration (set
            in YAML), otherwise ``('start_time',)``.
        ppp_config_dir (str): Root user configuration directory for Satpy.
            This will be deprecated in the future, but is here for consistency
            with other Satpy features.
        reader_kwargs (dict): Additional keyword arguments to pass to reader
            creation.

    Returns:
        List of dictionaries mapping 'reader' to a list of filenames.
        Each of these dictionaries can be passed as ``filenames`` to
        a `Scene` object.
    """
    # FUTURE: Find the best reader for each filename using `find_files_and_readers`
    if reader is None:
        raise ValueError("'reader' keyword argument is required.")
    elif not isinstance(reader, (list, tuple)):
        reader = [reader]

    # FUTURE: Handle multiple readers
    reader = reader[0]
    reader_configs = list(configs_for_reader(reader, ppp_config_dir))[0]
    reader_kwargs = reader_kwargs or {}
    try:
        reader_instance = load_reader(reader_configs, **reader_kwargs)
    except (KeyError, IOError, yaml.YAMLError) as err:
        LOG.info('Cannot use %s', str(reader_configs))
        LOG.debug(str(err))
        # if reader and (isinstance(reader, str) or len(reader) == 1):
        #     # if it is a single reader then give a more usable error
        #     raise
        raise

    if group_keys is None:
        group_keys = reader_instance.info.get('group_keys', ('start_time',))

    # Collect a (group key tuple, filename) pair for each recognized file.
    file_keys = []
    for filetype, filetype_info in reader_instance.sorted_filetype_items():
        for f, file_info in reader_instance.filename_items_for_filetype(files_to_sort, filetype_info):
            group_key = tuple(file_info.get(k) for k in group_keys)
            file_keys.append((group_key, f))

    prev_key = None
    threshold = timedelta(seconds=time_threshold)
    file_groups = {}
    for gk, f in sorted(file_keys):
        # use first element of key as time identifier (if datetime type)
        if prev_key is None:
            is_new_group = True
            prev_key = gk
        elif isinstance(gk[0], datetime):
            # datetimes within threshold difference are "the same time"
            is_new_group = (gk[0] - prev_key[0]) > threshold
        else:
            is_new_group = gk[0] != prev_key[0]

        # compare keys for those that are found for both the key and
        # this is a generator and is not computed until the if statement below
        # when we know that `prev_key` is not None
        vals_not_equal = (this_val != prev_val for this_val, prev_val in zip(gk[1:], prev_key[1:])
                          if this_val is not None and prev_val is not None)

        # if this is a new group based on the first element
        if is_new_group or any(vals_not_equal):
            file_groups[gk] = [f]
            prev_key = gk
        else:
            file_groups[prev_key].append(f)
    sorted_group_keys = sorted(file_groups)
    # passable to Scene as 'filenames'
    return [{reader: file_groups[group_key]} for group_key in sorted_group_keys]
def configs_for_reader(reader=None, ppp_config_dir=None):
    """Generator of reader configuration files for one or more readers.

    Args:
        reader (Optional[str]): Yield configs only for this reader
        ppp_config_dir (Optional[str]): Additional configuration directory
            to search for reader configuration files.

    Returns: Generator of lists of configuration files

    Raises:
        ValueError: If a deprecated reader name is used or no config
            can be found for the requested reader(s).
    """
    search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()
    if reader is not None:
        if not isinstance(reader, (list, tuple)):
            reader = [reader]
        # check for old reader names
        new_readers = []
        for reader_name in reader:
            if reader_name.endswith('.yaml') or reader_name not in OLD_READER_NAMES:
                new_readers.append(reader_name)
                continue

            new_name = OLD_READER_NAMES[reader_name]
            # Satpy 0.11 only displays a warning
            # Satpy 0.13 will raise an exception
            raise ValueError("Reader name '{}' has been deprecated, use '{}' instead.".format(reader_name, new_name))
            # Satpy 0.15 or 1.0, remove exception and mapping

        reader = new_readers
        # given a config filename or reader name
        config_files = [r if r.endswith('.yaml') else r + '.yaml' for r in reader]
    else:
        # no reader requested: discover every reader config available
        reader_configs = glob_config(os.path.join('readers', '*.yaml'),
                                     *search_paths)
        config_files = set(reader_configs)

    for config_file in config_files:
        config_basename = os.path.basename(config_file)
        reader_configs = config_search_paths(
            os.path.join("readers", config_basename), *search_paths)

        if not reader_configs:
            # either the reader they asked for does not exist
            # or satpy is improperly configured and can't find its own readers
            raise ValueError("No reader(s) named: {}".format(reader))

        yield reader_configs
def available_readers(as_dict=False):
    """Available readers based on current configuration.

    Args:
        as_dict (bool): Optionally return reader information as a dictionary.
            Default: False

    Returns: List of available reader names. If `as_dict` is `True` then
        a list of dictionaries including additional reader information
        is returned.
    """
    readers = []
    for reader_configs in configs_for_reader():
        try:
            reader_info = read_reader_config(reader_configs)
        except (KeyError, IOError, yaml.YAMLError):
            # Unreadable/broken config: skip it but leave a trace in the logs.
            LOG.warning("Could not import reader config from: %s", reader_configs)
            LOG.debug("Error loading YAML", exc_info=True)
            continue
        if as_dict:
            readers.append(reader_info)
        else:
            readers.append(reader_info['name'])
    return readers
def find_files_and_readers(start_time=None, end_time=None, base_dir=None,
                           reader=None, sensor=None, ppp_config_dir=None,
                           filter_parameters=None, reader_kwargs=None):
    """Find on-disk files matching the provided parameters.

    Use `start_time` and/or `end_time` to limit found filenames by the times
    in the filenames (not the internal file metadata). Files are matched if
    they fall anywhere within the range specified by these parameters.

    Searching is **NOT** recursive.

    The returned dictionary can be passed directly to the `Scene` object
    through the `filenames` keyword argument.

    The behaviour of time-based filtering depends on whether or not the filename
    contains information about the end time of the data or not:

    - if the end time is not present in the filename, the start time of the filename
      is used and has to fall between (inclusive) the requested start and end times
    - otherwise, the timespan of the filename has to overlap the requested timespan

    Args:
        start_time (datetime): Limit used files by starting time.
        end_time (datetime): Limit used files by ending time.
        base_dir (str): The directory to search for files containing the
                        data to load. Defaults to the current directory.
        reader (str or list): The name of the reader to use for loading the data or a list of names.
        sensor (str or list): Limit used files by provided sensors.
        ppp_config_dir (str): The directory containing the configuration
                              files for Satpy.
        filter_parameters (dict): Filename pattern metadata to filter on. `start_time` and `end_time` are
                                  automatically added to this dictionary. Shortcut for
                                  `reader_kwargs['filter_parameters']`.
        reader_kwargs (dict): Keyword arguments to pass to specific reader
                              instances to further configure file searching.

    Returns: Dictionary mapping reader name string to list of filenames

    Raises:
        ValueError: If no reader supports the requested sensor, or no
            supported files are found at all.
    """
    if ppp_config_dir is None:
        ppp_config_dir = get_environ_config_dir()
    reader_files = {}
    reader_kwargs = reader_kwargs or {}
    filter_parameters = filter_parameters or reader_kwargs.get('filter_parameters', {})
    sensor_supported = False
    if start_time or end_time:
        filter_parameters['start_time'] = start_time
        filter_parameters['end_time'] = end_time
    reader_kwargs['filter_parameters'] = filter_parameters

    for reader_configs in configs_for_reader(reader, ppp_config_dir):
        try:
            reader_instance = load_reader(reader_configs, **reader_kwargs)
        except (KeyError, IOError, yaml.YAMLError) as err:
            LOG.info('Cannot use %s', str(reader_configs))
            LOG.debug(str(err))
            if reader and (isinstance(reader, str) or len(reader) == 1):
                # if it is a single reader then give a more usable error
                raise
            continue

        if not reader_instance.supports_sensor(sensor):
            continue
        elif sensor is not None:
            # sensor was specified and a reader supports it
            sensor_supported = True
        loadables = reader_instance.select_files_from_directory(base_dir)
        if loadables:
            loadables = list(
                reader_instance.filter_selected_filenames(loadables))
        if loadables:
            reader_files[reader_instance.name] = list(loadables)

    if sensor and not sensor_supported:
        raise ValueError("Sensor '{}' not supported by any readers".format(sensor))

    if not reader_files:
        raise ValueError("No supported files found")
    return reader_files
def load_readers(filenames=None, reader=None, reader_kwargs=None,
                 ppp_config_dir=None):
    """Create specified readers and assign files to them.

    Args:
        filenames (iterable or dict): A sequence of files that will be used to load data from. A ``dict`` object
                                      should map reader names to a list of filenames for that reader.
        reader (str or list): The name of the reader to use for loading the data or a list of names.
        reader_kwargs (dict): Keyword arguments to pass to specific reader instances.
        ppp_config_dir (str): The directory containing the configuration files for satpy.

    Returns: Dictionary mapping reader name to reader instance

    Raises:
        ValueError: If 'filenames' is provided but empty, no supported
            files are found, or no dataset could be loaded.
    """
    reader_instances = {}
    reader_kwargs = reader_kwargs or {}
    # filter parameters are only for file filtering, not handler creation
    reader_kwargs_without_filter = reader_kwargs.copy()
    reader_kwargs_without_filter.pop('filter_parameters', None)

    if ppp_config_dir is None:
        ppp_config_dir = get_environ_config_dir()

    if not filenames and not reader:
        # used for an empty Scene
        return {}
    elif reader and filenames is not None and not filenames:
        # user made a mistake in their glob pattern
        raise ValueError("'filenames' was provided but is empty.")
    elif not filenames:
        LOG.warning("'filenames' required to create readers and load data")
        return {}
    elif reader is None and isinstance(filenames, dict):
        # filenames is a dictionary of reader_name -> filenames
        reader = list(filenames.keys())
        remaining_filenames = set(f for fl in filenames.values() for f in fl)
    elif reader and isinstance(filenames, dict):
        # filenames is a dictionary of reader_name -> filenames
        # but they only want one of the readers
        filenames = filenames[reader]
        remaining_filenames = set(filenames or [])
    else:
        remaining_filenames = set(filenames or [])

    for idx, reader_configs in enumerate(configs_for_reader(reader, ppp_config_dir)):
        if isinstance(filenames, dict):
            readers_files = set(filenames[reader[idx]])
        else:
            readers_files = remaining_filenames

        try:
            reader_instance = load_reader(reader_configs, **reader_kwargs)
        except (KeyError, IOError, yaml.YAMLError) as err:
            LOG.info('Cannot use %s', str(reader_configs))
            LOG.debug(str(err))
            continue

        if readers_files:
            loadables = reader_instance.select_files_from_pathnames(readers_files)
        if loadables:
            reader_instance.create_filehandlers(loadables, fh_kwargs=reader_kwargs_without_filter)
            reader_instances[reader_instance.name] = reader_instance
            remaining_filenames -= set(loadables)
        if not remaining_filenames:
            break

    if remaining_filenames:
        LOG.warning("Don't know how to open the following files: {}".format(str(remaining_filenames)))
    if not reader_instances:
        raise ValueError("No supported files found")
    elif not any(list(r.available_dataset_ids) for r in reader_instances.values()):
        raise ValueError("No dataset could be loaded. Either missing "
                         "requirements (such as Epilog, Prolog) or none of the "
                         "provided files match the filter parameters.")
    return reader_instances
def get_key(self, match_key, num_results=1, best=True, **dfilter):
    """Get multiple fully-specified keys that match the provided query.

    Delegates to the module-level `get_key` function using this
    dictionary's keys as the search space.

    Args:
        match_key (DatasetID): DatasetID of query parameters to use for
                               searching. Any parameter that is `None`
                               is considered a wild card and any match is
                               accepted. Can also be a string representing the
                               dataset name or a number representing the dataset
                               wavelength.
        num_results (int): Number of results to return. If `0` return all,
                           if `1` return only that element, otherwise
                           return a list of matching keys.
        best (bool): Sort results to return the "best" match first.
        **dfilter (dict): See `get_key` function for more information.
    """
    return get_key(match_key, self.keys(), num_results=num_results,
                   best=best, **dfilter)
def get(self, key, default=None):
    """Get value with optional default.

    The key is first resolved through :meth:`get_key`, so partial
    queries (name, wavelength, ...) are accepted.
    """
    try:
        resolved_key = self.get_key(key)
    except KeyError:
        # No dataset matches the query: behave like dict.get.
        return default
    return super(DatasetDict, self).get(resolved_key, default)
def sub_arrays(proj1, proj2):
    """Subtract two DataArrays and combine their attrs.

    Raises:
        IncompatibleAreas: If both inputs carry an 'area' attribute but
            the combined metadata dropped it (i.e. the areas differ).
    """
    attrs = combine_metadata(proj1.attrs, proj2.attrs)
    # combine_metadata drops keys whose values disagree, so a missing
    # 'area' here while both inputs had one means the areas did not match.
    if (attrs.get('area') is None and
            proj1.attrs.get('area') is not None and
            proj2.attrs.get('area') is not None):
        raise IncompatibleAreas
    res = proj1 - proj2
    res.attrs = attrs
    return res
def zero_missing_data(data1, data2):
    """Replace NaN values with zeros in data1 if the data is valid in data2."""
    # NaN in data1 but valid (non-NaN) in data2 -> fill with zero.
    fill_mask = xu.logical_and(xu.isnan(data1), xu.logical_not(xu.isnan(data2)))
    return data1.where(~fill_mask, 0)
def load_sensor_composites(self, sensor_name):
    """Load all compositor configs for the provided sensor.

    Silently does nothing (besides a debug log) when no composite
    config file exists for the sensor.
    """
    config_filename = sensor_name + ".yaml"
    LOG.debug("Looking for composites config file %s", config_filename)
    composite_configs = config_search_paths(
        os.path.join("composites", config_filename),
        self.ppp_config_dir, check_exists=True)
    if composite_configs:
        self._load_config(composite_configs)
    else:
        LOG.debug("No composite config found called {}".format(
            config_filename))
def load_compositors(self, sensor_names):
    """Load all compositor configs for the provided sensors.

    Args:
        sensor_names (list of strings): Sensor names that have matching
                                        ``sensor_name.yaml`` config files.

    Returns:
        (comps, mods): Where `comps` is a dictionary:

                sensor_name -> composite ID -> compositor object

            And `mods` is a dictionary:

                sensor_name -> modifier name -> (modifier class,
                modifiers options)

        Note that these dictionaries are copies of those cached in
        this object.
    """
    comps = {}
    mods = {}
    for sensor_name in sensor_names:
        # Populate the cache lazily, once per sensor.
        if sensor_name not in self.compositors:
            self.load_sensor_composites(sensor_name)
        if sensor_name in self.compositors:
            # Return copies so callers can't corrupt the cache.
            comps[sensor_name] = DatasetDict(
                self.compositors[sensor_name].copy())
            mods[sensor_name] = self.modifiers[sensor_name].copy()
    return comps, mods
def _init_refl3x(self, projectables):
    """Initiate the 3.x reflectance derivations.

    Stores a pyspectral ``Calculator`` on ``self._refl3x``.

    Raises:
        ImportError: If pyspectral is not installed (``Calculator`` is
            falsy when the module-level import failed).
    """
    if not Calculator:
        LOG.info("Couldn't load pyspectral")
        raise ImportError("No module named pyspectral.near_infrared_reflectance")

    _nir, _tb11 = projectables
    self._refl3x = Calculator(_nir.attrs['platform_name'], _nir.attrs['sensor'], _nir.attrs['name'])
def _get_reflectance(self, projectables, optional_datasets):
    """Calculate 3.x reflectance with pyspectral.

    Args:
        projectables: (NIR band, 11um brightness temperature) pair.
        optional_datasets: May contain a ~13.4um brightness temperature
            dataset (used for CO2 correction) and/or a precomputed solar
            zenith angle dataset.
    """
    _nir, _tb11 = projectables
    LOG.info('Getting reflective part of %s', _nir.attrs['name'])

    sun_zenith = None
    tb13_4 = None
    for dataset in optional_datasets:
        wavelengths = dataset.attrs.get('wavelength', [100., 0, 0])
        if (dataset.attrs.get('units') == 'K' and
                wavelengths[0] <= 13.4 <= wavelengths[2]):
            # Brightness temperatures around 13.4um: CO2 correction input.
            tb13_4 = dataset
        elif ("standard_name" in dataset.attrs and
              dataset.attrs["standard_name"] == "solar_zenith_angle"):
            sun_zenith = dataset

    # Check if the sun-zenith angle was provided:
    if sun_zenith is None:
        # Fall back to computing it with pyorbital (optional dependency;
        # sun_zenith_angle is None when the import failed).
        if sun_zenith_angle is None:
            raise ImportError("No module named pyorbital.astronomy")
        lons, lats = _nir.attrs["area"].get_lonlats_dask(CHUNK_SIZE)
        sun_zenith = sun_zenith_angle(_nir.attrs['start_time'], lons, lats)

    return self._refl3x.reflectance_from_tbs(sun_zenith, _nir, _tb11, tb_ir_co2=tb13_4)
def _get_band(self, high_res, low_res, color, ratio):
    """Figure out what data should represent this color.

    The configured high-resolution band is used as-is; every other band
    is sharpened by multiplying with the high/low resolution ratio. In
    both cases the metadata is taken from the low resolution band.
    """
    if color == self.high_resolution_band:
        band = high_res
    else:
        band = low_res * ratio
    band.attrs = low_res.attrs.copy()
    return band
def prepare_resampler(source_area, destination_area, resampler=None, **resample_kwargs):
    """Instantiate and return a resampler.

    Args:
        source_area: Source geometry definition.
        destination_area: Target geometry definition.
        resampler (str or class): Resampler name (looked up in the
            `RESAMPLERS` registry) or a resampler class. Defaults to
            'kd_tree'. Passing an already-built instance is an error.
        **resample_kwargs: Included in the cache key so differently
            configured resamplers are cached separately.

    Returns:
        tuple: (cache key, resampler instance). Instances are reused from
        the module-level `resamplers_cache`.
    """
    if resampler is None:
        LOG.info("Using default KDTree resampler")
        resampler = 'kd_tree'

    if isinstance(resampler, BaseResampler):
        raise ValueError("Trying to create a resampler when one already "
                         "exists.")
    elif isinstance(resampler, str):
        # Look the class up by its registered name.
        resampler_class = RESAMPLERS[resampler]
    else:
        resampler_class = resampler

    key = (resampler_class,
           source_area, destination_area,
           hash_dict(resample_kwargs))
    try:
        resampler_instance = resamplers_cache[key]
    except KeyError:
        # First use of this configuration: build and cache the instance.
        resampler_instance = resampler_class(source_area, destination_area)
        resamplers_cache[key] = resampler_instance
    return key, resampler_instance
def resample(source_area, data, destination_area,
             resampler=None, **kwargs):
    """Do the resampling.

    *resampler* may be an existing `BaseResampler` instance, a registered
    resampler name, a resampler class, or None (default resampler). *data*
    may be a single dataset or a list of datasets.
    """
    if 'resampler_class' in kwargs:
        import warnings
        warnings.warn("'resampler_class' is deprecated, use 'resampler'",
                      DeprecationWarning)
        resampler = kwargs.pop('resampler_class')

    if isinstance(resampler, BaseResampler):
        resampler_instance = resampler
    else:
        # prepare_resampler returns (cache key, instance); the key is unused.
        _, resampler_instance = prepare_resampler(source_area,
                                                  destination_area,
                                                  resampler)

    if isinstance(data, list):
        return [resampler_instance.resample(ds, **kwargs) for ds in data]
    return resampler_instance.resample(data, **kwargs)
def resample(self, data, cache_dir=None, mask_area=None, **kwargs):
    """Resample `data` by calling `precompute` and `compute` methods.

    Only certain resampling classes may use `cache_dir` and the `mask`
    provided when `mask_area` is True. The return value of calling the
    `precompute` method is passed as the `cache_id` keyword argument
    of the `compute` method, but may not be used directly for caching. It
    is up to the individual resampler subclasses to determine how this
    is used.

    Args:
        data (xarray.DataArray): Data to be resampled
        cache_dir (str): directory to cache precomputed results
                         (default False, optional)
        mask_area (bool): Mask geolocation data where data values are
                          invalid. This should be used when data values
                          may affect what neighbors are considered valid.

    Returns (xarray.DataArray): Data resampled to the target area
    """
    # default is to mask areas for SwathDefinitions
    if mask_area is None and isinstance(
            self.source_geo_def, SwathDefinition):
        mask_area = True

    if mask_area:
        if isinstance(self.source_geo_def, SwathDefinition):
            geo_dims = self.source_geo_def.lons.dims
        else:
            geo_dims = ('y', 'x')
        flat_dims = [dim for dim in data.dims if dim not in geo_dims]
        # xarray <= 0.10.1 computes dask arrays during isnull
        if np.issubdtype(data.dtype, np.integer):
            # integers have no NaN; compare against the fill value instead
            kwargs['mask'] = data == data.attrs.get('_FillValue', np.iinfo(data.dtype.type).max)
        else:
            kwargs['mask'] = data.isnull()
        # a pixel is masked only when invalid across every non-geo dimension
        kwargs['mask'] = kwargs['mask'].all(dim=flat_dims)

    cache_id = self.precompute(cache_dir=cache_dir, **kwargs)
    return self.compute(data, cache_id=cache_id, **kwargs)
q26930 | BaseResampler._create_cache_filename | train | def _create_cache_filename(self, cache_dir=None, **kwargs):
"""Create filename for the cached resampling parameters"""
cache_dir = cache_dir or '.'
hash_str = self.get_hash(**kwargs)
return os.path.join(cache_dir, 'resample_lut-' + hash_str + '.npz') | python | {
"resource": ""
} |
def precompute(self, mask=None, radius_of_influence=None, epsilon=0,
               cache_dir=None, **kwargs):
    """Create a KDTree structure and store it for later use.

    Note: The `mask` keyword should be provided if geolocation may be valid
    where data points are invalid.
    """
    del kwargs
    source_geo_def = self.source_geo_def

    if mask is not None and cache_dir is not None:
        LOG.warning("Mask and cache_dir both provided to nearest "
                    "resampler. Cached parameters are affected by "
                    "masked pixels. Will not cache results.")
        cache_dir = None

    # TODO: move this to pyresample
    if radius_of_influence is None:
        try:
            radius_of_influence = source_geo_def.lons.resolution * 3
        except AttributeError:
            try:
                radius_of_influence = max(abs(source_geo_def.pixel_size_x),
                                          abs(source_geo_def.pixel_size_y)) * 3
            except AttributeError:
                # no resolution information at all -> conservative default
                radius_of_influence = 1000
        except TypeError:
            # ``resolution`` exists but is not numeric
            radius_of_influence = 10000

    kwargs = dict(source_geo_def=source_geo_def,
                  target_geo_def=self.target_geo_def,
                  radius_of_influence=radius_of_influence,
                  neighbours=1,
                  epsilon=epsilon)

    if self.resampler is None:
        # FIXME: We need to move all of this caching logic to pyresample
        self.resampler = XArrayResamplerNN(**kwargs)

    try:
        # prefer previously cached kd-tree parameters (memory or disk)
        self.load_neighbour_info(cache_dir, mask=mask, **kwargs)
        LOG.debug("Read pre-computed kd-tree parameters")
    except IOError:
        LOG.debug("Computing kd-tree parameters")
        self.resampler.get_neighbour_info(mask=mask)
        self.save_neighbour_info(cache_dir, mask=mask, **kwargs)
def _apply_cached_indexes(self, cached_indexes, persist=False):
    """Reassign various resampler index attributes from a cache dict."""
    # cacheable_dict = {}
    for elt in ['valid_input_index', 'valid_output_index',
                'index_array', 'distance_array']:
        val = cached_indexes[elt]
        if isinstance(val, tuple):
            # stored as a 1-element tuple -> unwrap
            val = cached_indexes[elt][0]
        elif isinstance(val, np.ndarray):
            # wrap plain numpy arrays in dask for lazy use
            val = da.from_array(val, chunks=CHUNK_SIZE)
        elif persist and isinstance(val, da.Array):
            # keep computed results in memory; also updates the cache dict
            cached_indexes[elt] = val = val.persist()
        setattr(self.resampler, elt, val)
def load_neighbour_info(self, cache_dir, mask=None, **kwargs):
    """Read index arrays from either the in-memory or disk cache.

    Raises:
        IOError: if no cached neighbour info is available.
    """
    mask_name = getattr(mask, 'name', None)
    filename = self._create_cache_filename(cache_dir,
                                           mask=mask_name, **kwargs)
    # NOTE: the in-memory cache is keyed on the mask *name* — the same key
    # used by ``save_neighbour_info``.  The previous ``kwargs.get('mask')``
    # lookup could never hit because ``mask`` is a named parameter and is
    # therefore never present in ``kwargs``.
    if mask_name in self._index_caches:
        self._apply_cached_indexes(self._index_caches[mask_name])
    elif cache_dir:
        cache = np.load(filename, mmap_mode='r')
        # copy the dict so we can modify its keys
        new_cache = dict(cache.items())
        cache.close()
        self._apply_cached_indexes(new_cache)  # modifies cache dict in-place
        self._index_caches[mask_name] = new_cache
    else:
        raise IOError
def save_neighbour_info(self, cache_dir, mask=None, **kwargs):
    """Cache resampler's index arrays if there is a cache dir."""
    if cache_dir:
        mask_name = getattr(mask, 'name', None)
        filename = self._create_cache_filename(
            cache_dir, mask=mask_name, **kwargs)
        LOG.info('Saving kd_tree neighbour info to %s', filename)
        cache = self._read_resampler_attrs()
        # update the cache in place with persisted dask arrays
        self._apply_cached_indexes(cache, persist=True)
        # keep an in-memory copy keyed on the mask name ...
        self._index_caches[mask_name] = cache
        # ... and store the arrays on disk for reuse across runs
        np.savez(filename, **cache)
def resample(self, *args, **kwargs):
    """Run precompute and compute methods.

    .. note::

        This sets the default of 'mask_area' to False since it is
        not needed in EWA resampling currently.
    """
    # only inject the default when the caller did not choose explicitly
    if 'mask_area' not in kwargs:
        kwargs['mask_area'] = False
    return super(EWAResampler, self).resample(*args, **kwargs)
def _call_ll2cr(self, lons, lats, target_geo_def, swath_usage=0):
    """Wrapper around ll2cr for handling dask delayed calls better.

    Returns a stacked ``(2, ...)`` array: columns first, rows second.
    """
    new_src = SwathDefinition(lons, lats)
    swath_points_in_grid, cols, rows = ll2cr(new_src, target_geo_def)
    # FIXME: How do we check swath usage/coverage if we only do this
    #        per-block
    # # Determine if enough of the input swath was used
    # grid_name = getattr(self.target_geo_def, "name", "N/A")
    # fraction_in = swath_points_in_grid / float(lons.size)
    # swath_used = fraction_in > swath_usage
    # if not swath_used:
    #     LOG.info("Data does not fit in grid %s because it only %f%% of "
    #              "the swath is used" %
    #              (grid_name, fraction_in * 100))
    #     raise RuntimeError("Data does not fit in grid %s" % (grid_name,))
    # else:
    #     LOG.debug("Data fits in grid %s and uses %f%% of the swath",
    #               grid_name, fraction_in * 100)
    return np.stack([cols, rows], axis=0)
def precompute(self, cache_dir=None, swath_usage=0, **kwargs):
    """Generate row and column arrays and store it for later use.

    The resulting dask arrays are kept in ``self.cache`` under the
    ``"rows"`` and ``"cols"`` keys; nothing is returned.
    """
    if kwargs.get('mask') is not None:
        LOG.warning("'mask' parameter has no affect during EWA "
                    "resampling")

    del kwargs
    source_geo_def = self.source_geo_def
    target_geo_def = self.target_geo_def

    if cache_dir:
        LOG.warning("'cache_dir' is not used by EWA resampling")

    # Satpy/PyResample don't support dynamic grids out of the box yet
    lons, lats = source_geo_def.get_lonlats()
    if isinstance(lons, xr.DataArray):
        # get dask arrays
        lons = lons.data
        lats = lats.data

    # we are remapping to a static unchanging grid/area with all of
    # its parameters specified
    # new leading axis of size 2 holds (cols, rows) per block
    chunks = (2,) + lons.chunks
    res = da.map_blocks(self._call_ll2cr, lons, lats,
                        target_geo_def, swath_usage,
                        dtype=lons.dtype, chunks=chunks, new_axis=[0])
    cols = res[0]
    rows = res[1]

    # save the dask arrays in the class instance cache
    # the on-disk cache will store the numpy arrays
    self.cache = {
        "rows": rows,
        "cols": cols,
    }

    return None
def _call_fornav(self, cols, rows, target_geo_def, data,
                 grid_coverage=0, **kwargs):
    """Wrapper to run fornav as a dask delayed.

    Raises:
        RuntimeError: when less than ``grid_coverage`` of the output grid
            was covered by the input swath.
    """
    num_valid_points, res = fornav(cols, rows, target_geo_def,
                                   data, **kwargs)

    if isinstance(data, tuple):
        # convert 'res' from tuple of arrays to one array
        res = np.stack(res)
        num_valid_points = sum(num_valid_points)

    grid_covered_ratio = num_valid_points / float(res.size)
    grid_covered = grid_covered_ratio > grid_coverage
    if not grid_covered:
        msg = "EWA resampling only found %f%% of the grid covered " \
              "(need %f%%)" % (grid_covered_ratio * 100,
                               grid_coverage * 100)
        raise RuntimeError(msg)
    # lazy %-style logging args: the message is only formatted when the
    # DEBUG level is actually enabled (the old code formatted eagerly)
    LOG.debug("EWA resampling found %f%% of the grid covered",
              grid_covered_ratio * 100)

    return res
def precompute(self, mask=None, radius_of_influence=50000, epsilon=0,
               reduce_data=True, nprocs=1,
               cache_dir=False, **kwargs):
    """Create bilinear coefficients and store them for later use.

    Note: The `mask` keyword should be provided if geolocation may be valid
    where data points are invalid. This defaults to the `mask` attribute of
    the `data` numpy masked array passed to the `resample` method.
    """
    del kwargs

    if self.resampler is None:
        kwargs = dict(source_geo_def=self.source_geo_def,
                      target_geo_def=self.target_geo_def,
                      radius_of_influence=radius_of_influence,
                      neighbours=32,
                      epsilon=epsilon,
                      reduce_data=reduce_data)

        self.resampler = XArrayResamplerBilinear(**kwargs)

        try:
            # reuse previously cached coefficients when possible
            self.load_bil_info(cache_dir, **kwargs)
            LOG.debug("Loaded bilinear parameters")
        except IOError:
            LOG.debug("Computing bilinear parameters")
            self.resampler.get_bil_info()
            self.save_bil_info(cache_dir, **kwargs)
def compute(self, data, fill_value=None, **kwargs):
    """Resample ``data`` using the precomputed bilinear coefficients."""
    del kwargs
    if fill_value is None:
        # fall back to the dataset's own fill value when one is defined
        fill_value = data.attrs.get('_FillValue')
    shape = self.target_geo_def.shape
    return self.resampler.get_sample_from_bil_info(
        data, fill_value=fill_value, output_shape=shape)
def apply_attributes(self, nc, table, prefix=''):
    """Apply fixed attributes, or look up needed attributes and apply them.

    A ``None`` value in ``table`` is resolved by calling a helper method
    named ``prefix + name`` on this object, when one exists.
    """
    for name, value in sorted(table.items()):
        if name in nc.ncattrs():
            LOG.debug('already have a value for %s' % name)
            continue
        if value is None:
            # look up a computing routine, e.g. _global_ + product_tile_height
            funcname = prefix + name
            func = getattr(self, funcname, None)
            if func is None:
                LOG.info('no routine matching %s' % funcname)
                continue
            value = func()
        if value is not None:
            setattr(nc, name, value)
def set_projection_attrs(self, area_id, proj4_info):
    """Assign projection attributes per GRB standard.

    Creates the CF grid-mapping variable matching the PROJ.4 projection
    (geos, lcc, stere or merc) and points ``image_data.grid_mapping``
    at it.

    Raises:
        ValueError: for any other projection.
    """
    proj4_info['a'], proj4_info['b'] = proj4_radius_parameters(proj4_info)
    if proj4_info["proj"] == "geos":
        p = self.projection = self.nc.createVariable("fixedgrid_projection", 'i4')
        self.image_data.grid_mapping = "fixedgrid_projection"
        p.short_name = area_id
        p.grid_mapping_name = "geostationary"
        # sweep axis defaults to 'y' when PROJ.4 does not specify one
        p.sweep_angle_axis = proj4_info.get("sweep", "y")
        p.perspective_point_height = proj4_info['h']
        p.latitude_of_projection_origin = np.float32(0.0)
        p.longitude_of_projection_origin = np.float32(proj4_info.get('lon_0', 0.0))  # is the float32 needed?
    elif proj4_info["proj"] == "lcc":
        p = self.projection = self.nc.createVariable("lambert_projection", 'i4')
        self.image_data.grid_mapping = "lambert_projection"
        p.short_name = area_id
        p.grid_mapping_name = "lambert_conformal_conic"
        p.standard_parallel = proj4_info["lat_0"]  # How do we specify two standard parallels?
        p.longitude_of_central_meridian = proj4_info["lon_0"]
        p.latitude_of_projection_origin = proj4_info.get('lat_1', proj4_info['lat_0'])  # Correct?
    elif proj4_info['proj'] == 'stere':
        p = self.projection = self.nc.createVariable("polar_projection", 'i4')
        self.image_data.grid_mapping = "polar_projection"
        p.short_name = area_id
        p.grid_mapping_name = "polar_stereographic"
        p.standard_parallel = proj4_info["lat_ts"]
        p.straight_vertical_longitude_from_pole = proj4_info.get("lon_0", 0.0)
        p.latitude_of_projection_origin = proj4_info["lat_0"]  # ?
    elif proj4_info['proj'] == 'merc':
        p = self.projection = self.nc.createVariable("mercator_projection", 'i4')
        self.image_data.grid_mapping = "mercator_projection"
        p.short_name = area_id
        p.grid_mapping_name = "mercator"
        p.standard_parallel = proj4_info.get('lat_ts', proj4_info.get('lat_0', 0.0))
        p.longitude_of_projection_origin = proj4_info.get("lon_0", 0.0)
    else:
        raise ValueError("SCMI can not handle projection '{}'".format(proj4_info['proj']))

    # ellipsoid parameters are common to every supported projection
    p.semi_major_axis = np.float64(proj4_info["a"])
    p.semi_minor_axis = np.float64(proj4_info["b"])
    p.false_easting = np.float32(proj4_info.get("x", 0.0))
    p.false_northing = np.float32(proj4_info.get("y", 0.0))
def enhancer(self):
    """Lazy loading of enhancements only if needed."""
    if self._enhancer is None:
        # created on first access and reused afterwards
        self._enhancer = Enhancer(ppp_config_dir=self.ppp_config_dir)
    return self._enhancer
q26944 | SCMIWriter._group_by_area | train | def _group_by_area(self, datasets):
"""Group datasets by their area."""
def _area_id(area_def):
return area_def.name + str(area_def.area_extent) + str(area_def.shape)
# get all of the datasets stored by area
area_datasets = {}
for x in datasets:
area_id = _area_id(x.attrs['area'])
area, ds_list = area_datasets.setdefault(area_id, (x.attrs['area'], []))
ds_list.append(x)
return area_datasets | python | {
"resource": ""
} |
q26945 | _config_data_files | train | def _config_data_files(base_dirs, extensions=(".cfg", )):
"""Find all subdirectory configuration files.
Searches each base directory relative to this setup.py file and finds
all files ending in the extensions provided.
:param base_dirs: iterable of relative base directories to search
:param extensions: iterable of file extensions to include (with '.' prefix)
:returns: list of 2-element tuples compatible with `setuptools.setup`
"""
data_files = []
pkg_root = os.path.realpath(os.path.dirname(__file__)) + "/"
for base_dir in base_dirs:
new_data_files = []
for ext in extensions:
configs = glob(os.path.join(pkg_root, base_dir, "*" + ext))
configs = [c.replace(pkg_root, "") for c in configs]
new_data_files.extend(configs)
data_files.append((base_dir, new_data_files))
return data_files | python | {
"resource": ""
} |
def from_sds(var, *args, **kwargs):
    """Create a dask array from a SD dataset.

    pyhdf SDS objects don't expose ``dtype``/``shape`` the way dask
    expects, so they are patched on from the SDS ``info()`` tuple.
    """
    var.__dict__['dtype'] = HTYPE_TO_DTYPE[var.info()[3]]
    shape = var.info()[2]
    # a rank-1 dataset reports its dimension as a bare int; ``tuple(int)``
    # would raise TypeError, so wrap scalars in a 1-tuple instead
    var.__dict__['shape'] = tuple(shape) if isinstance(shape, (tuple, list)) else (shape,)
    return da.from_array(var, *args, **kwargs)
def _open_xarray_dataset(self, val, chunks=CHUNK_SIZE):
    """Read the band in blocks.

    Wraps the HDF4 SDS ``val`` in a lazily evaluated DataArray with
    ``('y', 'x')`` dimensions and the SDS attributes attached.
    """
    dask_arr = from_sds(val, chunks=chunks)
    attrs = val.attributes()
    return xr.DataArray(dask_arr, dims=('y', 'x'),
                        attrs=attrs)
def overlay(top, bottom, maxval=None):
    """Blend two layers with the GIMP "overlay" mode.

    from: https://docs.gimp.org/en/gimp-concepts-layer-modes.html
    """
    # default to the brightest value present in either layer
    maxval = np.maximum(top.max(), bottom.max()) if maxval is None else maxval
    blended = ((2 * top / maxval - 1) * bottom + 2 * top) * bottom / maxval
    # the formula can go negative for dark tops; clamp to valid range
    return blended.clip(min=0)
def read_writer_config(config_files, loader=UnsafeLoader):
    """Read the writer `config_files` and return the info extracted.

    Raises:
        KeyError: when none of the files define a top-level ``writer``
            section.
    """
    conf = {}
    LOG.debug('Reading %s', str(config_files))
    for config_file in config_files:
        with open(config_file) as fd:
            # later files override/extend earlier ones
            conf.update(yaml.load(fd.read(), Loader=loader))
    try:
        writer_info = conf['writer']
    except KeyError:
        raise KeyError(
            "Malformed config file {}: missing writer 'writer'".format(
                config_files))
    writer_info['config_files'] = config_files
    return writer_info
def load_writer_configs(writer_configs, ppp_config_dir,
                        **writer_kwargs):
    """Load the writer from the provided `writer_configs`.

    Returns:
        tuple: ``(writer_instance, remaining_save_kwargs)``
    """
    try:
        writer_info = read_writer_config(writer_configs)
        writer_class = writer_info['writer']
    except (ValueError, KeyError, yaml.YAMLError):
        raise ValueError("Invalid writer configs: "
                         "'{}'".format(writer_configs))
    # split kwargs between the constructor and the save methods
    init_kwargs, kwargs = writer_class.separate_init_kwargs(writer_kwargs)
    writer = writer_class(ppp_config_dir=ppp_config_dir,
                          config_files=writer_configs,
                          **init_kwargs)
    return writer, kwargs
def load_writer(writer, ppp_config_dir=None, **writer_kwargs):
    """Find and load writer `writer` in the available configuration files.

    Raises:
        ValueError: when no configuration for ``writer`` can be found or
            the configuration fails to load.
    """
    if ppp_config_dir is None:
        ppp_config_dir = get_environ_config_dir()
    # writer may be given as a plain name or a config filename
    config_fn = writer + ".yaml" if "." not in writer else writer
    config_files = config_search_paths(
        os.path.join("writers", config_fn), ppp_config_dir)
    writer_kwargs.setdefault("config_files", config_files)
    if not writer_kwargs['config_files']:
        raise ValueError("Unknown writer '{}'".format(writer))

    try:
        return load_writer_configs(writer_kwargs['config_files'],
                                   ppp_config_dir=ppp_config_dir,
                                   **writer_kwargs)
    except ValueError:
        raise ValueError("Writer '{}' does not exist or could not be "
                         "loaded".format(writer))
def configs_for_writer(writer=None, ppp_config_dir=None):
    """Generator of writer configuration files for one or more writers

    Args:
        writer (Optional[str]): Yield configs only for this writer
        ppp_config_dir (Optional[str]): Additional configuration directory
            to search for writer configuration files.

    Returns: Generator of lists of configuration files

    """
    search_paths = (ppp_config_dir,) if ppp_config_dir else tuple()
    if writer is not None:
        if not isinstance(writer, (list, tuple)):
            writer = [writer]
        # given a config filename or writer name
        config_files = [w if w.endswith('.yaml') else w + '.yaml' for w in writer]
    else:
        writer_configs = glob_config(os.path.join('writers', '*.yaml'),
                                     *search_paths)
        config_files = set(writer_configs)

    for config_file in config_files:
        config_basename = os.path.basename(config_file)
        # collect the same-named config from every search path so later
        # paths can override earlier ones
        writer_configs = config_search_paths(
            os.path.join("writers", config_basename), *search_paths)

        if not writer_configs:
            LOG.warning("No writer configs found for '%s'", writer)
            continue

        yield writer_configs
def available_writers(as_dict=False):
    """Available writers based on current configuration.

    Args:
        as_dict (bool): Optionally return writer information as a dictionary.
                        Default: False

    Returns: List of available writer names. If `as_dict` is `True` then
             a list of dictionaries including additionally writer information
             is returned.

    """
    writers = []
    for writer_configs in configs_for_writer():
        try:
            writer_info = read_writer_config(writer_configs)
        except (KeyError, IOError, yaml.YAMLError):
            # unreadable/broken configs are skipped, not fatal
            LOG.warning("Could not import writer config from: %s", writer_configs)
            LOG.debug("Error loading YAML", exc_info=True)
            continue
        writers.append(writer_info if as_dict else writer_info['name'])
    return writers
def add_text(orig, dc, img, text=None):
    """Add text to an image using the pydecorate package.

    All the features of pydecorate's ``add_text`` are available.
    See documentation of :doc:`pydecorate:index` for more info.

    Returns a new :class:`XRImage` sharing coordinates and attributes
    with ``orig``.
    """
    LOG.info("Add text to image.")

    dc.add_text(**text)
    # PIL image back to a 0-1 float dask array with a 'bands' dimension
    arr = da.from_array(np.array(img) / 255.0, chunks=CHUNK_SIZE)

    new_data = xr.DataArray(arr, dims=['y', 'x', 'bands'],
                            coords={'y': orig.data.coords['y'],
                                    'x': orig.data.coords['x'],
                                    'bands': list(img.mode)},
                            attrs=orig.data.attrs)
    return XRImage(new_data)
def show(dataset, **kwargs):
    """Display the dataset as an image.

    Extra ``kwargs`` are forwarded to the enhancement step; the enhanced
    image is returned for further use.
    """
    img = get_enhanced_image(dataset.squeeze(), **kwargs)
    img.show()
    return img
def compute_writer_results(results):
    """Compute all the given dask graphs `results` so that the files are
    saved.

    Args:
        results (iterable): Iterable of dask graphs resulting from calls to
                            `scn.save_datasets(..., compute=False)`
    """
    if not results:
        return

    sources, targets, delayeds = split_results(results)

    # one or more writers have targets that we need to close in the future
    if targets:
        delayeds.append(da.store(sources, targets, compute=False))

    if delayeds:
        da.compute(delayeds)

    if targets:
        # close file-like targets only after everything has been computed
        for target in targets:
            if hasattr(target, 'close'):
                target.close()
def separate_init_kwargs(cls, kwargs):
    """Helper class method to separate arguments between init and save methods.

    Currently the :class:`~satpy.scene.Scene` is passed one set of
    arguments to represent the Writer creation and saving steps. This is
    not preferred for Writer structure, but provides a simpler interface
    to users. This method splits the provided keyword arguments between
    those needed for initialization and those needed for the ``save_dataset``
    and ``save_datasets`` method calls.

    Writer subclasses should try to prefer keyword arguments only for the
    save methods only and leave the init keyword arguments to the base
    classes when possible.
    """
    # FUTURE: Don't pass Scene.save_datasets kwargs to init and here
    remaining = dict(kwargs)
    init_kwargs = {}
    for key in ('base_dir', 'filename', 'file_pattern'):
        if key in remaining:
            init_kwargs[key] = remaining.pop(key)
    return init_kwargs, remaining
def get_filename(self, **kwargs):
    """Create a filename where output data will be saved.

    Args:
        kwargs (dict): Attributes and other metadata to use for formatting
            the previously provided `filename`.

    Raises:
        RuntimeError: when no filename pattern was configured.
    """
    if self.filename_parser is None:
        raise RuntimeError("No filename pattern or specific filename provided")
    output_filename = self.filename_parser.compose(kwargs)
    out_dir = os.path.dirname(output_filename)
    if out_dir and not os.path.isdir(out_dir):
        # create missing output directories on demand
        LOG.info("Creating output directory: {}".format(out_dir))
        os.makedirs(out_dir)
    return output_filename
def get_area_def(self, key, info=None):
    """Create AreaDefinition for specified product.

    Projection information are hard coded for 0 degree geos projection
    Test dataset doesn't provide the values in the file container.
    Only fill values are inserted.
    """
    # TODO Get projection information from input file
    a = 6378169.
    h = 35785831.
    b = 6356583.8
    lon_0 = 0.
    # area_extent = (-5432229.9317116784, -5429229.5285458621,
    #                5429229.5285458621, 5432229.9317116784)
    area_extent = (-5570248.4773392612, -5567248.074173444,
                   5567248.074173444, 5570248.4773392612)
    proj_dict = {'a': float(a),
                 'b': float(b),
                 'lon_0': float(lon_0),
                 'h': float(h),
                 'proj': 'geos',
                 'units': 'm'}

    area = geometry.AreaDefinition(
        'LI_area_name',
        "LI area",
        'geosli',
        proj_dict,
        self.ncols,
        self.nlines,
        area_extent)
    # keep the definition around for later dataset loading
    self.area = area
    logger.debug("Dataset area definition: \n {}".format(area))
    return area
def get_dataset_key(self, key, **kwargs):
    """Get the fully qualified `DatasetID` matching `key`.

    See `satpy.readers.get_key` for more information about kwargs.
    """
    # delegate the matching logic to the shared helper
    return get_key(key, self.ids.keys(), **kwargs)
def load_ds_ids_from_config(self):
    """Get the dataset ids from the config.

    Builds one :class:`DatasetID` per permutation of multi-valued keys
    (e.g. several resolutions) and registers each in ``self.ids``.
    """
    ids = []
    for dataset in self.datasets.values():
        # xarray doesn't like concatenating attributes that are lists
        # https://github.com/pydata/xarray/issues/2060
        if 'coordinates' in dataset and \
                isinstance(dataset['coordinates'], list):
            dataset['coordinates'] = tuple(dataset['coordinates'])

        # Build each permutation/product of the dataset
        id_kwargs = []
        for key in DATASET_KEYS:
            val = dataset.get(key)
            if key in ["wavelength", "modifiers"] and isinstance(val,
                                                                 list):
                # special case: wavelength can be [min, nominal, max]
                # but is still considered 1 option
                # it also needs to be a tuple so it can be used in
                # a dictionary key (DatasetID)
                id_kwargs.append((tuple(val), ))
            elif key == "modifiers" and val is None:
                # empty modifiers means no modifiers applied
                id_kwargs.append((tuple(), ))
            elif isinstance(val, (list, tuple, set)):
                # this key has multiple choices
                # (ex. 250 meter, 500 meter, 1000 meter resolutions)
                id_kwargs.append(val)
            elif isinstance(val, dict):
                id_kwargs.append(val.keys())
            else:
                # this key only has one choice so make it a one
                # item iterable
                id_kwargs.append((val, ))
        for id_params in itertools.product(*id_kwargs):
            dsid = DatasetID(*id_params)
            ids.append(dsid)

            # create dataset infos specifically for this permutation
            ds_info = dataset.copy()
            for key in DATASET_KEYS:
                if isinstance(ds_info.get(key), dict):
                    # pull in the per-choice sub-config for this value
                    ds_info.update(ds_info[key][getattr(dsid, key)])
                # this is important for wavelength which was converted
                # to a tuple
                ds_info[key] = getattr(dsid, key)
            self.ids[dsid] = ds_info

    return ids
def check_file_covers_area(file_handler, check_area):
    """Checks if the file covers the current area.

    If the file doesn't provide any bounding box information or 'area'
    was not provided in `filter_parameters`, the check returns True.
    """
    try:
        gbb = Boundary(*file_handler.get_bounding_box())
    except NotImplementedError as err:
        # no bounding box available -> cannot rule the file out
        logger.debug("Bounding box computation not implemented: %s",
                     str(err))
    else:
        abb = AreaDefBoundary(get_area_def(check_area), frequency=1000)

        intersection = gbb.contour_poly.intersection(abb.contour_poly)
        if not intersection:
            return False

    return True
def find_required_filehandlers(self, requirements, filename_info):
    """Find the necessary file handlers for the given requirements.

    We assume here requirements are available.

    Raises:
        KeyError, if no handler for the given requirements is available.
        RuntimeError, if there is a handler for the given requirements,
        but it doesn't match the filename info.
    """
    req_fh = []
    if not requirements:
        return req_fh
    filename_items = set(filename_info.items())
    for requirement in requirements:
        # KeyError propagates when the requirement type is unknown
        for candidate in self.file_handlers[requirement]:
            if set(candidate.filename_info.items()).issubset(filename_items):
                req_fh.append(candidate)
                break
        else:
            raise RuntimeError("No matching requirement file of type "
                               "{}".format(requirement))
    return req_fh
def sorted_filetype_items(self):
    """Yield file types so that requirements come before their dependents."""
    done = []
    pending = deque(self.config['file_types'].items())
    while pending:
        filetype, filetype_info = pending.popleft()
        requirements = filetype_info.get('requires')
        if requirements is not None and any(req not in done
                                            for req in requirements):
            # requirements have not been processed yet -> push back and wait
            pending.append((filetype, filetype_info))
            continue
        done.append(filetype)
        yield filetype, filetype_info
def new_filehandler_instances(self, filetype_info, filename_items, fh_kwargs=None):
    """Generate new filehandler instances.

    Files whose requirements cannot be satisfied are skipped with a
    warning instead of aborting the whole reader.
    """
    requirements = filetype_info.get('requires')
    filetype_cls = filetype_info['file_reader']

    if fh_kwargs is None:
        fh_kwargs = {}

    for filename, filename_info in filename_items:
        try:
            req_fh = self.find_required_filehandlers(requirements,
                                                     filename_info)
        except KeyError as req:
            msg = "No handler for reading requirement {} for {}".format(
                req, filename)
            warnings.warn(msg)
            continue
        except RuntimeError as err:
            warnings.warn(str(err) + ' for {}'.format(filename))
            continue

        yield filetype_cls(filename, filename_info, filetype_info, *req_fh, **fh_kwargs)
def filter_fh_by_metadata(self, filehandlers):
    """Yield only the file handlers whose metadata passes the filter parameters."""
    for fh in filehandlers:
        # expose the handler's time range through its metadata dict
        fh.metadata['start_time'] = fh.start_time
        fh.metadata['end_time'] = fh.end_time
        if not self.metadata_matches(fh.metadata, fh):
            continue
        yield fh
def new_filehandlers_for_filetype(self, filetype_info, filenames, fh_kwargs=None):
    """Create filehandlers for a given filetype."""
    filename_iter = self.filename_items_for_filetype(filenames,
                                                     filetype_info)
    if self.filter_filenames:
        # preliminary filter of filenames based on start/end time
        # to reduce the number of files to open
        filename_iter = self.filter_filenames_by_info(filename_iter)
    filehandler_iter = self.new_filehandler_instances(filetype_info,
                                                      filename_iter,
                                                      fh_kwargs=fh_kwargs)
    # final filtering pass uses metadata read from the opened files
    filtered_iter = self.filter_fh_by_metadata(filehandler_iter)
    return list(filtered_iter)
def create_filehandlers(self, filenames, fh_kwargs=None):
    """Organize the filenames into file types and create file handlers."""
    # deduplicate while preserving the caller's ordering
    filenames = list(OrderedDict.fromkeys(filenames))
    logger.debug("Assigning to %s: %s", self.info['name'], filenames)

    self.info.setdefault('filenames', []).extend(filenames)
    filename_set = set(filenames)
    created_fhs = {}
    # load files that we know about by creating the file handlers
    for filetype, filetype_info in self.sorted_filetype_items():
        filehandlers = self.new_filehandlers_for_filetype(filetype_info,
                                                          filename_set,
                                                          fh_kwargs=fh_kwargs)
        # each filename is claimed by at most one file type
        filename_set -= set([fhd.filename for fhd in filehandlers])
        if filehandlers:
            created_fhs[filetype] = filehandlers
            self.file_handlers[filetype] = sorted(
                self.file_handlers.get(filetype, []) + filehandlers,
                key=lambda fhd: (fhd.start_time, fhd.filename))

    # update existing dataset IDs with information from the file handler
    self.update_ds_ids_from_file_handlers()
    # load any additional dataset IDs determined dynamically from the file
    self.add_ds_ids_from_files()

    return created_fhs
def update_ds_ids_from_file_handlers(self):
    """Update DatasetIDs with information from loaded files.

    This is useful, for example, if dataset resolution may change
    depending on what files were loaded.
    """
    for file_handlers in self.file_handlers.values():
        fh = file_handlers[0]
        # update resolution in the dataset IDs for this files resolution
        res = getattr(fh, 'resolution', None)
        if res is None:
            continue

        # iterate a snapshot because entries are replaced in the loop
        for ds_id, ds_info in list(self.ids.items()):
            file_types = ds_info['file_type']
            if not isinstance(file_types, list):
                file_types = [file_types]
            if fh.filetype_info['file_type'] not in file_types:
                continue
            if ds_id.resolution is not None:
                continue
            ds_info['resolution'] = res
            new_id = DatasetID.from_dict(ds_info)
            # re-key the dataset under the resolution-aware ID
            self.ids[new_id] = ds_info
            del self.ids[ds_id]
def add_ds_ids_from_files(self):
    """Check files for more dynamically discovered datasets."""
    for file_handlers in self.file_handlers.values():
        try:
            fh = file_handlers[0]
            avail_ids = fh.available_datasets()
        except NotImplementedError:
            # handler does not support dynamic discovery
            continue

        # dynamically discover other available datasets
        for ds_id, ds_info in avail_ids:
            # don't overwrite an existing dataset
            # especially from the yaml config
            coordinates = ds_info.get('coordinates')
            if isinstance(coordinates, list):
                # xarray doesn't like concatenating attributes that are
                # lists: https://github.com/pydata/xarray/issues/2060
                ds_info['coordinates'] = tuple(ds_info['coordinates'])
            self.ids.setdefault(ds_id, ds_info)
q26971 | FileYAMLReader._load_dataset | train | def _load_dataset(dsid, ds_info, file_handlers, dim='y'):
"""Load only a piece of the dataset."""
slice_list = []
failure = True
for fh in file_handlers:
try:
projectable = fh.get_dataset(dsid, ds_info)
if projectable is not None:
slice_list.append(projectable)
failure = False
except KeyError:
logger.warning("Failed to load {} from {}".format(dsid, fh),
exc_info=True)
if failure:
raise KeyError(
"Could not load {} from any provided files".format(dsid))
if dim not in slice_list[0].dims:
return slice_list[0]
res = xr.concat(slice_list, dim=dim)
combined_info = file_handlers[0].combine_info(
[p.attrs for p in slice_list])
res.attrs = combined_info
return res | python | {
"resource": ""
} |
q26972 | FileYAMLReader._get_coordinates_for_dataset_keys | train | def _get_coordinates_for_dataset_keys(self, dsids):
"""Get all coordinates."""
coordinates = {}
for dsid in dsids:
cids = self._get_coordinates_for_dataset_key(dsid)
coordinates.setdefault(dsid, []).extend(cids)
return coordinates | python | {
"resource": ""
} |
q26973 | FileYAMLReader._load_ancillary_variables | train | def _load_ancillary_variables(self, datasets):
"""Load the ancillary variables of `datasets`."""
all_av_ids = set()
for dataset in datasets.values():
ancillary_variables = dataset.attrs.get('ancillary_variables', [])
if not isinstance(ancillary_variables, (list, tuple, set)):
ancillary_variables = ancillary_variables.split(' ')
av_ids = []
for key in ancillary_variables:
try:
av_ids.append(self.get_dataset_key(key))
except KeyError:
logger.warning("Can't load ancillary dataset %s", str(key))
all_av_ids |= set(av_ids)
dataset.attrs['ancillary_variables'] = av_ids
loadable_av_ids = [av_id for av_id in all_av_ids if av_id not in datasets]
if not all_av_ids:
return
if loadable_av_ids:
self.load(loadable_av_ids, previous_datasets=datasets)
for dataset in datasets.values():
new_vars = []
for av_id in dataset.attrs.get('ancillary_variables', []):
if isinstance(av_id, DatasetID):
new_vars.append(datasets[av_id])
else:
new_vars.append(av_id)
dataset.attrs['ancillary_variables'] = new_vars | python | {
"resource": ""
} |
q26974 | FileYAMLReader.load | train | def load(self, dataset_keys, previous_datasets=None):
"""Load `dataset_keys`.
If `previous_datasets` is provided, do not reload those."""
all_datasets = previous_datasets or DatasetDict()
datasets = DatasetDict()
# Include coordinates in the list of datasets to load
dsids = [self.get_dataset_key(ds_key) for ds_key in dataset_keys]
coordinates = self._get_coordinates_for_dataset_keys(dsids)
all_dsids = list(set().union(*coordinates.values())) + dsids
for dsid in all_dsids:
if dsid in all_datasets:
continue
coords = [all_datasets.get(cid, None)
for cid in coordinates.get(dsid, [])]
ds = self._load_dataset_with_area(dsid, coords)
if ds is not None:
all_datasets[dsid] = ds
if dsid in dsids:
datasets[dsid] = ds
self._load_ancillary_variables(all_datasets)
return datasets | python | {
"resource": ""
} |
q26975 | calibrate_counts | train | def calibrate_counts(array, attributes, index):
"""Calibration for counts channels."""
offset = np.float32(attributes["corrected_counts_offsets"][index])
scale = np.float32(attributes["corrected_counts_scales"][index])
array = (array - offset) * scale
return array | python | {
"resource": ""
} |
q26976 | calibrate_radiance | train | def calibrate_radiance(array, attributes, index):
"""Calibration for radiance channels."""
offset = np.float32(attributes["radiance_offsets"][index])
scale = np.float32(attributes["radiance_scales"][index])
array = (array - offset) * scale
return array | python | {
"resource": ""
} |
q26977 | calibrate_refl | train | def calibrate_refl(array, attributes, index):
"""Calibration for reflective channels."""
offset = np.float32(attributes["reflectance_offsets"][index])
scale = np.float32(attributes["reflectance_scales"][index])
# convert to reflectance and convert from 1 to %
array = (array - offset) * scale * 100
return array | python | {
"resource": ""
} |
q26978 | calibrate_bt | train | def calibrate_bt(array, attributes, index, band_name):
"""Calibration for the emissive channels."""
offset = np.float32(attributes["radiance_offsets"][index])
scale = np.float32(attributes["radiance_scales"][index])
array = (array - offset) * scale
# Planck constant (Joule second)
h__ = np.float32(6.6260755e-34)
# Speed of light in vacuum (meters per second)
c__ = np.float32(2.9979246e+8)
# Boltzmann constant (Joules per Kelvin)
k__ = np.float32(1.380658e-23)
# Derived constants
c_1 = 2 * h__ * c__ * c__
c_2 = (h__ * c__) / k__
# Effective central wavenumber (inverse centimeters)
cwn = np.array([
2.641775E+3, 2.505277E+3, 2.518028E+3, 2.465428E+3,
2.235815E+3, 2.200346E+3, 1.477967E+3, 1.362737E+3,
1.173190E+3, 1.027715E+3, 9.080884E+2, 8.315399E+2,
7.483394E+2, 7.308963E+2, 7.188681E+2, 7.045367E+2],
dtype=np.float32)
# Temperature correction slope (no units)
tcs = np.array([
9.993411E-1, 9.998646E-1, 9.998584E-1, 9.998682E-1,
9.998819E-1, 9.998845E-1, 9.994877E-1, 9.994918E-1,
9.995495E-1, 9.997398E-1, 9.995608E-1, 9.997256E-1,
9.999160E-1, 9.999167E-1, 9.999191E-1, 9.999281E-1],
dtype=np.float32)
# Temperature correction intercept (Kelvin)
tci = np.array([
4.770532E-1, 9.262664E-2, 9.757996E-2, 8.929242E-2,
7.310901E-2, 7.060415E-2, 2.204921E-1, 2.046087E-1,
1.599191E-1, 8.253401E-2, 1.302699E-1, 7.181833E-2,
1.972608E-2, 1.913568E-2, 1.817817E-2, 1.583042E-2],
dtype=np.float32)
# Transfer wavenumber [cm^(-1)] to wavelength [m]
cwn = 1. / (cwn * 100)
# Some versions of the modis files do not contain all the bands.
emmissive_channels = ["20", "21", "22", "23", "24", "25", "27", "28", "29",
"30", "31", "32", "33", "34", "35", "36"]
global_index = emmissive_channels.index(band_name)
cwn = cwn[global_index]
tcs = tcs[global_index]
tci = tci[global_index]
array = c_2 / (cwn * xu.log(c_1 / (1000000 * array * cwn ** 5) + 1))
array = (array - tci) / tcs
return array | python | {
"resource": ""
} |
q26979 | HDFEOSGeoReader.load | train | def load(self, file_key):
"""Load the data."""
var = self.sd.select(file_key)
data = xr.DataArray(from_sds(var, chunks=CHUNK_SIZE),
dims=['y', 'x']).astype(np.float32)
data = data.where(data != var._FillValue)
try:
data = data * np.float32(var.scale_factor)
except AttributeError:
pass
return data | python | {
"resource": ""
} |
q26980 | read_geo | train | def read_geo(fid, key):
"""Read geolocation and related datasets."""
dsid = GEO_NAMES[key.name]
add_epoch = False
if "time" in key.name:
days = fid["/L1C/" + dsid["day"]].value
msecs = fid["/L1C/" + dsid["msec"]].value
data = _form_datetimes(days, msecs)
add_epoch = True
dtype = np.float64
else:
data = fid["/L1C/" + dsid].value
dtype = np.float32
data = xr.DataArray(da.from_array(data, chunks=CHUNK_SIZE),
name=key.name, dims=['y', 'x']).astype(dtype)
if add_epoch:
data.attrs['sensing_time_epoch'] = EPOCH
return data | python | {
"resource": ""
} |
q26981 | _form_datetimes | train | def _form_datetimes(days, msecs):
"""Calculate seconds since EPOCH from days and milliseconds for each of IASI scan."""
all_datetimes = []
for i in range(days.size):
day = int(days[i])
msec = msecs[i]
scanline_datetimes = []
for j in range(int(VALUES_PER_SCAN_LINE / 4)):
usec = 1000 * (j * VIEW_TIME_ADJUSTMENT + msec)
delta = (dt.timedelta(days=day, microseconds=usec))
for k in range(4):
scanline_datetimes.append(delta.total_seconds())
all_datetimes.append(scanline_datetimes)
return np.array(all_datetimes, dtype=np.float64) | python | {
"resource": ""
} |
q26982 | runtime_import | train | def runtime_import(object_path):
"""Import at runtime."""
obj_module, obj_element = object_path.rsplit(".", 1)
loader = __import__(obj_module, globals(), locals(), [str(obj_element)])
return getattr(loader, obj_element) | python | {
"resource": ""
} |
q26983 | get_config | train | def get_config(filename, *search_dirs, **kwargs):
"""Blends the different configs, from package defaults to ."""
config = kwargs.get("config_reader_class", configparser.ConfigParser)()
paths = config_search_paths(filename, *search_dirs)
successes = config.read(reversed(paths))
if successes:
LOG.debug("Read config from %s", str(successes))
return config, successes
LOG.warning("Couldn't file any config file matching %s", filename)
return None, [] | python | {
"resource": ""
} |
q26984 | glob_config | train | def glob_config(pattern, *search_dirs):
"""Return glob results for all possible configuration locations.
Note: This method does not check the configuration "base" directory if the pattern includes a subdirectory.
This is done for performance since this is usually used to find *all* configs for a certain component.
"""
patterns = config_search_paths(pattern, *search_dirs, check_exists=False)
for pattern in patterns:
for path in glob.iglob(pattern):
yield path | python | {
"resource": ""
} |
q26985 | _check_import | train | def _check_import(module_names):
"""Import the specified modules and provide status."""
diagnostics = {}
for module_name in module_names:
try:
__import__(module_name)
res = 'ok'
except ImportError as err:
res = str(err)
diagnostics[module_name] = res
return diagnostics | python | {
"resource": ""
} |
q26986 | check_satpy | train | def check_satpy(readers=None, writers=None, extras=None):
"""Check the satpy readers and writers for correct installation.
Args:
readers (list or None): Limit readers checked to those specified
writers (list or None): Limit writers checked to those specified
extras (list or None): Limit extras checked to those specified
Returns: bool
True if all specified features were successfully loaded.
"""
from satpy.readers import configs_for_reader
from satpy.writers import configs_for_writer
print('Readers')
print('=======')
for reader, res in sorted(check_yaml_configs(configs_for_reader(reader=readers), 'reader').items()):
print(reader + ': ', res)
print()
print('Writers')
print('=======')
for writer, res in sorted(check_yaml_configs(configs_for_writer(writer=writers), 'writer').items()):
print(writer + ': ', res)
print()
print('Extras')
print('======')
module_names = extras if extras is not None else ('cartopy', 'geoviews')
for module_name, res in sorted(_check_import(module_names).items()):
print(module_name + ': ', res)
print() | python | {
"resource": ""
} |
q26987 | ensure_dir | train | def ensure_dir(filename):
"""Checks if the dir of f exists, otherwise create it."""
directory = os.path.dirname(filename)
if directory and not os.path.isdir(directory):
os.makedirs(directory) | python | {
"resource": ""
} |
q26988 | logging_on | train | def logging_on(level=logging.WARNING):
"""Turn logging on.
"""
global _is_logging_on
if not _is_logging_on:
console = logging.StreamHandler()
console.setFormatter(logging.Formatter("[%(levelname)s: %(asctime)s :"
" %(name)s] %(message)s",
'%Y-%m-%d %H:%M:%S'))
console.setLevel(level)
logging.getLogger('').addHandler(console)
_is_logging_on = True
log = logging.getLogger('')
log.setLevel(level)
for h in log.handlers:
h.setLevel(level) | python | {
"resource": ""
} |
q26989 | get_logger | train | def get_logger(name):
"""Return logger with null handler added if needed."""
if not hasattr(logging.Logger, 'trace'):
logging.addLevelName(TRACE_LEVEL, 'TRACE')
def trace(self, message, *args, **kwargs):
if self.isEnabledFor(TRACE_LEVEL):
# Yes, logger takes its '*args' as 'args'.
self._log(TRACE_LEVEL, message, args, **kwargs)
logging.Logger.trace = trace
log = logging.getLogger(name)
if not log.handlers:
log.addHandler(logging.NullHandler())
return log | python | {
"resource": ""
} |
q26990 | lonlat2xyz | train | def lonlat2xyz(lon, lat):
"""Convert lon lat to cartesian."""
lat = xu.deg2rad(lat)
lon = xu.deg2rad(lon)
x = xu.cos(lat) * xu.cos(lon)
y = xu.cos(lat) * xu.sin(lon)
z = xu.sin(lat)
return x, y, z | python | {
"resource": ""
} |
q26991 | xyz2lonlat | train | def xyz2lonlat(x, y, z):
"""Convert cartesian to lon lat."""
lon = xu.rad2deg(xu.arctan2(y, x))
lat = xu.rad2deg(xu.arctan2(z, xu.sqrt(x**2 + y**2)))
return lon, lat | python | {
"resource": ""
} |
q26992 | angle2xyz | train | def angle2xyz(azi, zen):
"""Convert azimuth and zenith to cartesian."""
azi = xu.deg2rad(azi)
zen = xu.deg2rad(zen)
x = xu.sin(zen) * xu.sin(azi)
y = xu.sin(zen) * xu.cos(azi)
z = xu.cos(zen)
return x, y, z | python | {
"resource": ""
} |
q26993 | xyz2angle | train | def xyz2angle(x, y, z):
"""Convert cartesian to azimuth and zenith."""
azi = xu.rad2deg(xu.arctan2(x, y))
zen = 90 - xu.rad2deg(xu.arctan2(z, xu.sqrt(x**2 + y**2)))
return azi, zen | python | {
"resource": ""
} |
q26994 | proj_units_to_meters | train | def proj_units_to_meters(proj_str):
"""Convert projection units from kilometers to meters."""
proj_parts = proj_str.split()
new_parts = []
for itm in proj_parts:
key, val = itm.split('=')
key = key.strip('+')
if key in ['a', 'b', 'h']:
val = float(val)
if val < 6e6:
val *= 1000.
val = '%.3f' % val
if key == 'units' and val == 'km':
continue
new_parts.append('+%s=%s' % (key, val))
return ' '.join(new_parts) | python | {
"resource": ""
} |
q26995 | sunzen_corr_cos | train | def sunzen_corr_cos(data, cos_zen, limit=88., max_sza=95.):
"""Perform Sun zenith angle correction.
The correction is based on the provided cosine of the zenith
angle (``cos_zen``). The correction is limited
to ``limit`` degrees (default: 88.0 degrees). For larger zenith
angles, the correction is the same as at the ``limit`` if ``max_sza``
is `None`. The default behavior is to gradually reduce the correction
past ``limit`` degrees up to ``max_sza`` where the correction becomes
0. Both ``data`` and ``cos_zen`` should be 2D arrays of the same shape.
"""
# Convert the zenith angle limit to cosine of zenith angle
limit_rad = np.deg2rad(limit)
limit_cos = np.cos(limit_rad)
max_sza_rad = np.deg2rad(max_sza) if max_sza is not None else max_sza
# Cosine correction
corr = 1. / cos_zen
if max_sza is not None:
# gradually fall off for larger zenith angle
grad_factor = (np.arccos(cos_zen) - limit_rad) / (max_sza_rad - limit_rad)
# invert the factor so maximum correction is done at `limit` and falls off later
grad_factor = 1. - np.log(grad_factor + 1) / np.log(2)
# make sure we don't make anything negative
grad_factor = grad_factor.clip(0.)
else:
# Use constant value (the limit) for larger zenith angles
grad_factor = 1.
corr = corr.where(cos_zen > limit_cos, grad_factor / limit_cos)
# Force "night" pixels to 0 (where SZA is invalid)
corr = corr.where(cos_zen.notnull(), 0)
return data * corr | python | {
"resource": ""
} |
q26996 | OrderedConfigParser.read | train | def read(self, filename):
"""Reads config file
"""
try:
conf_file = open(filename, 'r')
config = conf_file.read()
config_keys = re.findall(r'\[.*\]', config)
self.section_keys = [key[1:-1] for key in config_keys]
except IOError as e:
# Pass if file not found
if e.errno != 2:
raise
return self.config_parser.read(filename) | python | {
"resource": ""
} |
q26997 | SCMIFileHandler._get_sensor | train | def _get_sensor(self):
"""Determine the sensor for this file."""
# sometimes Himawari-8 (or 9) data is stored in SCMI format
is_h8 = 'H8' in self.platform_name
is_h9 = 'H9' in self.platform_name
is_ahi = is_h8 or is_h9
return 'ahi' if is_ahi else 'abi' | python | {
"resource": ""
} |
q26998 | SCMIFileHandler._get_cf_grid_mapping_var | train | def _get_cf_grid_mapping_var(self):
"""Figure out which grid mapping should be used"""
gmaps = ['fixedgrid_projection', 'goes_imager_projection',
'lambert_projection', 'polar_projection',
'mercator_projection']
if 'grid_mapping' in self.filename_info:
gmaps = [self.filename_info.get('grid_mapping')] + gmaps
for grid_mapping in gmaps:
if grid_mapping in self.nc:
return self.nc[grid_mapping]
raise KeyError("Can't find grid mapping variable in SCMI file") | python | {
"resource": ""
} |
q26999 | SCMIFileHandler._get_proj4_name | train | def _get_proj4_name(self, projection):
"""Map CF projection name to PROJ.4 name."""
gmap_name = projection.attrs['grid_mapping_name']
proj = {
'geostationary': 'geos',
'lambert_conformal_conic': 'lcc',
'polar_stereographic': 'stere',
'mercator': 'merc',
}.get(gmap_name, gmap_name)
return proj | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.