Dataset schema (column name · type · value range):
  _id               string (2–7 chars)
  title             string (1–88 chars)
  partition         string (3 classes)
  text              string (75–19.8k chars)
  language          string (1 class)
  meta_information  dict
q27100
DependencyTree._find_reader_dataset
train
def _find_reader_dataset(self, dataset_key, **dfilter):
    """Attempt to find a `DatasetID` in the available readers.

    Args:
        dataset_key (str, float, DatasetID): Dataset name, wavelength,
            or a combination of `DatasetID` parameters to use in
            searching for the dataset from the available readers.
        **dfilter (list or str): `DatasetID` parameters besides `name`
            and `wavelength` to use to filter the available datasets.
            Passed directly to `get_dataset_key` of the readers, see
            that method for more information.

    """
    too_many = False
    for reader_name, reader_instance in self.readers.items():
        try:
            ds_id = reader_instance.get_dataset_key(dataset_key, **dfilter)
        except TooManyResults:
            LOG.trace("Too many datasets matching key {} in reader {}".format(dataset_key, reader_name))
            too_many = True
            continue
        except KeyError:
            LOG.trace("Can't find dataset %s in reader %s", str(dataset_key), reader_name)
            continue
        LOG.trace("Found {} in reader {} when asking for {}".format(str(ds_id), reader_name, repr(dataset_key)))
        try:
            # now that we know we have the exact DatasetID see if we have
            # already created a Node for it
            return self.getitem(ds_id)
        except KeyError:
            # we haven't created a node yet, create it now
            return Node(ds_id, {'reader_name': reader_name})
    if too_many:
        raise TooManyResults("Too many keys matching: {}".format(dataset_key))
python
{ "resource": "" }
q27101
DependencyTree._get_compositor_prereqs
train
def _get_compositor_prereqs(self, parent, prereq_names, skip=False, **dfilter):
    """Determine prerequisite Nodes for a composite.

    Args:
        parent (Node): Compositor node to add these prerequisites under
        prereq_names (sequence): Strings (names), floats (wavelengths),
            or DatasetIDs to analyze.
        skip (bool, optional): If True, prerequisites are considered
            optional if they can't be found and a debug message is
            logged. If False (default), the missing prerequisites are
            not logged and are expected to be handled by the caller.

    """
    prereq_ids = []
    unknowns = set()
    for prereq in prereq_names:
        n, u = self._find_dependencies(prereq, **dfilter)
        if u:
            unknowns.update(u)
            if skip:
                u_str = ", ".join([str(x) for x in u])
                LOG.debug('Skipping optional %s: Unknown dataset %s',
                          str(prereq), u_str)
        else:
            prereq_ids.append(n)
            self.add_child(parent, n)
    return prereq_ids, unknowns
python
{ "resource": "" }
q27102
DependencyTree._find_compositor
train
def _find_compositor(self, dataset_key, **dfilter):
    """Find the compositor object for the given dataset_key."""
    # NOTE: This function can not find a modifier that performs
    # one or more modifications
    # if it has modifiers see if we can find the unmodified version first
    src_node = None
    if isinstance(dataset_key, DatasetID) and dataset_key.modifiers:
        new_prereq = DatasetID(
            *dataset_key[:-1] + (dataset_key.modifiers[:-1],))
        src_node, u = self._find_dependencies(new_prereq, **dfilter)
        # Update the requested DatasetID with information from the src
        if src_node is not None:
            dataset_key = self._update_modifier_key(dataset_key,
                                                    src_node.name)
        if u:
            return None, u

    try:
        compositor = self.get_compositor(dataset_key)
    except KeyError:
        raise KeyError("Can't find anything called {}".format(str(dataset_key)))
    dataset_key = compositor.id
    root = Node(dataset_key, data=(compositor, [], []))
    if src_node is not None:
        self.add_child(root, src_node)
        root.data[1].append(src_node)

    # 2.1 get the prerequisites
    LOG.trace("Looking for composite prerequisites for: {}".format(dataset_key))
    prereqs, unknowns = self._get_compositor_prereqs(
        root, compositor.attrs['prerequisites'], **dfilter)
    if unknowns:
        # Should we remove all of the unknown nodes that were found?
        # If there is an unknown prerequisite are we in trouble?
        return None, unknowns
    root.data[1].extend(prereqs)

    LOG.trace("Looking for optional prerequisites for: {}".format(dataset_key))
    optional_prereqs, _ = self._get_compositor_prereqs(
        root, compositor.attrs['optional_prerequisites'], skip=True, **dfilter)
    root.data[2].extend(optional_prereqs)
    return root, set()
python
{ "resource": "" }
q27103
DependencyTree.find_dependencies
train
def find_dependencies(self, dataset_keys, **dfilter):
    """Create the dependency tree.

    Args:
        dataset_keys (iterable): Strings or DatasetIDs to find dependencies for
        **dfilter (dict): Additional filter parameters. See
            `satpy.readers.get_key` for more details.

    Returns:
        (Node, set): Root node of the dependency tree and a set of unknown datasets

    """
    unknown_datasets = set()
    for key in dataset_keys.copy():
        n, unknowns = self._find_dependencies(key, **dfilter)
        dataset_keys.discard(key)  # remove old non-DatasetID
        if n is not None:
            dataset_keys.add(n.name)  # add equivalent DatasetID
        if unknowns:
            unknown_datasets.update(unknowns)
            continue
        self.add_child(self, n)
    return unknown_datasets
python
{ "resource": "" }
q27104
average_datetimes
train
def average_datetimes(dt_list):
    """Average a series of datetime objects.

    .. note::

        This function assumes all datetime objects are naive and in the
        same time zone (UTC).

    Args:
        dt_list (iterable): Datetime objects to average

    Returns: Average datetime as a datetime object

    """
    if sys.version_info < (3, 3):
        # timestamp added in python 3.3
        import time

        def timestamp_func(dt):
            return time.mktime(dt.timetuple())
    else:
        timestamp_func = datetime.timestamp

    total = [timestamp_func(dt) for dt in dt_list]
    return datetime.fromtimestamp(sum(total) / len(total))
python
{ "resource": "" }
q27105
combine_metadata
train
def combine_metadata(*metadata_objects, **kwargs):
    """Combine the metadata of two or more Datasets.

    If any keys are not equal or do not exist in all provided dictionaries
    then they are not included in the returned dictionary.
    By default any keys with the word 'time' in them and consisting
    of datetime objects will be averaged. This is to handle cases where
    data were observed at almost the same time but not exactly.

    Args:
        *metadata_objects: MetadataObject or dict objects to combine
        average_times (bool): Average any keys with 'time' in the name

    Returns:
        dict: the combined metadata

    """
    average_times = kwargs.get('average_times', True)  # python 2 compatibility (no kwarg after *args)
    shared_keys = None
    info_dicts = []
    # grab all of the dictionary objects provided and make a set of the shared keys
    for metadata_object in metadata_objects:
        if isinstance(metadata_object, dict):
            metadata_dict = metadata_object
        elif hasattr(metadata_object, "attrs"):
            metadata_dict = metadata_object.attrs
        else:
            continue
        info_dicts.append(metadata_dict)

        if shared_keys is None:
            shared_keys = set(metadata_dict.keys())
        else:
            shared_keys &= set(metadata_dict.keys())

    # combine all of the dictionaries
    shared_info = {}
    for k in shared_keys:
        values = [nfo[k] for nfo in info_dicts]
        any_arrays = any([isinstance(val, np.ndarray) for val in values])
        if any_arrays:
            if all(np.all(val == values[0]) for val in values[1:]):
                shared_info[k] = values[0]
        elif 'time' in k and isinstance(values[0], datetime) and average_times:
            shared_info[k] = average_datetimes(values)
        elif all(val == values[0] for val in values[1:]):
            shared_info[k] = values[0]

    return shared_info
python
{ "resource": "" }
q27106
DatasetID.wavelength_match
train
def wavelength_match(a, b):
    """Return if two wavelengths are equal.

    Args:
        a (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl
        b (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl

    """
    # same types, or two numbers (e.g. int vs float): compare directly.
    # The original parenthesization compared type(a) against the truthy
    # `type(b)`, which silently skipped the numbers case.
    if type(a) == type(b) or (isinstance(a, numbers.Number) and
                              isinstance(b, numbers.Number)):
        return a == b
    elif a is None or b is None:
        return False
    elif isinstance(a, (list, tuple)) and len(a) == 3:
        return a[0] <= b <= a[2]
    elif isinstance(b, (list, tuple)) and len(b) == 3:
        return b[0] <= a <= b[2]
    else:
        raise ValueError("Can only compare wavelengths of length 1 or 3")
python
{ "resource": "" }
q27107
DatasetID._comparable
train
def _comparable(self):
    """Get a comparable version of the DatasetID.

    Without this DatasetIDs often raise an exception when compared in
    Python 3 due to None not being comparable with other types.
    """
    return self._replace(
        name='' if self.name is None else self.name,
        wavelength=tuple() if self.wavelength is None else self.wavelength,
        resolution=0 if self.resolution is None else self.resolution,
        polarization='' if self.polarization is None else self.polarization,
        calibration='' if self.calibration is None else self.calibration,
    )
python
{ "resource": "" }
q27108
DatasetID.from_dict
train
def from_dict(cls, d, **kwargs):
    """Convert a dict to an ID."""
    args = []
    for k in DATASET_KEYS:
        val = kwargs.get(k, d.get(k))
        # force modifiers to tuple
        if k == 'modifiers' and val is not None:
            val = tuple(val)
        args.append(val)
    return cls(*args)
python
{ "resource": "" }
q27109
DatasetID.to_dict
train
def to_dict(self, trim=True):
    """Convert the ID to a dict."""
    if trim:
        return self._to_trimmed_dict()
    else:
        return dict(zip(DATASET_KEYS, self))
python
{ "resource": "" }
q27110
NUCAPSFileHandler.sensor_names
train
def sensor_names(self):
    """Return standard sensor or instrument name for the file's data."""
    res = self['/attr/instrument_name']
    if isinstance(res, np.ndarray):
        res = str(res.astype(str))
    res = [x.strip() for x in res.split(',')]
    if len(res) == 1:
        return res[0]
    return res
python
{ "resource": "" }
q27111
NUCAPSFileHandler.get_shape
train
def get_shape(self, ds_id, ds_info):
    """Return data array shape for item specified."""
    var_path = ds_info.get('file_key', '{}'.format(ds_id.name))
    if var_path + '/shape' not in self:
        # loading a scalar value
        shape = 1
    else:
        shape = self[var_path + "/shape"]
        if "index" in ds_info:
            shape = shape[1:]
        if "pressure_index" in ds_info:
            shape = shape[:-1]
    return shape
python
{ "resource": "" }
q27112
NUCAPSFileHandler.get_dataset
train
def get_dataset(self, dataset_id, ds_info):
    """Load data array and metadata for specified dataset."""
    var_path = ds_info.get('file_key', '{}'.format(dataset_id.name))
    metadata = self.get_metadata(dataset_id, ds_info)
    valid_min, valid_max = self[var_path + '/attr/valid_range']
    fill_value = self.get(var_path + '/attr/_FillValue')
    d_tmp = self[var_path]

    if "index" in ds_info:
        d_tmp = d_tmp[int(ds_info["index"])]
    if "pressure_index" in ds_info:
        d_tmp = d_tmp[..., int(ds_info["pressure_index"])]
        # this is a pressure based field
        # include surface_pressure as metadata
        sp = self['Surface_Pressure']
        if 'surface_pressure' in ds_info:
            ds_info['surface_pressure'] = xr.concat((ds_info['surface_pressure'], sp))
        else:
            ds_info['surface_pressure'] = sp
        # include all the pressure levels
        ds_info.setdefault('pressure_levels', self['Pressure'][0])
    data = d_tmp

    if valid_min is not None and valid_max is not None:
        # the original .cfg/INI based reader only checked valid_max
        data = data.where((data <= valid_max))  # | (data >= valid_min))
    if fill_value is not None:
        data = data.where(data != fill_value)

    data.attrs.update(metadata)
    return data
python
{ "resource": "" }
q27113
NUCAPSReader.load_ds_ids_from_config
train
def load_ds_ids_from_config(self):
    """Convert config dataset entries to DatasetIDs.

    Special handling is done to provide level specific datasets
    for any pressure based datasets. For example, a dataset is
    added for each pressure level of 'Temperature' with each new
    dataset being named 'Temperature_Xmb' where X is the pressure level.
    """
    super(NUCAPSReader, self).load_ds_ids_from_config()
    for ds_id in list(self.ids.keys()):
        ds_info = self.ids[ds_id]
        if ds_info.get('pressure_based', False):
            for idx, lvl_num in enumerate(ALL_PRESSURE_LEVELS):
                if lvl_num < 5.0:
                    suffix = "_{:0.03f}mb".format(lvl_num)
                else:
                    suffix = "_{:0.0f}mb".format(lvl_num)

                new_info = ds_info.copy()
                new_info['pressure_level'] = lvl_num
                new_info['pressure_index'] = idx
                new_info['file_key'] = '{}'.format(ds_id.name)
                new_info['name'] = ds_id.name + suffix
                new_ds_id = ds_id._replace(name=new_info['name'])
                new_info['id'] = new_ds_id
                self.ids[new_ds_id] = new_info
                self.pressure_dataset_names[ds_id.name].append(new_info['name'])
python
{ "resource": "" }
q27114
AHIHSDFileHandler.scheduled_time
train
def scheduled_time(self):
    """Time this band was scheduled to be recorded."""
    timeline = "{:04d}".format(self.basic_info['observation_timeline'][0])
    return self.start_time.replace(hour=int(timeline[:2]), minute=int(timeline[2:4]),
                                   second=0, microsecond=0)
python
{ "resource": "" }
q27115
AHIHSDFileHandler._check_fpos
train
def _check_fpos(self, fp_, fpos, offset, block):
    """Check that the file position matches the expected block size."""
    if fp_.tell() + offset != fpos:
        warnings.warn("Actual " + block + " header size does not match expected")
    return
python
{ "resource": "" }
q27116
AHIHSDFileHandler._read_data
train
def _read_data(self, fp_, header):
    """Read data block."""
    nlines = int(header["block2"]['number_of_lines'][0])
    ncols = int(header["block2"]['number_of_columns'][0])
    return da.from_array(np.memmap(self.filename,
                                   offset=fp_.tell(),
                                   dtype='<u2',
                                   shape=(nlines, ncols),
                                   mode='r'),
                         chunks=CHUNK_SIZE)
python
{ "resource": "" }
q27117
AHIHSDFileHandler._mask_invalid
train
def _mask_invalid(self, data, header):
    """Mask invalid data."""
    invalid = da.logical_or(data == header['block5']["count_value_outside_scan_pixels"][0],
                            data == header['block5']["count_value_error_pixels"][0])
    return da.where(invalid, np.float32(np.nan), data)
python
{ "resource": "" }
q27118
AHIHSDFileHandler.convert_to_radiance
train
def convert_to_radiance(self, data):
    """Calibrate to radiance."""
    bnum = self._header["block5"]['band_number'][0]
    # Check calibration mode and select corresponding coefficients
    if self.calib_mode == "UPDATE" and bnum < 7:
        gain = self._header['calibration']["cali_gain_count2rad_conversion"][0]
        offset = self._header['calibration']["cali_offset_count2rad_conversion"][0]
        if gain == 0 and offset == 0:
            logger.info(
                "No valid updated coefficients, fall back to default values.")
            gain = self._header["block5"]["gain_count2rad_conversion"][0]
            offset = self._header["block5"]["offset_count2rad_conversion"][0]
    else:
        gain = self._header["block5"]["gain_count2rad_conversion"][0]
        offset = self._header["block5"]["offset_count2rad_conversion"][0]

    return (data * gain + offset).clip(0)
python
{ "resource": "" }
q27119
AHIHSDFileHandler._ir_calibrate
train
def _ir_calibrate(self, data):
    """IR calibration."""
    # No radiance -> no temperature
    data = da.where(data == 0, np.float32(np.nan), data)

    cwl = self._header['block5']["central_wave_length"][0] * 1e-6
    c__ = self._header['calibration']["speed_of_light"][0]
    h__ = self._header['calibration']["planck_constant"][0]
    k__ = self._header['calibration']["boltzmann_constant"][0]
    a__ = (h__ * c__) / (k__ * cwl)

    b__ = ((2 * h__ * c__ ** 2) / (data * 1.0e6 * cwl ** 5)) + 1

    Te_ = a__ / da.log(b__)

    c0_ = self._header['calibration']["c0_rad2tb_conversion"][0]
    c1_ = self._header['calibration']["c1_rad2tb_conversion"][0]
    c2_ = self._header['calibration']["c2_rad2tb_conversion"][0]

    return (c0_ + c1_ * Te_ + c2_ * Te_ ** 2).clip(0)
python
{ "resource": "" }
q27120
process_field
train
def process_field(elt, ascii=False):
    """Process a 'field' tag."""
    # NOTE: if there is a variable defined in this field and it is different
    # from the default, we could change the value and restart.
    scale = np.uint8(1)
    if elt.get("type") == "bitfield" and not ascii:
        current_type = ">u" + str(int(elt.get("length")) // 8)
        scale = np.dtype(current_type).type(1)
    elif elt.get("length") is not None:
        if ascii:
            add = 33
        else:
            add = 0
        current_type = "S" + str(int(elt.get("length")) + add)
    else:
        current_type = TYPEC[elt.get("type")]

    try:
        scale = (10 /
                 float(elt.get("scaling-factor", "10").replace("^", "e")))
    except ValueError:
        scale = (10 / np.array(
            elt.get("scaling-factor").replace("^", "e").split(","),
            dtype=float))  # np.float is deprecated; the builtin float is equivalent

    return ((elt.get("name"), current_type, scale))
python
{ "resource": "" }
q27121
process_array
train
def process_array(elt, ascii=False):
    """Process an 'array' tag."""
    del ascii
    chld = elt.getchildren()
    if len(chld) > 1:
        raise ValueError()
    chld = chld[0]
    try:
        name, current_type, scale = CASES[chld.tag](chld)
        size = None
    except ValueError:
        name, current_type, size, scale = CASES[chld.tag](chld)
    del name
    myname = elt.get("name") or elt.get("label")
    if elt.get("length").startswith("$"):
        length = int(VARIABLES[elt.get("length")[1:]])
    else:
        length = int(elt.get("length"))
    if size is not None:
        return (myname, current_type, (length, ) + size, scale)
    else:
        return (myname, current_type, (length, ), scale)
python
{ "resource": "" }
q27122
parse_format
train
def parse_format(xml_file):
    """Parse the xml file to create types, scaling factor types, and scales."""
    tree = ElementTree()
    tree.parse(xml_file)
    for param in tree.find("parameters").getchildren():
        VARIABLES[param.get("name")] = param.get("value")

    types_scales = {}

    for prod in tree.find("product"):
        ascii = (prod.tag in ["mphr", "sphr"])
        res = []
        for i in prod:
            lres = CASES[i.tag](i, ascii)
            if lres is not None:
                res.append(lres)
        types_scales[(prod.tag, int(prod.get("subclass")))] = res

    types = {}
    stypes = {}
    scales = {}

    for key, val in types_scales.items():
        types[key] = to_dtype(val)
        stypes[key] = to_scaled_dtype(val)
        scales[key] = to_scales(val)

    return types, stypes, scales
python
{ "resource": "" }
q27123
_apply_scales
train
def _apply_scales(array, scales, dtype):
    """Apply scales to the array."""
    new_array = np.empty(array.shape, dtype)
    for i in array.dtype.names:
        try:
            new_array[i] = array[i] * scales[i]
        except TypeError:
            if np.all(scales[i] == 1):
                new_array[i] = array[i]
            else:
                raise
    return new_array
python
{ "resource": "" }
q27124
get_coeffs
train
def get_coeffs(page):
    '''Parse coefficients from the page.'''
    coeffs = {}
    coeffs['datetime'] = []
    coeffs['slope1'] = []
    coeffs['intercept1'] = []
    coeffs['slope2'] = []
    coeffs['intercept2'] = []

    slope1_idx, intercept1_idx, slope2_idx, intercept2_idx = \
        None, None, None, None
    date_idx = 0
    for row in page.lower().split('\n'):
        row = row.split()
        if len(row) == 0:
            continue
        if row[0] == 'update':
            # Get the column indices from the header line
            slope1_idx = row.index('slope_lo')
            intercept1_idx = row.index('int_lo')
            slope2_idx = row.index('slope_hi')
            intercept2_idx = row.index('int_hi')
            continue

        if slope1_idx is None:
            continue

        # In some cases the fields are connected, skip those rows
        if max([slope1_idx, intercept1_idx,
                slope2_idx, intercept2_idx]) >= len(row):
            continue

        try:
            dat = dt.datetime.strptime(row[date_idx], "%m/%d/%Y")
        except ValueError:
            continue

        coeffs['datetime'].append([dat.year, dat.month, dat.day])
        coeffs['slope1'].append(float(row[slope1_idx]))
        coeffs['intercept1'].append(float(row[intercept1_idx]))
        coeffs['slope2'].append(float(row[slope2_idx]))
        coeffs['intercept2'].append(float(row[intercept2_idx]))

    return coeffs
python
{ "resource": "" }
q27125
get_all_coeffs
train
def get_all_coeffs():
    '''Get all available calibration coefficients for the satellites.'''
    coeffs = {}

    for platform in URLS.keys():
        if platform not in coeffs:
            coeffs[platform] = {}
        for chan in URLS[platform].keys():
            url = URLS[platform][chan]
            print(url)  # Python 3 print function; original used a Python 2 print statement
            page = get_page(url)
            coeffs[platform][chan] = get_coeffs(page)

    return coeffs
python
{ "resource": "" }
q27126
save_coeffs
train
def save_coeffs(coeffs, out_dir=''):
    '''Save calibration coefficients to HDF5 files.'''
    for platform in coeffs.keys():
        fname = os.path.join(out_dir, "%s_calibration_data.h5" % platform)
        fid = h5py.File(fname, 'w')

        for chan in coeffs[platform].keys():
            fid.create_group(chan)
            fid[chan]['datetime'] = coeffs[platform][chan]['datetime']
            fid[chan]['slope1'] = coeffs[platform][chan]['slope1']
            fid[chan]['intercept1'] = coeffs[platform][chan]['intercept1']
            fid[chan]['slope2'] = coeffs[platform][chan]['slope2']
            fid[chan]['intercept2'] = coeffs[platform][chan]['intercept2']

        fid.close()
        # Python 3 print function; original used a Python 2 print statement
        print("Calibration coefficients saved for %s" % platform)
python
{ "resource": "" }
q27127
main
train
def main():
    '''Create calibration coefficient files for AVHRR.'''
    out_dir = sys.argv[1]
    coeffs = get_all_coeffs()
    save_coeffs(coeffs, out_dir=out_dir)
python
{ "resource": "" }
q27128
NcNWCSAF.remove_timedim
train
def remove_timedim(self, var):
    """Remove time dimension from dataset."""
    if self.pps and var.dims[0] == 'time':
        data = var[0, :, :]
        data.attrs = var.attrs
        var = data
    return var
python
{ "resource": "" }
q27129
NcNWCSAF.scale_dataset
train
def scale_dataset(self, dsid, variable, info):
    """Scale the data set, applying the attributes from the netCDF file."""
    variable = remove_empties(variable)
    scale = variable.attrs.get('scale_factor', np.array(1))
    offset = variable.attrs.get('add_offset', np.array(0))
    if np.issubdtype((scale + offset).dtype, np.floating) or \
            np.issubdtype(variable.dtype, np.floating):
        if '_FillValue' in variable.attrs:
            variable = variable.where(
                variable != variable.attrs['_FillValue'])
            variable.attrs['_FillValue'] = np.nan
        if 'valid_range' in variable.attrs:
            variable = variable.where(
                variable <= variable.attrs['valid_range'][1])
            variable = variable.where(
                variable >= variable.attrs['valid_range'][0])
        if 'valid_max' in variable.attrs:
            variable = variable.where(
                variable <= variable.attrs['valid_max'])
        if 'valid_min' in variable.attrs:
            variable = variable.where(
                variable >= variable.attrs['valid_min'])
    attrs = variable.attrs
    variable = variable * scale + offset
    variable.attrs = attrs

    variable.attrs.update({'platform_name': self.platform_name,
                           'sensor': self.sensor})

    variable.attrs.setdefault('units', '1')

    ancillary_names = variable.attrs.get('ancillary_variables', '')
    try:
        variable.attrs['ancillary_variables'] = ancillary_names.split()
    except AttributeError:
        pass

    if 'palette_meanings' in variable.attrs:
        variable.attrs['palette_meanings'] = [
            int(val) for val in variable.attrs['palette_meanings'].split()]
        if variable.attrs['palette_meanings'][0] == 1:
            variable.attrs['palette_meanings'] = [0] + variable.attrs['palette_meanings']
            variable = xr.DataArray(da.vstack((np.array(variable.attrs['fill_value_color']),
                                               variable.data)),
                                    coords=variable.coords, dims=variable.dims,
                                    attrs=variable.attrs)

        val, idx = np.unique(variable.attrs['palette_meanings'], return_index=True)
        variable.attrs['palette_meanings'] = val
        variable = variable[idx]

    if 'standard_name' in info:
        variable.attrs.setdefault('standard_name', info['standard_name'])

    if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti':
        # pps 2014 valid range and palette don't match
        variable.attrs['valid_range'] = (0., 9000.)
    if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti_pal':
        # pps 2014 palette has the nodata color (black) first
        variable = variable[1:, :]

    return variable
python
{ "resource": "" }
q27130
NcNWCSAF.get_area_def
train
def get_area_def(self, dsid):
    """Get the area definition of the datasets in the file.

    Only applicable for MSG products!
    """
    if self.pps:
        # PPS:
        raise NotImplementedError

    if dsid.name.endswith('_pal'):
        raise NotImplementedError

    proj_str, area_extent = self._get_projection()

    nlines, ncols = self.nc[dsid.name].shape

    area = get_area_def('some_area_name',
                        "On-the-fly area",
                        'geosmsg',
                        proj_str,
                        ncols,
                        nlines,
                        area_extent)

    return area
python
{ "resource": "" }
q27131
NcNWCSAF.end_time
train
def end_time(self):
    """Return the end time of the object."""
    try:
        # MSG:
        try:
            return datetime.strptime(self.nc.attrs['time_coverage_end'],
                                     '%Y-%m-%dT%H:%M:%SZ')
        except TypeError:
            return datetime.strptime(self.nc.attrs['time_coverage_end'].astype(str),
                                     '%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        # PPS:
        return datetime.strptime(self.nc.attrs['time_coverage_end'],
                                 '%Y%m%dT%H%M%S%fZ')
python
{ "resource": "" }
q27132
NcNWCSAF._get_projection
train
def _get_projection(self):
    """Get projection from the NetCDF4 attributes."""
    try:
        proj_str = self.nc.attrs['gdal_projection']
    except TypeError:
        proj_str = self.nc.attrs['gdal_projection'].decode()

    # Check the a/b/h units
    radius_a = proj_str.split('+a=')[-1].split()[0]
    if float(radius_a) > 10e3:
        units = 'm'
        scale = 1.0
    else:
        units = 'km'
        scale = 1e3

    if 'units' not in proj_str:
        proj_str = proj_str + ' +units=' + units

    area_extent = (float(self.nc.attrs['gdal_xgeo_up_left']) / scale,
                   float(self.nc.attrs['gdal_ygeo_low_right']) / scale,
                   float(self.nc.attrs['gdal_xgeo_low_right']) / scale,
                   float(self.nc.attrs['gdal_ygeo_up_left']) / scale)

    return proj_str, area_extent
python
{ "resource": "" }
q27133
get_geotiff_area_def
train
def get_geotiff_area_def(filename, crs):
    """Read area definition from a geotiff."""
    from osgeo import gdal
    from pyresample.geometry import AreaDefinition
    from pyresample.utils import proj4_str_to_dict

    fid = gdal.Open(filename)
    geo_transform = fid.GetGeoTransform()
    pcs_id = fid.GetProjection().split('"')[1]
    min_x = geo_transform[0]
    max_y = geo_transform[3]
    x_size = fid.RasterXSize
    y_size = fid.RasterYSize
    max_x = min_x + geo_transform[1] * x_size
    min_y = max_y + geo_transform[5] * y_size
    area_extent = [min_x, min_y, max_x, max_y]
    area_def = AreaDefinition('geotiff_area', pcs_id, pcs_id,
                              proj4_str_to_dict(crs),
                              x_size, y_size, area_extent)
    return area_def
python
{ "resource": "" }
q27134
mask_image_data
train
def mask_image_data(data):
    """Mask image data if alpha channel is present."""
    if data.bands.size in (2, 4):
        if not np.issubdtype(data.dtype, np.integer):
            raise ValueError("Only integer datatypes can be used as a mask.")
        mask = data.data[-1, :, :] == np.iinfo(data.dtype).min
        data = data.astype(np.float64)
        masked_data = da.stack([da.where(mask, np.nan, data.data[i, :, :])
                                for i in range(data.shape[0])])
        data.data = masked_data
        data = data.sel(bands=BANDS[data.bands.size - 1])

    return data
python
{ "resource": "" }
q27135
GenericImageFileHandler.read
train
def read(self):
    """Read the image."""
    data = xr.open_rasterio(self.finfo["filename"],
                            chunks=(1, CHUNK_SIZE, CHUNK_SIZE))
    attrs = data.attrs.copy()

    # Create area definition
    if hasattr(data, 'crs'):
        self.area = self.get_geotiff_area_def(data.crs)

    # Rename to Satpy convention
    data = data.rename({'band': 'bands'})

    # Rename bands to [R, G, B, A], or a subset of those
    data['bands'] = BANDS[data.bands.size]

    # Mask data if alpha channel is present
    try:
        data = mask_image_data(data)
    except ValueError as err:
        logger.warning(err)

    data.attrs = attrs
    self.file_content['image'] = data
python
{ "resource": "" }
q27136
GRIBFileHandler.get_area_def
train
def get_area_def(self, dsid):
    """Get area definition for message.

    If latlong grid then convert to valid eqc grid.
    """
    msg = self._get_message(self._msg_datasets[dsid])
    try:
        return self._area_def_from_msg(msg)
    except (RuntimeError, KeyError):
        raise RuntimeError("Unknown GRIB projection information")
python
{ "resource": "" }
q27137
GRIBFileHandler.get_dataset
train
def get_dataset(self, dataset_id, ds_info):
    """Read a GRIB message into an xarray DataArray."""
    msg = self._get_message(ds_info)
    ds_info = self.get_metadata(msg, ds_info)
    fill = msg['missingValue']
    data = msg.values.astype(np.float32)
    if msg.valid_key('jScansPositively') and msg['jScansPositively'] == 1:
        data = data[::-1]

    if isinstance(data, np.ma.MaskedArray):
        data = data.filled(np.nan)
        data = da.from_array(data, chunks=CHUNK_SIZE)
    else:
        data[data == fill] = np.nan
        data = da.from_array(data, chunks=CHUNK_SIZE)

    return xr.DataArray(data, attrs=ds_info, dims=('y', 'x'))
python
{ "resource": "" }
q27138
stack
train
def stack(datasets):
    """First dataset at the bottom."""
    base = datasets[0].copy()
    for dataset in datasets[1:]:
        base = base.where(dataset.isnull(), dataset)
    return base
python
{ "resource": "" }
q27139
timeseries
train
def timeseries(datasets):
    """Expand each dataset with a time dimension and concatenate along it."""
    expanded_ds = []
    for ds in datasets:
        tmp = ds.expand_dims("time")
        tmp.coords["time"] = pd.DatetimeIndex([ds.attrs["start_time"]])
        expanded_ds.append(tmp)

    res = xr.concat(expanded_ds, dim="time")
    res.attrs = combine_metadata(*[x.attrs for x in expanded_ds])
    return res
python
{ "resource": "" }
q27140
_SceneGenerator._create_cached_iter
train
def _create_cached_iter(self):
    """Iterate over the provided scenes, caching them for later."""
    for scn in self._scene_gen:
        self._scene_cache.append(scn)
        yield scn
python
{ "resource": "" }
q27141
MultiScene.from_files
train
def from_files(cls, files_to_sort, reader=None, **kwargs):
    """Create multiple Scene objects from multiple files.

    This uses the :func:`satpy.readers.group_files` function to group
    files. See this function for more details on possible keyword
    arguments.

    .. versionadded:: 0.12

    """
    from satpy.readers import group_files
    file_groups = group_files(files_to_sort, reader=reader, **kwargs)
    scenes = (Scene(filenames=fg) for fg in file_groups)
    return cls(scenes)
python
{ "resource": "" }
q27142
MultiScene.scenes
train
def scenes(self):
    """Get list of Scene objects contained in this MultiScene.

    .. note::

        If the Scenes contained in this object are stored in a
        generator (not list or tuple) then accessing this property
        will load/iterate through the generator, possibly consuming it.

    """
    if self.is_generator:
        log.debug("Forcing iteration of generator-like object of Scenes")
        self._scenes = list(self._scenes)
    return self._scenes
python
{ "resource": "" }
q27143
MultiScene.loaded_dataset_ids
train
def loaded_dataset_ids(self):
    """Union of all Dataset IDs loaded by all children."""
    return set(ds_id for scene in self.scenes for ds_id in scene.keys())
python
{ "resource": "" }
q27144
MultiScene.shared_dataset_ids
train
def shared_dataset_ids(self):
    """Dataset IDs shared by all children."""
    shared_ids = set(self.scenes[0].keys())
    for scene in self.scenes[1:]:
        shared_ids &= set(scene.keys())
    return shared_ids
python
{ "resource": "" }
q27145
MultiScene._all_same_area
train
def _all_same_area(self, dataset_ids):
    """Return True if all areas for the provided IDs are equal."""
    all_areas = []
    for ds_id in dataset_ids:
        for scn in self.scenes:
            ds = scn.get(ds_id)
            if ds is None:
                continue
            all_areas.append(ds.attrs.get('area'))
    all_areas = [area for area in all_areas if area is not None]
    return all(all_areas[0] == area for area in all_areas[1:])
python
{ "resource": "" }
q27146
MultiScene.load
train
def load(self, *args, **kwargs):
    """Load the required datasets from the multiple scenes."""
    self._generate_scene_func(self._scenes, 'load', False, *args, **kwargs)
python
{ "resource": "" }
q27147
MultiScene.crop
train
def crop(self, *args, **kwargs):
    """Crop the multiscene and return a new cropped multiscene."""
    return self._generate_scene_func(self._scenes, 'crop', True, *args, **kwargs)
python
{ "resource": "" }
q27148
MultiScene.resample
train
def resample(self, destination=None, **kwargs):
    """Resample the multiscene."""
    return self._generate_scene_func(self._scenes, 'resample', True,
                                     destination=destination, **kwargs)
python
{ "resource": "" }
q27149
MultiScene.blend
train
def blend(self, blend_function=stack):
    """Blend the datasets into one scene.

    .. note::

        Blending is not currently optimized for generator-based MultiScene.

    """
    new_scn = Scene()
    common_datasets = self.shared_dataset_ids
    for ds_id in common_datasets:
        datasets = [scn[ds_id] for scn in self.scenes if ds_id in scn]
        new_scn[ds_id] = blend_function(datasets)

    return new_scn
python
{ "resource": "" }
q27150
MultiScene._distribute_save_datasets
train
def _distribute_save_datasets(self, scenes_iter, client, batch_size=1, **kwargs):
    """Distribute save_datasets across a cluster."""
    def load_data(q):
        idx = 0
        while True:
            future_list = q.get()
            if future_list is None:
                break

            # save_datasets shouldn't be returning anything
            for future in future_list:
                future.result()
            log.info("Finished saving %d scenes", idx)
            idx += 1
            q.task_done()

    input_q = Queue(batch_size if batch_size is not None else 1)
    load_thread = Thread(target=load_data, args=(input_q,))
    load_thread.start()

    for scene in scenes_iter:
        delayed = scene.save_datasets(compute=False, **kwargs)
        if isinstance(delayed, (list, tuple)) and len(delayed) == 2:
            # TODO Make this work for (source, target) datasets
            # given a target, source combination
            raise NotImplementedError("Distributed save_datasets does not support writers "
                                      "that return (source, target) combinations at this time. Use "
                                      "the non-distributed save_datasets instead.")
        future = client.compute(delayed)
        input_q.put(future)
    input_q.put(None)

    log.debug("Waiting for child thread to get saved results...")
    load_thread.join()
    log.debug("Child thread died successfully")
python
{ "resource": "" }
q27151
MultiScene.save_datasets
train
def save_datasets(self, client=True, batch_size=1, **kwargs):
    """Run save_datasets on each Scene.

    Note that some writers may not be multi-process friendly and may
    produce unexpected results or fail by raising an exception. In
    these cases ``client`` should be set to ``False``.
    This is currently a known issue for basic 'geotiff' writer work loads.

    Args:
        batch_size (int): Number of scenes to compute at the same time.
            This only has effect if the `dask.distributed` package is
            installed. This will default to 1. Setting this to 0 or less
            will attempt to process all scenes at once. This option should
            be used with care to avoid memory issues when trying to
            improve performance.
        client (bool or dask.distributed.Client): Dask distributed client
            to use for computation. If this is ``True`` (default) then
            any existing clients will be used.
            If this is ``False`` or ``None`` then a client will not be
            created and ``dask.distributed`` will not be used. If this
            is a dask ``Client`` object then it will be used for
            distributed computation.
        kwargs: Additional keyword arguments to pass to
            :meth:`~satpy.scene.Scene.save_datasets`.
            Note ``compute`` can not be provided.

    """
    if 'compute' in kwargs:
        raise ValueError("The 'compute' keyword argument can not be provided.")

    client = self._get_client(client=client)

    scenes = iter(self._scenes)
    if client is not None:
        self._distribute_save_datasets(scenes, client, batch_size=batch_size, **kwargs)
    else:
        self._simple_save_datasets(scenes, **kwargs)
python
{ "resource": "" }
q27152
MultiScene._get_animation_info
train
def _get_animation_info(self, all_datasets, filename, fill_value=None):
    """Determine filename and shape of animation to be created."""
    valid_datasets = [ds for ds in all_datasets if ds is not None]
    first_dataset = valid_datasets[0]
    last_dataset = valid_datasets[-1]
    first_img = get_enhanced_image(first_dataset)
    first_img_data = first_img.finalize(fill_value=fill_value)[0]
    shape = tuple(first_img_data.sizes.get(dim_name)
                  for dim_name in ('y', 'x', 'bands'))
    if fill_value is None and filename.endswith('gif'):
        log.warning("Forcing fill value to '0' for GIF Luminance images")
        fill_value = 0
        shape = shape[:2]

    attrs = first_dataset.attrs.copy()
    if 'end_time' in last_dataset.attrs:
        attrs['end_time'] = last_dataset.attrs['end_time']
    this_fn = filename.format(**attrs)
    return this_fn, shape, fill_value
python
{ "resource": "" }
q27153
MultiScene._get_animation_frames
train
def _get_animation_frames(self, all_datasets, shape, fill_value=None,
                          ignore_missing=False):
    """Create enhanced image frames to save to a file."""
    for idx, ds in enumerate(all_datasets):
        if ds is None and ignore_missing:
            continue
        elif ds is None:
            log.debug("Missing frame: %d", idx)
            data = da.zeros(shape, dtype=np.uint8, chunks=shape)
            data = xr.DataArray(data)
        else:
            img = get_enhanced_image(ds)
            data, mode = img.finalize(fill_value=fill_value)
            if data.ndim == 3:
                # assume all other shapes are (y, x)
                # we need arrays grouped by pixel so
                # transpose if needed
                data = data.transpose('y', 'x', 'bands')
        yield data.data
python
{ "resource": "" }
q27154
MultiScene._get_client
train
def _get_client(self, client=True):
    """Determine what dask distributed client to use."""
    client = client or None  # convert False/None to None
    if client is True and get_client is None:
        log.debug("'dask.distributed' library was not found, will "
                  "use simple serial processing.")
        client = None
    elif client is True:
        try:
            # get existing client
            client = get_client()
        except ValueError:
            log.warning("No dask distributed client was provided or found, "
                        "but distributed features were requested. Will use "
                        "simple serial processing.")
            client = None
    return client
python
{ "resource": "" }
q27155
MultiScene._distribute_frame_compute
train
def _distribute_frame_compute(self, writers, frame_keys, frames_to_write,
                              client, batch_size=1):
    """Use ``dask.distributed`` to compute multiple frames at a time."""
    def load_data(frame_gen, q):
        for frame_arrays in frame_gen:
            future_list = client.compute(frame_arrays)
            for frame_key, arr_future in zip(frame_keys, future_list):
                q.put({frame_key: arr_future})
        q.put(None)

    input_q = Queue(batch_size if batch_size is not None else 1)
    load_thread = Thread(target=load_data, args=(frames_to_write, input_q,))
    remote_q = client.gather(input_q)
    load_thread.start()

    while True:
        future_dict = remote_q.get()
        if future_dict is None:
            break

        # write the current frame
        # this should only be one element in the dictionary, but this is
        # also the easiest way to get access to the data
        for frame_key, result in future_dict.items():
            # frame_key = rev_future_dict[future]
            w = writers[frame_key]
            w.append_data(result)
        input_q.task_done()

    log.debug("Waiting for child thread...")
    load_thread.join(10)
    if load_thread.is_alive():
        import warnings
        warnings.warn("Background thread still alive after failing to die gracefully")
    else:
        log.debug("Child thread died successfully")
python
{ "resource": "" }
q27156
MultiScene._simple_frame_compute
train
def _simple_frame_compute(self, writers, frame_keys, frames_to_write):
    """Compute frames the plain dask way."""
    for frame_arrays in frames_to_write:
        for frame_key, product_frame in zip(frame_keys, frame_arrays):
            w = writers[frame_key]
            w.append_data(product_frame.compute())
python
{ "resource": "" }
q27157
get_bucket_files
train
def get_bucket_files(glob_pattern, base_dir, force=False, pattern_slice=slice(None)):
    """Helper function to download files from Google Cloud Storage.

    Args:
        glob_pattern (str or list): Glob pattern string or series of patterns
            used to search for on Google Cloud Storage. The pattern should
            include the "gs://" protocol prefix. If a list of lists, then the
            results of each sublist pattern are concatenated and the result
            is treated as one pattern result. This is important for things
            like ``pattern_slice`` and complicated glob patterns not
            supported by GCP.
        base_dir (str): Root directory to place downloaded files on the
            local system.
        force (bool): Force re-download of data regardless of its existence
            on the local system. Warning: May delete non-demo files stored
            in download directory.
        pattern_slice (slice): Slice object to limit the number of files
            returned by each glob pattern.

    """
    if gcsfs is None:
        raise RuntimeError("Missing 'gcsfs' dependency for GCS download.")
    if not os.path.isdir(base_dir):
        # it is the caller's responsibility to make this
        raise OSError("Directory does not exist: {}".format(base_dir))

    if isinstance(glob_pattern, str):
        glob_pattern = [glob_pattern]

    fs = gcsfs.GCSFileSystem(token='anon')
    filenames = []
    for gp in glob_pattern:
        # handle multiple glob patterns being treated as one pattern
        # for complicated patterns that GCP can't handle
        if isinstance(gp, str):
            glob_results = list(fs.glob(gp))
        else:
            # flat list of results
            glob_results = [fn for pat in gp for fn in fs.glob(pat)]

        for fn in glob_results[pattern_slice]:
            ondisk_fn = os.path.basename(fn)
            ondisk_pathname = os.path.join(base_dir, ondisk_fn)
            filenames.append(ondisk_pathname)

            if force and os.path.isfile(ondisk_pathname):
                os.remove(ondisk_pathname)
            elif os.path.isfile(ondisk_pathname):
                LOG.info("Found existing: {}".format(ondisk_pathname))
                continue
            LOG.info("Downloading: {}".format(ondisk_pathname))
            fs.get('gs://' + fn, ondisk_pathname)

    if not filenames:
        raise OSError("No files could be found or downloaded.")
    return filenames
python
{ "resource": "" }
q27158
HRITGOESPrologueFileHandler.process_prologue
train
def process_prologue(self):
    """Reprocess prologue to correct types."""
    for key in ['TCurr', 'TCHED', 'TCTRL', 'TLHED', 'TLTRL', 'TIPFS',
                'TINFS', 'TISPC', 'TIECL', 'TIBBC', 'TISTR', 'TLRAN',
                'TIIRT', 'TIVIT', 'TCLMT', 'TIONA']:
        try:
            self.prologue[key] = make_sgs_time(self.prologue[key])
        except ValueError:
            self.prologue.pop(key, None)
            logger.debug("Invalid data for %s", key)

    for key in ['SubSatLatitude', "SubSatLongitude", "ReferenceLongitude",
                "ReferenceDistance", "ReferenceLatitude"]:
        self.prologue[key] = make_gvar_float(self.prologue[key])
python
{ "resource": "" }
q27159
HRITGOESFileHandler._get_calibration_params
train
def _get_calibration_params(self):
    """Get the calibration parameters from the metadata."""
    params = {}
    idx_table = []
    val_table = []
    for elt in self.mda['image_data_function'].split(b'\r\n'):
        try:
            key, val = elt.split(b':=')
            try:
                idx_table.append(int(key))
                val_table.append(float(val))
            except ValueError:
                params[key] = val
        except ValueError:
            pass
    params['indices'] = np.array(idx_table)
    params['values'] = np.array(val_table, dtype=np.float32)
    return params
python
{ "resource": "" }
q27160
NCOLCI1B._get_solar_flux
train
def _get_solar_flux(self, band):
    """Get the solar flux for the band."""
    solar_flux = self.cal['solar_flux'].isel(bands=band).values
    d_index = self.cal['detector_index'].fillna(0).astype(int)

    return da.map_blocks(self._get_items, d_index.data,
                         solar_flux=solar_flux, dtype=solar_flux.dtype)
python
{ "resource": "" }
q27161
GOESNCBaseFileHandler._get_platform_name
train
def _get_platform_name(ncattr):
    """Determine name of the platform."""
    match = re.match(r'G-(\d+)', ncattr)
    if match:
        return SPACECRAFTS.get(int(match.groups()[0]))

    return None
python
{ "resource": "" }
q27162
GOESNCBaseFileHandler._get_sector
train
def _get_sector(self, channel, nlines, ncols):
    """Determine which sector was scanned."""
    if self._is_vis(channel):
        margin = 100
        sectors_ref = self.vis_sectors
    else:
        margin = 50
        sectors_ref = self.ir_sectors

    for (nlines_ref, ncols_ref), sector in sectors_ref.items():
        if np.fabs(ncols - ncols_ref) < margin and \
                np.fabs(nlines - nlines_ref) < margin:
            return sector

    return UNKNOWN_SECTOR
python
{ "resource": "" }
q27163
GOESNCBaseFileHandler._is_vis
train
def _is_vis(channel):
    """Determine whether the given channel is a visible channel."""
    if isinstance(channel, str):
        return channel == '00_7'
    elif isinstance(channel, int):
        return channel == 1
    else:
        raise ValueError('Invalid channel')
python
{ "resource": "" }
q27164
GOESNCBaseFileHandler._get_nadir_pixel
train
def _get_nadir_pixel(earth_mask, sector):
    """Find the nadir pixel.

    Args:
        earth_mask: Mask identifying earth and space pixels
        sector: Specifies the scanned sector

    Returns:
        nadir row, nadir column
    """
    if sector == FULL_DISC:
        logger.debug('Computing nadir pixel')

        # The earth is not centered in the image, compute bounding box
        # of the earth disc first
        rmin, rmax, cmin, cmax = bbox(earth_mask)

        # The nadir pixel is approximately at the centre of the earth disk
        nadir_row = rmin + (rmax - rmin) // 2
        nadir_col = cmin + (cmax - cmin) // 2

        return nadir_row, nadir_col

    return None, None
python
{ "resource": "" }
q27165
GOESNCBaseFileHandler._get_area_def_uniform_sampling
train
def _get_area_def_uniform_sampling(self, lon0, channel):
    """Get area definition with uniform sampling."""
    logger.debug('Computing area definition')

    if lon0 is not None:
        # Define proj4 projection parameters
        proj_dict = {'a': EQUATOR_RADIUS, 'b': POLE_RADIUS,
                     'lon_0': lon0, 'h': ALTITUDE, 'proj': 'geos',
                     'units': 'm'}

        # Calculate maximum scanning angles
        xmax, ymax = get_geostationary_angle_extent(
            namedtuple('area', ['proj_dict'])(proj_dict))

        # Derive area extent using small angle approximation (maximum
        # scanning angle is ~8.6 degrees)
        llx, lly, urx, ury = ALTITUDE * np.array([-xmax, -ymax, xmax, ymax])
        area_extent = [llx, lly, urx, ury]

        # Original image is oversampled. Create pyresample area definition
        # with uniform sampling in N-S and E-W direction
        if self._is_vis(channel):
            sampling = SAMPLING_NS_VIS
        else:
            sampling = SAMPLING_NS_IR
        pix_size = ALTITUDE * sampling
        area_def = pyresample.geometry.AreaDefinition(
            'goes_geos_uniform',
            '{} geostationary projection (uniform sampling)'.format(self.platform_name),
            'goes_geos_uniform',
            proj_dict,
            np.rint((urx - llx) / pix_size).astype(int),
            np.rint((ury - lly) / pix_size).astype(int),
            area_extent)

        return area_def
    else:
        return None
python
{ "resource": "" }
q27166
GOESNCBaseFileHandler.start_time
train
def start_time(self):
    """Start timestamp of the dataset."""
    dt = self.nc['time'].dt
    return datetime(year=dt.year, month=dt.month, day=dt.day,
                    hour=dt.hour, minute=dt.minute,
                    second=dt.second, microsecond=dt.microsecond)
python
{ "resource": "" }
q27167
GOESNCBaseFileHandler.end_time
train
def end_time(self):
    """End timestamp of the dataset."""
    try:
        return self.start_time + SCAN_DURATION[self.sector]
    except KeyError:
        return self.start_time
python
{ "resource": "" }
q27168
GOESNCBaseFileHandler.meta
train
def meta(self):
    """Derive metadata from the coordinates."""
    # Use buffered data if available
    if self._meta is None:
        lat = self.geo_data['lat']
        earth_mask = self._get_earth_mask(lat)
        crow, ccol = self._get_nadir_pixel(earth_mask=earth_mask,
                                           sector=self.sector)
        lat0 = lat.values[crow, ccol] if crow is not None else None
        yaw_flip = self._is_yaw_flip(lat)
        del lat

        lon = self.geo_data['lon']
        lon0 = lon.values[crow, ccol] if crow is not None else None
        area_def_uni = self._get_area_def_uniform_sampling(
            lon0=lon0, channel=self.gvar_channel)
        del lon

        self._meta = {'earth_mask': earth_mask,
                      'yaw_flip': yaw_flip,
                      'lat0': lat0,
                      'lon0': lon0,
                      'nadir_row': crow,
                      'nadir_col': ccol,
                      'area_def_uni': area_def_uni}
    return self._meta
python
{ "resource": "" }
q27169
GOESNCBaseFileHandler._counts2radiance
train
def _counts2radiance(self, counts, coefs, channel):
    """Convert raw detector counts to radiance."""
    logger.debug('Converting counts to radiance')

    if self._is_vis(channel):
        # Since the scanline-detector assignment is unknown, use the
        # average coefficients for all scanlines.
        slope = np.array(coefs['slope']).mean()
        offset = np.array(coefs['offset']).mean()
        return self._viscounts2radiance(counts=counts, slope=slope,
                                        offset=offset)

    return self._ircounts2radiance(counts=counts, scale=coefs['scale'],
                                   offset=coefs['offset'])
python
{ "resource": "" }
q27170
GOESNCBaseFileHandler._calibrate
train
def _calibrate(self, radiance, coefs, channel, calibration):
    """Convert radiance to reflectance or brightness temperature."""
    if self._is_vis(channel):
        if not calibration == 'reflectance':
            raise ValueError('Cannot calibrate VIS channel to '
                             '{}'.format(calibration))
        return self._calibrate_vis(radiance=radiance, k=coefs['k'])
    else:
        if not calibration == 'brightness_temperature':
            raise ValueError('Cannot calibrate IR channel to '
                             '{}'.format(calibration))

        # Since the scanline-detector assignment is unknown, use the
        # average coefficients for all scanlines.
        mean_coefs = {'a': np.array(coefs['a']).mean(),
                      'b': np.array(coefs['b']).mean(),
                      'n': np.array(coefs['n']).mean(),
                      'btmin': coefs['btmin'],
                      'btmax': coefs['btmax']}
        return self._calibrate_ir(radiance=radiance, coefs=mean_coefs)
python
{ "resource": "" }
q27171
GOESNCBaseFileHandler._ircounts2radiance
train
def _ircounts2radiance(counts, scale, offset):
    """Convert IR counts to radiance.

    Reference: [IR].

    Args:
        counts: Raw detector counts
        scale: Scale [mW-1 m2 cm sr]
        offset: Offset [1]

    Returns:
        Radiance [mW m-2 cm-1 sr-1]
    """
    rad = (counts - offset) / scale
    return rad.clip(min=0)
python
{ "resource": "" }
q27172
GOESNCBaseFileHandler._calibrate_ir
train
def _calibrate_ir(radiance, coefs):
    """Convert IR radiance to brightness temperature.

    Reference: [IR]

    Args:
        radiance: Radiance [mW m-2 cm-1 sr-1]
        coefs: Dictionary of calibration coefficients. Keys:
            n: The channel's central wavenumber [cm-1]
            a: Offset [K]
            b: Slope [1]
            btmin: Minimum brightness temperature threshold [K]
            btmax: Maximum brightness temperature threshold [K]

    Returns:
        Brightness temperature [K]
    """
    logger.debug('Calibrating to brightness temperature')

    # Compute brightness temperature using inverse Planck formula
    n = coefs['n']
    bteff = C2 * n / xu.log(1 + C1 * n**3 / radiance.where(radiance > 0))
    bt = xr.DataArray(bteff * coefs['b'] + coefs['a'])

    # Apply BT threshold
    return bt.where(xu.logical_and(bt >= coefs['btmin'],
                                   bt <= coefs['btmax']))
python
{ "resource": "" }
q27173
GOESNCBaseFileHandler._viscounts2radiance
train
def _viscounts2radiance(counts, slope, offset):
    """Convert VIS counts to radiance.

    References: [VIS]

    Args:
        counts: Raw detector counts
        slope: Slope [W m-2 um-1 sr-1]
        offset: Offset [W m-2 um-1 sr-1]

    Returns:
        Radiance [W m-2 um-1 sr-1]
    """
    rad = counts * slope + offset
    return rad.clip(min=0)
python
{ "resource": "" }
q27174
GOESNCBaseFileHandler._calibrate_vis
train
def _calibrate_vis(radiance, k):
    """Convert VIS radiance to reflectance.

    Note: Angle of incident radiation and annual variation of the
    earth-sun distance is not taken into account. A value of 100%
    corresponds to the radiance of a perfectly reflecting diffuse
    surface illuminated at normal incidence when the sun is at its
    annual-average distance from the Earth.

    TODO: Take angle of incident radiation (cos sza) and annual
    variation of the earth-sun distance into account.

    Reference: [VIS]

    Args:
        radiance: Radiance [mW m-2 cm-1 sr-1]
        k: pi / H, where H is the solar spectral irradiance at
            annual-average sun-earth distance, averaged over the
            spectral response function of the detector). Units of
            k: [m2 um sr W-1]

    Returns:
        Reflectance [%]
    """
    logger.debug('Calibrating to reflectance')
    refl = 100 * k * radiance
    return refl.clip(min=0)
python
{ "resource": "" }
q27175
GOESNCBaseFileHandler._update_metadata
train
def _update_metadata(self, data, ds_info):
    """Update metadata of the given DataArray."""
    # Metadata from the dataset definition
    data.attrs.update(ds_info)

    # If the file_type attribute is a list and the data is xarray
    # the concat of the dataset will not work. As the file_type is
    # not needed this will be popped here.
    if 'file_type' in data.attrs:
        data.attrs.pop('file_type')

    # Metadata discovered from the file
    data.attrs.update(
        {'platform_name': self.platform_name,
         'sensor': self.sensor,
         'sector': self.sector,
         'yaw_flip': self.meta['yaw_flip']}
    )
    if self.meta['lon0'] is not None:
        # Attributes only available for full disc images. YAML reader
        # doesn't like it if satellite_* is present but None
        data.attrs.update(
            {'satellite_longitude': self.meta['lon0'],
             'satellite_latitude': self.meta['lat0'],
             'satellite_altitude': ALTITUDE,
             'nadir_row': self.meta['nadir_row'],
             'nadir_col': self.meta['nadir_col'],
             'area_def_uniform_sampling': self.meta['area_def_uni']}
        )
python
{ "resource": "" }
q27176
GOESCoefficientReader._float
train
def _float(self, string):
    """Convert string to float.

    Take care of numbers in exponential format.
    """
    string = self._denoise(string)
    exp_match = re.match(r'^[-.\d]+x10-(\d)$', string)
    if exp_match:
        exp = int(exp_match.groups()[0])
        fac = 10 ** -exp
        string = string.replace('x10-{}'.format(exp), '')
    else:
        fac = 1

    return fac * float(string)
python
{ "resource": "" }
q27177
NC_ABI_L1B._vis_calibrate
train
def _vis_calibrate(self, data):
    """Calibrate visible channels to reflectance."""
    solar_irradiance = self['esun']
    esd = self["earth_sun_distance_anomaly_in_AU"].astype(float)

    factor = np.pi * esd * esd / solar_irradiance

    res = data * factor
    res.attrs = data.attrs
    res.attrs['units'] = '1'
    res.attrs['standard_name'] = 'toa_bidirectional_reflectance'
    return res
python
{ "resource": "" }
q27178
NC_ABI_L1B._ir_calibrate
train
def _ir_calibrate(self, data):
    """Calibrate IR channels to BT."""
    fk1 = float(self["planck_fk1"])
    fk2 = float(self["planck_fk2"])
    bc1 = float(self["planck_bc1"])
    bc2 = float(self["planck_bc2"])

    res = (fk2 / xu.log(fk1 / data + 1) - bc1) / bc2
    res.attrs = data.attrs
    res.attrs['units'] = 'K'
    res.attrs['standard_name'] = 'toa_brightness_temperature'
    return res
python
{ "resource": "" }
q27179
time_seconds
train
def time_seconds(tc_array, year):
    """Return the time object from the timecodes."""
    tc_array = np.array(tc_array, copy=True)
    word = tc_array[:, 0]
    day = word >> 1
    word = tc_array[:, 1].astype(np.uint64)
    msecs = ((127) & word) * 1024
    word = tc_array[:, 2]
    msecs += word & 1023
    msecs *= 1024
    word = tc_array[:, 3]
    msecs += word & 1023
    return (np.datetime64(
        str(year) + '-01-01T00:00:00Z', 's') +
        msecs[:].astype('timedelta64[ms]') +
        (day - 1)[:].astype('timedelta64[D]'))
python
{ "resource": "" }
q27180
apply_enhancement
train
def apply_enhancement(data, func, exclude=None, separate=False,
                      pass_dask=False):
    """Apply `func` to the provided data.

    Args:
        data (xarray.DataArray): Data to be modified inplace.
        func (callable): Function to be applied to an xarray
        exclude (iterable): Bands in the 'bands' dimension to not include
                            in the calculations.
        separate (bool): Apply `func` one band at a time. Default is False.
        pass_dask (bool): Pass the underlying dask array instead of the
                          xarray.DataArray.

    """
    attrs = data.attrs
    bands = data.coords['bands'].values
    if exclude is None:
        exclude = ['A'] if 'A' in bands else []

    if separate:
        data_arrs = []
        for idx, band_name in enumerate(bands):
            band_data = data.sel(bands=[band_name])
            if band_name in exclude:
                # don't modify alpha
                data_arrs.append(band_data)
                continue

            if pass_dask:
                dims = band_data.dims
                coords = band_data.coords
                d_arr = func(band_data.data, index=idx)
                band_data = xr.DataArray(d_arr, dims=dims, coords=coords)
            else:
                band_data = func(band_data, index=idx)
            data_arrs.append(band_data)
            # we assume that the func can add attrs
            attrs.update(band_data.attrs)

        data.data = xr.concat(data_arrs, dim='bands').data
        data.attrs = attrs
        return data
    else:
        band_data = data.sel(bands=[b for b in bands
                                    if b not in exclude])
        if pass_dask:
            dims = band_data.dims
            coords = band_data.coords
            d_arr = func(band_data.data)
            band_data = xr.DataArray(d_arr, dims=dims, coords=coords)
        else:
            band_data = func(band_data)

        attrs.update(band_data.attrs)
        # combine the new data with the excluded data
        new_data = xr.concat([band_data, data.sel(bands=exclude)],
                             dim='bands')
        data.data = new_data.sel(bands=bands).data
        data.attrs = attrs

    return data
python
{ "resource": "" }
q27181
cira_stretch
train
def cira_stretch(img, **kwargs):
    """Logarithmic stretch adapted to human vision.

    Applicable only for visible channels.
    """
    LOG.debug("Applying the cira-stretch")

    def func(band_data):
        log_root = np.log10(0.0223)
        denom = (1.0 - log_root) * 0.75
        band_data *= 0.01
        band_data = band_data.clip(np.finfo(float).eps)
        band_data = xu.log10(band_data)
        band_data -= log_root
        band_data /= denom
        return band_data

    return apply_enhancement(img.data, func)
python
{ "resource": "" }
q27182
lookup
train
def lookup(img, **kwargs):
    """Assign values to channels based on a table."""
    luts = np.array(kwargs['luts'], dtype=np.float32) / 255.0

    def func(band_data, luts=luts, index=-1):
        # NaN/null values will become 0
        lut = luts[:, index] if len(luts.shape) == 2 else luts
        band_data = band_data.clip(0, lut.size - 1).astype(np.uint8)

        new_delay = dask.delayed(_lookup_delayed)(lut, band_data)
        new_data = da.from_delayed(new_delay, shape=band_data.shape,
                                   dtype=luts.dtype)
        return new_data

    return apply_enhancement(img.data, func, separate=True, pass_dask=True)
python
{ "resource": "" }
q27183
_merge_colormaps
train
def _merge_colormaps(kwargs):
    """Merge colormaps listed in kwargs."""
    from trollimage.colormap import Colormap
    full_cmap = None

    palette = kwargs['palettes']
    if isinstance(palette, Colormap):
        full_cmap = palette
    else:
        for itm in palette:
            cmap = create_colormap(itm)
            cmap.set_range(itm["min_value"], itm["max_value"])
            if full_cmap is None:
                full_cmap = cmap
            else:
                full_cmap = full_cmap + cmap

    return full_cmap
python
{ "resource": "" }
q27184
create_colormap
train
def create_colormap(palette):
    """Create colormap of the given numpy file, color vector or colormap."""
    from trollimage.colormap import Colormap
    fname = palette.get('filename', None)
    if fname:
        data = np.load(fname)
        cmap = []
        num = 1.0 * data.shape[0]
        for i in range(int(num)):
            cmap.append((i / num, (data[i, 0] / 255., data[i, 1] / 255.,
                                   data[i, 2] / 255.)))
        return Colormap(*cmap)

    colors = palette.get('colors', None)
    if isinstance(colors, list):
        cmap = []
        values = palette.get('values', None)
        for idx, color in enumerate(colors):
            if values:
                value = values[idx]
            else:
                value = idx / float(len(colors) - 1)
            cmap.append((value, tuple(color)))
        return Colormap(*cmap)

    if isinstance(colors, str):
        from trollimage import colormap
        import copy
        return copy.copy(getattr(colormap, colors))

    return None
python
{ "resource": "" }
q27185
three_d_effect
train
def three_d_effect(img, **kwargs):
    """Create 3D effect using convolution."""
    w = kwargs.get('weight', 1)
    LOG.debug("Applying 3D effect with weight %.2f", w)
    kernel = np.array([[-w, 0, w],
                       [-w, 1, w],
                       [-w, 0, w]])
    mode = kwargs.get('convolve_mode', 'same')

    def func(band_data, kernel=kernel, mode=mode, index=None):
        del index

        delay = dask.delayed(_three_d_effect_delayed)(band_data, kernel, mode)
        new_data = da.from_delayed(delay, shape=band_data.shape,
                                   dtype=band_data.dtype)
        return new_data

    return apply_enhancement(img.data, func, separate=True, pass_dask=True)
python
{ "resource": "" }
q27186
btemp_threshold
train
def btemp_threshold(img, min_in, max_in, threshold, threshold_out=None, **kwargs):
    """Scale data linearly in two separate regions.

    This enhancement scales the input data linearly by splitting the data
    into two regions; min_in to threshold and threshold to max_in. These
    regions are mapped to 1 to threshold_out and threshold_out to 0
    respectively, resulting in the data being "flipped" around the
    threshold. A default threshold_out is set to `176.0 / 255.0` to match
    the behavior of the US National Weather Service's forecasting tool
    called AWIPS.

    Args:
        img (XRImage): Image object to be scaled
        min_in (float): Minimum input value to scale
        max_in (float): Maximum input value to scale
        threshold (float): Input value where to split data in to two regions
        threshold_out (float): Output value to map the input `threshold`
            to. Optional, defaults to 176.0 / 255.0.

    """
    threshold_out = threshold_out if threshold_out is not None else (176 / 255.0)
    low_factor = (threshold_out - 1.) / (min_in - threshold)
    low_offset = 1. + (low_factor * min_in)
    high_factor = threshold_out / (max_in - threshold)
    high_offset = high_factor * max_in

    def _bt_threshold(band_data):
        # expects dask array to be passed
        return da.where(band_data >= threshold,
                        high_offset - high_factor * band_data,
                        low_offset - low_factor * band_data)

    return apply_enhancement(img.data, _bt_threshold, pass_dask=True)
python
{ "resource": "" }
q27187
API._make_request
train
def _make_request(self, path, method='GET', params_=None):
    """Make a request to the API"""
    uri = self.api_root + path
    if params_:
        params_['text_format'] = self.response_format
    else:
        params_ = {'text_format': self.response_format}

    # Make the request
    response = None
    try:
        response = self._session.request(method, uri,
                                         timeout=self.timeout,
                                         params=params_)
    except Timeout as e:
        print("Timeout raised and caught:\n{e}".format(e=e))

    # Enforce rate limiting
    time.sleep(max(self._SLEEP_MIN, self.sleep_time))
    return response.json()['response'] if response else None
python
{ "resource": "" }
q27188
API.get_song
train
def get_song(self, id_):
    """Data for a specific song."""
    endpoint = "songs/{id}".format(id=id_)
    return self._make_request(endpoint)
python
{ "resource": "" }
q27189
API.get_artist
train
def get_artist(self, id_):
    """Data for a specific artist."""
    endpoint = "artists/{id}".format(id=id_)
    return self._make_request(endpoint)
python
{ "resource": "" }
q27190
API.search_genius
train
def search_genius(self, search_term):
    """Search documents hosted on Genius."""
    endpoint = "search/"
    params = {'q': search_term}
    return self._make_request(endpoint, params_=params)
python
{ "resource": "" }
q27191
API.search_genius_web
train
def search_genius_web(self, search_term, per_page=5):
    """Use the web-version of Genius search"""
    endpoint = "search/multi?"
    params = {'per_page': per_page, 'q': search_term}

    # This endpoint is not part of the API, requires different formatting
    url = "https://genius.com/api/" + endpoint + urlencode(params)
    response = requests.get(url, timeout=self.timeout)
    time.sleep(max(self._SLEEP_MIN, self.sleep_time))
    return response.json()['response'] if response else None
python
{ "resource": "" }
q27192
API.get_annotation
train
def get_annotation(self, id_):
    """Data for a specific annotation."""
    endpoint = "annotations/{id}".format(id=id_)
    return self._make_request(endpoint)
python
{ "resource": "" }
q27193
Genius._result_is_lyrics
train
def _result_is_lyrics(self, song_title):
    """Returns False if result from Genius is not actually song lyrics.

    Set the `excluded_terms` and `replace_default_terms` as
    instance variables within the Genius class.
    """
    default_terms = ['track\\s?list', 'album art(work)?', 'liner notes',
                     'booklet', 'credits', 'interview', 'skit',
                     'instrumental', 'setlist']
    if self.excluded_terms:
        if self.replace_default_terms:
            default_terms = self.excluded_terms
        else:
            default_terms.extend(self.excluded_terms)

    expression = r"".join(["({})|".format(term) for term in default_terms]).strip('|')
    regex = re.compile(expression, re.IGNORECASE)
    return not regex.search(self._clean_str(song_title))
python
{ "resource": "" }
q27194
Genius._get_item_from_search_response
train
def _get_item_from_search_response(self, response, type_):
    """Returns either a Song or Artist result from search_genius_web"""
    sections = sorted(response['sections'],
                      key=lambda sect: sect['type'] == type_,
                      reverse=True)
    for section in sections:
        hits = [hit for hit in section['hits'] if hit['type'] == type_]
        if hits:
            return hits[0]['result']
python
{ "resource": "" }
q27195
Genius._result_is_match
train
def _result_is_match(self, result, title, artist=None):
    """Returns True if search result matches searched song"""
    result_title = self._clean_str(result['title'])
    title_is_match = result_title == self._clean_str(title)
    if not artist:
        return title_is_match
    result_artist = self._clean_str(result['primary_artist']['name'])
    return title_is_match and result_artist == self._clean_str(artist)
python
{ "resource": "" }
q27196
Song.to_dict
train
def to_dict(self):
    """Create a dictionary from the song object.

    Used in save_lyrics to create json object

    :return: Dictionary
    """
    return {'title': self.title,
            'album': self.album,
            'year': self.year,
            'lyrics': self.lyrics,
            'image': self.song_art_image_url}
python
{ "resource": "" }
q27197
Song.save_lyrics
train
def save_lyrics(self, filename=None, extension='json', verbose=True,
                overwrite=None, binary_encoding=False):
    """Allows user to save song lyrics from Song object to a .json or .txt file."""
    extension = extension.lstrip(".")
    assert (extension == 'json') or (extension == 'txt'), "extension must be 'json' or 'txt'"

    # Determine the filename
    if filename:
        for ext in ["txt", "TXT", "json", "JSON"]:
            filename = filename.replace("." + ext, "")
        filename += "." + extension
    else:
        filename = "Lyrics_{}_{}.{}".format(self.artist.replace(" ", ""),
                                            self.title.replace(" ", ""),
                                            extension).lower()
    filename = self._sanitize_filename(filename)

    # Check if file already exists
    write_file = False
    if not os.path.isfile(filename):
        write_file = True
    elif overwrite:
        write_file = True
    else:
        if input("{} already exists. Overwrite?\n(y/n): ".format(filename)).lower() == 'y':
            write_file = True

    # Format lyrics as either .txt or .json
    if extension == 'json':
        lyrics_to_write = {'songs': [], 'artist': self.artist}
        lyrics_to_write['songs'].append(self.to_dict())
    else:
        lyrics_to_write = self.lyrics

    if binary_encoding:
        lyrics_to_write = lyrics_to_write.encode('utf8')

    # Write the lyrics to either a .json or .txt file
    if write_file:
        with open(filename, 'wb' if binary_encoding else 'w') as lyrics_file:
            if extension == 'json':
                json.dump(lyrics_to_write, lyrics_file)
            else:
                lyrics_file.write(lyrics_to_write)
        if verbose:
            print('Wrote {} to {}.'.format(self.title, filename))
    else:
        if verbose:
            print('Skipping file save.\n')
    return lyrics_to_write
python
{ "resource": "" }
q27198
Artist.add_song
train
def add_song(self, new_song, verbose=True):
    """Add a Song object to the Artist object"""
    if any([song.title == new_song.title for song in self._songs]):
        if verbose:
            print('{s} already in {a}, not adding song.'.format(s=new_song.title,
                                                                a=self.name))
        return 1  # Failure
    if new_song.artist == self.name:
        self._songs.append(new_song)
        self._num_songs += 1
        return 0  # Success
    if verbose:
        print("Can't add song by {b}, artist must be {a}.".format(b=new_song.artist,
                                                                  a=self.name))
    return 1  # Failure
python
{ "resource": "" }
q27199
Artist.save_lyrics
train
def save_lyrics(self, extension='json', overwrite=False, verbose=True,
                binary_encoding=False):
    """Allows user to save all lyrics within an Artist object"""
    extension = extension.lstrip(".")
    assert (extension == 'json') or (extension == 'txt'), "extension must be 'json' or 'txt'"

    for song in self.songs:
        song.save_lyrics(extension=extension, overwrite=overwrite,
                         verbose=verbose, binary_encoding=binary_encoding)
python
{ "resource": "" }