Record fields: _id (string) · title (string) · partition (string) · text (string) · language (string) · meta_information (dict)
q27000
SCMIFileHandler._get_proj_specific_params
train
def _get_proj_specific_params(self, projection):
    """Convert CF projection parameters to PROJ.4 dict."""
    proj = self._get_proj4_name(projection)
    proj_dict = {
        'proj': proj,
        'a': float(projection.attrs['semi_major_axis']),
        'b': float(projection.attrs['semi_minor_axis']),
        'units': 'm',
    }
    if proj == 'geos':
        proj_dict['h'] = float(projection.attrs['perspective_point_height'])
        proj_dict['sweep'] = projection.attrs.get('sweep_angle_axis', 'y')
        proj_dict['lon_0'] = float(projection.attrs['longitude_of_projection_origin'])
        proj_dict['lat_0'] = float(projection.attrs.get('latitude_of_projection_origin', 0.0))
    elif proj == 'lcc':
        proj_dict['lat_0'] = float(projection.attrs['standard_parallel'])
        proj_dict['lon_0'] = float(projection.attrs['longitude_of_central_meridian'])
        proj_dict['lat_1'] = float(projection.attrs['latitude_of_projection_origin'])
    elif proj == 'stere':
        proj_dict['lat_ts'] = float(projection.attrs['standard_parallel'])
        proj_dict['lon_0'] = float(projection.attrs['straight_vertical_longitude_from_pole'])
        proj_dict['lat_0'] = float(projection.attrs['latitude_of_projection_origin'])
    elif proj == 'merc':
        proj_dict['lat_ts'] = float(projection.attrs['standard_parallel'])
        proj_dict['lat_0'] = proj_dict['lat_ts']
        proj_dict['lon_0'] = float(projection.attrs['longitude_of_projection_origin'])
    else:
        raise ValueError("Can't handle projection '{}'".format(proj))

    return proj_dict
python
{ "resource": "" }
q27001
timecds2datetime
train
def timecds2datetime(tcds):
    """Convert time_cds-variables to datetime objects.

    Works both with a dictionary and a numpy record_array.
    """
    days = int(tcds['Days'])
    milliseconds = int(tcds['Milliseconds'])
    try:
        microseconds = int(tcds['Microseconds'])
    except (KeyError, ValueError):
        microseconds = 0
    try:
        microseconds += int(tcds['Nanoseconds']) / 1000.
    except (KeyError, ValueError):
        pass

    reference = datetime(1958, 1, 1)
    delta = timedelta(days=days, milliseconds=milliseconds,
                      microseconds=microseconds)

    return reference + delta
python
{ "resource": "" }
q27002
show
train
def show(data, negate=False):
    """Show the stretched data.
    """
    from PIL import Image as pil
    data = np.array((data - data.min()) * 255.0 /
                    (data.max() - data.min()), np.uint8)
    if negate:
        data = 255 - data
    img = pil.fromarray(data)
    img.show()
python
{ "resource": "" }
q27003
HRITMSGPrologueFileHandler._get_satpos_cart
train
def _get_satpos_cart(self):
    """Determine satellite position in earth-centered cartesian coordinates

    The coordinates as a function of time are encoded in the coefficients of an 8th-order Chebyshev
    polynomial. In the prologue there is one set of coefficients for each coordinate (x, y, z).
    The coordinates are obtained by evaluating the polynomials at the start time of the scan.

    Returns: x, y, z [m]
    """
    orbit_polynomial = self.prologue['SatelliteStatus']['Orbit']['OrbitPolynomial']

    # Find Chebyshev coefficients for the given time
    coef_idx = self._find_navigation_coefs()
    tstart = orbit_polynomial['StartTime'][0, coef_idx]
    tend = orbit_polynomial['EndTime'][0, coef_idx]

    # Obtain cartesian coordinates (x, y, z) of the satellite by evaluating the Chebyshev polynomial at the
    # start time of the scan. Express timestamps in microseconds since 1970-01-01 00:00.
    time = self.prologue['ImageAcquisition']['PlannedAcquisitionTime']['TrueRepeatCycleStart']
    time64 = np.datetime64(time).astype('int64')
    domain = [np.datetime64(tstart).astype('int64'),
              np.datetime64(tend).astype('int64')]
    x = chebyshev(coefs=orbit_polynomial['X'][coef_idx], time=time64, domain=domain)
    y = chebyshev(coefs=orbit_polynomial['Y'][coef_idx], time=time64, domain=domain)
    z = chebyshev(coefs=orbit_polynomial['Z'][coef_idx], time=time64, domain=domain)

    return x*1000, y*1000, z*1000
python
{ "resource": "" }
q27004
HRITMSGPrologueFileHandler._find_navigation_coefs
train
def _find_navigation_coefs(self):
    """Find navigation coefficients for the current time

    The navigation Chebyshev coefficients are only valid for a certain time interval. The header
    entry SatelliteStatus/Orbit/OrbitPolynomial contains multiple coefficients for multiple time
    intervals. Find the coefficients which are valid for the nominal timestamp of the scan.

    Returns: Corresponding index in the coefficient list.
    """
    # Find index of interval enclosing the nominal timestamp of the scan
    time = np.datetime64(self.prologue['ImageAcquisition']['PlannedAcquisitionTime']['TrueRepeatCycleStart'])
    intervals_tstart = self.prologue['SatelliteStatus']['Orbit']['OrbitPolynomial']['StartTime'][0].astype(
        'datetime64[us]')
    intervals_tend = self.prologue['SatelliteStatus']['Orbit']['OrbitPolynomial']['EndTime'][0].astype(
        'datetime64[us]')
    try:
        return np.where(np.logical_and(time >= intervals_tstart, time < intervals_tend))[0][0]
    except IndexError:
        raise NoValidNavigationCoefs('Unable to find navigation coefficients valid for {}'.format(time))
python
{ "resource": "" }
q27005
HRITMSGPrologueFileHandler.get_earth_radii
train
def get_earth_radii(self):
    """Get earth radii from prologue

    Returns: Equatorial radius, polar radius [m]
    """
    earth_model = self.prologue['GeometricProcessing']['EarthModel']
    a = earth_model['EquatorialRadius'] * 1000
    b = (earth_model['NorthPolarRadius'] +
         earth_model['SouthPolarRadius']) / 2.0 * 1000
    return a, b
python
{ "resource": "" }
q27006
HRITMSGEpilogueFileHandler.read_epilogue
train
def read_epilogue(self):
    """Read the epilogue metadata."""
    with open(self.filename, "rb") as fp_:
        fp_.seek(self.mda['total_header_length'])
        data = np.fromfile(fp_, dtype=hrit_epilogue, count=1)
        self.epilogue.update(recarray2dict(data))
python
{ "resource": "" }
q27007
HRITMSGFileHandler._get_header
train
def _get_header(self):
    """Read the header info, and fill the metadata dictionary"""
    earth_model = self.prologue['GeometricProcessing']['EarthModel']
    self.mda['offset_corrected'] = earth_model['TypeOfEarthModel'] == 2

    # Projection
    a, b = self.prologue_.get_earth_radii()
    self.mda['projection_parameters']['a'] = a
    self.mda['projection_parameters']['b'] = b
    ssp = self.prologue['ImageDescription'][
        'ProjectionDescription']['LongitudeOfSSP']
    self.mda['projection_parameters']['SSP_longitude'] = ssp
    self.mda['projection_parameters']['SSP_latitude'] = 0.0

    # Navigation
    actual_lon, actual_lat, actual_alt = self.prologue_.get_satpos()
    self.mda['navigation_parameters']['satellite_nominal_longitude'] = self.prologue['SatelliteStatus'][
        'SatelliteDefinition']['NominalLongitude']
    self.mda['navigation_parameters']['satellite_nominal_latitude'] = 0.0
    self.mda['navigation_parameters']['satellite_actual_longitude'] = actual_lon
    self.mda['navigation_parameters']['satellite_actual_latitude'] = actual_lat
    self.mda['navigation_parameters']['satellite_actual_altitude'] = actual_alt

    # Misc
    self.platform_id = self.prologue["SatelliteStatus"][
        "SatelliteDefinition"]["SatelliteId"]
    self.platform_name = "Meteosat-" + SATNUM[self.platform_id]
    self.mda['platform_name'] = self.platform_name
    service = self._filename_info['service']
    if service == '':
        self.mda['service'] = '0DEG'
    else:
        self.mda['service'] = service
    self.channel_name = CHANNEL_NAMES[self.mda['spectral_channel_id']]
python
{ "resource": "" }
q27008
omerc2cf
train
def omerc2cf(area):
    """Return the cf grid mapping for the omerc projection."""
    proj_dict = area.proj_dict

    args = dict(azimuth_of_central_line=proj_dict.get('alpha'),
                latitude_of_projection_origin=proj_dict.get('lat_0'),
                longitude_of_projection_origin=proj_dict.get('lonc'),
                grid_mapping_name='oblique_mercator',
                reference_ellipsoid_name=proj_dict.get('ellps', 'WGS84'),
                false_easting=0.,
                false_northing=0.
                )
    if "no_rot" in proj_dict:
        args['no_rotation'] = 1
    if "gamma" in proj_dict:
        args['gamma'] = proj_dict['gamma']
    return args
python
{ "resource": "" }
q27009
geos2cf
train
def geos2cf(area):
    """Return the cf grid mapping for the geos projection."""
    proj_dict = area.proj_dict

    args = dict(perspective_point_height=proj_dict.get('h'),
                latitude_of_projection_origin=proj_dict.get('lat_0'),
                longitude_of_projection_origin=proj_dict.get('lon_0'),
                grid_mapping_name='geostationary',
                semi_major_axis=proj_dict.get('a'),
                semi_minor_axis=proj_dict.get('b'),
                sweep_axis=proj_dict.get('sweep'),
                )
    return args
python
{ "resource": "" }
q27010
laea2cf
train
def laea2cf(area):
    """Return the cf grid mapping for the laea projection."""
    proj_dict = area.proj_dict

    args = dict(latitude_of_projection_origin=proj_dict.get('lat_0'),
                longitude_of_projection_origin=proj_dict.get('lon_0'),
                grid_mapping_name='lambert_azimuthal_equal_area',
                )
    return args
python
{ "resource": "" }
q27011
create_grid_mapping
train
def create_grid_mapping(area):
    """Create the grid mapping instance for `area`."""
    try:
        grid_mapping = mappings[area.proj_dict['proj']](area)
        grid_mapping['name'] = area.proj_dict['proj']
    except KeyError:
        raise NotImplementedError

    return grid_mapping
python
{ "resource": "" }
q27012
area2lonlat
train
def area2lonlat(dataarray):
    """Convert an area to longitudes and latitudes."""
    area = dataarray.attrs['area']
    lons, lats = area.get_lonlats_dask()
    lons = xr.DataArray(lons, dims=['y', 'x'],
                        attrs={'name': "longitude",
                               'standard_name': "longitude",
                               'units': 'degrees_east'},
                        name='longitude')
    lats = xr.DataArray(lats, dims=['y', 'x'],
                        attrs={'name': "latitude",
                               'standard_name': "latitude",
                               'units': 'degrees_north'},
                        name='latitude')
    dataarray.attrs['coordinates'] = 'longitude latitude'
    return [dataarray, lons, lats]
python
{ "resource": "" }
q27013
CFWriter.da2cf
train
def da2cf(dataarray, epoch=EPOCH):
    """Convert the dataarray to something cf-compatible."""
    new_data = dataarray.copy()
    # Remove the area
    new_data.attrs.pop('area', None)
    anc = [ds.attrs['name']
           for ds in new_data.attrs.get('ancillary_variables', [])]
    if anc:
        new_data.attrs['ancillary_variables'] = ' '.join(anc)
    # TODO: make this a grid mapping or lon/lats
    # new_data.attrs['area'] = str(new_data.attrs.get('area'))
    for key, val in new_data.attrs.copy().items():
        if val is None:
            new_data.attrs.pop(key)
    new_data.attrs.pop('_last_resampler', None)
    if 'time' in new_data.coords:
        new_data['time'].encoding['units'] = epoch
        new_data['time'].attrs['standard_name'] = 'time'
        new_data['time'].attrs.pop('bounds', None)
        if 'time' not in new_data.dims:
            new_data = new_data.expand_dims('time')
    if 'x' in new_data.coords:
        new_data['x'].attrs['standard_name'] = 'projection_x_coordinate'
        new_data['x'].attrs['units'] = 'm'
    if 'y' in new_data.coords:
        new_data['y'].attrs['standard_name'] = 'projection_y_coordinate'
        new_data['y'].attrs['units'] = 'm'
    new_data.attrs.setdefault('long_name', new_data.attrs.pop('name'))
    if 'prerequisites' in new_data.attrs:
        new_data.attrs['prerequisites'] = [np.string_(str(prereq))
                                           for prereq in new_data.attrs['prerequisites']]
    return new_data
python
{ "resource": "" }
q27014
HRITJMAFileHandler._get_platform
train
def _get_platform(self):
    """Get the platform name

    The platform is not specified explicitly in JMA HRIT files. For
    segmented data it is not even specified in the filename. But it
    can be derived indirectly from the projection name:

        GEOS(140.00): MTSAT-1R
        GEOS(140.25): MTSAT-1R    # TODO: Check if there is more...
        GEOS(140.70): Himawari-8
        GEOS(145.00): MTSAT-2

    See [MTSAT], section 3.1. Unfortunately Himawari-8 and 9 are not
    distinguishable using that method at the moment. From [HIMAWARI]:

    "HRIT/LRIT files have the same file naming convention in the same
    format in Himawari-8 and Himawari-9, so there is no particular
    difference."

    TODO: Find another way to distinguish Himawari-8 and 9.

    References:
    [MTSAT] http://www.data.jma.go.jp/mscweb/notice/Himawari7_e.html
    [HIMAWARI] http://www.data.jma.go.jp/mscweb/en/himawari89/space_segment/sample_hrit.html
    """
    try:
        return PLATFORMS[self.projection_name]
    except KeyError:
        logger.error('Unable to determine platform: Unknown projection '
                     'name "{}"'.format(self.projection_name))
        return UNKNOWN_PLATFORM
python
{ "resource": "" }
q27015
HRITJMAFileHandler._check_sensor_platform_consistency
train
def _check_sensor_platform_consistency(self, sensor):
    """Make sure sensor and platform are consistent

    Args:
        sensor (str): Sensor name from YAML dataset definition

    Raises:
        ValueError if they don't match
    """
    ref_sensor = SENSORS.get(self.platform, None)
    if ref_sensor and not sensor == ref_sensor:
        logger.error('Sensor-Platform mismatch: {} is not a payload '
                     'of {}. Did you choose the correct reader?'
                     .format(sensor, self.platform))
python
{ "resource": "" }
q27016
HRITJMAFileHandler._get_line_offset
train
def _get_line_offset(self):
    """Get line offset for the current segment

    Read line offset from the file and adapt it to the current segment
    or half disk scan so that y(l) ~ l - loff because this is what
    get_geostationary_area_extent() expects.
    """
    # Get line offset from the file
    nlines = int(self.mda['number_of_lines'])
    loff = np.float32(self.mda['loff'])

    # Adapt it to the current segment
    if self.is_segmented:
        # loff in the file specifies the offset of the full disk image
        # centre (1375/2750 for VIS/IR)
        segment_number = self.mda['segment_sequence_number'] - 1
        loff -= (self.mda['total_no_image_segm'] - segment_number - 1) * nlines
    elif self.area_id in (NORTH_HEMIS, SOUTH_HEMIS):
        # loff in the file specifies the start line of the half disk image
        # in the full disk image
        loff = nlines - loff
    elif self.area_id == UNKNOWN_AREA:
        logger.error('Cannot compute line offset for unknown area')

    return loff
python
{ "resource": "" }
q27017
HRITJMAFileHandler._mask_space
train
def _mask_space(self, data):
    """Mask space pixels"""
    geomask = get_geostationary_mask(area=self.area)
    return data.where(geomask)
python
{ "resource": "" }
q27018
BaseFileHandler.combine_info
train
def combine_info(self, all_infos):
    """Combine metadata for multiple datasets.

    When loading data from multiple files it can be non-trivial to combine
    things like start_time, end_time, start_orbit, end_orbit, etc.

    By default this method will produce a dictionary containing all values
    that were equal across **all** provided info dictionaries.

    Additionally it performs the logical comparisons to produce the
    following if they exist:

     - start_time
     - end_time
     - start_orbit
     - end_orbit
     - satellite_altitude
     - satellite_latitude
     - satellite_longitude

    Also, concatenate the areas.
    """
    combined_info = combine_metadata(*all_infos)

    new_dict = self._combine(all_infos, min, 'start_time', 'start_orbit')
    new_dict.update(self._combine(all_infos, max, 'end_time', 'end_orbit'))
    new_dict.update(self._combine(all_infos, np.mean,
                                  'satellite_longitude',
                                  'satellite_latitude',
                                  'satellite_altitude'))

    try:
        area = SwathDefinition(lons=np.ma.vstack([info['area'].lons for info in all_infos]),
                               lats=np.ma.vstack([info['area'].lats for info in all_infos]))
        area.name = '_'.join([info['area'].name for info in all_infos])
        combined_info['area'] = area
    except KeyError:
        pass

    new_dict.update(combined_info)
    return new_dict
python
{ "resource": "" }
q27019
get_available_channels
train
def get_available_channels(header):
    """Get the available channels from the header information"""
    chlist_str = header['15_SECONDARY_PRODUCT_HEADER'][
        'SelectedBandIDs']['Value']
    retv = {}

    for idx, char in zip(range(12), chlist_str):
        retv[CHANNEL_NAMES[idx + 1]] = (char == 'X')

    return retv
python
{ "resource": "" }
q27020
NativeMSGFileHandler._get_data_dtype
train
def _get_data_dtype(self):
    """Get the dtype of the file based on the actual available channels"""
    pkhrec = [
        ('GP_PK_HEADER', GSDTRecords.gp_pk_header),
        ('GP_PK_SH1', GSDTRecords.gp_pk_sh1)
    ]
    pk_head_dtype = np.dtype(pkhrec)

    def get_lrec(cols):
        lrec = [
            ("gp_pk", pk_head_dtype),
            ("version", np.uint8),
            ("satid", np.uint16),
            ("time", (np.uint16, 5)),
            ("lineno", np.uint32),
            ("chan_id", np.uint8),
            ("acq_time", (np.uint16, 3)),
            ("line_validity", np.uint8),
            ("line_rquality", np.uint8),
            ("line_gquality", np.uint8),
            ("line_data", (np.uint8, cols))
        ]
        return lrec

    # each pixel is 10-bits -> one line of data has 25% more bytes
    # than the number of columns suggest (10/8 = 1.25)
    visir_rec = get_lrec(int(self.mda['number_of_columns'] * 1.25))

    number_of_visir_channels = len(
        [s for s in self.mda['channel_list'] if not s == 'HRV'])
    drec = [('visir', (visir_rec, number_of_visir_channels))]

    if self.mda['available_channels']['HRV']:
        hrv_rec = get_lrec(int(self.mda['hrv_number_of_columns'] * 1.25))
        drec.append(('hrv', (hrv_rec, 3)))

    return np.dtype(drec)
python
{ "resource": "" }
q27021
NativeMSGFileHandler._get_memmap
train
def _get_memmap(self):
    """Get the memory map for the SEVIRI data"""
    with open(self.filename) as fp:
        data_dtype = self._get_data_dtype()
        hdr_size = native_header.itemsize

        return np.memmap(fp, dtype=data_dtype,
                         shape=(self.mda['number_of_lines'],),
                         offset=hdr_size, mode="r")
python
{ "resource": "" }
q27022
NativeMSGFileHandler._read_header
train
def _read_header(self):
    """Read the header info"""
    data = np.fromfile(self.filename, dtype=native_header, count=1)

    self.header.update(recarray2dict(data))

    data15hd = self.header['15_DATA_HEADER']
    sec15hd = self.header['15_SECONDARY_PRODUCT_HEADER']

    # Set the list of available channels:
    self.mda['available_channels'] = get_available_channels(self.header)
    self.mda['channel_list'] = [i for i in CHANNEL_NAMES.values()
                                if self.mda['available_channels'][i]]

    self.platform_id = data15hd[
        'SatelliteStatus']['SatelliteDefinition']['SatelliteId']
    self.mda['platform_name'] = "Meteosat-" + SATNUM[self.platform_id]

    equator_radius = data15hd['GeometricProcessing'][
        'EarthModel']['EquatorialRadius'] * 1000.
    north_polar_radius = data15hd[
        'GeometricProcessing']['EarthModel']['NorthPolarRadius'] * 1000.
    south_polar_radius = data15hd[
        'GeometricProcessing']['EarthModel']['SouthPolarRadius'] * 1000.
    polar_radius = (north_polar_radius + south_polar_radius) * 0.5
    ssp_lon = data15hd['ImageDescription'][
        'ProjectionDescription']['LongitudeOfSSP']

    self.mda['projection_parameters'] = {'a': equator_radius,
                                         'b': polar_radius,
                                         'h': 35785831.00,
                                         'ssp_longitude': ssp_lon}

    north = int(sec15hd['NorthLineSelectedRectangle']['Value'])
    east = int(sec15hd['EastColumnSelectedRectangle']['Value'])
    south = int(sec15hd['SouthLineSelectedRectangle']['Value'])
    west = int(sec15hd['WestColumnSelectedRectangle']['Value'])

    ncolumns = west - east + 1
    nrows = north - south + 1

    # check if the file has less rows or columns than
    # the maximum, if so it is an area of interest file
    if (nrows < VISIR_NUM_LINES) or (ncolumns < VISIR_NUM_COLUMNS):
        self.mda['is_full_disk'] = False

    # If the number of columns in the file is not divisible by 4,
    # UMARF will add extra columns to the file
    modulo = ncolumns % 4
    padding = 0
    if modulo > 0:
        padding = 4 - modulo
    cols_visir = ncolumns + padding

    # Check the VISIR calculated column dimension against
    # the header information
    cols_visir_hdr = int(sec15hd['NumberColumnsVISIR']['Value'])
    if cols_visir_hdr != cols_visir:
        logger.warning(
            "Number of VISIR columns from the header is incorrect!")
        logger.warning("Header: %d", cols_visir_hdr)
        logger.warning("Calculated: = %d", cols_visir)

    # HRV Channel - check if the area is reduced in east west
    # direction as this affects the number of columns in the file
    cols_hrv_hdr = int(sec15hd['NumberColumnsHRV']['Value'])
    if ncolumns < VISIR_NUM_COLUMNS:
        cols_hrv = cols_hrv_hdr
    else:
        cols_hrv = int(cols_hrv_hdr / 2)

    # self.mda represents the 16bit dimensions not 10bit
    self.mda['number_of_lines'] = int(sec15hd['NumberLinesVISIR']['Value'])
    self.mda['number_of_columns'] = cols_visir
    self.mda['hrv_number_of_lines'] = int(sec15hd["NumberLinesHRV"]['Value'])
    self.mda['hrv_number_of_columns'] = cols_hrv
python
{ "resource": "" }
q27023
radiance_to_bt
train
def radiance_to_bt(arr, wc_, a__, b__):
    """Convert to BT.
    """
    return a__ + b__ * (C2 * wc_ / (da.log(1 + (C1 * (wc_ ** 3) / arr))))
python
{ "resource": "" }
q27024
EPSAVHRRFile.keys
train
def keys(self):
    """List of reader's keys.
    """
    keys = []
    for val in self.form.scales.values():
        keys += val.dtype.fields.keys()
    return keys
python
{ "resource": "" }
q27025
GHRSST_OSISAFL2.get_lonlats
train
def get_lonlats(self, navid, nav_info, lon_out=None, lat_out=None):
    """Load an area.
    """
    lon_key = 'lon'
    valid_min = self[lon_key + '/attr/valid_min']
    valid_max = self[lon_key + '/attr/valid_max']

    lon_out.data[:] = self[lon_key][::-1]
    lon_out.mask[:] = (lon_out < valid_min) | (lon_out > valid_max)

    lat_key = 'lat'
    valid_min = self[lat_key + '/attr/valid_min']
    valid_max = self[lat_key + '/attr/valid_max']
    lat_out.data[:] = self[lat_key][::-1]
    lat_out.mask[:] = (lat_out < valid_min) | (lat_out > valid_max)

    return {}
python
{ "resource": "" }
q27026
_vis_calibrate
train
def _vis_calibrate(data, chn, calib_type, pre_launch_coeffs=False,
                   calib_coeffs=None, mask=False):
    """Visible channel calibration only.

    *calib_type* in 'counts', 'reflectance', 'radiance'.
    """
    # Calibration count to albedo, the calibration is performed separately for
    # two value ranges.
    if calib_type not in ['counts', 'radiance', 'reflectance']:
        raise ValueError('Calibration ' + calib_type + ' unknown!')

    arr = data["hrpt"][:, :, chn]
    mask |= arr == 0

    channel = arr.astype(np.float)
    if calib_type == 'counts':
        return channel

    if calib_type == 'radiance':
        logger.info("Radiances are not yet supported for " +
                    "the VIS/NIR channels!")

    if pre_launch_coeffs:
        coeff_idx = 2
    else:
        # check that coeffs are valid
        if np.all(data["calvis"][:, chn, 0, 4] == 0):
            logger.info(
                "No valid operational coefficients, fall back to pre-launch")
            coeff_idx = 2
        else:
            coeff_idx = 0

    intersection = data["calvis"][:, chn, coeff_idx, 4]

    if calib_coeffs is not None:
        logger.info("Updating from external calibration coefficients.")
        # intersection = np.expand_dims
        slope1 = np.expand_dims(calib_coeffs[0], 1)
        intercept1 = np.expand_dims(calib_coeffs[1], 1)
        slope2 = np.expand_dims(calib_coeffs[2], 1)
        intercept2 = np.expand_dims(calib_coeffs[3], 1)
    else:
        slope1 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 0] * 1e-10, 1)
        intercept1 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 1] * 1e-7, 1)
        slope2 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 2] * 1e-10, 1)
        intercept2 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 3] * 1e-7, 1)

        if chn == 2:
            slope2[slope2 < 0] += 0.4294967296

    mask1 = channel <= np.expand_dims(intersection, 1)
    mask2 = channel > np.expand_dims(intersection, 1)

    channel[mask1] = (channel * slope1 + intercept1)[mask1]
    channel[mask2] = (channel * slope2 + intercept2)[mask2]

    channel = channel.clip(min=0)
    return np.where(mask, np.nan, channel)
python
{ "resource": "" }
q27027
AVHRRAAPPL1BFile.get_angles
train
def get_angles(self, angle_id):
    """Get sun-satellite viewing angles"""
    tic = datetime.now()

    sunz40km = self._data["ang"][:, :, 0] * 1e-2
    satz40km = self._data["ang"][:, :, 1] * 1e-2
    azidiff40km = self._data["ang"][:, :, 2] * 1e-2

    try:
        from geotiepoints.interpolator import Interpolator
    except ImportError:
        logger.warning("Could not interpolate sun-sat angles, "
                       "python-geotiepoints missing.")
        self.sunz, self.satz, self.azidiff = sunz40km, satz40km, azidiff40km
    else:
        cols40km = np.arange(24, 2048, 40)
        cols1km = np.arange(2048)
        lines = sunz40km.shape[0]
        rows40km = np.arange(lines)
        rows1km = np.arange(lines)

        along_track_order = 1
        cross_track_order = 3

        satint = Interpolator(
            [sunz40km, satz40km, azidiff40km], (rows40km, cols40km),
            (rows1km, cols1km), along_track_order, cross_track_order)
        self.sunz, self.satz, self.azidiff = satint.interpolate()

    logger.debug("Interpolate sun-sat angles: time %s",
                 str(datetime.now() - tic))

    return create_xarray(getattr(self, ANGLES[angle_id]))
python
{ "resource": "" }
q27028
AVHRRAAPPL1BFile.navigate
train
def navigate(self):
    """Return the longitudes and latitudes of the scene.
    """
    tic = datetime.now()
    lons40km = self._data["pos"][:, :, 1] * 1e-4
    lats40km = self._data["pos"][:, :, 0] * 1e-4

    try:
        from geotiepoints import SatelliteInterpolator
    except ImportError:
        logger.warning("Could not interpolate lon/lats, "
                       "python-geotiepoints missing.")
        self.lons, self.lats = lons40km, lats40km
    else:
        cols40km = np.arange(24, 2048, 40)
        cols1km = np.arange(2048)
        lines = lons40km.shape[0]
        rows40km = np.arange(lines)
        rows1km = np.arange(lines)

        along_track_order = 1
        cross_track_order = 3

        satint = SatelliteInterpolator(
            (lons40km, lats40km), (rows40km, cols40km), (rows1km, cols1km),
            along_track_order, cross_track_order)
        self.lons, self.lats = satint.interpolate()

    logger.debug("Navigation time %s", str(datetime.now() - tic))
python
{ "resource": "" }
q27029
NetCDF4FileHandler._collect_attrs
train
def _collect_attrs(self, name, obj):
    """Collect all the attributes for the provided file object.
    """
    for key in obj.ncattrs():
        value = getattr(obj, key)
        fc_key = "{}/attr/{}".format(name, key)
        try:
            self.file_content[fc_key] = np2str(value)
        except ValueError:
            self.file_content[fc_key] = value
python
{ "resource": "" }
q27030
NetCDF4FileHandler.collect_metadata
train
def collect_metadata(self, name, obj):
    """Collect all file variables and attributes for the provided file object.

    This method also iterates through subgroups of the provided object.
    """
    # Look through each subgroup
    base_name = name + "/" if name else ""
    for group_name, group_obj in obj.groups.items():
        self.collect_metadata(base_name + group_name, group_obj)
    for var_name, var_obj in obj.variables.items():
        var_name = base_name + var_name
        self.file_content[var_name] = var_obj
        self.file_content[var_name + "/dtype"] = var_obj.dtype
        self.file_content[var_name + "/shape"] = var_obj.shape
        self._collect_attrs(var_name, var_obj)
    self._collect_attrs(name, obj)
python
{ "resource": "" }
q27031
ACSPOFileHandler.get_dataset
train
def get_dataset(self, dataset_id, ds_info, xslice=slice(None), yslice=slice(None)):
    """Load data array and metadata from file on disk."""
    var_path = ds_info.get('file_key', '{}'.format(dataset_id.name))
    metadata = self.get_metadata(dataset_id, ds_info)
    shape = metadata['shape']
    file_shape = self[var_path + '/shape']
    if isinstance(shape, tuple) and len(shape) == 2:
        # 2D array
        if xslice.start is not None:
            shape = (shape[0], xslice.stop - xslice.start)
        if yslice.start is not None:
            shape = (yslice.stop - yslice.start, shape[1])
    elif isinstance(shape, tuple) and len(shape) == 1 and yslice.start is not None:
        shape = ((yslice.stop - yslice.start) / yslice.step,)
    metadata['shape'] = shape

    valid_min = self[var_path + '/attr/valid_min']
    valid_max = self[var_path + '/attr/valid_max']
    # no need to check fill value since we are using valid min/max
    scale_factor = self.get(var_path + '/attr/scale_factor')
    add_offset = self.get(var_path + '/attr/add_offset')

    if isinstance(file_shape, tuple) and len(file_shape) == 3:
        data = self[var_path][0, yslice, xslice]
    elif isinstance(file_shape, tuple) and len(file_shape) == 2:
        data = self[var_path][yslice, xslice]
    elif isinstance(file_shape, tuple) and len(file_shape) == 1:
        data = self[var_path][yslice]
    else:
        data = self[var_path]

    data = data.where((data >= valid_min) & (data <= valid_max))
    if scale_factor is not None:
        data = data * scale_factor + add_offset

    if ds_info.get('cloud_clear', False):
        # clear-sky if bit 15-16 are 00
        clear_sky_mask = (self['l2p_flags'][0] & 0b1100000000000000) != 0
        data = data.where(~clear_sky_mask)

    data.attrs.update(metadata)
    return data
python
{ "resource": "" }
q27032
VIIRSActiveFiresFileHandler.get_dataset
train
def get_dataset(self, dsid, dsinfo):
    """Get dataset function

    Args:
        dsid: Dataset ID
        dsinfo: Dataset information

    Returns:
        Dask DataArray: Data
    """
    data = self[dsinfo.get('file_key', dsid.name)]
    data.attrs.update(dsinfo)
    data.attrs["platform_name"] = self['/attr/satellite_name']
    data.attrs["sensor"] = self['/attr/instrument_name']
    return data
python
{ "resource": "" }
q27033
np2str
train
def np2str(value):
    """Convert a `numpy.string_` to str.

    Args:
        value (ndarray): scalar or 1-element numpy array to convert

    Raises:
        ValueError: if value is array larger than 1-element or it is not of
                    type `numpy.string_` or it is not a numpy array
    """
    if hasattr(value, 'dtype') and \
            issubclass(value.dtype.type, (np.string_, np.object_)) \
            and value.size == 1:
        value = np.asscalar(value)
        if not isinstance(value, str):
            # python 3 - was scalar numpy array of bytes
            # otherwise python 2 - scalar numpy array of 'str'
            value = value.decode()
        return value
    else:
        raise ValueError("Array is not a string type or is larger than 1")
python
{ "resource": "" }
q27034
get_geostationary_mask
train
def get_geostationary_mask(area):
    """Compute a mask of the earth's shape as seen by a geostationary satellite

    Args:
        area (pyresample.geometry.AreaDefinition): Corresponding area
                                                   definition

    Returns:
        Boolean mask, True inside the earth's shape, False outside.
    """
    # Compute projection coordinates at the earth's limb
    h = area.proj_dict['h']

    xmax, ymax = get_geostationary_angle_extent(area)
    xmax *= h
    ymax *= h

    # Compute projection coordinates at the centre of each pixel
    x, y = area.get_proj_coords_dask()

    # Compute mask of the earth's elliptical shape
    return ((x / xmax) ** 2 + (y / ymax) ** 2) <= 1
python
{ "resource": "" }
q27035
_lonlat_from_geos_angle
train
def _lonlat_from_geos_angle(x, y, geos_area):
    """Get lons and lats from x, y in projection coordinates."""
    h = (geos_area.proj_dict['h'] + geos_area.proj_dict['a']) / 1000
    b__ = (geos_area.proj_dict['a'] / geos_area.proj_dict['b']) ** 2

    sd = np.sqrt((h * np.cos(x) * np.cos(y)) ** 2 -
                 (np.cos(y)**2 + b__ * np.sin(y)**2) *
                 (h**2 - (geos_area.proj_dict['a'] / 1000)**2))
    # sd = 0

    sn = (h * np.cos(x) * np.cos(y) - sd) / (np.cos(y)**2 +
                                             b__ * np.sin(y)**2)

    s1 = h - sn * np.cos(x) * np.cos(y)
    s2 = sn * np.sin(x) * np.cos(y)
    s3 = -sn * np.sin(y)
    sxy = np.sqrt(s1**2 + s2**2)

    lons = np.rad2deg(np.arctan2(s2, s1)) + geos_area.proj_dict.get('lon_0', 0)
    lats = np.rad2deg(-np.arctan2(b__ * s3, sxy))

    return lons, lats
python
{ "resource": "" }
q27036
get_sub_area
train
def get_sub_area(area, xslice, yslice):
    """Apply slices to the area_extent and size of the area."""
    new_area_extent = ((area.pixel_upper_left[0] +
                        (xslice.start - 0.5) * area.pixel_size_x),
                       (area.pixel_upper_left[1] -
                        (yslice.stop - 0.5) * area.pixel_size_y),
                       (area.pixel_upper_left[0] +
                        (xslice.stop - 0.5) * area.pixel_size_x),
                       (area.pixel_upper_left[1] -
                        (yslice.start - 0.5) * area.pixel_size_y))

    return AreaDefinition(area.area_id, area.name,
                          area.proj_id, area.proj_dict,
                          xslice.stop - xslice.start,
                          yslice.stop - yslice.start,
                          new_area_extent)
python
{ "resource": "" }
q27037
unzip_file
train
def unzip_file(filename):
    """Unzip the file if file is bzipped = ending with 'bz2'"""
    if filename.endswith('bz2'):
        bz2file = bz2.BZ2File(filename)
        fdn, tmpfilepath = tempfile.mkstemp()
        with closing(os.fdopen(fdn, 'wb')) as ofpt:
            try:
                ofpt.write(bz2file.read())
            except IOError:
                import traceback
                traceback.print_exc()
                LOGGER.info("Failed to read bzipped file %s", str(filename))
                os.remove(tmpfilepath)
                return None
        return tmpfilepath
    return None
python
{ "resource": "" }
q27038
bbox
train
def bbox(img):
    """Find the bounding box around nonzero elements in the given array

    Copied from https://stackoverflow.com/a/31402351/5703449 .

    Returns:
        rowmin, rowmax, colmin, colmax
    """
    rows = np.any(img, axis=1)
    cols = np.any(img, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]

    return rmin, rmax, cmin, cmax
python
{ "resource": "" }
q27039
Scene._compute_metadata_from_readers
train
def _compute_metadata_from_readers(self):
    """Determine pieces of metadata from the readers loaded."""
    mda = {'sensor': self._get_sensor_names()}

    # overwrite the request start/end times with actual loaded data limits
    if self.readers:
        mda['start_time'] = min(x.start_time
                                for x in self.readers.values())
        mda['end_time'] = max(x.end_time
                              for x in self.readers.values())
    return mda
python
{ "resource": "" }
q27040
Scene._get_sensor_names
train
def _get_sensor_names(self):
    """Join the sensors from all loaded readers."""
    # if the user didn't tell us what sensors to work with, let's figure it
    # out
    if not self.attrs.get('sensor'):
        # reader finder could return multiple readers
        return set([sensor for reader_instance in self.readers.values()
                    for sensor in reader_instance.sensor_names])
    elif not isinstance(self.attrs['sensor'], (set, tuple, list)):
        return set([self.attrs['sensor']])
    else:
        return set(self.attrs['sensor'])
python
{ "resource": "" }
q27041
Scene.create_reader_instances
train
def create_reader_instances(self,
                            filenames=None,
                            reader=None,
                            reader_kwargs=None):
    """Find readers and return their instances."""
    return load_readers(filenames=filenames,
                        reader=reader,
                        reader_kwargs=reader_kwargs,
                        ppp_config_dir=self.ppp_config_dir)
python
{ "resource": "" }
q27042
Scene._compare_areas
train
def _compare_areas(self, datasets=None, compare_func=max):
    """Compare the areas of the provided datasets.

    Args:
        datasets (iterable): Datasets whose areas will be compared. Can
                             be either `xarray.DataArray` objects or
                             identifiers to get the DataArrays from the
                             current Scene. Defaults to all datasets.
                             This can also be a series of area objects,
                             typically AreaDefinitions.
        compare_func (callable): `min` or `max` or other function used to
                                 compare the dataset's areas.
    """
    if datasets is None:
        datasets = list(self.values())

    areas = []
    for ds in datasets:
        if isinstance(ds, BaseDefinition):
            areas.append(ds)
            continue
        elif not isinstance(ds, DataArray):
            ds = self[ds]
        area = ds.attrs.get('area')
        areas.append(area)

    areas = [x for x in areas if x is not None]
    if not areas:
        raise ValueError("No dataset areas available")

    if not all(isinstance(x, type(areas[0])) for x in areas[1:]):
        raise ValueError("Can't compare areas of different types")
    elif isinstance(areas[0], AreaDefinition):
        first_pstr = areas[0].proj_str
        if not all(ad.proj_str == first_pstr for ad in areas[1:]):
            raise ValueError("Can't compare areas with different "
                             "projections.")

        def key_func(ds):
            return 1. / ds.pixel_size_x
    else:
        def key_func(ds):
            return ds.shape

    # find the highest/lowest area among the provided
    return compare_func(areas, key=key_func)
python
{ "resource": "" }
q27043
Scene.available_dataset_names
train
def available_dataset_names(self, reader_name=None, composites=False):
    """Get the list of the names of the available datasets."""
    return sorted(set(x.name for x in self.available_dataset_ids(
        reader_name=reader_name, composites=composites)))
python
{ "resource": "" }
q27044
Scene.all_dataset_ids
train
def all_dataset_ids(self, reader_name=None, composites=False):
    """Get names of all datasets from loaded readers or `reader_name` if specified.

    :return: list of all dataset names
    """
    try:
        if reader_name:
            readers = [self.readers[reader_name]]
        else:
            readers = self.readers.values()
    except (AttributeError, KeyError):
        raise KeyError("No reader '%s' found in scene" % reader_name)

    all_datasets = [dataset_id
                    for reader in readers
                    for dataset_id in reader.all_dataset_ids]
    if composites:
        all_datasets += self.all_composite_ids()
    return all_datasets
python
{ "resource": "" }
q27045
Scene.available_composite_ids
train
def available_composite_ids(self, available_datasets=None):
    """Get names of compositors that can be generated from the available
    datasets.

    Returns: generator of available compositor's names
    """
    if available_datasets is None:
        available_datasets = self.available_dataset_ids(composites=False)
    else:
        if not all(isinstance(ds_id, DatasetID) for ds_id in available_datasets):
            raise ValueError(
                "'available_datasets' must all be DatasetID objects")

    all_comps = self.all_composite_ids()
    # recreate the dependency tree so it doesn't interfere with the user's
    # wishlist
    comps, mods = self.cpl.load_compositors(self.attrs['sensor'])
    dep_tree = DependencyTree(self.readers, comps, mods)
    dep_tree.find_dependencies(set(available_datasets + all_comps))
    available_comps = set(x.name for x in dep_tree.trunk())
    # get rid of modified composites that are in the trunk
    return sorted(available_comps & set(all_comps))
python
{ "resource": "" }
q27046
Scene.available_composite_names
train
def available_composite_names(self, available_datasets=None):
    """All configured composites known to this Scene."""
    return sorted(set(x.name for x in self.available_composite_ids(
        available_datasets=available_datasets)))
python
{ "resource": "" }
q27047
Scene.all_composite_ids
train
def all_composite_ids(self, sensor_names=None):
    """Get all composite IDs that are configured.

    Returns: generator of configured composite names
    """
    if sensor_names is None:
        sensor_names = self.attrs['sensor']
    compositors = []
    # Note if we get compositors from the dep tree then it will include
    # modified composites which we don't want
    for sensor_name in sensor_names:
        sensor_comps = self.cpl.compositors.get(sensor_name, {}).keys()
        # ignore inline compositor dependencies starting with '_'
        compositors.extend(c for c in sensor_comps
                           if not c.name.startswith('_'))
    return sorted(set(compositors))
python
{ "resource": "" }
q27048
Scene.iter_by_area
train
def iter_by_area(self):
    """Generate datasets grouped by Area.

    :return: generator of (area_obj, list of dataset objects)
    """
    datasets_by_area = {}
    for ds in self:
        a = ds.attrs.get('area')
        datasets_by_area.setdefault(a, []).append(
            DatasetID.from_dict(ds.attrs))

    return datasets_by_area.items()
python
{ "resource": "" }
q27049
Scene.copy
train
def copy(self, datasets=None):
    """Create a copy of the Scene including dependency information.

    Args:
        datasets (list, tuple): `DatasetID` objects for the datasets
                                to include in the new Scene object.
    """
    new_scn = self.__class__()
    new_scn.attrs = self.attrs.copy()
    new_scn.dep_tree = self.dep_tree.copy()

    for ds_id in (datasets or self.keys()):
        # NOTE: Must use `.datasets` or side effects of `__setitem__`
        # could hurt us with regards to the wishlist
        new_scn.datasets[ds_id] = self[ds_id]

    if not datasets:
        new_scn.wishlist = self.wishlist.copy()
    else:
        new_scn.wishlist = set([DatasetID.from_dict(ds.attrs)
                                for ds in new_scn])
    return new_scn
python
{ "resource": "" }
q27050
Scene.all_same_area
train
def all_same_area(self):
    """All contained data arrays are on the same area."""
    all_areas = [x.attrs.get('area', None) for x in self.values()]
    all_areas = [x for x in all_areas if x is not None]
    return all(all_areas[0] == x for x in all_areas[1:])
python
{ "resource": "" }
q27051
Scene.all_same_proj
train
def all_same_proj(self):
    """All contained data arrays are in the same projection."""
    all_areas = [x.attrs.get('area', None) for x in self.values()]
    all_areas = [x for x in all_areas if x is not None]
    return all(all_areas[0].proj_str == x.proj_str for x in all_areas[1:])
python
{ "resource": "" }
q27052
Scene._slice_area_from_bbox
train
def _slice_area_from_bbox(self, src_area, dst_area, ll_bbox=None,
                          xy_bbox=None):
    """Slice the provided area using the bounds provided."""
    if ll_bbox is not None:
        dst_area = AreaDefinition(
            'crop_area', 'crop_area', 'crop_latlong',
            {'proj': 'latlong'}, 100, 100, ll_bbox)
    elif xy_bbox is not None:
        dst_area = AreaDefinition(
            'crop_area', 'crop_area', 'crop_xy',
            src_area.proj_dict, src_area.x_size, src_area.y_size,
            xy_bbox)
    x_slice, y_slice = src_area.get_area_slices(dst_area)
    return src_area[y_slice, x_slice], y_slice, x_slice
python
{ "resource": "" }
q27053
Scene._slice_datasets
train
def _slice_datasets(self, dataset_ids, slice_key, new_area, area_only=True):
    """Slice scene in-place for the datasets specified."""
    new_datasets = {}
    datasets = (self[ds_id] for ds_id in dataset_ids)
    for ds, parent_ds in dataset_walker(datasets):
        ds_id = DatasetID.from_dict(ds.attrs)
        # handle ancillary variables
        pres = None
        if parent_ds is not None:
            pres = new_datasets[DatasetID.from_dict(parent_ds.attrs)]
        if ds_id in new_datasets:
            replace_anc(ds, pres)
            continue
        if area_only and ds.attrs.get('area') is None:
            new_datasets[ds_id] = ds
            replace_anc(ds, pres)
            continue

        if not isinstance(slice_key, dict):
            # match dimension name to slice object
            key = dict(zip(ds.dims, slice_key))
        else:
            key = slice_key
        new_ds = ds.isel(**key)
        if new_area is not None:
            new_ds.attrs['area'] = new_area

        new_datasets[ds_id] = new_ds
        if parent_ds is None:
            # don't use `__setitem__` because we don't want this to
            # affect the existing wishlist/dep tree
            self.datasets[ds_id] = new_ds
        else:
            replace_anc(new_ds, pres)
python
{ "resource": "" }
q27054
Scene.slice
train
def slice(self, key):
    """Slice Scene by dataset index.

    .. note::

        DataArrays that do not have an ``area`` attribute will not be
        sliced.
    """
    if not self.all_same_area:
        raise RuntimeError("'Scene' has different areas and cannot "
                           "be usefully sliced.")
    # slice
    new_scn = self.copy()
    new_scn.wishlist = self.wishlist
    for area, dataset_ids in self.iter_by_area():
        if area is not None:
            # assume dimensions for area are y and x
            one_ds = self[dataset_ids[0]]
            area_key = tuple(sl for dim, sl in zip(one_ds.dims, key)
                             if dim in ['y', 'x'])
            new_area = area[area_key]
        else:
            new_area = None
        new_scn._slice_datasets(dataset_ids, key, new_area)
    return new_scn
python
{ "resource": "" }
q27055
Scene.crop
train
def crop(self, area=None, ll_bbox=None, xy_bbox=None, dataset_ids=None):
    """Crop Scene to a specific Area boundary or bounding box.

    Args:
        area (AreaDefinition): Area to crop the current Scene to
        ll_bbox (tuple, list): 4-element tuple where values are in
                               lon/lat degrees. Elements are
                               ``(xmin, ymin, xmax, ymax)`` where X is
                               longitude and Y is latitude.
        xy_bbox (tuple, list): Same as `ll_bbox` but elements are in
                               projection units.
        dataset_ids (iterable): DatasetIDs to include in the returned
                                `Scene`. Defaults to all datasets.

    This method will attempt to intelligently slice the data to preserve
    relationships between datasets. For example, if we are cropping two
    DataArrays of 500m and 1000m pixel resolution then this method will
    assume that exactly 4 pixels of the 500m array cover the same
    geographic area as a single 1000m pixel. It handles these cases based
    on the shapes of the input arrays and adjusting slicing indexes
    accordingly. This method will have trouble handling cases where data
    arrays seem related but don't cover the same geographic area or if
    the coarsest resolution data is not related to the other arrays which
    are related.

    It can be useful to follow cropping with a call to the native
    resampler to resolve all datasets to the same resolution and compute
    any composites that could not be generated previously::

        >>> cropped_scn = scn.crop(ll_bbox=(-105., 40., -95., 50.))
        >>> remapped_scn = cropped_scn.resample(resampler='native')

    .. note::

        The `resample` method automatically crops input data before
        resampling to save time/memory.

    """
    if len([x for x in [area, ll_bbox, xy_bbox] if x is not None]) != 1:
        raise ValueError("One and only one of 'area', 'll_bbox', "
                         "or 'xy_bbox' can be specified.")

    new_scn = self.copy(datasets=dataset_ids)
    if not new_scn.all_same_proj and xy_bbox is not None:
        raise ValueError("Can't crop when dataset_ids are not all on the "
                         "same projection.")

    # get the lowest resolution area, use it as the base of the slice
    # this makes sure that the other areas *should* be a consistent factor
    min_area = new_scn.min_area()
    if isinstance(area, str):
        area = get_area_def(area)
    new_min_area, min_y_slice, min_x_slice = self._slice_area_from_bbox(
        min_area, area, ll_bbox, xy_bbox)
    new_target_areas = {}
    for src_area, dataset_ids in new_scn.iter_by_area():
        if src_area is None:
            for ds_id in dataset_ids:
                new_scn.datasets[ds_id] = self[ds_id]
            continue

        y_factor, y_remainder = np.divmod(float(src_area.shape[0]),
                                          min_area.shape[0])
        x_factor, x_remainder = np.divmod(float(src_area.shape[1]),
                                          min_area.shape[1])
        y_factor = int(y_factor)
        x_factor = int(x_factor)
        if y_remainder == 0 and x_remainder == 0:
            y_slice = slice(min_y_slice.start * y_factor,
                            min_y_slice.stop * y_factor)
            x_slice = slice(min_x_slice.start * x_factor,
                            min_x_slice.stop * x_factor)
            new_area = src_area[y_slice, x_slice]
            slice_key = {'y': y_slice, 'x': x_slice}
            new_scn._slice_datasets(dataset_ids, slice_key, new_area)
        else:
            new_target_areas[src_area] = self._slice_area_from_bbox(
                src_area, area, ll_bbox, xy_bbox
            )

    return new_scn
python
{ "resource": "" }
q27056
Scene.aggregate
train
def aggregate(self, dataset_ids=None, boundary='exact', side='left',
              func='mean', **dim_kwargs):
    """Create an aggregated version of the Scene.

    Args:
        dataset_ids (iterable): DatasetIDs to include in the returned
                                `Scene`. Defaults to all datasets.
        func (string): Function to apply on each aggregation window. One
                       of 'mean', 'sum', 'min', 'max', 'median', 'argmin',
                       'argmax', 'prod', 'std', 'var'. 'mean' is the
                       default.
        boundary: Not implemented.
        side: Not implemented.
        dim_kwargs: the size of the windows to aggregate.

    Returns:
        A new aggregated scene

    See also:
        xarray.DataArray.coarsen

    Example:
        `scn.aggregate(func='min', x=2, y=2)` will aggregate 2x2 pixels
        by applying the `min` function.
    """
    new_scn = self.copy(datasets=dataset_ids)
    for src_area, ds_ids in new_scn.iter_by_area():
        if src_area is None:
            for ds_id in ds_ids:
                new_scn.datasets[ds_id] = self[ds_id]
            continue

        if boundary != 'exact':
            raise NotImplementedError("boundary modes apart from 'exact' "
                                      "are not implemented yet.")

        target_area = src_area.aggregate(**dim_kwargs)
        resolution = max(target_area.pixel_size_x, target_area.pixel_size_y)
        for ds_id in ds_ids:
            res = self[ds_id].coarsen(boundary=boundary, side=side,
                                      func=func, **dim_kwargs)

            new_scn.datasets[ds_id] = getattr(res, func)()
            new_scn.datasets[ds_id].attrs['area'] = target_area
            new_scn.datasets[ds_id].attrs['resolution'] = resolution
    return new_scn
python
{ "resource": "" }
q27057
Scene._read_datasets
train
def _read_datasets(self, dataset_nodes, **kwargs):
    """Read the given datasets from file."""
    # Sort requested datasets by reader
    reader_datasets = {}
    for node in dataset_nodes:
        ds_id = node.name
        # if we already have this node loaded or the node was assigned
        # by the user (node data is None) then don't try to load from a
        # reader
        if ds_id in self.datasets or not isinstance(node.data, dict):
            continue
        reader_name = node.data.get('reader_name')
        if reader_name is None:
            # This shouldn't be possible
            raise RuntimeError("Dependency tree has a corrupt node.")
        reader_datasets.setdefault(reader_name, set()).add(ds_id)

    # load all datasets for one reader at a time
    loaded_datasets = DatasetDict()
    for reader_name, ds_ids in reader_datasets.items():
        reader_instance = self.readers[reader_name]
        new_datasets = reader_instance.load(ds_ids, **kwargs)
        loaded_datasets.update(new_datasets)
    self.datasets.update(loaded_datasets)
    return loaded_datasets
python
{ "resource": "" }
q27058
Scene._get_prereq_datasets
train
def _get_prereq_datasets(self, comp_id, prereq_nodes, keepables, skip=False):
    """Get a composite's prerequisites, generating them if needed.

    Args:
        comp_id (DatasetID): DatasetID for the composite whose
                             prerequisites are being collected.
        prereq_nodes (sequence of Nodes): Prerequisites to collect
        keepables (set): `set` to update if any prerequisites can't
                         be loaded at this time (see
                         `_generate_composite`).
        skip (bool): If True, consider prerequisites as optional and
                     only log when they are missing. If False,
                     prerequisites are considered required and will
                     raise an exception and log a warning if they can't
                     be collected. Defaults to False.

    Raises:
        KeyError: If required (skip=False) prerequisite can't be
                  collected.

    """
    prereq_datasets = []
    delayed_gen = False
    for prereq_node in prereq_nodes:
        prereq_id = prereq_node.name
        if prereq_id not in self.datasets and prereq_id not in keepables \
                and not prereq_node.is_leaf:
            self._generate_composite(prereq_node, keepables)

        if prereq_id in self.datasets:
            prereq_datasets.append(self.datasets[prereq_id])
        elif not prereq_node.is_leaf and prereq_id in keepables:
            delayed_gen = True
            continue
        elif not skip:
            LOG.debug("Missing prerequisite for '{}': '{}'".format(
                comp_id, prereq_id))
            raise KeyError("Missing composite prerequisite")
        else:
            LOG.debug("Missing optional prerequisite for {}: {}".format(
                comp_id, prereq_id))

    if delayed_gen:
        keepables.add(comp_id)
        keepables.update([x.name for x in prereq_nodes])
        LOG.debug("Delaying generation of %s because of dependency's "
                  "delayed generation: %s", comp_id, prereq_id)
        if not skip:
            LOG.debug("Missing prerequisite for '{}': '{}'".format(
                comp_id, prereq_id))
            raise KeyError("Missing composite prerequisite")
        else:
            LOG.debug("Missing optional prerequisite for {}: {}".format(
                comp_id, prereq_id))

    return prereq_datasets
python
{ "resource": "" }
q27059
Scene._generate_composite
train
def _generate_composite(self, comp_node, keepables):
    """Collect all composite prereqs and create the specified composite.

    Args:
        comp_node (Node): Composite Node to generate a Dataset for
        keepables (set): `set` to update if any datasets are needed
                         when generation is continued later. This can
                         happen if generation is delayed to incompatible
                         areas which would require resampling first.

    """
    if comp_node.name in self.datasets:
        # already loaded
        return
    compositor, prereqs, optional_prereqs = comp_node.data

    try:
        prereq_datasets = self._get_prereq_datasets(
            comp_node.name,
            prereqs,
            keepables,
        )
    except KeyError:
        return

    optional_datasets = self._get_prereq_datasets(
        comp_node.name,
        optional_prereqs,
        keepables,
        skip=True
    )

    try:
        composite = compositor(prereq_datasets,
                               optional_datasets=optional_datasets,
                               **self.attrs)

        cid = DatasetID.from_dict(composite.attrs)

        self.datasets[cid] = composite
        # update the node with the computed DatasetID
        if comp_node.name in self.wishlist:
            self.wishlist.remove(comp_node.name)
            self.wishlist.add(cid)
        comp_node.name = cid
    except IncompatibleAreas:
        LOG.debug("Delaying generation of %s because of incompatible areas",
                  str(compositor.id))
        preservable_datasets = set(self.datasets.keys())
        prereq_ids = set(p.name for p in prereqs)
        opt_prereq_ids = set(p.name for p in optional_prereqs)
        keepables |= preservable_datasets & (prereq_ids | opt_prereq_ids)
        # even though it wasn't generated keep a list of what
        # might be needed in other compositors
        keepables.add(comp_node.name)
        return
python
{ "resource": "" }
q27060
Scene.read
train
def read(self, nodes=None, **kwargs):
    """Load datasets from the necessary reader.

    Args:
        nodes (iterable): DependencyTree Node objects
        **kwargs: Keyword arguments to pass to the reader's `load` method.

    Returns:
        DatasetDict of loaded datasets

    """
    if nodes is None:
        required_nodes = self.wishlist - set(self.datasets.keys())
        nodes = self.dep_tree.leaves(nodes=required_nodes)
    return self._read_datasets(nodes, **kwargs)
python
{ "resource": "" }
q27061
Scene.generate_composites
train
def generate_composites(self, nodes=None):
    """Compute all the composites contained in `requirements`.
    """
    if nodes is None:
        required_nodes = self.wishlist - set(self.datasets.keys())
        nodes = set(self.dep_tree.trunk(nodes=required_nodes)) - \
            set(self.datasets.keys())
    return self._read_composites(nodes)
python
{ "resource": "" }
q27062
Scene.unload
train
def unload(self, keepables=None):
    """Unload all unneeded datasets.

    Datasets are considered unneeded if they weren't directly requested
    or added to the Scene by the user or they are no longer needed to
    generate composites that have yet to be generated.

    Args:
        keepables (iterable): DatasetIDs to keep whether they are needed
                              or not.

    """
    to_del = [ds_id for ds_id, projectable in self.datasets.items()
              if ds_id not in self.wishlist and
              (not keepables or ds_id not in keepables)]
    for ds_id in to_del:
        LOG.debug("Unloading dataset: %r", ds_id)
        del self.datasets[ds_id]
python
{ "resource": "" }
q27063
Scene.load
train
def load(self, wishlist, calibration=None, resolution=None,
         polarization=None, level=None, generate=True, unload=True,
         **kwargs):
    """Read and generate requested datasets.

    When the `wishlist` contains `DatasetID` objects they can either be
    fully-specified `DatasetID` objects with every parameter specified
    or they can not provide certain parameters and the "best" parameter
    will be chosen. For example, if a dataset is available in multiple
    resolutions and no resolution is specified in the wishlist's
    DatasetID then the highest (smallest number) resolution will be
    chosen.

    Loaded `DataArray` objects are created and stored in the Scene
    object.

    Args:
        wishlist (iterable): Names (str), wavelengths (float), or
                             DatasetID objects of the requested datasets
                             to load. See `available_dataset_ids()` for
                             what datasets are available.
        calibration (list, str): Calibration levels to limit available
                                 datasets. This is a shortcut to having
                                 to list each DatasetID in `wishlist`.
        resolution (list | float): Resolution to limit available
                                   datasets. This is a shortcut similar
                                   to calibration.
        polarization (list | str): Polarization ('V', 'H') to limit
                                   available datasets. This is a shortcut
                                   similar to calibration.
        level (list | str): Pressure level to limit available datasets.
                            Pressure should be in hPa or mb. If an
                            altitude is used it should be specified in
                            inverse meters (1/m). The units of this
                            parameter ultimately depend on the reader.
        generate (bool): Generate composites from the loaded datasets
                         (default: True)
        unload (bool): Unload datasets that were required to generate
                       the requested datasets (composite dependencies)
                       but are no longer needed.

    """
    dataset_keys = set(wishlist)
    needed_datasets = (self.wishlist | dataset_keys) - \
        set(self.datasets.keys())
    unknown = self.dep_tree.find_dependencies(needed_datasets,
                                              calibration=calibration,
                                              polarization=polarization,
                                              resolution=resolution,
                                              level=level)
    self.wishlist |= needed_datasets
    if unknown:
        unknown_str = ", ".join(map(str, unknown))
        raise KeyError("Unknown datasets: {}".format(unknown_str))

    self.read(**kwargs)
    if generate:
        keepables = self.generate_composites()
    else:
        # don't lose datasets we loaded to try to generate composites
        keepables = set(self.datasets.keys()) | self.wishlist
    if self.missing_datasets:
        # copy the set of missing datasets because they won't be valid
        # after they are removed in the next line
        missing = self.missing_datasets.copy()
        self._remove_failed_datasets(keepables)
        missing_str = ", ".join(str(x) for x in missing)
        LOG.warning("The following datasets were not created and may "
                    "require resampling to be generated: "
                    "{}".format(missing_str))
    if unload:
        self.unload(keepables=keepables)
python
{ "resource": "" }
q27064
Scene._slice_data
train
def _slice_data(self, source_area, slices, dataset):
    """Slice the data to reduce it."""
    slice_x, slice_y = slices
    dataset = dataset.isel(x=slice_x, y=slice_y)
    assert ('x', source_area.x_size) in dataset.sizes.items()
    assert ('y', source_area.y_size) in dataset.sizes.items()
    dataset.attrs['area'] = source_area

    return dataset
python
{ "resource": "" }
q27065
Scene._resampled_scene
train
def _resampled_scene(self, new_scn, destination_area, reduce_data=True,
                     **resample_kwargs):
    """Resample `datasets` to the `destination` area.

    If data reduction is enabled, some local caching is performed in
    order to avoid recomputation of area intersections.
    """
    new_datasets = {}
    datasets = list(new_scn.datasets.values())
    if isinstance(destination_area, (str, six.text_type)):
        destination_area = get_area_def(destination_area)
    if hasattr(destination_area, 'freeze'):
        try:
            max_area = new_scn.max_area()
            destination_area = destination_area.freeze(max_area)
        except ValueError:
            raise ValueError("No dataset areas available to freeze "
                             "DynamicAreaDefinition.")

    resamplers = {}
    reductions = {}
    for dataset, parent_dataset in dataset_walker(datasets):
        ds_id = DatasetID.from_dict(dataset.attrs)
        pres = None
        if parent_dataset is not None:
            pres = new_datasets[DatasetID.from_dict(parent_dataset.attrs)]
        if ds_id in new_datasets:
            replace_anc(new_datasets[ds_id], pres)
            if ds_id in new_scn.datasets:
                new_scn.datasets[ds_id] = new_datasets[ds_id]
            continue
        if dataset.attrs.get('area') is None:
            if parent_dataset is None:
                new_scn.datasets[ds_id] = dataset
            else:
                replace_anc(dataset, pres)
            continue
        LOG.debug("Resampling %s", ds_id)
        source_area = dataset.attrs['area']
        try:
            if reduce_data:
                key = source_area
                try:
                    (slice_x, slice_y), source_area = reductions[key]
                except KeyError:
                    slice_x, slice_y = source_area.get_area_slices(
                        destination_area)
                    source_area = source_area[slice_y, slice_x]
                    reductions[key] = (slice_x, slice_y), source_area
                dataset = self._slice_data(source_area,
                                           (slice_x, slice_y), dataset)
            else:
                LOG.debug("Data reduction disabled by the user")
        except NotImplementedError:
            LOG.info("Not reducing data before resampling.")
        if source_area not in resamplers:
            key, resampler = prepare_resampler(
                source_area, destination_area, **resample_kwargs)
            resamplers[source_area] = resampler
            self.resamplers[key] = resampler
        kwargs = resample_kwargs.copy()
        kwargs['resampler'] = resamplers[source_area]
        res = resample_dataset(dataset, destination_area, **kwargs)
        new_datasets[ds_id] = res
        if ds_id in new_scn.datasets:
            new_scn.datasets[ds_id] = res
        if parent_dataset is not None:
            replace_anc(res, pres)
python
{ "resource": "" }
q27066
Scene.resample
train
def resample(self, destination=None, datasets=None, generate=True,
             unload=True, resampler=None, reduce_data=True,
             **resample_kwargs):
    """Resample datasets and return a new scene.

    Args:
        destination (AreaDefinition, GridDefinition): area definition to
            resample to. If not specified then the area returned by
            `Scene.max_area()` will be used.
        datasets (list): Limit datasets to resample to these specified
            `DatasetID` objects. By default all currently loaded
            datasets are resampled.
        generate (bool): Generate any requested composites that could
            not be previously due to incompatible areas (default: True).
        unload (bool): Remove any datasets no longer needed after
            requested composites have been generated (default: True).
        resampler (str): Name of resampling method to use. By default,
            this is a nearest neighbor KDTree-based resampling
            ('nearest'). Other possible values include 'native', 'ewa',
            etc. See the :mod:`~satpy.resample` documentation for more
            information.
        reduce_data (bool): Reduce data by matching the input and output
            areas and slicing the data arrays (default: True)
        resample_kwargs: Remaining keyword arguments to pass to
            individual resampler classes. See the individual resampler
            class documentation :mod:`here <satpy.resample>` for
            available arguments.

    """
    to_resample_ids = [dsid for (dsid, dataset) in self.datasets.items()
                       if (not datasets) or dsid in datasets]

    if destination is None:
        destination = self.max_area(to_resample_ids)
    new_scn = self.copy(datasets=to_resample_ids)
    # we may have some datasets we asked for but don't exist yet
    new_scn.wishlist = self.wishlist.copy()
    self._resampled_scene(new_scn, destination, resampler=resampler,
                          reduce_data=reduce_data, **resample_kwargs)

    # regenerate anything from the wishlist that needs it (combining
    # multiple resolutions, etc.)
    if generate:
        keepables = new_scn.generate_composites()
    else:
        # don't lose datasets that we may need later for generating
        # composites
        keepables = set(new_scn.datasets.keys()) | new_scn.wishlist
    if new_scn.missing_datasets:
        # copy the set of missing datasets because they won't be valid
        # after they are removed in the next line
        missing = new_scn.missing_datasets.copy()
        new_scn._remove_failed_datasets(keepables)
        missing_str = ", ".join(str(x) for x in missing)
        LOG.warning("The following datasets "
                    "were not created: {}".format(missing_str))
    if unload:
        new_scn.unload(keepables)

    return new_scn
python
{ "resource": "" }
q27067
Scene.to_geoviews
train
def to_geoviews(self, gvtype=None, datasets=None, kdims=None, vdims=None,
                dynamic=False):
    """Convert satpy Scene to geoviews.

    Args:
        gvtype (gv plot type): One of gv.Image, gv.LineContours,
            gv.FilledContours, gv.Points. Defaults to
            :class:`geoviews.Image`. See the geoviews documentation
            for details.
        datasets (list): Limit included products to these datasets.
        kdims (list of str): Key dimensions. See the geoviews
            documentation for more information.
        vdims (list of str, optional): Value dimensions. See the
            geoviews documentation for more information. If not given,
            defaults to the first data variable.
        dynamic (bool, optional): Default is False.

    Returns:
        geoviews object

    Todo:
        * better handling of projection information in datasets which
          are to be passed to geoviews

    """
    try:
        import geoviews as gv
        from cartopy import crs  # noqa
    except ImportError:
        import warnings
        warnings.warn("This method needs the geoviews package installed.")

    if gvtype is None:
        gvtype = gv.Image

    ds = self.to_xarray_dataset(datasets)

    if vdims is None:
        # by default select first data variable as display variable
        vdims = ds.data_vars[list(ds.data_vars.keys())[0]].name

    if hasattr(ds, "area") and hasattr(ds.area, 'to_cartopy_crs'):
        dscrs = ds.area.to_cartopy_crs()
        gvds = gv.Dataset(ds, crs=dscrs)
    else:
        gvds = gv.Dataset(ds)

    if "latitude" in ds.coords.keys():
        gview = gvds.to(gv.QuadMesh, kdims=["longitude", "latitude"],
                        vdims=vdims, dynamic=dynamic)
    else:
        gview = gvds.to(gvtype, kdims=["x", "y"], vdims=vdims,
                        dynamic=dynamic)

    return gview
python
{ "resource": "" }
q27068
Scene.to_xarray_dataset
train
def to_xarray_dataset(self, datasets=None):
    """Merge all xr.DataArrays of a scene into an xr.Dataset.

    Parameters:
        datasets (list): List of products to include in the
            :class:`xarray.Dataset`

    Returns:
        :class:`xarray.Dataset`

    """
    if datasets is not None:
        datasets = [self[ds] for ds in datasets]
    else:
        datasets = [self.datasets.get(ds) for ds in self.wishlist]
        datasets = [ds for ds in datasets if ds is not None]

    ds_dict = {i.attrs['name']: i.rename(i.attrs['name'])
               for i in datasets if i.attrs.get('area') is not None}

    mdata = combine_metadata(*tuple(i.attrs for i in datasets))
    if mdata.get('area') is None or not isinstance(mdata['area'],
                                                   SwathDefinition):
        # either don't know what the area is or we have an AreaDefinition
        ds = xr.merge(ds_dict.values())
    else:
        # we have a swath definition and should use lon/lat values
        lons, lats = mdata['area'].get_lonlats()
        if not isinstance(lons, DataArray):
            lons = DataArray(lons, dims=('y', 'x'))
            lats = DataArray(lats, dims=('y', 'x'))
        # ds_dict['longitude'] = lons
        # ds_dict['latitude'] = lats
        ds = xr.Dataset(ds_dict,
                        coords={"latitude": (["y", "x"], lats),
                                "longitude": (["y", "x"], lons)})

    ds.attrs = mdata
    return ds
python
{ "resource": "" }
q27069
Scene.images
train
def images(self):
    """Generate images for all the datasets from the scene."""
    for ds_id, projectable in self.datasets.items():
        if ds_id in self.wishlist:
            yield projectable.to_image()
python
{ "resource": "" }
q27070
dictify
train
def dictify(r, root=True):
    """Convert an ElementTree into a dict."""
    if root:
        return {r.tag: dictify(r, False)}
    d = {}
    if r.text and r.text.strip():
        try:
            return int(r.text)
        except ValueError:
            try:
                return float(r.text)
            except ValueError:
                return r.text
    for x in r.findall("./*"):
        if x.tag in d:
            # repeated sibling tags accumulate into a list
            if not isinstance(d[x.tag], list):
                d[x.tag] = [d[x.tag]]
            d[x.tag].append(dictify(x, False))
        else:
            d[x.tag] = dictify(x, False)
    return d
python
{ "resource": "" }
q27071
interpolate_slice
train
def interpolate_slice(slice_rows, slice_cols, interpolator):
    """Interpolate the given slice of the larger array."""
    fine_rows = np.arange(slice_rows.start, slice_rows.stop, slice_rows.step)
    fine_cols = np.arange(slice_cols.start, slice_cols.stop, slice_cols.step)
    return interpolator(fine_cols, fine_rows)
python
{ "resource": "" }
q27072
interpolate_xarray
train
def interpolate_xarray(xpoints, ypoints, values, shape, kind='cubic',
                       blocksize=CHUNK_SIZE):
    """Interpolate, generating a dask array."""
    vchunks = range(0, shape[0], blocksize)
    hchunks = range(0, shape[1], blocksize)

    token = tokenize(blocksize, xpoints, ypoints, values, kind, shape)
    name = 'interpolate-' + token

    from scipy.interpolate import interp2d
    interpolator = interp2d(xpoints, ypoints, values, kind=kind)

    dskx = {(name, i, j): (interpolate_slice,
                           slice(vcs, min(vcs + blocksize, shape[0])),
                           slice(hcs, min(hcs + blocksize, shape[1])),
                           interpolator)
            for i, vcs in enumerate(vchunks)
            for j, hcs in enumerate(hchunks)
            }

    res = da.Array(dskx, name, shape=list(shape),
                   chunks=(blocksize, blocksize),
                   dtype=values.dtype)
    return DataArray(res, dims=('y', 'x'))
python
{ "resource": "" }
q27073
interpolate_xarray_linear
train
def interpolate_xarray_linear(xpoints, ypoints, values, shape,
                              chunks=CHUNK_SIZE):
    """Interpolate linearly, generating a dask array."""
    from scipy.interpolate.interpnd import (LinearNDInterpolator,
                                            _ndim_coords_from_arrays)

    if isinstance(chunks, (list, tuple)):
        vchunks, hchunks = chunks
    else:
        vchunks, hchunks = chunks, chunks

    points = _ndim_coords_from_arrays(np.vstack((np.asarray(ypoints),
                                                 np.asarray(xpoints))).T)

    interpolator = LinearNDInterpolator(points, values)

    grid_x, grid_y = da.meshgrid(da.arange(shape[1], chunks=hchunks),
                                 da.arange(shape[0], chunks=vchunks))

    # workaround for non-thread-safe first call of the interpolator:
    interpolator((0, 0))
    res = da.map_blocks(intp, grid_x, grid_y, interpolator=interpolator)

    return DataArray(res, dims=('y', 'x'))
python
{ "resource": "" }
q27074
SAFEXML.read_azimuth_noise_array
train
def read_azimuth_noise_array(elts):
    """Read the azimuth noise vectors.

    The azimuth noise is normalized per swath to account for gain
    differences between the swaths in EW mode.

    This is based on this reference:
    J. Park, A. A. Korosov, M. Babiker, S. Sandven and J. Won,
    "Efficient Thermal Noise Removal for Sentinel-1 TOPSAR
    Cross-Polarization Channel," in IEEE Transactions on Geoscience
    and Remote Sensing, vol. 56, no. 3, pp. 1555-1565, March 2018.
    doi: 10.1109/TGRS.2017.2765248
    """
    y = []
    x = []
    data = []
    for elt in elts:
        first_pixel = int(elt.find('firstRangeSample').text)
        last_pixel = int(elt.find('lastRangeSample').text)
        lines = elt.find('line').text.split()
        lut = elt.find('noiseAzimuthLut').text.split()
        pixels = [first_pixel, last_pixel]
        swath = elt.find('swath').text
        corr = 1
        if swath == 'EW1':
            corr = 1.5
        if swath == 'EW4':
            corr = 1.2
        if swath == 'EW5':
            corr = 1.5
        for pixel in pixels:
            y += [int(val) for val in lines]
            x += [pixel] * len(lines)
            data += [float(val) * corr for val in lut]
    return np.asarray(data), (x, y)
python
{ "resource": "" }
q27075
SAFEXML.interpolate_xml_array
train
def interpolate_xml_array(data, low_res_coords, shape, chunks):
    """Interpolate arbitrary size dataset to a full sized grid."""
    xpoints, ypoints = low_res_coords
    return interpolate_xarray_linear(xpoints, ypoints, data, shape,
                                     chunks=chunks)
python
{ "resource": "" }
q27076
SAFEXML.get_noise_correction
train
def get_noise_correction(self, shape, chunks=None):
    """Get the noise correction array."""
    data_items = self.root.findall(".//noiseVector")
    data, low_res_coords = self.read_xml_array(data_items, 'noiseLut')
    if not data_items:
        data_items = self.root.findall(".//noiseRangeVector")
        data, low_res_coords = self.read_xml_array(data_items,
                                                   'noiseRangeLut')
        range_noise = self.interpolate_xml_array(data, low_res_coords,
                                                 shape, chunks=chunks)
        data_items = self.root.findall(".//noiseAzimuthVector")
        data, low_res_coords = self.read_azimuth_noise_array(data_items)
        azimuth_noise = self.interpolate_xml_array(data, low_res_coords,
                                                   shape, chunks=chunks)
        noise = range_noise * azimuth_noise
    else:
        noise = self.interpolate_xml_array(data, low_res_coords, shape,
                                           chunks=chunks)
    return noise
python
{ "resource": "" }
q27077
SAFEXML.get_calibration
train
def get_calibration(self, name, shape, chunks=None):
    """Get the calibration array."""
    data_items = self.root.findall(".//calibrationVector")
    data, low_res_coords = self.read_xml_array(data_items, name)
    return self.interpolate_xml_array(data, low_res_coords, shape,
                                      chunks=chunks)
python
{ "resource": "" }
q27078
SAFEGRD.read_band_blocks
train
def read_band_blocks(self, blocksize=CHUNK_SIZE):
    """Read the band in native blocks."""
    # For Sentinel-1 data, the blocks are one line high, and dask seems
    # to choke on that.
    band = self.filehandle

    shape = band.shape
    token = tokenize(blocksize, band)
    name = 'read_band-' + token
    dskx = dict()
    if len(band.block_shapes) != 1:
        raise NotImplementedError('Bands with multiple shapes not '
                                  'supported.')
    else:
        chunks = band.block_shapes[0]

    def do_read(the_band, the_window, the_lock):
        with the_lock:
            return the_band.read(1, None, window=the_window)

    for ji, window in band.block_windows(1):
        dskx[(name, ) + ji] = (do_read, band, window, self.read_lock)

    res = da.Array(dskx, name, shape=list(shape),
                   chunks=chunks,
                   dtype=band.dtypes[0])
    return DataArray(res, dims=('y', 'x'))
python
{ "resource": "" }
q27079
SAFEGRD.read_band
train
def read_band(self, blocksize=CHUNK_SIZE):
    """Read the band in chunks."""
    band = self.filehandle

    shape = band.shape
    if len(band.block_shapes) == 1:
        total_size = blocksize * blocksize * 1.0
        lines, cols = band.block_shapes[0]
        if cols > lines:
            hblocks = cols
            vblocks = int(total_size / cols / lines)
        else:
            hblocks = int(total_size / cols / lines)
            vblocks = lines
    else:
        hblocks = blocksize
        vblocks = blocksize
    vchunks = range(0, shape[0], vblocks)
    hchunks = range(0, shape[1], hblocks)

    token = tokenize(hblocks, vblocks, band)
    name = 'read_band-' + token

    def do_read(the_band, the_window, the_lock):
        with the_lock:
            return the_band.read(1, None, window=the_window)

    dskx = {(name, i, j): (do_read, band,
                           Window(hcs, vcs,
                                  min(hblocks, shape[1] - hcs),
                                  min(vblocks, shape[0] - vcs)),
                           self.read_lock)
            for i, vcs in enumerate(vchunks)
            for j, hcs in enumerate(hchunks)
            }

    res = da.Array(dskx, name, shape=list(shape),
                   chunks=(vblocks, hblocks),
                   dtype=band.dtypes[0])
    return DataArray(res, dims=('y', 'x'))
python
{ "resource": "" }
q27080
SAFEGRD.get_lonlatalts
train
def get_lonlatalts(self):
    """Obtain GCPs and construct latitude and longitude arrays.

    Args:
       band (gdal band): Measurement band which comes with GCPs
       array_shape (tuple): The size of the data array

    Returns:
       coordinates (tuple): A tuple with longitude and latitude arrays
    """
    band = self.filehandle

    (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps()

    # FIXME: do interpolation on cartesian coordinates if the area is
    # problematic.

    longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape)
    latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape)
    altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape)

    longitudes.attrs['gcps'] = gcps
    longitudes.attrs['crs'] = crs
    latitudes.attrs['gcps'] = gcps
    latitudes.attrs['crs'] = crs
    altitudes.attrs['gcps'] = gcps
    altitudes.attrs['crs'] = crs

    return longitudes, latitudes, altitudes
python
{ "resource": "" }
q27081
SAFEGRD.get_gcps
train
def get_gcps(self):
    """Read GCPs from the GDAL band.

    Args:
       band (gdal band): Measurement band which comes with GCPs
       coordinates (tuple): A tuple with longitude and latitude arrays

    Returns:
       points (tuple): Pixel and line indices 1d arrays
       gcp_coords (tuple): Longitude and latitude 1d arrays
    """
    gcps = self.filehandle.gcps

    gcp_array = np.array(
        [(p.row, p.col, p.x, p.y, p.z) for p in gcps[0]])

    ypoints = np.unique(gcp_array[:, 0])
    xpoints = np.unique(gcp_array[:, 1])

    gcp_lons = gcp_array[:, 2].reshape(ypoints.shape[0], xpoints.shape[0])
    gcp_lats = gcp_array[:, 3].reshape(ypoints.shape[0], xpoints.shape[0])
    gcp_alts = gcp_array[:, 4].reshape(ypoints.shape[0], xpoints.shape[0])

    return (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), gcps
python
{ "resource": "" }
q27082
VIIRSSDRFileHandler.scale_swath_data
train
def scale_swath_data(self, data, scaling_factors):
    """Scale swath data using scaling factors and offsets.

    Multi-granule (a.k.a. aggregated) files will have more than the
    usual two values.
    """
    num_grans = len(scaling_factors) // 2
    gran_size = data.shape[0] // num_grans
    factors = scaling_factors.where(scaling_factors > -999)
    factors = factors.data.reshape((-1, 2))
    factors = xr.DataArray(da.repeat(factors, gran_size, axis=0),
                           dims=(data.dims[0], 'factors'))
    data = data * factors[:, 0] + factors[:, 1]
    return data
python
{ "resource": "" }
q27083
VIIRSSDRFileHandler.expand_single_values
train
def expand_single_values(var, scans):
    """Expand single valued variable to full scan lengths."""
    if scans.size == 1:
        return var
    else:
        expanded = np.repeat(var, scans)
        expanded.attrs = var.attrs
        # rename returns a new object, so reassign the result
        expanded = expanded.rename({expanded.dims[0]: 'y'})
        return expanded
python
{ "resource": "" }
q27084
VIIRSSDRFileHandler.get_bounding_box
train
def get_bounding_box(self):
    """Get the bounding box of this file."""
    from pyproj import Geod
    geod = Geod(ellps='WGS84')
    dataset_group = DATASET_KEYS[self.datasets[0]]
    idx = 0
    lons_ring = None
    lats_ring = None
    while True:
        path = 'Data_Products/{dataset_group}/{dataset_group}_Gran_{idx}/attr/'
        prefix = path.format(dataset_group=dataset_group, idx=idx)
        try:
            lats = self.file_content[prefix + 'G-Ring_Latitude']
            lons = self.file_content[prefix + 'G-Ring_Longitude']
            if lons_ring is None:
                lons_ring = lons
                lats_ring = lats
            else:
                prev_lon = lons_ring[0]
                prev_lat = lats_ring[0]
                dists = list(geod.inv(lon, lat, prev_lon, prev_lat)[2]
                             for lon, lat in zip(lons, lats))
                first_idx = np.argmin(dists)
                if first_idx == 2 and len(lons) == 8:
                    lons_ring = np.hstack((lons[:3], lons_ring[:-2], lons[4:]))
                    lats_ring = np.hstack((lats[:3], lats_ring[:-2], lats[4:]))
                else:
                    raise NotImplementedError(
                        "Don't know how to handle G-Rings of length %d"
                        % len(lons))
        except KeyError:
            break
        idx += 1

    return lons_ring, lats_ring
python
{ "resource": "" }
q27085
VIIRSSDRReader._load_from_geo_ref
train
def _load_from_geo_ref(self, dsid):
    """Load filenames from the N_GEO_Ref attribute of a dataset's file."""
    file_handlers = self._get_file_handlers(dsid)
    if not file_handlers:
        return None

    fns = []
    for fh in file_handlers:
        base_dir = os.path.dirname(fh.filename)
        try:
            # get the filename and remove the creation time
            # which is often wrong
            fn = fh['/attr/N_GEO_Ref'][:46] + '*.h5'
            fns.extend(glob(os.path.join(base_dir, fn)))

            # usually is non-terrain corrected file, add the terrain
            # corrected file too
            if fn[:5] == 'GIMGO':
                fn = 'GITCO' + fn[5:]
            elif fn[:5] == 'GMODO':
                fn = 'GMTCO' + fn[5:]
            else:
                continue
            fns.extend(glob(os.path.join(base_dir, fn)))
        except KeyError:
            LOG.debug("Could not load geo-reference information "
                      "from {}".format(fh.filename))

    return fns
python
{ "resource": "" }
q27086
VIIRSSDRReader._get_req_rem_geo
train
def _get_req_rem_geo(self, ds_info):
    """Find out which geolocation files are needed."""
    if ds_info['dataset_groups'][0].startswith('GM'):
        if self.use_tc is False:
            req_geo = 'GMODO'
            rem_geo = 'GMTCO'
        else:
            req_geo = 'GMTCO'
            rem_geo = 'GMODO'
    elif ds_info['dataset_groups'][0].startswith('GI'):
        if self.use_tc is False:
            req_geo = 'GIMGO'
            rem_geo = 'GITCO'
        else:
            req_geo = 'GITCO'
            rem_geo = 'GIMGO'
    else:
        raise ValueError('Unknown dataset group %s'
                         % ds_info['dataset_groups'][0])
    return req_geo, rem_geo
python
{ "resource": "" }
q27087
VIIRSSDRReader._get_coordinates_for_dataset_key
train
def _get_coordinates_for_dataset_key(self, dsid):
    """Get the coordinate dataset keys for `dsid`.

    Wraps the base class method in order to load geolocation files from
    the geo reference attribute in the datasets file.
    """
    coords = super(VIIRSSDRReader, self)._get_coordinates_for_dataset_key(dsid)
    for c_id in coords:
        c_info = self.ids[c_id]
        # c_info['dataset_groups'] should be a list of 2 elements
        self._get_file_handlers(c_id)
        if len(c_info['dataset_groups']) == 1:
            # filtering already done
            continue
        try:
            req_geo, rem_geo = self._get_req_rem_geo(c_info)
        except ValueError:
            # DNB
            continue

        # check the dataset file for the geolocation filename
        geo_filenames = self._load_from_geo_ref(dsid)
        if not geo_filenames:
            c_info['dataset_groups'] = [rem_geo]
        else:
            # concatenate all values
            new_fhs = sum(self.create_filehandlers(geo_filenames).values(), [])
            desired, other = split_desired_other(new_fhs, req_geo, rem_geo)
            if desired:
                c_info['dataset_groups'].remove(rem_geo)
            else:
                c_info['dataset_groups'].remove(req_geo)

    return coords
python
{ "resource": "" }
q27088
GeoTIFFWriter._gdal_write_datasets
train
def _gdal_write_datasets(self, dst_ds, datasets):
    """Write datasets into the gdal raster structure ``dst_ds``."""
    for i, band in enumerate(datasets['bands']):
        chn = datasets.sel(bands=band)
        bnd = dst_ds.GetRasterBand(i + 1)
        bnd.SetNoDataValue(0)
        bnd.WriteArray(chn.values)
python
{ "resource": "" }
q27089
GeoTIFFWriter.save_image
train
def save_image(self, img, filename=None, dtype=None, fill_value=None,
               floating_point=None, compute=True, **kwargs):
    """Save the image to the given ``filename`` in geotiff_ format.

    Note for faster output and reduced memory usage the ``rasterio``
    library must be installed. This writer currently falls back to
    using ``gdal`` directly, but that will be deprecated in the future.

    Args:
        img (xarray.DataArray): Data to save to geotiff.
        filename (str): Filename to save the image to. Defaults to
            ``filename`` passed during writer creation. Unlike the
            creation ``filename`` keyword argument, this filename does
            not get formatted with data attributes.
        dtype (numpy.dtype): Numpy data type to save the image as.
            Defaults to 8-bit unsigned integer (``np.uint8``). If the
            ``dtype`` argument is provided during writer creation then
            that will be used as the default.
        fill_value (int or float): Value to use where data values are
            NaN/null. If this is specified in the writer configuration
            file that value will be used as the default.
        floating_point (bool): Deprecated. Use ``dtype=np.float64``
            instead.
        compute (bool): Compute dask arrays and save the image
            immediately. If ``False`` then the return value can be
            passed to :func:`~satpy.writers.compute_writer_results` to
            do the computation. This is useful when multiple images may
            share input calculations where dask can benefit from not
            repeating them multiple times. Defaults to ``True`` in the
            writer by itself, but is typically passed as ``False`` by
            callers where calculations can be combined.

    .. _geotiff: http://trac.osgeo.org/geotiff/

    """
    filename = filename or self.get_filename(**img.data.attrs)

    # Update global GDAL options with these specific ones
    gdal_options = self.gdal_options.copy()
    for k in kwargs.keys():
        if k in self.GDAL_OPTIONS:
            gdal_options[k] = kwargs[k]

    if fill_value is None:
        # fall back to fill_value from configuration file
        fill_value = self.info.get('fill_value')

    if floating_point is not None:
        import warnings
        warnings.warn("'floating_point' is deprecated, use "
                      "'dtype=np.float64' instead.",
                      DeprecationWarning)
        dtype = np.float64
    dtype = dtype if dtype is not None else self.dtype
    if dtype is None:
        dtype = np.uint8

    if "alpha" in kwargs:
        raise ValueError(
            "Keyword 'alpha' is automatically set based on 'fill_value' "
            "and should not be specified")
    if np.issubdtype(dtype, np.floating):
        if img.mode != "L":
            raise ValueError("Image must be in 'L' mode for floating "
                             "point geotiff saving")
        if fill_value is None:
            LOG.debug("Alpha band not supported for float geotiffs, "
                      "setting fill value to 'NaN'")
            fill_value = np.nan

    try:
        import rasterio  # noqa
        # we can use the faster rasterio-based save
        return img.save(filename, fformat='tif', fill_value=fill_value,
                        dtype=dtype, compute=compute,
                        **gdal_options)
    except ImportError:
        # import here so the name is bound even when the deprecated
        # 'floating_point' branch above was not taken
        import warnings
        LOG.warning("Using legacy/slower geotiff save method, install "
                    "'rasterio' for faster saving.")
        warnings.warn("Using legacy/slower geotiff save method with "
                      "'gdal'. This will be deprecated in the future. "
                      "Install 'rasterio' for faster saving and future "
                      "compatibility.", PendingDeprecationWarning)
        # Map numpy data types to GDAL data types
        NP2GDAL = {
            np.float32: gdal.GDT_Float32,
            np.float64: gdal.GDT_Float64,
            np.uint8: gdal.GDT_Byte,
            np.uint16: gdal.GDT_UInt16,
            np.uint32: gdal.GDT_UInt32,
            np.int16: gdal.GDT_Int16,
            np.int32: gdal.GDT_Int32,
            np.complex64: gdal.GDT_CFloat32,
            np.complex128: gdal.GDT_CFloat64,
        }

        # force to numpy dtype object
        dtype = np.dtype(dtype)
        gformat = NP2GDAL[dtype.type]

        gdal_options['nbits'] = int(gdal_options.get('nbits',
                                                     dtype.itemsize * 8))

        datasets, mode = img._finalize(fill_value=fill_value, dtype=dtype)

        LOG.debug("Saving to GeoTiff: %s", filename)

        g_opts = ["{0}={1}".format(k.upper(), str(v))
                  for k, v in gdal_options.items()]

        ensure_dir(filename)
        delayed = self._create_file(filename, img, gformat, g_opts,
                                    datasets, mode)
        if compute:
            return delayed.compute()
        return delayed
python
{ "resource": "" }
q27090
HDF4BandReader.get_end_time
train
def get_end_time(self):
    """Get observation end time from file metadata."""
    mda_dict = self.filehandle.attributes()
    core_mda = mda_dict['coremetadata']
    end_time_str = self.parse_metadata_string(core_mda)
    self._end_time = datetime.strptime(end_time_str, "%Y-%m-%dT%H:%M:%SZ")
python
{ "resource": "" }
q27091
HDF4BandReader.parse_metadata_string
train
def parse_metadata_string(metadata_string):
    """Grab end time with regular expression."""
    regex = r"STOP_DATE.+?VALUE\s*=\s*\"(.+?)\""
    match = re.search(regex, metadata_string, re.DOTALL)
    end_time_str = match.group(1)
    return end_time_str
python
{ "resource": "" }
q27092
HDF4BandReader.get_filehandle
train
def get_filehandle(self):
    """Get HDF4 filehandle."""
    if os.path.exists(self.filename):
        self.filehandle = SD(self.filename, SDC.READ)
        logger.debug("Loading dataset {}".format(self.filename))
    else:
        raise IOError("Path {} does not exist.".format(self.filename))
python
{ "resource": "" }
q27093
HDF4BandReader.get_sds_variable
train
def get_sds_variable(self, name):
    """Read variable from the HDF4 file."""
    sds_obj = self.filehandle.select(name)
    data = sds_obj.get()
    return data
python
{ "resource": "" }
q27094
HDF4BandReader.get_lonlats
train
def get_lonlats(self):
    """Get longitude and latitude arrays from the file."""
    longitudes = self.get_sds_variable('Longitude')
    latitudes = self.get_sds_variable('Latitude')
    return longitudes, latitudes
python
{ "resource": "" }
q27095
Node.flatten
train
def flatten(self, d=None):
    """Flatten tree structure to a one level dictionary.

    Args:
        d (dict, optional): output dictionary to update

    Returns:
        dict: Node.name -> Node. The returned dictionary includes the
              current Node and all its children.
    """
    if d is None:
        d = {}
    if self.name is not None:
        d[self.name] = self
    for child in self.children:
        child.flatten(d=d)
    return d
python
{ "resource": "" }
q27096
Node.add_child
train
def add_child(self, obj):
    """Add a child to the node."""
    self.children.append(obj)
    obj.parents.append(self)
python
{ "resource": "" }
q27097
Node.display
train
def display(self, previous=0, include_data=False):
    """Display the node."""
    no_data = " (No Data)" if self.data is None else ""
    return (
        (" +" * previous) + str(self.name) + no_data + '\n' +
        ''.join([child.display(previous + 1) for child in self.children]))
python
{ "resource": "" }
q27098
Node.trunk
train
def trunk(self, unique=True):
    """Get the trunk of the tree starting at this root."""
    # uniqueness is not correct in `trunk` yet
    unique = False
    res = []
    if self.children:
        if self.name is not None:
            res.append(self)
        for child in self.children:
            for sub_child in child.trunk(unique=unique):
                if not unique or sub_child not in res:
                    res.append(sub_child)
    return res
python
{ "resource": "" }
q27099
DependencyTree.copy
train
def copy(self):
    """Copy this node tree.

    Note all references to readers are removed. This is meant to avoid
    tree copies accessing readers that would return incompatible (Area)
    data. Theoretically it should be possible for tree copies to
    request compositor or modifier information as long as they don't
    depend on any datasets not already existing in the dependency tree.
    """
    new_tree = DependencyTree({}, self.compositors, self.modifiers)
    for c in self.children:
        c = c.copy(node_cache=new_tree._all_nodes)
        new_tree.add_child(new_tree, c)
    return new_tree
python
{ "resource": "" }