| signature (string, lengths 8–3.44k) | body (string, lengths 0–1.41M) | docstring (string, lengths 1–122k) | id (string, lengths 5–17) |
|---|---|---|---|
def load(self):
|
return super(GiniFile, self).load()<EOL>
|
Load the variables and attributes simultaneously.
A centralized loading function makes it easier to create
data stores that do automatic encoding/decoding.
For example::

    class SuffixAppendingDataStore(AbstractDataStore):
        def load(self):
            variables, attributes = AbstractDataStore.load(self)
            variables = {'%s_suffix' % k: v
                         for k, v in iteritems(variables)}
            attributes = {'%s_suffix' % k: v
                          for k, v in iteritems(attributes)}
            return variables, attributes

This function will be called any time variables or attributes
are requested, so care should be taken to make sure it's fast.
|
f8494:c1:m8
|
def get_variables(self):
|
variables = [self._make_time_var()]<EOL>proj_var_name, proj_var = self._make_proj_var()<EOL>variables.append((proj_var_name, proj_var))<EOL>variables.extend(self._make_coord_vars())<EOL>name = self.prod_desc.channel<EOL>if '<STR_LIT:(>' in name:<EOL><INDENT>name = name.split('<STR_LIT:(>')[<NUM_LIT:0>].rstrip()<EOL><DEDENT>missing_val = self.missing<EOL>attrs = {'<STR_LIT>': self.prod_desc.channel, '<STR_LIT>': missing_val,<EOL>'<STR_LIT>': '<STR_LIT>', '<STR_LIT>': proj_var_name}<EOL>data_var = Variable(('<STR_LIT:y>', '<STR_LIT:x>'),<EOL>data=np.ma.array(self.data,<EOL>mask=self.data == missing_val),<EOL>attrs=attrs)<EOL>variables.append((name, data_var))<EOL>return FrozenOrderedDict(variables)<EOL>
|
Get all variables in the file.
This is used by `xarray.open_dataset`.
|
f8494:c1:m9
|
def get_attrs(self):
|
return FrozenOrderedDict(satellite=self.prod_desc.creating_entity,<EOL>sector=self.prod_desc.sector_id)<EOL>
|
Get the global attributes.
This is used by `xarray.open_dataset`.
|
f8494:c1:m10
|
def get_dimensions(self):
|
return FrozenOrderedDict(x=self.prod_desc.nx, y=self.prod_desc.ny)<EOL>
|
Get the file's dimensions.
This is used by `xarray.open_dataset`.
|
f8494:c1:m11
|
def open_as_needed(filename):
|
if hasattr(filename, '<STR_LIT>'):<EOL><INDENT>return filename<EOL><DEDENT>if filename.endswith('<STR_LIT>'):<EOL><INDENT>return bz2.BZ2File(filename, '<STR_LIT:rb>')<EOL><DEDENT>elif filename.endswith('<STR_LIT>'):<EOL><INDENT>return gzip.GzipFile(filename, '<STR_LIT:rb>')<EOL><DEDENT>else:<EOL><INDENT>return open(filename, '<STR_LIT:rb>')<EOL><DEDENT>
|
Return a file object given either a filename or an object.
Handles opening with the right class based on the file extension.
|
f8495:m0
|
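A readable sketch of the masked body above, assuming the masked literals are the `read` attribute check and the `.bz2`/`.gz` suffixes:

```python
import bz2
import gzip


def open_as_needed(filename):
    """Return a file object given either a filename or an object."""
    # Already file-like: the masked attribute check is assumed to be 'read'
    if hasattr(filename, 'read'):
        return filename
    # Masked suffix literals assumed to be '.bz2' and '.gz'
    if filename.endswith('.bz2'):
        return bz2.BZ2File(filename, 'rb')
    elif filename.endswith('.gz'):
        return gzip.GzipFile(filename, 'rb')
    else:
        return open(filename, 'rb')
```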
def zlib_decompress_all_frames(data):
|
frames = bytearray()<EOL>data = bytes(data)<EOL>while data:<EOL><INDENT>decomp = zlib.decompressobj()<EOL>try:<EOL><INDENT>frames.extend(decomp.decompress(data))<EOL>data = decomp.unused_data<EOL><DEDENT>except zlib.error:<EOL><INDENT>frames.extend(data)<EOL>break<EOL><DEDENT><DEDENT>return frames<EOL>
|
Decompress all frames of zlib-compressed bytes.
Repeatedly tries to decompress `data` until all data are decompressed or decompression
fails; any remaining bytes that are not zlib-compressed are passed through unchanged.
Parameters
----------
data : bytearray or bytes
Binary data compressed using zlib.
Returns
-------
bytearray
All decompressed bytes
|
f8495:m1
|
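This body contains no masked literals, so it can be rendered directly; the sketch below just restores the layout:

```python
import zlib


def zlib_decompress_all_frames(data):
    """Decompress all frames of zlib-compressed bytes."""
    frames = bytearray()
    data = bytes(data)
    while data:
        decomp = zlib.decompressobj()
        try:
            # Decompress one frame; unused_data holds whatever follows it
            frames.extend(decomp.decompress(data))
            data = decomp.unused_data
        except zlib.error:
            # Remaining bytes are not zlib-compressed; pass them through
            frames.extend(data)
            break
    return frames
```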
def bits_to_code(val):
|
if val == <NUM_LIT:8>:<EOL><INDENT>return '<STR_LIT:B>'<EOL><DEDENT>elif val == <NUM_LIT:16>:<EOL><INDENT>return '<STR_LIT:H>'<EOL><DEDENT>else:<EOL><INDENT>log.warning('<STR_LIT>', val)<EOL>return '<STR_LIT:B>'<EOL><DEDENT>
|
Convert the number of bits to the proper code for unpacking.
|
f8495:m2
|
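A readable rendering: the format codes 'B' and 'H' are visible in the body, while the fallback warning text is masked and paraphrased here:

```python
import logging

log = logging.getLogger(__name__)


def bits_to_code(val):
    """Convert a bit width to a struct format code: 8 -> 'B', 16 -> 'H'."""
    if val == 8:
        return 'B'
    elif val == 16:
        return 'H'
    else:
        # Warning message is masked in the source; this text is paraphrased
        log.warning('Unsupported bit size: %s. Falling back to "B".', val)
        return 'B'
```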
def hexdump(buf, num_bytes, offset=<NUM_LIT:0>, width=<NUM_LIT:32>):
|
ind = offset<EOL>end = offset + num_bytes<EOL>lines = []<EOL>while ind < end:<EOL><INDENT>chunk = buf[ind:ind + width]<EOL>actual_width = len(chunk)<EOL>hexfmt = '<STR_LIT>'<EOL>blocksize = <NUM_LIT:4><EOL>blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]<EOL>num_left = actual_width % blocksize <EOL>if num_left:<EOL><INDENT>blocks += [hexfmt * num_left + '<STR_LIT>' * (blocksize - num_left)]<EOL><DEDENT>blocks += ['<STR_LIT>' * blocksize] * (width // blocksize - len(blocks))<EOL>hexoutput = '<STR_LIT:U+0020>'.join(blocks)<EOL>printable = tuple(chunk)<EOL>lines.append('<STR_LIT:U+0020>'.join((hexoutput.format(*printable), str(ind).ljust(len(str(end))),<EOL>str(ind - offset).ljust(len(str(end))),<EOL>'<STR_LIT>'.join(chr(c) if <NUM_LIT> < c < <NUM_LIT> else '<STR_LIT:.>' for c in chunk))))<EOL>ind += width<EOL><DEDENT>return '<STR_LIT:\n>'.join(lines)<EOL>
|
Perform a hexdump of the buffer.
Returns the hexdump as a canonically-formatted string.
|
f8495:m3
|
def __init__(self, var):
|
self._var = var<EOL>try:<EOL><INDENT>self._unit = units(self._var.units)<EOL><DEDENT>except (AttributeError, UndefinedUnitError):<EOL><INDENT>self._unit = None<EOL><DEDENT>
|
r"""Construct a new :class:`UnitLinker`.
Parameters
----------
var : Variable
The :class:`metpy.io.cdm.Variable` to be wrapped.
|
f8495:c0:m0
|
def __getitem__(self, ind):
|
ret = self._var[ind]<EOL>return ret if self._unit is None else ret * self._unit<EOL>
|
Get data from the underlying variable and add units.
|
f8495:c0:m1
|
def __getattr__(self, item):
|
return getattr(self._var, item)<EOL>
|
Forward all attribute access onto underlying variable.
|
f8495:c0:m2
|
@property<EOL><INDENT>def units(self):<DEDENT>
|
return self._unit<EOL>
|
Access the units from the underlying variable as a :class:`pint.Quantity`.
|
f8495:c0:m3
|
@units.setter<EOL><INDENT>def units(self, val):<DEDENT>
|
if isinstance(val, units.Unit):<EOL><INDENT>self._unit = val<EOL><DEDENT>else:<EOL><INDENT>self._unit = units(val)<EOL><DEDENT>
|
Override the units on the underlying variable.
|
f8495:c0:m4
|
def __init__(self, info, prefmt='<STR_LIT>', tuple_name=None):
|
if tuple_name is None:<EOL><INDENT>tuple_name = '<STR_LIT>'<EOL><DEDENT>names, fmts = zip(*info)<EOL>self.converters = {}<EOL>conv_off = <NUM_LIT:0><EOL>for ind, i in enumerate(info):<EOL><INDENT>if len(i) > <NUM_LIT:2>:<EOL><INDENT>self.converters[ind - conv_off] = i[-<NUM_LIT:1>]<EOL><DEDENT>elif not i[<NUM_LIT:0>]: <EOL><INDENT>conv_off += <NUM_LIT:1><EOL><DEDENT><DEDENT>self._tuple = namedtuple(tuple_name, '<STR_LIT:U+0020>'.join(n for n in names if n))<EOL>super(NamedStruct, self).__init__(prefmt + '<STR_LIT>'.join(f for f in fmts if f))<EOL>
|
Initialize the NamedStruct.
|
f8495:c1:m0
|
def make_tuple(self, *args, **kwargs):
|
return self._tuple(*args, **kwargs)<EOL>
|
Construct the underlying tuple from values.
|
f8495:c1:m2
|
def unpack(self, s):
|
return self._create(super(NamedStruct, self).unpack(s))<EOL>
|
Parse bytes and return a namedtuple.
|
f8495:c1:m3
|
def unpack_from(self, buff, offset=<NUM_LIT:0>):
|
return self._create(super(NamedStruct, self).unpack_from(buff, offset))<EOL>
|
Read bytes from a buffer and return as a namedtuple.
|
f8495:c1:m4
|
def unpack_file(self, fobj):
|
return self.unpack(fobj.read(self.size))<EOL>
|
Unpack the next bytes from a file object.
|
f8495:c1:m5
|
def __init__(self, info, prefmt='<STR_LIT>'):
|
names, formats = zip(*info)<EOL>self._names = [n for n in names if n]<EOL>super(DictStruct, self).__init__(prefmt + '<STR_LIT>'.join(f for f in formats if f))<EOL>
|
Initialize the DictStruct.
|
f8495:c2:m0
|
def unpack(self, s):
|
return self._create(super(DictStruct, self).unpack(s))<EOL>
|
Parse bytes and return a dict.
|
f8495:c2:m2
|
def unpack_from(self, buff, offset=<NUM_LIT:0>):
|
return self._create(super(DictStruct, self).unpack_from(buff, offset))<EOL>
|
Read bytes from a buffer and return as a dict.
|
f8495:c2:m3
|
def __init__(self, *args, **kwargs):
|
<EOL>self.val_map = {ind: a for ind, a in enumerate(args)}<EOL>self.val_map.update(zip(kwargs.values(), kwargs.keys()))<EOL>
|
Initialize the mapping.
|
f8495:c3:m0
|
def __call__(self, val):
|
return self.val_map.get(val, '<STR_LIT>'.format(val))<EOL>
|
Map an integer to the string representation.
|
f8495:c3:m1
|
def __init__(self, num_bits):
|
self._bits = range(num_bits)<EOL>
|
Initialize the number of bits.
|
f8495:c4:m0
|
def __call__(self, val):
|
return [bool((val >> i) & <NUM_LIT>) for i in self._bits]<EOL>
|
Convert the integer to the list of True/False values.
|
f8495:c4:m1
|
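This converter pair (`f8495:c4`) expands an integer into per-bit booleans. A minimal self-contained re-creation, assuming the masked literal is 1 (the class name here is assumed):

```python
class Bits:
    """Minimal re-creation of the bit-expanding converter above."""

    def __init__(self, num_bits):
        self._bits = range(num_bits)

    def __call__(self, val):
        # Masked literal assumed to be 1: test each bit position in turn
        return [bool((val >> i) & 1) for i in self._bits]


assert Bits(4)(0b1010) == [False, True, False, True]
```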
def __init__(self, *names):
|
self._names = names<EOL>
|
Initialize the list of named bits.
|
f8495:c5:m0
|
def __call__(self, val):
|
if not val:<EOL><INDENT>return None<EOL><DEDENT>bits = []<EOL>for n in self._names:<EOL><INDENT>if val & <NUM_LIT>:<EOL><INDENT>bits.append(n)<EOL><DEDENT>val >>= <NUM_LIT:1><EOL>if not val:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return bits[<NUM_LIT:0>] if len(bits) == <NUM_LIT:1> else bits<EOL>
|
Return the name of each set bit in the integer: None if no bits are set, a single
string if exactly one bit is set, otherwise a list of strings.
|
f8495:c5:m1
|
def __init__(self, fmt):
|
self._struct = Struct(fmt)<EOL>
|
Initialize the Struct unpacker.
|
f8495:c6:m0
|
def __call__(self, buf):
|
return list(self._struct.unpack(buf))<EOL>
|
Perform the actual unpacking.
|
f8495:c6:m1
|
def __init__(self, source):
|
self._data = bytearray(source)<EOL>self._offset = <NUM_LIT:0><EOL>self.clear_marks()<EOL>
|
Initialize the IOBuffer with the source data.
|
f8495:c7:m0
|
@classmethod<EOL><INDENT>def fromfile(cls, fobj):<DEDENT>
|
return cls(fobj.read())<EOL>
|
Initialize the IOBuffer with the contents of the file object.
|
f8495:c7:m1
|
def set_mark(self):
|
self._bookmarks.append(self._offset)<EOL>return len(self._bookmarks) - <NUM_LIT:1><EOL>
|
Mark the current location and return its id so that the buffer can return later.
|
f8495:c7:m2
|
def jump_to(self, mark, offset=<NUM_LIT:0>):
|
self._offset = self._bookmarks[mark] + offset<EOL>
|
Jump to a previously set mark.
|
f8495:c7:m3
|
def offset_from(self, mark):
|
return self._offset - self._bookmarks[mark]<EOL>
|
Calculate the current offset relative to a marked location.
|
f8495:c7:m4
|
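Taken together, `set_mark`, `jump_to`, and `offset_from` implement a simple bookmark protocol. A usage sketch, assuming `IOBuffer` is importable from the module these rows were extracted from:

```python
buf = IOBuffer(b'\x00\x01\x02\x03\x04')
mark = buf.set_mark()          # bookmark the current offset (0) and get its id
buf.jump_to(mark, offset=3)    # move to three bytes past the mark
assert buf.offset_from(mark) == 3
```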
def clear_marks(self):
|
self._bookmarks = []<EOL>
|
Clear all marked locations.
|
f8495:c7:m5
|
def splice(self, mark, newdata):
|
self.jump_to(mark)<EOL>self._data = self._data[:self._offset] + bytearray(newdata)<EOL>
|
Replace the data after the marked location with the specified data.
|
f8495:c7:m6
|
def read_struct(self, struct_class):
|
struct = struct_class.unpack_from(bytearray_to_buff(self._data), self._offset)<EOL>self.skip(struct_class.size)<EOL>return struct<EOL>
|
Parse and return a structure from the current buffer offset.
|
f8495:c7:m7
|
def read_func(self, func, num_bytes=None):
|
<EOL>res = func(self.get_next(num_bytes))<EOL>self.skip(num_bytes)<EOL>return res<EOL>
|
Parse data from the current buffer offset using a function.
|
f8495:c7:m8
|
def read_ascii(self, num_bytes=None):
|
return self.read(num_bytes).decode('<STR_LIT:ascii>')<EOL>
|
Return the specified bytes as ASCII-formatted text.
|
f8495:c7:m9
|
def read_binary(self, num, item_type='<STR_LIT:B>'):
|
if '<STR_LIT:B>' in item_type:<EOL><INDENT>return self.read(num)<EOL><DEDENT>if item_type[<NUM_LIT:0>] in ('<STR_LIT:@>', '<STR_LIT:=>', '<STR_LIT:<>', '<STR_LIT:>>', '<STR_LIT:!>'):<EOL><INDENT>order = item_type[<NUM_LIT:0>]<EOL>item_type = item_type[<NUM_LIT:1>:]<EOL><DEDENT>else:<EOL><INDENT>order = '<STR_LIT:@>'<EOL><DEDENT>return list(self.read_struct(Struct(order + '<STR_LIT>'.format(int(num)) + item_type)))<EOL>
|
Parse the current buffer offset as the specified code.
|
f8495:c7:m10
|
def read_int(self, code):
|
return self.read_struct(Struct(code))[<NUM_LIT:0>]<EOL>
|
Parse the current buffer offset as the specified integer code.
|
f8495:c7:m11
|
def read(self, num_bytes=None):
|
res = self.get_next(num_bytes)<EOL>self.skip(len(res))<EOL>return res<EOL>
|
Read and return the specified bytes from the buffer.
|
f8495:c7:m12
|
def get_next(self, num_bytes=None):
|
if num_bytes is None:<EOL><INDENT>return self._data[self._offset:]<EOL><DEDENT>else:<EOL><INDENT>return self._data[self._offset:self._offset + num_bytes]<EOL><DEDENT>
|
Get the next bytes in the buffer without modifying the offset.
|
f8495:c7:m13
|
def skip(self, num_bytes):
|
if num_bytes is None:<EOL><INDENT>self._offset = len(self._data)<EOL><DEDENT>else:<EOL><INDENT>self._offset += num_bytes<EOL><DEDENT>
|
Jump ahead the specified number of bytes in the buffer.
|
f8495:c7:m14
|
def check_remains(self, num_bytes):
|
return len(self._data[self._offset:]) == num_bytes<EOL>
|
Check that exactly the specified number of bytes remains in the buffer.
|
f8495:c7:m15
|
def truncate(self, num_bytes):
|
self._data = self._data[:-num_bytes]<EOL>
|
Remove the specified number of bytes from the end of the buffer.
|
f8495:c7:m16
|
def at_end(self):
|
return self._offset >= len(self._data)<EOL>
|
Return whether the buffer has reached the end of data.
|
f8495:c7:m17
|
def __getitem__(self, item):
|
return self._data[item]<EOL>
|
Return the data at the specified location.
|
f8495:c7:m18
|
def __str__(self):
|
return '<STR_LIT>'.format(len(self._data), self._offset)<EOL>
|
Return a string representation of the IOBuffer.
|
f8495:c7:m19
|
def __len__(self):
|
return len(self._data)<EOL>
|
Return the amount of data in the buffer.
|
f8495:c7:m20
|
def version(val):
|
if val / <NUM_LIT> > <NUM_LIT>:<EOL><INDENT>ver = val / <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>ver = val / <NUM_LIT><EOL><DEDENT>return '<STR_LIT>'.format(ver)<EOL>
|
Calculate a string version from an integer value.
|
f8496:m0
|
def scaler(scale):
|
def inner(val):<EOL><INDENT>return val * scale<EOL><DEDENT>return inner<EOL>
|
Create a function that scales by a specific value.
|
f8496:m1
|
def angle(val):
|
return val * <NUM_LIT> / <NUM_LIT:2>**<NUM_LIT:16><EOL>
|
Convert an integer value to a floating point angle.
|
f8496:m2
|
def az_rate(val):
|
return val * <NUM_LIT> / <NUM_LIT:2>**<NUM_LIT:16><EOL>
|
Convert an integer value to a floating point angular rate.
|
f8496:m3
|
def bzip_blocks_decompress_all(data):
|
frames = bytearray()<EOL>offset = <NUM_LIT:0><EOL>while offset < len(data):<EOL><INDENT>size_bytes = data[offset:offset + <NUM_LIT:4>]<EOL>offset += <NUM_LIT:4><EOL>block_cmp_bytes = abs(Struct('<STR_LIT>').unpack(size_bytes)[<NUM_LIT:0>])<EOL>try:<EOL><INDENT>frames.extend(bz2.decompress(data[offset:offset + block_cmp_bytes]))<EOL>offset += block_cmp_bytes<EOL><DEDENT>except IOError:<EOL><INDENT>if frames:<EOL><INDENT>logging.warning('<STR_LIT>',<EOL>offset - <NUM_LIT:4>)<EOL>break<EOL><DEDENT>else: <EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>return frames<EOL>
|
Decompress all of the bzip2-ed blocks.
Returns the decompressed data as a `bytearray`.
|
f8496:m4
|
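A readable sketch of the block loop above. The 4-byte size prefix format is masked; it is assumed here to be a big-endian signed int ('>l'), and the masked warning/error strings are paraphrased:

```python
import bz2
import logging
from struct import Struct


def bzip_blocks_decompress_all(data):
    """Decompress a sequence of size-prefixed bzip2 blocks."""
    frames = bytearray()
    offset = 0
    while offset < len(data):
        # Each block is preceded by its compressed size (sign is ignored)
        size_bytes = data[offset:offset + 4]
        offset += 4
        block_cmp_bytes = abs(Struct('>l').unpack(size_bytes)[0])  # assumed '>l'
        try:
            frames.extend(bz2.decompress(data[offset:offset + block_cmp_bytes]))
            offset += block_cmp_bytes
        except IOError:
            if frames:
                # Paraphrased: keep what decompressed so far, warn, and stop
                logging.warning('Unable to decompress block at offset %d', offset - 4)
                break
            else:
                raise ValueError('Could not decompress any bzip2 blocks')
    return frames
```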
def nexrad_to_datetime(julian_date, ms_midnight):
|
<EOL>return datetime.datetime.utcfromtimestamp((julian_date - <NUM_LIT:1>) * day + ms_midnight * milli)<EOL>
|
Convert NEXRAD date time format to python `datetime.datetime`.
|
f8496:m5
|
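The body references module-level constants that are not part of this row; a sketch with assumed values (`day` in seconds, `milli` converting milliseconds to seconds), noting that day 1 of the NEXRAD Julian calendar is the Unix epoch:

```python
import datetime

day = 24 * 60 * 60   # assumed module constant: seconds per day
milli = 0.001        # assumed module constant: seconds per millisecond


def nexrad_to_datetime(julian_date, ms_midnight):
    """Convert NEXRAD date/time (Julian date, ms since midnight) to datetime."""
    # Day 1 is 1 January 1970, so subtract one before scaling to seconds
    return datetime.datetime.utcfromtimestamp((julian_date - 1) * day
                                              + ms_midnight * milli)
```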
def remap_status(val):
|
status = <NUM_LIT:0><EOL>bad = BAD_DATA if val & <NUM_LIT> else <NUM_LIT:0><EOL>val &= <NUM_LIT><EOL>if val == <NUM_LIT:0>:<EOL><INDENT>status = START_ELEVATION<EOL><DEDENT>elif val == <NUM_LIT:1>:<EOL><INDENT>status = <NUM_LIT:0><EOL><DEDENT>elif val == <NUM_LIT:2>:<EOL><INDENT>status = END_ELEVATION<EOL><DEDENT>elif val == <NUM_LIT:3>:<EOL><INDENT>status = START_ELEVATION | START_VOLUME<EOL><DEDENT>elif val == <NUM_LIT:4>:<EOL><INDENT>status = END_ELEVATION | END_VOLUME<EOL><DEDENT>elif val == <NUM_LIT:5>:<EOL><INDENT>status = START_ELEVATION | LAST_ELEVATION<EOL><DEDENT>return status | bad<EOL>
|
Convert status integer value to appropriate bitmask.
|
f8496:m6
|
def reduce_lists(d):
|
for field in d:<EOL><INDENT>old_data = d[field]<EOL>if len(old_data) == <NUM_LIT:1>:<EOL><INDENT>d[field] = old_data[<NUM_LIT:0>]<EOL><DEDENT><DEDENT>
|
Replace single item lists in a dictionary with the single item.
|
f8496:m7
|
def two_comp16(val):
|
if val >> <NUM_LIT:15>:<EOL><INDENT>val = -(~val & <NUM_LIT>) - <NUM_LIT:1><EOL><DEDENT>return val<EOL>
|
Return the two's-complement signed representation of a 16-bit unsigned integer.
|
f8496:m8
|
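A rendering with the masked 15-bit mask assumed to be 0x7FFF:

```python
def two_comp16(val):
    """Interpret a 16-bit unsigned integer as a two's-complement signed value."""
    if val >> 15:                    # sign bit set
        val = -(~val & 0x7FFF) - 1   # masked literal assumed to be 0x7FFF
    return val


assert two_comp16(0xFFFF) == -1
assert two_comp16(0x8000) == -32768
```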
def float16(val):
|
<EOL>frac = val & <NUM_LIT><EOL>exp = (val >> <NUM_LIT:10>) & <NUM_LIT><EOL>sign = val >> <NUM_LIT:15><EOL>if exp:<EOL><INDENT>value = <NUM_LIT:2> ** (exp - <NUM_LIT:16>) * (<NUM_LIT:1> + float(frac) / <NUM_LIT:2>**<NUM_LIT:10>)<EOL><DEDENT>else:<EOL><INDENT>value = float(frac) / <NUM_LIT:2>**<NUM_LIT:9><EOL><DEDENT>if sign:<EOL><INDENT>value *= -<NUM_LIT:1><EOL><DEDENT>return value<EOL>
|
Convert a 16-bit floating point value to a standard Python float.
|
f8496:m9
|
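The layout is 1 sign, 5 exponent, and 10 fraction bits; a sketch assuming the masked literals are the 0x03FF fraction mask, the 0x1F exponent mask, and an exponent bias of 16:

```python
def float16(val):
    """Convert a 16-bit float (1 sign, 5 exponent, 10 fraction bits) to a float."""
    frac = val & 0x03FF          # masked fraction mask assumed 0x03FF
    exp = (val >> 10) & 0x1F     # masked exponent mask assumed 0x1F
    sign = val >> 15
    if exp:
        value = 2 ** (exp - 16) * (1 + frac / 2 ** 10)   # normalized
    else:
        value = frac / 2 ** 9                            # denormalized
    return -value if sign else value
```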
def float32(short1, short2):
|
<EOL>return struct.unpack('<STR_LIT>', struct.pack('<STR_LIT>', short1 & <NUM_LIT>, short2 & <NUM_LIT>))[<NUM_LIT:0>]<EOL>
|
Unpack a pair of 16-bit integers as a Python float.
|
f8496:m10
|
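A sketch assuming big-endian formats ('>HH' in, '>f' out) and 0xFFFF masks, with `short1` carrying the high-order half:

```python
import struct


def float32(short1, short2):
    """Unpack a pair of 16-bit integers as a 32-bit float."""
    # Masked literals assumed: '>f', '>HH', and 0xFFFF masks on each half
    return struct.unpack('>f', struct.pack('>HH',
                                           short1 & 0xFFFF,
                                           short2 & 0xFFFF))[0]
```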
def date_elem(ind_days, ind_minutes):
|
def inner(seq):<EOL><INDENT>return nexrad_to_datetime(seq[ind_days], seq[ind_minutes] * <NUM_LIT> * <NUM_LIT:1000>)<EOL><DEDENT>return inner<EOL>
|
Create a function to parse a datetime from the product-specific blocks.
|
f8496:m11
|
def scaled_elem(index, scale):
|
def inner(seq):<EOL><INDENT>return seq[index] * scale<EOL><DEDENT>return inner<EOL>
|
Create a function to scale a certain product-specific block.
|
f8496:m12
|
def combine_elem(ind1, ind2):
|
def inner(seq):<EOL><INDENT>shift = <NUM_LIT:2>**<NUM_LIT:16><EOL>if seq[ind1] < <NUM_LIT:0>:<EOL><INDENT>seq[ind1] += shift<EOL><DEDENT>if seq[ind2] < <NUM_LIT:0>:<EOL><INDENT>seq[ind2] += shift<EOL><DEDENT>return (seq[ind1] << <NUM_LIT:16>) | seq[ind2]<EOL><DEDENT>return inner<EOL>
|
Create a function to combine two specified product-specific blocks into a single int.
|
f8496:m13
|
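All literals in this body are visible, so it can be rendered directly; the sign fix-up undoes two's-complement storage of each 16-bit half before recombining:

```python
def combine_elem(ind1, ind2):
    """Create a function combining two 16-bit halves into one 32-bit int."""
    def inner(seq):
        shift = 2 ** 16
        # Halves stored as signed 16-bit values are mapped back to unsigned
        if seq[ind1] < 0:
            seq[ind1] += shift
        if seq[ind2] < 0:
            seq[ind2] += shift
        return (seq[ind1] << 16) | seq[ind2]
    return inner


assert combine_elem(0, 1)([-1, -1]) == 0xFFFFFFFF
```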
def float_elem(ind1, ind2):
|
return lambda seq: float32(seq[ind1], seq[ind2])<EOL>
|
Create a function to combine two specified product-specific blocks into a float.
|
f8496:m14
|
def high_byte(ind):
|
def inner(seq):<EOL><INDENT>return seq[ind] >> <NUM_LIT:8><EOL><DEDENT>return inner<EOL>
|
Create a function to return the high-byte of a product-specific block.
|
f8496:m15
|
def low_byte(ind):
|
def inner(seq):<EOL><INDENT>return seq[ind] & <NUM_LIT><EOL><DEDENT>return inner<EOL>
|
Create a function to return the low-byte of a product-specific block.
|
f8496:m16
|
@exporter.export<EOL>def is_precip_mode(vcp_num):
|
return not vcp_num // <NUM_LIT:10> == <NUM_LIT:3><EOL>
|
r"""Determine if the NEXRAD radar is operating in precipitation mode.
Parameters
----------
vcp_num : int
The NEXRAD volume coverage pattern (VCP) number
Returns
-------
bool
True if the VCP corresponds to precipitation mode, False otherwise
|
f8496:m17
|
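The test relies on clear-air VCP numbers all falling in the 30s (e.g. 31, 32, 35); every other VCP is treated as precipitation mode:

```python
def is_precip_mode(vcp_num):
    """Return True unless the VCP number is in the clear-air 30s range."""
    return not vcp_num // 10 == 3


assert is_precip_mode(212)       # precipitation-mode VCP
assert not is_precip_mode(32)    # clear-air VCP
```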
def __init__(self, filename):
|
fobj = open_as_needed(filename)<EOL>with contextlib.closing(fobj):<EOL><INDENT>self._buffer = IOBuffer.fromfile(fobj)<EOL><DEDENT>self._read_volume_header()<EOL>start = self._buffer.set_mark()<EOL>try:<EOL><INDENT>self._buffer = IOBuffer(self._buffer.read_func(bzip_blocks_decompress_all))<EOL><DEDENT>except ValueError:<EOL><INDENT>self._buffer.jump_to(start)<EOL><DEDENT>self._read_data()<EOL>
|
r"""Create instance of `Level2File`.
Parameters
----------
filename : str or file-like object
If str, the name of the file to be opened. Gzip-ed files are
recognized with the extension '.gz', as are bzip2-ed files with
the extension `.bz2` If `fname` is a file-like object,
this will be read from directly.
|
f8496:c0:m0
|
def __call__(self, data):
|
return self.lut[data]<EOL>
|
Convert the values.
|
f8496:c1:m0
|
def __init__(self, prod):
|
min_val = two_comp16(prod.thresholds[<NUM_LIT:0>]) * self._min_scale<EOL>inc = prod.thresholds[<NUM_LIT:1>] * self._inc_scale<EOL>num_levels = prod.thresholds[<NUM_LIT:2>]<EOL>self.lut = [self.MISSING] * <NUM_LIT><EOL>num_levels = min(num_levels, self._max_data - self._min_data + <NUM_LIT:1>)<EOL>for i in range(num_levels):<EOL><INDENT>self.lut[i + self._min_data] = min_val + i * inc<EOL><DEDENT>self.lut = np.array(self.lut)<EOL>
|
Initialize the mapper and the lookup table.
|
f8496:c2:m0
|
def __init__(self, prod):
|
lin_scale = float16(prod.thresholds[<NUM_LIT:0>])<EOL>lin_offset = float16(prod.thresholds[<NUM_LIT:1>])<EOL>log_start = prod.thresholds[<NUM_LIT:2>]<EOL>log_scale = float16(prod.thresholds[<NUM_LIT:3>])<EOL>log_offset = float16(prod.thresholds[<NUM_LIT:4>])<EOL>self.lut = np.empty((<NUM_LIT>,), dtype=np.float64)<EOL>self.lut.fill(self.MISSING)<EOL>ind = np.arange(<NUM_LIT:255>)<EOL>self.lut[<NUM_LIT:2>:log_start] = (ind[<NUM_LIT:2>:log_start] - lin_offset) / lin_scale<EOL>self.lut[log_start:-<NUM_LIT:1>] = np.exp((ind[log_start:] - log_offset) / log_scale)<EOL>
|
Initialize the VIL mapper.
|
f8496:c8:m0
|
def __init__(self, prod):
|
data_mask = prod.thresholds[<NUM_LIT:0>]<EOL>scale = prod.thresholds[<NUM_LIT:1>]<EOL>offset = prod.thresholds[<NUM_LIT:2>]<EOL>topped_mask = prod.thresholds[<NUM_LIT:3>]<EOL>self.lut = [self.MISSING] * <NUM_LIT><EOL>self.topped_lut = [False] * <NUM_LIT><EOL>for i in range(<NUM_LIT:2>, <NUM_LIT>):<EOL><INDENT>self.lut[i] = ((i & data_mask) - offset) / scale<EOL>self.topped_lut[i] = bool(i & topped_mask)<EOL><DEDENT>self.lut = np.array(self.lut)<EOL>self.topped_lut = np.array(self.topped_lut)<EOL>
|
Initialize the mapper.
|
f8496:c9:m0
|
def __call__(self, data_vals):
|
return self.lut[data_vals], self.topped_lut[data_vals]<EOL>
|
Convert the data values.
|
f8496:c9:m1
|
def __init__(self, prod):
|
scale = float32(prod.thresholds[<NUM_LIT:0>], prod.thresholds[<NUM_LIT:1>])<EOL>offset = float32(prod.thresholds[<NUM_LIT:2>], prod.thresholds[<NUM_LIT:3>])<EOL>max_data_val = prod.thresholds[<NUM_LIT:5>]<EOL>leading_flags = prod.thresholds[<NUM_LIT:6>]<EOL>trailing_flags = prod.thresholds[<NUM_LIT:7>]<EOL>self.lut = [self.MISSING] * (max_data_val + <NUM_LIT:1>)<EOL>if leading_flags > <NUM_LIT:1>:<EOL><INDENT>self.lut[<NUM_LIT:1>] = self.RANGE_FOLD<EOL><DEDENT>for i in range(leading_flags, max_data_val - trailing_flags + <NUM_LIT:1>):<EOL><INDENT>self.lut[i] = (i - offset) / scale<EOL><DEDENT>self.lut = np.array(self.lut)<EOL>
|
Initialize the mapper by pulling out all the information from the product.
|
f8496:c10:m0
|
def __init__(self, prod):
|
self.lut = [self.MISSING] * <NUM_LIT><EOL>for i in range(<NUM_LIT:10>, <NUM_LIT>):<EOL><INDENT>self.lut[i] = i // <NUM_LIT:10><EOL><DEDENT>self.lut[<NUM_LIT>] = self.RANGE_FOLD<EOL>self.lut = np.array(self.lut)<EOL>
|
Initialize the mapper.
|
f8496:c11:m0
|
def __init__(self, prod):
|
scale = prod.thresholds[<NUM_LIT:0>] / <NUM_LIT><EOL>offset = prod.thresholds[<NUM_LIT:1>] / <NUM_LIT><EOL>data_levels = prod.thresholds[<NUM_LIT:2>]<EOL>leading_flags = prod.thresholds[<NUM_LIT:3>]<EOL>self.lut = [self.MISSING] * data_levels<EOL>for i in range(leading_flags, data_levels):<EOL><INDENT>self.lut[i] = scale * i + offset<EOL><DEDENT>self.lut = np.array(self.lut)<EOL>
|
Initialize the mapper based on the product.
|
f8496:c12:m0
|
def __init__(self, prod):
|
self.labels = []<EOL>self.lut = []<EOL>for t in prod.thresholds:<EOL><INDENT>codes, val = t >> <NUM_LIT:8>, t & <NUM_LIT><EOL>label = '<STR_LIT>'<EOL>if codes >> <NUM_LIT:7>:<EOL><INDENT>label = self.lut_names[val]<EOL>if label in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>'):<EOL><INDENT>val = self.MISSING<EOL><DEDENT>elif label == '<STR_LIT>':<EOL><INDENT>val = self.RANGE_FOLD<EOL><DEDENT><DEDENT>elif codes >> <NUM_LIT:6>:<EOL><INDENT>val *= <NUM_LIT><EOL>label = '<STR_LIT>'.format(val)<EOL><DEDENT>elif codes >> <NUM_LIT:5>:<EOL><INDENT>val *= <NUM_LIT><EOL>label = '<STR_LIT>'.format(val)<EOL><DEDENT>elif codes >> <NUM_LIT:4>:<EOL><INDENT>val *= <NUM_LIT:0.1><EOL>label = '<STR_LIT>'.format(val)<EOL><DEDENT>if codes & <NUM_LIT>:<EOL><INDENT>val *= -<NUM_LIT:1><EOL>label = '<STR_LIT:->' + label<EOL><DEDENT>elif (codes >> <NUM_LIT:1>) & <NUM_LIT>:<EOL><INDENT>label = '<STR_LIT:+>' + label<EOL><DEDENT>if (codes >> <NUM_LIT:2>) & <NUM_LIT>:<EOL><INDENT>label = '<STR_LIT:<>' + label<EOL><DEDENT>elif (codes >> <NUM_LIT:3>) & <NUM_LIT>:<EOL><INDENT>label = '<STR_LIT:>>' + label<EOL><DEDENT>if not label:<EOL><INDENT>label = str(val)<EOL><DEDENT>self.lut.append(val)<EOL>self.labels.append(label)<EOL><DEDENT>self.lut = np.array(self.lut)<EOL>
|
Initialize the values and labels from the product.
|
f8496:c13:m0
|
def __init__(self, filename):
|
fobj = open_as_needed(filename)<EOL>self.filename = filename if is_string_like(filename) else '<STR_LIT>'<EOL>with contextlib.closing(fobj):<EOL><INDENT>self._buffer = IOBuffer.fromfile(fobj)<EOL><DEDENT>self._process_wmo_header()<EOL>self._process_end_bytes()<EOL><INDENT>self.data = []<EOL><DEDENT>self.metadata = {}<EOL>if self.wmo_code == '<STR_LIT>':<EOL><INDENT>self.header = None<EOL>self.prod_desc = None<EOL>self.thresholds = None<EOL>self.depVals = None<EOL>self.product_name = '<STR_LIT>'<EOL>self.text = '<STR_LIT>'.join(self._buffer.read_ascii())<EOL>return<EOL><DEDENT>self._buffer = IOBuffer(self._buffer.read_func(zlib_decompress_all_frames))<EOL>self._process_wmo_header()<EOL>if len(self._buffer) == <NUM_LIT:0>:<EOL><INDENT>log.warning('<STR_LIT>', self.filename)<EOL>return<EOL><DEDENT>msg_start = self._buffer.set_mark()<EOL>self.header = self._buffer.read_struct(self.header_fmt)<EOL>log.debug('<STR_LIT>', len(self._buffer),<EOL>self.header.msg_len, self.header)<EOL>if not self._buffer.check_remains(self.header.msg_len - self.header_fmt.size):<EOL><INDENT>log.warning('<STR_LIT>'<EOL>'<STR_LIT>',<EOL>len(self._buffer) - self._buffer._offset,<EOL>self.header.msg_len - self.header_fmt.size)<EOL><DEDENT>if self.header.code == <NUM_LIT:2>:<EOL><INDENT>self.gsm = self._buffer.read_struct(self.gsm_fmt)<EOL>assert self.gsm.divider == -<NUM_LIT:1><EOL>if self.gsm.block_len > <NUM_LIT>:<EOL><INDENT>self.gsm_additional = self._buffer.read_struct(self.additional_gsm_fmt)<EOL>assert self.gsm.block_len == <NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>assert self.gsm.block_len == <NUM_LIT><EOL><DEDENT>return<EOL><DEDENT>self.prod_desc = self._buffer.read_struct(self.prod_desc_fmt)<EOL>self.thresholds = [getattr(self.prod_desc, '<STR_LIT>' + str(i)) for i in range(<NUM_LIT:1>, <NUM_LIT>)]<EOL>self.depVals = [getattr(self.prod_desc, '<STR_LIT>' + str(i)) for i in range(<NUM_LIT:1>, <NUM_LIT:11>)]<EOL>self.metadata['<STR_LIT>'] = nexrad_to_datetime(self.header.date,<EOL>self.header.time * <NUM_LIT:1000>)<EOL>self.metadata['<STR_LIT>'] = nexrad_to_datetime(self.prod_desc.vol_date,<EOL>self.prod_desc.vol_start_time * <NUM_LIT:1000>)<EOL>self.metadata['<STR_LIT>'] = nexrad_to_datetime(self.prod_desc.prod_gen_date,<EOL>self.prod_desc.prod_gen_time * <NUM_LIT:1000>)<EOL>self.lat = self.prod_desc.lat * <NUM_LIT><EOL>self.lon = self.prod_desc.lon * <NUM_LIT><EOL>self.height = self.prod_desc.height<EOL>default = ('<STR_LIT>', <NUM_LIT>, LegacyMapper,<EOL>(('<STR_LIT>', scaled_elem(<NUM_LIT:2>, <NUM_LIT:0.1>)), ('<STR_LIT>', <NUM_LIT:7>),<EOL>('<STR_LIT>', combine_elem(<NUM_LIT:8>, <NUM_LIT:9>)), ('<STR_LIT>', <NUM_LIT:0>)))<EOL>self.product_name, self.max_range, mapper, meta = self.prod_spec_map.get(<EOL>self.header.code, default)<EOL>for name, block in meta:<EOL><INDENT>if callable(block):<EOL><INDENT>self.metadata[name] = block(self.depVals)<EOL><DEDENT>else:<EOL><INDENT>self.metadata[name] = self.depVals[block]<EOL><DEDENT><DEDENT>self.map_data = mapper(self)<EOL>if self.metadata.get('<STR_LIT>', False):<EOL><INDENT>try:<EOL><INDENT>comp_start = self._buffer.set_mark()<EOL>decomp_data = self._buffer.read_func(bz2.decompress)<EOL>self._buffer.splice(comp_start, decomp_data)<EOL>assert self._buffer.check_remains(self.metadata['<STR_LIT>'])<EOL><DEDENT>except IOError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if self.header.code in self.standalone_tabular:<EOL><INDENT>if self.prod_desc.sym_off:<EOL><INDENT>self._unpack_tabblock(msg_start, <NUM_LIT:2> * self.prod_desc.sym_off, False)<EOL><DEDENT>if self.prod_desc.graph_off:<EOL><INDENT>self._unpack_standalone_graphblock(msg_start,<EOL><NUM_LIT:2> * (self.prod_desc.graph_off - <NUM_LIT:1>))<EOL><DEDENT><DEDENT>elif self.header.code == <NUM_LIT>:<EOL><INDENT>self._unpack_rcm(msg_start, <NUM_LIT:2> * self.prod_desc.sym_off)<EOL><DEDENT>else:<EOL><INDENT>if self.prod_desc.sym_off:<EOL><INDENT>self._unpack_symblock(msg_start, <NUM_LIT:2> * self.prod_desc.sym_off)<EOL><DEDENT>if self.prod_desc.graph_off:<EOL><INDENT>self._unpack_graphblock(msg_start, <NUM_LIT:2> * self.prod_desc.graph_off)<EOL><DEDENT>if self.prod_desc.tab_off:<EOL><INDENT>self._unpack_tabblock(msg_start, <NUM_LIT:2> * self.prod_desc.tab_off)<EOL><DEDENT><DEDENT>if '<STR_LIT>' in self.metadata:<EOL><INDENT>log.warning('<STR_LIT>',<EOL>self.filename, self.header.code)<EOL><DEDENT>
|
r"""Create instance of `Level3File`.
Parameters
----------
filename : str or file-like object
If str, the name of the file to be opened. If file-like object,
this will be read from directly.
|
f8496:c14:m0
|
@staticmethod<EOL><INDENT>def pos_scale(is_sym_block):<DEDENT>
|
return <NUM_LIT> if is_sym_block else <NUM_LIT:1><EOL>
|
Scale of the position information in km.
|
f8496:c14:m4
|
def __repr__(self):
|
items = [self.product_name, self.header, self.prod_desc, self.thresholds,<EOL>self.depVals, self.metadata, self.siteID]<EOL>return self.filename + '<STR_LIT>' + '<STR_LIT:\n>'.join(map(str, items))<EOL>
|
Return the string representation of the product.
|
f8496:c14:m10
|
def __call__(self, code):
|
xdr = OrderedDict()<EOL>if code == <NUM_LIT>:<EOL><INDENT>xdr.update(self._unpack_prod_desc())<EOL><DEDENT>else:<EOL><INDENT>log.warning('<STR_LIT>', code)<EOL><DEDENT>self.done()<EOL>return xdr<EOL>
|
Perform the actual unpacking.
|
f8496:c15:m0
|
def unpack_string(self):
|
return Unpacker.unpack_string(self).decode('<STR_LIT:ascii>')<EOL>
|
Unpack the internal data as a string.
|
f8496:c15:m1
|
@deprecated(<NUM_LIT>, alternative='<STR_LIT>')<EOL>def cf_to_proj(var):
|
import pyproj<EOL>kwargs = {'<STR_LIT>': var.latitude_of_projection_origin, '<STR_LIT:a>': var.earth_radius,<EOL>'<STR_LIT:b>': var.earth_radius}<EOL>if var.grid_mapping_name == '<STR_LIT>':<EOL><INDENT>kwargs['<STR_LIT>'] = '<STR_LIT>'<EOL>kwargs['<STR_LIT>'] = var.longitude_of_central_meridian<EOL>kwargs['<STR_LIT>'] = var.standard_parallel<EOL>kwargs['<STR_LIT>'] = var.standard_parallel<EOL><DEDENT>elif var.grid_mapping_name == '<STR_LIT>':<EOL><INDENT>kwargs['<STR_LIT>'] = '<STR_LIT>'<EOL>kwargs['<STR_LIT>'] = var.straight_vertical_longitude_from_pole<EOL>kwargs['<STR_LIT>'] = var.latitude_of_projection_origin<EOL>kwargs['<STR_LIT>'] = var.standard_parallel<EOL>kwargs['<STR_LIT>'] = False <EOL>kwargs['<STR_LIT>'] = False <EOL><DEDENT>elif var.grid_mapping_name == '<STR_LIT>':<EOL><INDENT>kwargs['<STR_LIT>'] = '<STR_LIT>'<EOL>kwargs['<STR_LIT>'] = var.longitude_of_projection_origin<EOL>kwargs['<STR_LIT>'] = var.standard_parallel<EOL>kwargs['<STR_LIT>'] = False <EOL>kwargs['<STR_LIT>'] = False <EOL><DEDENT>return pyproj.Proj(**kwargs)<EOL>
|
r"""Convert a Variable with projection information to a Proj.4 Projection instance.
The attributes of this Variable must conform to the Climate and Forecasting (CF)
netCDF conventions.
Parameters
----------
var : Variable
The projection variable with appropriate attributes.
|
f8497:m0
|
def __init__(self):
|
self._attrs = []<EOL>
|
r"""Initialize an :class:`AttributeContainer`.
|
f8497:c0:m0
|
def ncattrs(self):
|
return self._attrs<EOL>
|
r"""Get a list of the names of the netCDF attributes.
Returns
-------
List[str]
|
f8497:c0:m1
|
def __setattr__(self, key, value):
|
if hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._attrs.append(key)<EOL><DEDENT>self.__dict__[key] = value<EOL>
|
Handle setting attributes.
|
f8497:c0:m2
|
def __delattr__(self, item):
|
self.__dict__.pop(item)<EOL>if hasattr(self, '<STR_LIT>'):<EOL><INDENT>self._attrs.remove(item)<EOL><DEDENT>
|
Handle attribute deletion.
|
f8497:c0:m3
|
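The two dunder methods above cooperate with `ncattrs`: once the tracking list exists, every later assignment is recorded as a netCDF attribute. A self-contained sketch, assuming the masked attribute name is `_attrs`:

```python
class AttributeContainer:
    """Sketch of the attribute-tracking container (masked name assumed '_attrs')."""

    def __init__(self):
        self._attrs = []   # set via __dict__ below, since _attrs doesn't exist yet

    def ncattrs(self):
        return self._attrs

    def __setattr__(self, key, value):
        # Once _attrs exists, record every subsequent assignment as an attribute
        if hasattr(self, '_attrs'):
            self._attrs.append(key)
        self.__dict__[key] = value

    def __delattr__(self, item):
        self.__dict__.pop(item)
        if hasattr(self, '_attrs'):
            self._attrs.remove(item)


c = AttributeContainer()
c.units = 'm'
assert c.ncattrs() == ['units']
```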
def __init__(self, parent, name):
|
self.parent = parent<EOL>if parent:<EOL><INDENT>self.parent.groups[name] = self<EOL><DEDENT>self.name = name<EOL>self.groups = OrderedDict()<EOL>self.variables = OrderedDict()<EOL>self.dimensions = OrderedDict()<EOL>super(Group, self).__init__()<EOL>
|
r"""Initialize this :class:`Group`.
Instead of constructing a :class:`Group` directly, you should use
:meth:`~Group.createGroup`.
Parameters
----------
parent : Group or None
The parent Group for this one. Passing in :data:`None` implies that this is
the root :class:`Group`.
name : str
The name of this group
See Also
--------
Group.createGroup
|
f8497:c1:m0
|
def createGroup(self, name):
|
grp = Group(self, name)<EOL>self.groups[name] = grp<EOL>return grp<EOL>
|
Create a new Group as a descendant of this one.
Parameters
----------
name : str
The name of the new Group.
Returns
-------
Group
The newly created :class:`Group`
|
f8497:c1:m1
|
def createDimension(self, name, size):
|
dim = Dimension(self, name, size)<EOL>self.dimensions[name] = dim<EOL>return dim<EOL>
|
Create a new :class:`Dimension` in this :class:`Group`.
Parameters
----------
name : str
The name of the new Dimension.
size : int
The size of the Dimension
Returns
-------
Dimension
The newly created :class:`Dimension`
|
f8497:c1:m2
|
def createVariable(self, name, datatype, dimensions=(), fill_value=None, <EOL>wrap_array=None):
|
var = Variable(self, name, datatype, dimensions, fill_value, wrap_array)<EOL>self.variables[name] = var<EOL>return var<EOL>
|
Create a new Variable in this Group.
Parameters
----------
name : str
The name of the new Variable.
datatype : str or numpy.dtype
A valid Numpy dtype that describes the layout of the data within the Variable.
dimensions : tuple[str], optional
The dimensions of this Variable. Defaults to empty, which implies a scalar
variable.
fill_value : number, optional
A scalar value that is used to fill the created storage. Defaults to None, which
performs no filling, leaving the storage uninitialized.
wrap_array : numpy.ndarray, optional
Instead of creating an array, the Variable instance will assume ownership of the
passed in array as its data storage. This is a performance optimization to avoid
copying large data blocks. Defaults to None, which means a new array will be
created.
Returns
-------
Variable
The newly created :class:`Variable`
|
f8497:c1:m3
|
def __str__(self):
|
print_groups = []<EOL>if self.name:<EOL><INDENT>print_groups.append(self.name)<EOL><DEDENT>if self.groups:<EOL><INDENT>print_groups.append('<STR_LIT>')<EOL>for group in self.groups.values():<EOL><INDENT>print_groups.append(str(group))<EOL><DEDENT><DEDENT>if self.dimensions:<EOL><INDENT>print_groups.append('<STR_LIT>')<EOL>for dim in self.dimensions.values():<EOL><INDENT>print_groups.append(str(dim))<EOL><DEDENT><DEDENT>if self.variables:<EOL><INDENT>print_groups.append('<STR_LIT>')<EOL>for var in self.variables.values():<EOL><INDENT>print_groups.append(str(var))<EOL><DEDENT><DEDENT>if self.ncattrs():<EOL><INDENT>print_groups.append('<STR_LIT>')<EOL>for att in self.ncattrs():<EOL><INDENT>print_groups.append('<STR_LIT>'.format(att, getattr(self, att)))<EOL><DEDENT><DEDENT>return '<STR_LIT:\n>'.join(print_groups)<EOL>
|
Return a string representation of the Group.
|
f8497:c1:m4
|
def __init__(self):
|
super(Dataset, self).__init__(None, '<STR_LIT:root>')<EOL>
|
Initialize a Dataset.
|
f8497:c2:m0
|
def __init__(self, group, name, datatype, dimensions, fill_value, wrap_array):
|
<EOL>self._group = group<EOL>self._name = name<EOL>self._dimensions = tuple(dimensions)<EOL>shape = tuple(len(group.dimensions.get(d)) for d in dimensions)<EOL>if wrap_array is not None:<EOL><INDENT>if shape != wrap_array.shape:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>self._data = wrap_array<EOL><DEDENT>else:<EOL><INDENT>self._data = np.empty(shape, dtype=datatype)<EOL>if fill_value is not None:<EOL><INDENT>self._data.fill(fill_value)<EOL><DEDENT><DEDENT>super(Variable, self).__init__()<EOL>
|
Initialize a Variable.
Instead of constructing a Variable directly, you should use
:meth:`Group.createVariable`.
Parameters
----------
group : Group
The parent :class:`Group` that owns this Variable.
name : str
The name of this Variable.
datatype : str or numpy.dtype
A valid Numpy dtype that describes the layout of each element of the data
dimensions : tuple[str], optional
The dimensions of this Variable. Defaults to empty, which implies a scalar
variable.
fill_value : scalar, optional
A scalar value that is used to fill the created storage. Defaults to None, which
performs no filling, leaving the storage uninitialized.
wrap_array : numpy.ndarray, optional
Instead of creating an array, the Variable instance will assume ownership of the
passed in array as its data storage. This is a performance optimization to avoid
copying large data blocks. Defaults to None, which means a new array will be
created.
See Also
--------
Group.createVariable
|
f8497:c3:m0
|
def group(self):
|
return self._group<EOL>
|
Get the Group that owns this Variable.
Returns
-------
Group
The parent Group.
|
f8497:c3:m1
|
@property<EOL><INDENT>def name(self):<DEDENT>
|
return self._name<EOL>
|
str: the name of the variable.
|
f8497:c3:m2
|
@property<EOL><INDENT>def size(self):<DEDENT>
|
return self._data.size<EOL>
|
int: the total number of elements.
|
f8497:c3:m3
|
@property<EOL><INDENT>def shape(self):<DEDENT>
|
return self._data.shape<EOL>
|
tuple[int]: Describes the size of the Variable along each of its dimensions.
|
f8497:c3:m4
|
@property<EOL><INDENT>def ndim(self):<DEDENT>
|
return self._data.ndim<EOL>
|
int: the number of dimensions used by this variable.
|
f8497:c3:m5
|