# File: sarpy-master/tests/io/product/sidd3_elements/test_sidd3_elements.py
#
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import logging
import numpy as np
import pytest
import sarpy.io.complex.sicd_elements.Radiometric as SicdRadiometric
from sarpy.io.product.sidd3_elements import blocks
def test_anglezeroto360_nominal(caplog):
with caplog.at_level(logging.INFO, 'sarpy.io.xml.descriptors'):
angmag = blocks.AngleZeroToExclusive360MagnitudeType(12.34, 56.78)
np.testing.assert_array_equal(angmag.get_array(), [12.34, 56.78])
assert angmag.Angle == 12.34
assert angmag.Magnitude == 56.78
angmag2 = blocks.AngleZeroToExclusive360MagnitudeType.from_array(angmag.get_array())
np.testing.assert_array_equal(angmag.get_array(), angmag2.get_array())
assert not caplog.records
@pytest.mark.parametrize('angle', (-1, 361))
def test_anglezeroto360_bad_angle(angle, caplog):
with caplog.at_level(logging.INFO, 'sarpy.io.xml.descriptors'):
angmag = blocks.AngleZeroToExclusive360MagnitudeType(angle, 56.78)
assert len(caplog.records) == 1
assert 'required by standard to take value between (0.0, 360).' in caplog.text
def test_radiometric(caplog):
with caplog.at_level(logging.INFO, 'sarpy.io.xml.descriptors'):
new_fields = set(blocks.RadiometricType._fields) - set(SicdRadiometric.RadiometricType._fields)
assert new_fields == {'SigmaZeroSFIncidenceMap'}
rad = blocks.RadiometricType()
assert rad.SigmaZeroSFIncidenceMap is None
rad = blocks.RadiometricType(SigmaZeroSFIncidenceMap='APPLIED')
assert rad.SigmaZeroSFIncidenceMap == 'APPLIED'
rad = blocks.RadiometricType(SigmaZeroSFIncidenceMap='NOT_APPLIED')
assert rad.SigmaZeroSFIncidenceMap == 'NOT_APPLIED'
assert not caplog.records
def test_radiometric_invalid(caplog):
with caplog.at_level(logging.INFO, 'sarpy.io.xml.descriptors'):
rad = blocks.RadiometricType(SigmaZeroSFIncidenceMap='invalid')
assert rad.SigmaZeroSFIncidenceMap == 'invalid'
assert len(caplog.records) == 1
assert "values ARE REQUIRED to be one of ('APPLIED', 'NOT_APPLIED')" in caplog.text

# File: sarpy-master/tests/io/phase_history/test_cphd.py
import logging
import os
import json
import tempfile
import unittest
import shutil
import numpy.testing
from sarpy.io.phase_history.cphd import CPHDReader, CPHDReader0_3, CPHDReader1, CPHDWriter1
from sarpy.io.phase_history.converter import open_phase_history
import sarpy.consistency.cphd_consistency
from tests import parse_file_entry
cphd_file_types = {}
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'cphd_file_types.json') # specifies file locations
if os.path.isfile(file_reference):
with open(file_reference, 'r') as fi:
the_files = json.load(fi)
for the_type in the_files:
valid_entries = []
for entry in the_files[the_type]:
the_file = parse_file_entry(entry)
if the_file is not None:
valid_entries.append(the_file)
cphd_file_types[the_type] = valid_entries
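# Based on the usage above, the JSON file is expected to map type names (e.g.
# "CPHD") to lists of file entries understood by tests.parse_file_entry.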
def generic_io_test(instance, test_file, reader_type_string, reader_type):
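"""
End-to-end check of a CPHD reader: open the file, confirm the reader type and
reader_type string, validate the metadata, compare data sizes against the
metadata, spot-check corner/row/column reads and PVP fetches, and (for CPHD
1.x readers) round-trip the file through the writer.
"""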
assert isinstance(instance, unittest.TestCase)
reader = None
with instance.subTest(msg='establish reader for type {} and file {}'.format(reader_type_string, test_file)):
reader = open_phase_history(test_file)
instance.assertTrue(reader is not None, msg='Returned None, so opening failed.')
if reader is None:
return # remaining tests make no sense
assert isinstance(reader, CPHDReader)
with instance.subTest(msg='Reader for type {} should be appropriate reader'.format(reader_type_string)):
instance.assertTrue(isinstance(reader, reader_type), msg='Returned reader should be of type {}'.format(
reader_type))
if not isinstance(reader, reader_type):
return # remaining tests might be misleading
with instance.subTest(msg='Verify reader_type for type {} and file {}'.format(reader_type_string, test_file)):
instance.assertEqual(reader.reader_type, "CPHD", msg='reader.reader_type should be "CPHD"')
with instance.subTest(msg='Validity of cphd in reader of '
'type {} for file {}'.format(reader_type_string, test_file)):
if not reader.cphd_meta.is_valid(recursive=True, stack=False):
logging.warning(
'cphd in reader of type {} for file {} not valid'.format(reader_type_string, test_file))
with instance.subTest(msg='Fetch data_sizes and cphd data elements for type {} and file {}'.format(
reader_type_string, test_file)):
data_sizes = reader.get_data_size_as_tuple()
if isinstance(reader, CPHDReader1):
elements = reader.cphd_meta.Data.Channels
elif isinstance(reader, CPHDReader0_3):
elements = reader.cphd_meta.Data.ArraySize
else:
raise TypeError('Got unhandled reader type {}'.format(type(reader)))
for i, (data_size, element) in enumerate(zip(data_sizes, elements)):
with instance.subTest(msg='Verify data size for cphd index {} in reader '
'of type {} for file {}'.format(i, reader_type_string, test_file)):
instance.assertEqual(data_size[0], element.NumVectors, msg='data_size[0] and NumVectors do not agree')
instance.assertEqual(data_size[1], element.NumSamples, msg='data_size[1] and NumSamples do not agree')
with instance.subTest(msg='Basic fetch test for cphd index {} in reader '
'of type {} for file {}'.format(i, reader_type_string, test_file)):
instance.assertEqual(reader[:2, :2, i].shape[:2], (2, 2), msg='upper left fetch')
instance.assertEqual(reader[-2:, :2, i].shape[:2], (2, 2), msg='lower left fetch')
instance.assertEqual(reader[-2:, -2:, i].shape[:2], (2, 2), msg='lower right fetch')
instance.assertEqual(reader[:2, -2:, i].shape[:2], (2, 2), msg='upper right fetch')
with instance.subTest(msg='Verify fetching complete row(s) have correct size '
'for cphd index {} in reader of type {} and file {}'.format(
i, reader_type_string, test_file)):
test_data = reader[:, :2, i]
instance.assertEqual(test_data.shape[:2], (data_size[0], 2), msg='Complete row fetch size mismatch')
with instance.subTest(msg='Verify fetching complete column(s) have correct size '
'for cphd index {} in reader of type {} file {}'.format(
i, reader_type_string, test_file)):
test_data = reader[:2, :, i]
instance.assertEqual(test_data.shape[:2], (2, data_size[1]), msg='Complete column fetch size mismatch')
with instance.subTest(msg='Verify fetching entire pvp data has correct size for cphd '
'index {} in reader of type {} file {}'.format(i, reader_type_string, test_file)):
test_pvp = reader.read_pvp_variable('TxTime', i, the_range=None)
instance.assertEqual(test_pvp.shape, (data_size[0], ), msg='Unexpected pvp total fetch size')
with instance.subTest(msg='Verify fetching pvp data for slice has correct size for cphd '
'index {} in reader of type {} file {}'.format(i, reader_type_string, test_file)):
test_pvp = reader.read_pvp_variable('TxTime', i, the_range=(0, 10, 2))
instance.assertEqual(test_pvp.shape, (5, ), msg='Unexpected pvp strided slice fetch size')
# create a temp directory
temp_directory = tempfile.mkdtemp()
if isinstance(reader, CPHDReader1):
with instance.subTest(msg='cphd writer test'):
generic_writer_test(reader, temp_directory)
shutil.rmtree(temp_directory)
del reader
def generic_writer_test(cphd_reader, the_directory):
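"""
Write the reader's support, PVP, and signal blocks to a new CPHD file, re-read
them, verify the round-tripped blocks are identical, then run the CPHD
consistency checker on the written file.
"""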
written_cphd_name = os.path.join(the_directory, 'example_cphd.cphd')
read_support = cphd_reader.read_support_block()
read_pvp = cphd_reader.read_pvp_block()
read_signal = cphd_reader.read_signal_block()
# write the cphd file
with CPHDWriter1(written_cphd_name, cphd_reader.cphd_meta, check_existence=False) as writer:
writer.write_file(read_pvp, read_signal, read_support)
# reread the newly written data
rereader = CPHDReader(written_cphd_name)
reread_support = rereader.read_support_block()
reread_pvp = rereader.read_pvp_block()
reread_signal = rereader.read_signal_block()
# byte compare that the original data and re-read data are identical
assert read_support.keys() == reread_support.keys(), 'Support keys are not identical'
for support_key in reread_support:
numpy.testing.assert_array_equal(read_support[support_key], reread_support[support_key])
assert reread_pvp.keys() == read_pvp.keys(), 'PVP keys are not identical'
for pvp_key in reread_pvp:
numpy.testing.assert_array_equal(read_pvp[pvp_key], reread_pvp[pvp_key])
assert read_signal.keys() == reread_signal.keys(), 'Signal keys are not identical'
for signal_key in reread_signal:
numpy.testing.assert_array_equal(read_signal[signal_key], reread_signal[signal_key])
assert not sarpy.consistency.cphd_consistency.main([written_cphd_name, '--signal-data'])
class TestCPHD(unittest.TestCase):
@unittest.skipIf(len(cphd_file_types.get('CPHD', [])) == 0, 'No CPHD files specified or found')
def test_cphd_io(self):
for test_file in cphd_file_types['CPHD']:
generic_io_test(self, test_file, 'CPHD', CPHDReader)

# File: sarpy-master/tests/io/phase_history/__init__.py
__classification__ = 'UNCLASSIFIED'

# File: sarpy-master/tests/io/phase_history/cphd1_elements/__init__.py
__classification__ = 'UNCLASSIFIED'

# File: sarpy-master/tests/io/phase_history/cphd1_elements/test_utils.py
import numpy as np
import sarpy.io.phase_history.cphd1_elements.utils
import unittest
class TestCphd1Utils(unittest.TestCase):
def test_binary_format_to_dtype(self):
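"""Scalar format strings (e.g. 'I1') map to numpy scalar dtypes; 'name=FMT;' lists map to structured dtypes."""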
self.assertEqual(sarpy.io.phase_history.cphd1_elements.utils.binary_format_string_to_dtype('I1'), np.int8)
dt = np.dtype([('a', '>i8'),
('b', '>f8'),
('c', '>f8')])
self.assertEqual(sarpy.io.phase_history.cphd1_elements.utils.binary_format_string_to_dtype('a=I8;b=F8;c=F8;'), dt)

# File: sarpy-master/tests/io/complex/test_sicd.py
import os
import json
import tempfile
import unittest
from sarpy.io.complex.converter import conversion_utility
from sarpy.io.complex.sicd import SICDReader
from sarpy.io.complex.sicd_schema import get_schema_path, get_default_version_string
from tests import parse_file_entry
try:
from lxml import etree
except ImportError:
etree = None
complex_file_types = {}
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'complex_file_types.json') # specifies file locations
if os.path.isfile(file_reference):
with open(file_reference, 'r') as fi:
the_files = json.load(fi)
for the_type in the_files:
valid_entries = []
for entry in the_files[the_type]:
the_file = parse_file_entry(entry)
if the_file is not None:
valid_entries.append(the_file)
complex_file_types[the_type] = valid_entries
sicd_files = complex_file_types.get('SICD', [])
the_version = get_default_version_string()
the_schema = get_schema_path(the_version)
class TestSICDWriting(unittest.TestCase):
@unittest.skipIf(len(sicd_files) == 0, 'No sicd files found')
def test_sicd_creation(self):
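"""For each SICD file: validate the serialized XML against the schema, round-trip through conversion_utility, and exercise single-row writing."""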
for fil in sicd_files:
reader = SICDReader(fil)
# check that sicd structure serializes according to the schema
if etree is not None:
sicd = reader.get_sicds_as_tuple()[0]
xml_doc = etree.fromstring(sicd.to_xml_bytes())
xml_schema = etree.XMLSchema(file=the_schema)
with self.subTest(msg='validate xml produced from sicd structure'):
self.assertTrue(xml_schema.validate(xml_doc),
msg='SICD structure serialized from file {} is '
'not valid versus schema {}'.format(fil, the_schema))
with self.subTest(msg='Test conversion (recreation) of the sicd file {}'.format(fil)):
with tempfile.TemporaryDirectory() as tmpdirname:
conversion_utility(reader, tmpdirname)
new_filename = os.path.join(tmpdirname, os.listdir(tmpdirname)[0])
reader2 = SICDReader(new_filename)
self.assertEqual(os.stat(new_filename).st_size, reader2.nitf_details.nitf_header.FL)
with self.subTest(msg='Test writing a single row of the sicd file {}'.format(fil)):
with tempfile.TemporaryDirectory() as tmpdirname:
conversion_utility(reader, tmpdirname, row_limits=(0, 1))

# File: sarpy-master/tests/io/complex/test_reader.py
import logging
import os
import json
import unittest
from sarpy.io.complex.converter import open_complex
from sarpy.io.complex.sicd import SICDReader
from sarpy.io.complex.radarsat import RadarSatReader
from sarpy.io.complex.sentinel import SentinelReader
from sarpy.io.complex.tsx import TSXReader
from sarpy.io.complex.csk import CSKReader
from sarpy.io.complex.iceye import ICEYEReader
from sarpy.io.complex.palsar2 import PALSARReader
from sarpy.io.complex.capella import CapellaReader
from tests import parse_file_entry
complex_file_types = {}
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'complex_file_types.json') # specifies file locations
if os.path.isfile(file_reference):
with open(file_reference, 'r') as fi:
the_files = json.load(fi)
for the_type in the_files:
valid_entries = []
for entry in the_files[the_type]:
the_file = parse_file_entry(entry)
if the_file is not None:
valid_entries.append(the_file)
complex_file_types[the_type] = valid_entries
def generic_reader_test(instance, test_file, reader_type_string, reader_type):
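"""
Open a complex-format file, confirm the expected reader type, compare data
sizes against the SICD metadata, and spot-check corner, full-row, and
full-column reads for each image index.
"""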
assert isinstance(instance, unittest.TestCase)
reader = None
with instance.subTest(msg='establish reader for type {} and file {}'.format(reader_type_string, test_file)):
reader = open_complex(test_file)
instance.assertTrue(reader is not None, msg='Returned None, so opening failed.')
if reader is None:
return # remaining tests make no sense
with instance.subTest(msg='Reader for type {} should be appropriate reader'.format(reader_type_string)):
instance.assertTrue(isinstance(reader, reader_type), msg='Returned reader should be of type {}'.format(reader_type))
if not isinstance(reader, reader_type):
return # remaining tests might be misleading
with instance.subTest(msg='Verify reader_type for type {} and file {}'.format(reader_type_string, test_file)):
instance.assertEqual(reader.reader_type, "SICD", msg='reader.reader_type should be "SICD""')
with instance.subTest(msg='Fetch data_sizes and sicds for type {} and file {}'.format(reader_type_string, test_file)):
data_sizes = reader.get_data_size_as_tuple()
# noinspection PyUnresolvedReferences
sicds = reader.get_sicds_as_tuple()
for i, (data_size, sicd) in enumerate(zip(data_sizes, sicds)):
with instance.subTest(msg='Verify image size for sicd index {} in reader '
'of type {} for file {}'.format(i, reader_type_string, test_file)):
instance.assertEqual(data_size[0], sicd.ImageData.NumRows, msg='data_size[0] and NumRows do not agree')
instance.assertEqual(data_size[1], sicd.ImageData.NumCols, msg='data_size[1] and NumCols do not agree')
with instance.subTest(msg='Basic fetch test for sicd index {} in reader '
'of type {} for file {}'.format(i, reader_type_string, test_file)):
instance.assertEqual(reader[:2, :2, i].shape, (2, 2), msg='upper left fetch')
instance.assertEqual(reader[-2:, :2, i].shape, (2, 2), msg='lower left fetch')
instance.assertEqual(reader[-2:, -2:, i].shape, (2, 2), msg='lower right fetch')
instance.assertEqual(reader[:2, -2:, i].shape, (2, 2), msg='upper right fetch')
with instance.subTest(msg='Verify fetching complete row(s) have correct size '
'for sicd index {} in reader of type {} and file {}'.format(i, reader_type_string, test_file)):
test_data = reader[:, :2, i]
instance.assertEqual(test_data.shape, (data_size[0], 2), msg='Complete row fetch size mismatch')
with instance.subTest(msg='Verify fetching complete column(s) have correct size '
'for sicd index {} in reader of type {} file {}'.format(i, reader_type_string, test_file)):
test_data = reader[:2, :, i]
instance.assertEqual(test_data.shape, (2, data_size[1]), msg='Complete column fetch size mismatch')
with instance.subTest(msg='Validity of sicd at index {} in reader of '
'type {} for file {}'.format(i, reader_type_string, test_file)):
if not sicd.is_valid(recursive=True, stack=False):
logging.warning('sicd at index {} in reader of type {} for file {} not valid'.format(i, reader_type_string, test_file))
del reader
class TestSICD(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('SICD', [])) == 0, 'No SICD files specified or found')
def test_sicd_reader(self):
for test_file in complex_file_types['SICD']:
generic_reader_test(self, test_file, 'SICD', SICDReader)
class TestRCM(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('RCM', [])) == 0, 'No RCM files specified or found')
def test_rcm_reader(self):
for test_file in complex_file_types['RCM']:
generic_reader_test(self, test_file, 'RCM', RadarSatReader)
class TestRCM_NITF(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('RCM_NITF', [])) == 0, 'No RCM_NITF files specified or found')
def test_rcm_nitf_reader(self):
for test_file in complex_file_types['RCM_NITF']:
generic_reader_test(self, test_file, 'RCM_NITF', RadarSatReader)
class TestRS2(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('RadarSat-2', [])) == 0, 'No RadarSat-2 files specified or found')
def test_rs2_reader(self):
for test_file in complex_file_types['RadarSat-2']:
generic_reader_test(self, test_file, 'RadarSat-2', RadarSatReader)
class TestSentinel(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('Sentinel-1', [])) == 0, 'No Sentinel-1 files specified or found')
def test_sentinel_reader(self):
for test_file in complex_file_types['Sentinel-1']:
generic_reader_test(self, test_file, 'Sentinel-1', SentinelReader)
class TestTerraSAR(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('TerraSAR-X', [])) == 0, 'No TerraSAR-X files specified or found')
def test_terrasarx_reader(self):
for test_file in complex_file_types['TerraSAR-X']:
generic_reader_test(self, test_file, 'TerraSAR-X', TSXReader)
class TestCosmoSkymed(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('CosmoSkymed', [])) == 0, 'No CosmoSkymed files specified or found')
def test_csk_reader(self):
for test_file in complex_file_types['CosmoSkymed']:
generic_reader_test(self, test_file, 'CosmoSkymed', CSKReader)
class TestKompSat5(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('KompSat-5', [])) == 0, 'No KompSat-5 files specified or found')
def test_kompsat_reader(self):
for test_file in complex_file_types['KompSat-5']:
generic_reader_test(self, test_file, 'KompSat-5', CSKReader)
class TestICEYE(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('ICEYE', [])) == 0, 'No ICEYE files specified or found')
def test_iceye_reader(self):
for test_file in complex_file_types['ICEYE']:
generic_reader_test(self, test_file, 'ICEYE', ICEYEReader)
class TestPALSAR(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('PALSAR', [])) == 0, 'No PALSAR files specified or found')
def test_palsar_reader(self):
for test_file in complex_file_types['PALSAR']:
generic_reader_test(self, test_file, 'PALSAR', PALSARReader)
class TestCapella(unittest.TestCase):
@unittest.skipIf(len(complex_file_types.get('Capella', [])) == 0, 'No Capella files specified or found')
def test_capella_reader(self):
for test_file in complex_file_types['Capella']:
generic_reader_test(self, test_file, 'Capella', CapellaReader)

# File: sarpy-master/tests/io/complex/__init__.py
__classification__ = 'UNCLASSIFIED'

# File: sarpy-master/tests/io/complex/test_remote.py
import unittest
from sarpy.io.complex.converter import open_complex
from sarpy.io.complex.sicd import SICDReader
try:
import smart_open
except ImportError:
smart_open = None
file_object = None
if smart_open is not None:
try:
file_object = smart_open.open(
'https://six-library.s3.amazonaws.com/sicd_example_RMA_RGZERO_RE32F_IM32F_cropped_multiple_image_segments.nitf',
mode='rb', # must be opened in binary mode
buffering=4*1024*1024) # it has been observed that setting a manual buffer size may help
except Exception:
pass
class TestRemoteSICD(unittest.TestCase):
@unittest.skipIf(file_object is None, 'No remote file reader defined')
def test_remote_reader(self):
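"""Open a SICD served over HTTPS via smart_open and spot-check metadata and reads."""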
reader = None
with self.subTest(msg='establish remote sicd reader'):
reader = open_complex(file_object)
file_object.close()
self.assertTrue(reader is not None, msg='Returned None, so opening failed.')
if reader is None:
return # remaining tests make no sense
with self.subTest(msg='Reader type should be SICD reader'):
self.assertTrue(
isinstance(reader, SICDReader),
msg='Returned reader should be SICDReader')
if not isinstance(reader, SICDReader):
return # remaining tests might be misleading
with self.subTest(
msg='Fetch data_sizes and sicds'):
data_sizes = reader.get_data_size_as_tuple()
# noinspection PyUnresolvedReferences
sicds = reader.get_sicds_as_tuple()
for i, (data_size, sicd) in enumerate(zip(data_sizes, sicds)):
with self.subTest(
msg='Verify image size for sicd index {} in reader'.format(i)):
self.assertEqual(data_size[0], sicd.ImageData.NumRows, msg='data_size[0] and NumRows do not agree')
self.assertEqual(data_size[1], sicd.ImageData.NumCols, msg='data_size[1] and NumCols do not agree')
with self.subTest(msg='Basic fetch test for sicd index {} in reader'.format(i)):
self.assertEqual(reader[:2, :2, i].shape, (2, 2), msg='upper left fetch')
self.assertEqual(reader[-2:, :2, i].shape, (2, 2), msg='lower left fetch')
self.assertEqual(reader[-2:, -2:, i].shape, (2, 2), msg='lower right fetch')
self.assertEqual(reader[:2, -2:, i].shape, (2, 2), msg='upper right fetch')
with self.subTest(
msg='Verify fetching complete row(s) have correct size for sicd index {}'.format(i)):
test_data = reader[:, :2, i]
self.assertEqual(test_data.shape, (data_size[0], 2), msg='Complete row fetch size mismatch')
with self.subTest(
msg='Verify fetching complete column(s) have correct size for sicd index {}'.format(i)):
test_data = reader[:2, :, i]
self.assertEqual(test_data.shape, (2, data_size[1]), msg='Complete column fetch size mismatch')
reader.close()

# File: sarpy-master/tests/io/complex/test_utils.py
import numpy
from numpy.polynomial import polynomial
from sarpy.io.complex.utils import two_dim_poly_fit
import unittest
class TestRadarSatUtils(unittest.TestCase):
def test_two_dim_poly_fit(self):
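"""Fitting samples of a known 2-D polynomial should recover its coefficients."""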
coeffs = numpy.arange(9).reshape((3, 3))
y, x = numpy.meshgrid(numpy.arange(2, 6), numpy.arange(-2, 2))
z = polynomial.polyval2d(x, y, coeffs)
t_coeffs, residuals, rank, sing_vals = two_dim_poly_fit(x, y, z, x_order=2, y_order=2)
diff = (numpy.abs(coeffs - t_coeffs) < 1e-10)
self.assertTrue(numpy.all(diff))

# File: sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_rgazcomp.py
#
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
from sarpy.io.complex.sicd_elements import RgAzComp
def test_rgazcomp(rma_sicd, kwargs):
rg_az = RgAzComp.RgAzCompType(None, None, **kwargs)
rg_az._derive_parameters(rma_sicd.Grid, rma_sicd.Timeline, rma_sicd.SCPCOA)
assert rg_az._xml_ns == kwargs['_xml_ns']
assert rg_az._xml_ns_key == kwargs['_xml_ns_key']
assert rg_az.AzSF is not None
assert rg_az.KazPoly is not None

# File: sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_antenna.py
#
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import Antenna
from sarpy.io.complex.sicd_elements import blocks
@pytest.fixture
def tx_ant_param(sicd, kwargs):
return Antenna.AntParamType(XAxisPoly=sicd.Antenna.Tx.XAxisPoly,
YAxisPoly=sicd.Antenna.Tx.YAxisPoly,
FreqZero=sicd.Antenna.Tx.FreqZero,
EB=sicd.Antenna.Tx.EB,
Array=sicd.Antenna.Tx.Array,
Elem=sicd.Antenna.Tx.Elem,
GainBSPoly=sicd.Antenna.Tx.GainBSPoly,
EBFreqShift=True,
MLFreqDilation=sicd.Antenna.Tx.MLFreqDilation,
**kwargs)
@pytest.fixture
def rcv_ant_param(sicd, kwargs):
return Antenna.AntParamType(XAxisPoly=sicd.Antenna.Rcv.XAxisPoly,
YAxisPoly=sicd.Antenna.Rcv.YAxisPoly,
FreqZero=sicd.Antenna.Rcv.FreqZero,
EB=sicd.Antenna.Rcv.EB,
Array=sicd.Antenna.Rcv.Array,
Elem=sicd.Antenna.Rcv.Elem,
GainBSPoly=sicd.Antenna.Rcv.GainBSPoly,
EBFreqShift=True,
MLFreqDilation=sicd.Antenna.Rcv.MLFreqDilation,
**kwargs)
@pytest.fixture
def twoway_ant_param(sicd, kwargs):
return Antenna.AntParamType(XAxisPoly=sicd.Antenna.TwoWay.XAxisPoly,
YAxisPoly=sicd.Antenna.TwoWay.YAxisPoly,
FreqZero=sicd.Antenna.TwoWay.FreqZero,
EB=sicd.Antenna.TwoWay.EB,
Array=sicd.Antenna.TwoWay.Array,
Elem=sicd.Antenna.TwoWay.Elem,
GainBSPoly=sicd.Antenna.TwoWay.GainBSPoly,
EBFreqShift=True,
MLFreqDilation=sicd.Antenna.TwoWay.MLFreqDilation,
**kwargs)
def test_antenna_ebtype(kwargs):
x_poly = blocks.Poly1DType(Coefs=[10.5, 5.1, 1.2, 0.2])
y_poly = blocks.Poly1DType(Coefs=[5.1, 1.2, 0.2])
antenna_eb = Antenna.EBType(DCXPoly=x_poly, DCYPoly=y_poly)
assert antenna_eb.DCXPoly == x_poly
assert antenna_eb.DCYPoly == y_poly
assert not hasattr(antenna_eb, "_xml_ns")
assert not hasattr(antenna_eb, "_xml_ns_key")
# Init with kwargs
antenna_eb = Antenna.EBType(DCXPoly=x_poly, DCYPoly=y_poly, **kwargs)
assert antenna_eb._xml_ns == kwargs["_xml_ns"]
assert antenna_eb._xml_ns_key == kwargs["_xml_ns_key"]
assert np.all(antenna_eb(0) == [x_poly.Coefs[0], y_poly.Coefs[0]])
antenna_eb = Antenna.EBType(DCXPoly=None, DCYPoly=y_poly)
assert antenna_eb(0) is None
def test_antenna_antparamtype(tx_ant_param, sicd, kwargs):
assert tx_ant_param._xml_ns == kwargs["_xml_ns"]
assert tx_ant_param._xml_ns_key == kwargs["_xml_ns_key"]
shift = 100000
tx_ant_param._apply_reference_frequency(shift)
assert tx_ant_param.FreqZero == sicd.Antenna.Tx.FreqZero + shift
def test_antenna_anttype(tx_ant_param, rcv_ant_param, twoway_ant_param, sicd, kwargs):
antenna = Antenna.AntennaType(Tx=tx_ant_param, Rcv=rcv_ant_param, TwoWay=twoway_ant_param, **kwargs)
assert antenna._xml_ns == kwargs["_xml_ns"]
assert antenna._xml_ns_key == kwargs["_xml_ns_key"]
shift = 100000
antenna._apply_reference_frequency(shift)
assert antenna.Tx.FreqZero == sicd.Antenna.Tx.FreqZero + shift
assert antenna.Rcv.FreqZero == sicd.Antenna.Rcv.FreqZero + shift
assert antenna.TwoWay.FreqZero == sicd.Antenna.TwoWay.FreqZero + shift

# File: sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_blocks.py
#
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
from collections import OrderedDict
import copy
import re
from typing import Tuple
from xml.etree.ElementTree import Element, ElementTree, SubElement
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import blocks
from sarpy.io.xml.base import parse_xml_from_string
LATLONHAE = [33.483888, -112.073706, 100.0]
ROWCOL = [1, 2]
@pytest.fixture
def poly1d_doc(sicd):
root = Element("Poly1DType")
root.attrib["order1"] = str(sicd.Position.ARPPoly.X.order1)
coefs_node = SubElement(root, "Coefs")
coef_values = sicd.Position.ARPPoly.X.Coefs
for i, coef_value in enumerate(coef_values):
coef_node = SubElement(coefs_node, "Coef")
coef_node.attrib["exponent1"] = str(i)
coef_node.text = str(coef_value)
doc = ElementTree(root)
return doc
@pytest.fixture
def poly2d_doc(sicd):
root = Element("Poly2DType")
root.attrib["order1"] = str(sicd.Radiometric.RCSSFPoly.order1)
root.attrib["order2"] = str(sicd.Radiometric.RCSSFPoly.order2)
coefs_node = SubElement(root, "Coefs")
coef_values = sicd.Radiometric.RCSSFPoly.Coefs
for i in np.arange(len(coef_values)):
for j, coef_value in enumerate(coef_values[i]):
coef_node = SubElement(coefs_node, "Coef")
coef_node.attrib["exponent1"] = str(i)
coef_node.attrib["exponent2"] = str(j)
coef_node.text = str(coef_value)
doc = ElementTree(root)
return doc
def test_blocks_xyztype(kwargs):
# Smoke test
xyz_type = blocks.XYZType(X=1.0, Y=2.0, Z=3.0)
assert xyz_type.X == 1.0
assert xyz_type.Y == 2.0
assert xyz_type.Z == 3.0
assert not hasattr(xyz_type, "_xml_ns")
assert not hasattr(xyz_type, "_xml_ns_key")
# Init with kwargs
xyz_type = blocks.XYZType(X=1.0, Y=2.0, Z=3.0, **kwargs)
assert xyz_type._xml_ns == kwargs["_xml_ns"]
assert xyz_type._xml_ns_key == kwargs["_xml_ns_key"]
# get_array
expected_array = np.array([1.0, 2.0, 3.0], dtype=np.float64)
assert np.array_equal(xyz_type.get_array(), expected_array)
# from_array
xyz_type = blocks.XYZType.from_array(None)
assert xyz_type is None
array = [1.0, 2.0, 3.0]
xyz_type = blocks.XYZType.from_array(array)
assert xyz_type.X == 1.0
assert xyz_type.Y == 2.0
assert xyz_type.Z == 3.0
# from_array errors
array = [1.0, 2.0]
with pytest.raises(
ValueError,
match=re.escape(f"Expected array to be of length 3, and received `{array}`"),
):
blocks.XYZType.from_array(array)
array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
blocks.XYZType.from_array(array)
@pytest.mark.parametrize(
"array, class_to_test",
[
(LATLONHAE[0:2], blocks.LatLonType),
(LATLONHAE[0:2], blocks.LatLonRestrictionType),
],
)
def test_blocks_latlon_classes(array, class_to_test, kwargs, tol):
# Smoke test
class_instance = class_to_test(Lat=array[0], Lon=array[1])
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
assert not hasattr(class_instance, "_xml_ns")
assert not hasattr(class_instance, "_xml_ns_key")
# Init with kwargs
class_instance = class_to_test(Lat=array[0], Lon=array[1], **kwargs)
assert class_instance._xml_ns == kwargs["_xml_ns"]
assert class_instance._xml_ns_key == kwargs["_xml_ns_key"]
# from_array
class_instance = class_to_test.from_array(None)
assert class_instance is None
class_instance = class_to_test.from_array(array)
assert isinstance(class_instance, class_to_test)
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
# from_array errors
bad_array = array[0:1]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 2, and received `{bad_array}`"
),
):
class_to_test.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
class_to_test.from_array(bad_array)
def test_blocks_latlontype_getarray(tol):
latlon_type = blocks.LatLonType(Lat=LATLONHAE[0], Lon=LATLONHAE[1])
result = latlon_type.get_array(dtype=np.float64, order="LAT")
assert isinstance(result, np.ndarray)
assert result.dtype == np.float64
assert result.tolist() == pytest.approx(LATLONHAE[0:2], abs=tol)
result = latlon_type.get_array(dtype=np.float64, order="LON")
assert isinstance(result, np.ndarray)
assert result.dtype == np.float64
assert result.tolist() == pytest.approx([LATLONHAE[1], LATLONHAE[0]], abs=tol)
def test_blocks_latlontype_dmsformat():
latlon_type = blocks.LatLonType(Lat=LATLONHAE[0], Lon=LATLONHAE[1])
result = latlon_type.dms_format(frac_secs=True)
assert isinstance(result, Tuple)
assert isinstance(result[0], Tuple)
assert isinstance(result[1], Tuple)
assert len(result[0]) == 4
assert len(result[1]) == 4
result = latlon_type.dms_format(frac_secs=False)
assert isinstance(result, Tuple)
assert isinstance(result[0], Tuple)
assert isinstance(result[1], Tuple)
assert len(result[0]) == 4
assert len(result[1]) == 4
def test_lltype_restriction(tol):
# Lat/Lon outside restricted range
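# The asserts below imply out-of-range values are wrapped back into range
# (Lat with period 180, Lon with period 360): 100 -> -80 and 190 -> -170.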
latlon_restricted_type = blocks.LatLonRestrictionType(Lat=100, Lon=190)
assert latlon_restricted_type.Lat == pytest.approx(-80.0, abs=tol)
assert latlon_restricted_type.Lon == pytest.approx(-170.0, abs=tol)
@pytest.mark.parametrize(
"array, index, class_to_test",
[
(LATLONHAE[0:2], 1, blocks.LatLonArrayElementType),
(LATLONHAE[0:2], 1, blocks.LatLonCornerType),
(LATLONHAE[0:2], "3:LRLC", blocks.LatLonCornerStringType),
],
)
def test_blocks_latlon_classes_with_index(array, index, class_to_test, kwargs, tol):
# Smoke test
class_instance = class_to_test(Lat=array[0], Lon=array[1], index=index)
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
assert class_instance.index == index
assert not hasattr(class_instance, "_xml_ns")
assert not hasattr(class_instance, "_xml_ns_key")
# Init with kwargs
class_instance = class_to_test(Lat=array[0], Lon=array[1], index=index, **kwargs)
assert class_instance._xml_ns == kwargs["_xml_ns"]
assert class_instance._xml_ns_key == kwargs["_xml_ns_key"]
# from_array
class_instance = class_to_test.from_array(None)
assert class_instance is None
class_instance = class_to_test.from_array(array)
assert isinstance(class_instance, class_to_test)
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
# from_array errors
bad_array = array[0:1]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 2, and received `{bad_array}`"
),
):
class_to_test.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
class_to_test.from_array(bad_array)
@pytest.mark.parametrize(
"array, class_to_test",
[
(LATLONHAE, blocks.LatLonHAEType),
(LATLONHAE, blocks.LatLonHAERestrictionType),
],
)
def test_blocks_latlonhae_classes(array, class_to_test, kwargs, tol):
# Smoke test
class_instance = class_to_test(Lat=array[0], Lon=array[1], HAE=array[2])
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
assert class_instance.HAE == pytest.approx(array[2], abs=tol)
assert not hasattr(class_instance, "_xml_ns")
assert not hasattr(class_instance, "_xml_ns_key")
# Init with kwargs
class_instance = class_to_test(Lat=array[0], Lon=array[1], HAE=array[2], **kwargs)
assert class_instance._xml_ns == kwargs["_xml_ns"]
assert class_instance._xml_ns_key == kwargs["_xml_ns_key"]
# get_array
result = class_instance.get_array(dtype=np.float64, order="LAT")
assert isinstance(result, np.ndarray)
assert result.dtype == np.float64
assert result.tolist() == pytest.approx(array, abs=tol)
result = class_instance.get_array(dtype=np.float64, order="LON")
assert isinstance(result, np.ndarray)
assert result.dtype == np.float64
assert result.tolist() == pytest.approx([array[1], array[0], array[2]], abs=tol)
# from_array
class_instance = class_to_test.from_array(None)
assert class_instance is None
class_instance = class_to_test.from_array(array)
assert isinstance(class_instance, class_to_test)
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
assert class_instance.HAE == pytest.approx(array[2], abs=tol)
# from_array errors
bad_array = array[0:2]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 3, and received `{bad_array}`"
),
):
class_to_test.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
class_to_test.from_array(bad_array)
@pytest.mark.parametrize(
"array, index, class_to_test",
[
(LATLONHAE, 1, blocks.LatLonHAECornerRestrictionType),
(LATLONHAE, "3:LRLC", blocks.LatLonHAECornerStringType),
],
)
def test_blocks_latlonhae_classes_with_index(array, index, class_to_test, kwargs, tol):
# Smoke test
class_instance = class_to_test(
Lat=array[0], Lon=array[1], HAE=array[2], index=index
)
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
assert class_instance.HAE == pytest.approx(array[2], abs=tol)
assert class_instance.index == index
assert not hasattr(class_instance, "_xml_ns")
assert not hasattr(class_instance, "_xml_ns_key")
# Init with kwargs
class_instance = class_to_test(
Lat=array[0], Lon=array[1], HAE=array[2], index=index, **kwargs
)
assert class_instance._xml_ns == kwargs["_xml_ns"]
assert class_instance._xml_ns_key == kwargs["_xml_ns_key"]
# from_array
class_instance = class_to_test.from_array(None)
assert class_instance is None
class_instance = class_to_test.from_array(array)
assert isinstance(class_instance, class_to_test)
assert class_instance.Lat == pytest.approx(array[0], abs=tol)
assert class_instance.Lon == pytest.approx(array[1], abs=tol)
assert class_instance.HAE == pytest.approx(array[2], abs=tol)
# from_array errors
bad_array = array[0:2]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 3, and received `{bad_array}`"
),
):
class_to_test.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
class_to_test.from_array(bad_array)
def test_blocks_rowcoltype(kwargs, tol):
# Smoke test
row_col = blocks.RowColType(Row=ROWCOL[0], Col=ROWCOL[1])
assert row_col.Row == pytest.approx(ROWCOL[0], abs=tol)
assert row_col.Col == pytest.approx(ROWCOL[1], abs=tol)
assert not hasattr(row_col, "_xml_ns")
assert not hasattr(row_col, "_xml_ns_key")
# Init with kwargs
row_col = blocks.RowColType(Row=ROWCOL[0], Col=ROWCOL[1], **kwargs)
assert row_col._xml_ns == kwargs["_xml_ns"]
assert row_col._xml_ns_key == kwargs["_xml_ns_key"]
# get_array
result = row_col.get_array(dtype=np.float64)
assert isinstance(result, np.ndarray)
assert result.dtype == np.float64
assert result.tolist() == pytest.approx(ROWCOL, abs=tol)
# from_array
row_col = blocks.RowColType.from_array(None)
assert row_col is None
row_col = blocks.RowColType.from_array(ROWCOL)
assert isinstance(row_col, blocks.RowColType)
assert row_col.Row == pytest.approx(ROWCOL[0], abs=tol)
assert row_col.Col == pytest.approx(ROWCOL[1], abs=tol)
# from_array errors
bad_array = ROWCOL[0:1]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 2, and received `{bad_array}`"
),
):
blocks.RowColType.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
blocks.RowColType.from_array(bad_array)
def test_blocks_rowcolarrayelement(kwargs, tol):
# Smoke test
row_col_arr = blocks.RowColArrayElement(Row=ROWCOL[0], Col=ROWCOL[1], index=1)
assert row_col_arr.Row == pytest.approx(ROWCOL[0], abs=tol)
assert row_col_arr.Col == pytest.approx(ROWCOL[1], abs=tol)
assert row_col_arr.index == 1
assert not hasattr(row_col_arr, "_xml_ns")
assert not hasattr(row_col_arr, "_xml_ns_key")
# Init with kwargs
row_col_arr = blocks.RowColArrayElement(
Row=ROWCOL[0], Col=ROWCOL[1], index=1, **kwargs
)
assert row_col_arr._xml_ns == kwargs["_xml_ns"]
assert row_col_arr._xml_ns_key == kwargs["_xml_ns_key"]
# from_array
row_col_arr = blocks.RowColArrayElement.from_array(None)
assert row_col_arr is None
row_col_arr = blocks.RowColArrayElement.from_array(ROWCOL)
assert isinstance(row_col_arr, blocks.RowColArrayElement)
assert row_col_arr.Row == pytest.approx(ROWCOL[0], abs=tol)
assert row_col_arr.Col == pytest.approx(ROWCOL[1], abs=tol)
# from_array errors
bad_array = ROWCOL[0:1]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 2, and received `{bad_array}`"
),
):
blocks.RowColArrayElement.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
blocks.RowColArrayElement.from_array(bad_array)
def test_blocks_poly1dtype(sicd, poly1d_doc, kwargs):
# Smoke test
poly = blocks.Poly1DType(Coefs=sicd.Position.ARPPoly.X.Coefs)
assert poly.order1 == 5
assert not hasattr(poly, "_xml_ns")
assert not hasattr(poly, "_xml_ns_key")
# Init with kwargs
poly = blocks.Poly1DType(Coefs=sicd.Position.ARPPoly.X.Coefs, **kwargs)
assert poly._xml_ns == kwargs["_xml_ns"]
assert poly._xml_ns_key == kwargs["_xml_ns_key"]
assert np.all(poly.Coefs == sicd.Position.ARPPoly.X.Coefs)
# Setter
poly.Coefs = np.asarray(sicd.Position.ARPPoly.Y.Coefs, dtype=np.float32)  # float32 input should be promoted to float64
assert poly.Coefs.dtype.name == "float64"
poly.Coefs = sicd.Position.ARPPoly.Y.Coefs
poly.Coefs = sicd.Position.ARPPoly.Y.Coefs.tolist()
# Setter errors
with pytest.raises(
ValueError,
match="The coefficient array for a Poly1DType instance must be defined",
):
poly.Coefs = None
with pytest.raises(
ValueError, match="Coefs for class Poly1D must be a list or numpy.ndarray"
):
poly.Coefs = {"Coefs": 1}
with pytest.raises(
ValueError, match="Coefs for class Poly1D must be one-dimensional"
):
poly.Coefs = [sicd.Position.ARPPoly.X.Coefs, sicd.Position.ARPPoly.Y.Coefs]
assert poly(0) == poly.Coefs[0]
assert poly[0] == poly.Coefs[0]
poly1 = copy.copy(poly)
poly1[5] = 2e-11
assert poly1.Coefs[5] == 2e-11
# Poly derivative
coefs = poly.derivative(der_order=1)
assert isinstance(coefs, np.ndarray)
assert coefs[0] == poly.Coefs[1]
poly1 = poly.derivative(der_order=1, return_poly=True)
assert isinstance(poly1, blocks.Poly1DType)
assert poly1.Coefs[0] == poly.Coefs[1]
# Poly derivative eval
assert poly.derivative_eval(0) == poly.Coefs[1]
shifted_coefs = poly.shift(1.1, 2.0)
assert isinstance(shifted_coefs, np.ndarray)
shifted_poly = poly.shift(1.1, 2.0, return_poly=True)
assert isinstance(shifted_poly, blocks.Poly1DType)
# from_array
poly = blocks.Poly1DType.from_array(None)
assert poly is None
poly = blocks.Poly1DType.from_array(sicd.Position.ARPPoly.X.Coefs)
assert isinstance(poly, blocks.Poly1DType)
assert np.all(poly.Coefs == sicd.Position.ARPPoly.X.Coefs)
# get_array
coefs = poly.get_array()
assert np.all(coefs == sicd.Position.ARPPoly.X.Coefs)
# from_node and to_node
node_str = """
<Poly order1="5">
<Coef exponent1="0">7228127.9124448663</Coef>
<Coef exponent1="1">352.53242998756502</Coef>
<Coef exponent1="2">-3.5891719134975157</Coef>
<Coef exponent1="3">-5.7694198643316104e-05</Coef>
<Coef exponent1="4">2.7699968593303768e-07</Coef>
<Coef exponent1="5">2.1592636134572539e-09</Coef>
</Poly>
"""
poly = blocks.Poly1DType.from_array(sicd.Position.ARPPoly.X.Coefs)
node, ns = parse_xml_from_string(node_str)
poly1 = poly.from_node(node, ns)
assert isinstance(poly1, blocks.Poly1DType)
assert np.all(poly.Coefs == poly1.Coefs)
this_node = poly.to_node(doc=poly1d_doc, tag="Poly1DType")
assert this_node.tag == "Poly1DType"
assert this_node.attrib["order1"] == str(len(poly.Coefs) - 1)
assert len(this_node.findall("Coef")) == len(poly.Coefs)
for i, node in enumerate(this_node.findall("Coef")):
assert int(node.attrib["exponent1"]) == i
assert float(node.text) == poly.Coefs[i]
coef_dict = poly.to_dict()
assert isinstance(coef_dict, OrderedDict)
# minimize_order
poly.minimize_order()
assert len(poly.Coefs) == 6
poly[5] = 0.0
poly.minimize_order()
assert len(poly.Coefs) == 5
poly[3] = 0.0
poly.minimize_order()
assert len(poly.Coefs) == 5
poly[1:6] = 0.0
poly.minimize_order()
assert len(poly.Coefs) == 1
poly[0] = 0.0
poly.minimize_order()
assert len(poly.Coefs) == 1
def test_blocks_poly2dtype(sicd, poly2d_doc, kwargs):
# Smoke test
poly = blocks.Poly2DType(Coefs=sicd.Radiometric.RCSSFPoly.Coefs)
assert poly.order1 == 5
assert poly.order2 == 6
assert not hasattr(poly, "_xml_ns")
assert not hasattr(poly, "_xml_ns_key")
# Init with kwargs
poly = blocks.Poly2DType(Coefs=sicd.Radiometric.RCSSFPoly.Coefs, **kwargs)
assert poly._xml_ns == kwargs["_xml_ns"]
assert poly._xml_ns_key == kwargs["_xml_ns_key"]
assert np.all(poly.Coefs == sicd.Radiometric.RCSSFPoly.Coefs)
# Setter
poly.Coefs = sicd.Radiometric.RCSSFPoly.Coefs.tolist()
poly.Coefs = np.asarray(sicd.Radiometric.RCSSFPoly.Coefs, dtype=np.float32)  # float32 input should be promoted to float64
assert poly.Coefs.dtype.name == "float64"
poly.Coefs = sicd.Radiometric.RCSSFPoly.Coefs
assert poly(0, 0) == poly.Coefs[0][0]
assert poly[0, 0] == poly.Coefs[0][0]
# Setter errors
with pytest.raises(
ValueError,
match="The coefficient array for a Poly2DType instance must be defined",
):
poly.Coefs = None
with pytest.raises(
ValueError, match="Coefs for class Poly2D must be a list or numpy.ndarray"
):
poly.Coefs = {"Coefs": 1}
with pytest.raises(
ValueError, match="Coefs for class Poly2D must be two-dimensional"
):
poly.Coefs = sicd.Radiometric.RCSSFPoly.Coefs[0]
poly1 = copy.copy(poly)
poly1[0, 5] = 2e-11
assert poly1.Coefs[0][5] == 2e-11
# shift smoke test
shifted_coefs = poly.shift(1.1, 2.0, 2.1, 3.0)
assert isinstance(shifted_coefs, np.ndarray)
shifted_poly = poly.shift(1.1, 2.0, 2.1, 3.0, return_poly=True)
assert isinstance(shifted_poly, blocks.Poly2DType)
# from_array
poly = blocks.Poly2DType.from_array(None)
assert poly is None
poly = blocks.Poly2DType.from_array(sicd.Radiometric.RCSSFPoly.Coefs)
assert isinstance(poly, blocks.Poly2DType)
assert np.all(poly.Coefs == sicd.Radiometric.RCSSFPoly.Coefs)
# get_array
coefs = poly.get_array()
assert np.all(coefs == sicd.Radiometric.RCSSFPoly.Coefs)
# from_node and to_node
node_str = """
<Poly order1="5" order2="6">
<Coef exponent1="0" exponent2="0">234.567891</Coef>
<Coef exponent1="0" exponent2="1">0.0123456789</Coef>
<Coef exponent1="0" exponent2="2">3.45678912e-05</Coef>
<Coef exponent1="0" exponent2="3">1.23456789e-09</Coef>
<Coef exponent1="0" exponent2="4">2.34567891e-12</Coef>
<Coef exponent1="0" exponent2="5">1.23456789e-16</Coef>
<Coef exponent1="0" exponent2="6">1.23456789e-19</Coef>
<Coef exponent1="1" exponent2="0">-0.023456789</Coef>
<Coef exponent1="1" exponent2="1">-5.67891234e-06</Coef>
<Coef exponent1="1" exponent2="2">-4.56789123e-09</Coef>
<Coef exponent1="1" exponent2="3">-8.91234567e-13</Coef>
<Coef exponent1="1" exponent2="4">-4.56789123e-16</Coef>
<Coef exponent1="1" exponent2="5">-9.12345678e-20</Coef>
<Coef exponent1="1" exponent2="6">-3.45678912e-23</Coef>
<Coef exponent1="2" exponent2="0">5.67891234e-05</Coef>
<Coef exponent1="2" exponent2="1">3.45678912e-09</Coef>
<Coef exponent1="2" exponent2="2">7.89123456e-12</Coef>
<Coef exponent1="2" exponent2="3">4.56789123e-16</Coef>
<Coef exponent1="2" exponent2="4">5.67891234e-19</Coef>
<Coef exponent1="2" exponent2="5">6.78912345e-23</Coef>
<Coef exponent1="2" exponent2="6">5.67891234e-26</Coef>
<Coef exponent1="3" exponent2="0">-6.78912345e-09</Coef>
<Coef exponent1="3" exponent2="1">-1.23456789e-12</Coef>
<Coef exponent1="3" exponent2="2">-1.23456789e-15</Coef>
<Coef exponent1="3" exponent2="3">-7.89123456e-20</Coef>
<Coef exponent1="3" exponent2="4">-6.78912345e-23</Coef>
<Coef exponent1="3" exponent2="5">-5.67891234e-26</Coef>
<Coef exponent1="3" exponent2="6">-1.23456789e-29</Coef>
<Coef exponent1="4" exponent2="0">7.89123456e-12</Coef>
<Coef exponent1="4" exponent2="1">5.67891234e-16</Coef>
<Coef exponent1="4" exponent2="2">1.23456789e-18</Coef>
<Coef exponent1="4" exponent2="3">1.23456789e-22</Coef>
<Coef exponent1="4" exponent2="4">1.23456789e-25</Coef>
<Coef exponent1="4" exponent2="5">1.23456789e-29</Coef>
<Coef exponent1="4" exponent2="6">6.78912345e-33</Coef>
<Coef exponent1="5" exponent2="0">-1.23456789e-15</Coef>
<Coef exponent1="5" exponent2="1">-1.23456789e-19</Coef>
<Coef exponent1="5" exponent2="2">-1.23456789e-22</Coef>
<Coef exponent1="5" exponent2="3">-8.91234567e-26</Coef>
<Coef exponent1="5" exponent2="4">-4.56789123e-29</Coef>
<Coef exponent1="5" exponent2="5">9.12345678e-33</Coef>
<Coef exponent1="5" exponent2="6">2.34567891e-36</Coef>
</Poly>
"""
poly = blocks.Poly2DType.from_array(sicd.Radiometric.RCSSFPoly.Coefs)
node, ns = parse_xml_from_string(node_str)
poly1 = poly.from_node(node, ns)
assert isinstance(poly1, blocks.Poly2DType)
assert np.all(poly.Coefs == poly1.Coefs)
this_node = poly.to_node(doc=poly2d_doc, tag="Poly2DType")
assert this_node.tag == "Poly2DType"
assert this_node.attrib["order1"] == str(len(poly.Coefs) - 1)
assert this_node.attrib["order2"] == str(len(poly.Coefs[0]) - 1)
assert len(this_node.findall("Coef")) == len(poly.Coefs.flatten())
for i, node in enumerate(this_node.findall("Coef")):
assert float(node.text) == poly.Coefs.flatten()[i]
coef_dict = poly.to_dict()
assert isinstance(coef_dict, OrderedDict)
# nothing to do
poly.minimize_order()
assert np.shape(poly.Coefs) == (6, 7)
# last non-zero row index and last non-zero column index not zero
poly[:, 6] = 0.0
poly.minimize_order()
assert np.shape(poly.Coefs) == (6, 6)
# last non-zero column index is zero
poly1 = copy.copy(poly)
poly1[:, 1:6] = 0.0
poly1.minimize_order()
assert np.shape(poly1.Coefs) == (6, 1)
# last non-zero row index is zero
poly[1:6, :] = 0.0
poly.minimize_order()
assert np.shape(poly.Coefs) == (1, 6)
# both last non-zero row and column index is zero
poly[0, 1:6] = 0.0
poly.minimize_order()
assert np.shape(poly.Coefs) == (1, 1)
poly[0] = 0.0
poly.minimize_order()
assert poly.Coefs[0][0] == 0.0
def test_blocks_xyzpolytype(sicd, kwargs):
# Smoke test
poly = blocks.XYZPolyType(
X=sicd.Position.ARPPoly.X,
Y=sicd.Position.ARPPoly.Y,
Z=sicd.Position.ARPPoly.Z
)
assert poly.X.order1 == 5
assert poly.Y.order1 == 5
assert poly.Z.order1 == 5
assert not hasattr(poly, "_xml_ns")
assert not hasattr(poly, "_xml_ns_key")
# Init with kwargs
poly = blocks.XYZPolyType(
X=sicd.Position.ARPPoly.X,
Y=sicd.Position.ARPPoly.Y,
Z=sicd.Position.ARPPoly.Z,
**kwargs,
)
assert poly._xml_ns == kwargs["_xml_ns"]
assert poly._xml_ns_key == kwargs["_xml_ns_key"]
assert np.array_equal(
poly(0),
np.array(
[
sicd.Position.ARPPoly.X[0],
sicd.Position.ARPPoly.Y[0],
sicd.Position.ARPPoly.Z[0],
]
),
)
# Caller
poly_eval = poly([0, 1, 2])
assert len(poly_eval) == 3
# get_array
coeff_arr = poly.get_array()
assert coeff_arr[0] == sicd.Position.ARPPoly.X
assert coeff_arr[1] == sicd.Position.ARPPoly.Y
assert coeff_arr[2] == sicd.Position.ARPPoly.Z
coeff_arr = poly.get_array(dtype=np.float64)
assert np.array_equal(
coeff_arr,
np.array(
[
sicd.Position.ARPPoly.X.Coefs,
sicd.Position.ARPPoly.Y.Coefs,
sicd.Position.ARPPoly.Z.Coefs,
]
),
)
# from_array
poly = blocks.XYZPolyType.from_array(None)
assert poly is None
array = np.array(
[
sicd.Position.ARPPoly.X.Coefs,
sicd.Position.ARPPoly.Y.Coefs,
sicd.Position.ARPPoly.Z.Coefs,
]
)
poly = blocks.XYZPolyType.from_array(array)
assert isinstance(poly, blocks.XYZPolyType)
assert np.all(poly.X.Coefs == sicd.Position.ARPPoly.X.Coefs)
assert np.all(poly.Y.Coefs == sicd.Position.ARPPoly.Y.Coefs)
assert np.all(poly.Z.Coefs == sicd.Position.ARPPoly.Z.Coefs)
# from_array errors
bad_array = array[0:2]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 3, and received `{bad_array}`"
),
):
blocks.XYZPolyType.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
blocks.XYZPolyType.from_array(bad_array)
# poly derivative
coefs = poly.derivative(der_order=1)
assert coefs[0][0] == poly.X.Coefs[1]
assert coefs[1][0] == poly.Y.Coefs[1]
assert coefs[2][0] == poly.Z.Coefs[1]
poly1 = poly.derivative(der_order=1, return_poly=True)
assert isinstance(poly1, blocks.XYZPolyType)
assert poly1.X.Coefs[0] == poly.X.Coefs[1]
assert poly1.Y.Coefs[0] == poly.Y.Coefs[1]
assert poly1.Z.Coefs[0] == poly.Z.Coefs[1]
# poly derivative_eval
coefs = poly.derivative_eval(0)
assert np.array_equal(
coefs, np.array([poly.X.Coefs[1], poly.Y.Coefs[1], poly.Z.Coefs[1]])
)
# poly shift
shifted_coefs = poly.shift(1.1, 2.0)
assert isinstance(shifted_coefs[0], np.ndarray)
assert isinstance(shifted_coefs[1], np.ndarray)
assert isinstance(shifted_coefs[2], np.ndarray)
shifted_poly = poly.shift(1.1, 2.0, return_poly=True)
assert isinstance(shifted_poly, blocks.XYZPolyType)
# poly minimize_order
poly.minimize_order()
assert len(poly.X.Coefs) == 6
assert len(poly.Y.Coefs) == 6
assert len(poly.Z.Coefs) == 6
poly.X[5] = 0.0
poly.minimize_order()
assert len(poly.X.Coefs) == 5
poly.X[3] = 0.0
poly.minimize_order()
assert len(poly.X.Coefs) == 5
def test_blocks_xyzpolyattrtype(sicd, kwargs):
# Smoke test
poly = blocks.XYZPolyAttributeType(
X=sicd.Position.ARPPoly.X,
Y=sicd.Position.ARPPoly.Y,
Z=sicd.Position.ARPPoly.Z,
index=1,
)
assert poly.X.order1 == 5
assert poly.Y.order1 == 5
assert poly.Z.order1 == 5
assert not hasattr(poly, "_xml_ns")
assert not hasattr(poly, "_xml_ns_key")
# Init with kwargs
poly = blocks.XYZPolyAttributeType(
X=sicd.Position.ARPPoly.X,
Y=sicd.Position.ARPPoly.Y,
Z=sicd.Position.ARPPoly.Z,
index=1,
**kwargs,
)
assert poly._xml_ns == kwargs["_xml_ns"]
assert poly._xml_ns_key == kwargs["_xml_ns_key"]
# from_array
poly = blocks.XYZPolyAttributeType.from_array(None)
assert poly is None
array = np.array(
[
sicd.Position.ARPPoly.X.Coefs,
sicd.Position.ARPPoly.Y.Coefs,
sicd.Position.ARPPoly.Z.Coefs,
]
)
poly = blocks.XYZPolyAttributeType.from_array(array)
assert isinstance(poly, blocks.XYZPolyAttributeType)
assert np.all(poly.X.Coefs == sicd.Position.ARPPoly.X.Coefs)
assert np.all(poly.Y.Coefs == sicd.Position.ARPPoly.Y.Coefs)
assert np.all(poly.Z.Coefs == sicd.Position.ARPPoly.Z.Coefs)
# from_array errors
bad_array = array[0:2]
with pytest.raises(
ValueError,
match=re.escape(
f"Expected array to be of length 3, and received `{bad_array}`"
),
):
blocks.XYZPolyAttributeType.from_array(bad_array)
bad_array = "invalid"
with pytest.raises(
ValueError, match="Expected array to be numpy.ndarray, list, or tuple"
):
blocks.XYZPolyAttributeType.from_array(bad_array)
def test_blocks_gainphasepolytype(sicd, kwargs):
# Smoke test
poly = blocks.GainPhasePolyType(
GainPoly=sicd.Antenna.Tx.Array.GainPoly,
PhasePoly=sicd.Antenna.Tx.Array.PhasePoly,
)
assert poly.GainPoly.order1 == 8
assert poly.GainPoly.order2 == 8
assert poly.PhasePoly.order1 == 0
assert poly.PhasePoly.order2 == 0
assert not hasattr(poly, "_xml_ns")
assert not hasattr(poly, "_xml_ns_key")
# Init with kwargs
poly = blocks.GainPhasePolyType(
GainPoly=sicd.Antenna.Tx.Array.GainPoly,
PhasePoly=sicd.Antenna.Tx.Array.PhasePoly,
**kwargs,
)
assert poly._xml_ns == kwargs["_xml_ns"]
assert poly._xml_ns_key == kwargs["_xml_ns_key"]
# Poly minimize_order
poly.minimize_order()
assert np.shape(poly.GainPoly.Coefs) == (9, 9)
assert np.shape(poly.PhasePoly.Coefs) == (1, 1)
poly.GainPoly.Coefs[8][:] = 0.0
poly.minimize_order()
assert np.shape(poly.GainPoly.Coefs) == (8, 9)
poly.GainPoly.Coefs[3][:] = 0.0
poly.minimize_order()
assert np.shape(poly.GainPoly.Coefs) == (8, 9)
poly_eval = poly(0, 0)
assert np.array_equal(poly_eval, np.array([0.0, 0.0]))
poly = blocks.GainPhasePolyType(
GainPoly=None, PhasePoly=sicd.Antenna.Tx.Array.PhasePoly
)
assert poly(0, 0) is None
def test_blocks_errordecorrfunctype(kwargs):
# Smoke test
error_decorr = blocks.ErrorDecorrFuncType(CorrCoefZero=0.0, DecorrRate=0.5)
assert not hasattr(error_decorr, "_xml_ns")
assert not hasattr(error_decorr, "_xml_ns_key")
error_decorr = blocks.ErrorDecorrFuncType(
CorrCoefZero=0.0, DecorrRate=0.5, **kwargs
)
assert error_decorr._xml_ns == kwargs["_xml_ns"]
assert error_decorr._xml_ns_key == kwargs["_xml_ns_key"]

# File: sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_grid.py
#
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
from sarpy.io.complex.sicd_elements import Grid
from sarpy.io.xml.base import parse_xml_from_string
def test_grid_gridtype(sicd, rma_sicd, caplog):
"""Test Grid.GridType class"""
# Start with empty Grid and populate values
grid_empty = Grid.GridType()
grid_empty._derive_time_coa_poly(sicd.CollectionInfo, sicd.SCPCOA)
assert grid_empty.TimeCOAPoly.Coefs[0][0] == sicd.SCPCOA.SCPTime
grid_empty._derive_rg_az_comp(sicd.GeoData,
sicd.SCPCOA,
sicd.RadarCollection,
sicd.ImageFormation)
assert grid_empty.ImagePlane == 'SLANT'
assert grid_empty.Type == 'RGAZIM'
assert grid_empty.Row is not None
assert grid_empty.Col is not None
# Incorrect ImagePlane option
grid_empty.ImagePlane = 'SLAT'
grid_empty._derive_rg_az_comp(sicd.GeoData,
sicd.SCPCOA,
sicd.RadarCollection,
sicd.ImageFormation)
assert 'Image Formation Algorithm is RgAzComp, which requires "SLANT"' in caplog.text
assert grid_empty.ImagePlane == 'SLANT'
# Incorrect Type option
grid_empty.Type = 'RGAZ'
grid_empty._derive_rg_az_comp(sicd.GeoData,
sicd.SCPCOA,
sicd.RadarCollection,
sicd.ImageFormation)
assert 'Image Formation Algorithm is RgAzComp, which requires "RGAZIM"' in caplog.text
assert grid_empty.Type == 'RGAZIM'
# Force KCtr=None path through _derive_rg_az_comp
grid_empty.Row.KCtr = None
grid_empty.Col.KCtr = None
grid_empty._derive_rg_az_comp(sicd.GeoData,
sicd.SCPCOA,
sicd.RadarCollection,
sicd.ImageFormation)
assert grid_empty.Row.KCtr is not None
assert grid_empty.Col.KCtr is not None
# Force KCtr=None path through _derive_pfa
grid_empty.Row.KCtr = None
grid_empty.Col.KCtr = None
grid_empty._derive_pfa(sicd.GeoData,
sicd.RadarCollection,
sicd.ImageFormation,
sicd.Position,
sicd.PFA)
assert grid_empty.Row.KCtr is not None
assert grid_empty.Col.KCtr is not None
# Force unit vector derivation path through _derive_pfa
grid_empty.Row.UVectECF = None
grid_empty.Col.UVectECF = None
grid_empty._derive_pfa(sicd.GeoData,
sicd.RadarCollection,
sicd.ImageFormation,
sicd.Position,
sicd.PFA)
assert grid_empty.Row.UVectECF is not None
assert grid_empty.Col.UVectECF is not None
# Force unit vector derivation path through _derive_rma
grid_empty.Row.UVectECF = None
grid_empty.Col.UVectECF = None
grid_empty._derive_rma(rma_sicd.RMA,
rma_sicd.GeoData,
rma_sicd.RadarCollection,
rma_sicd.ImageFormation,
rma_sicd.Position)
assert grid_empty.Row.UVectECF is not None
assert grid_empty.Col.UVectECF is not None
grid = Grid.GridType(sicd.Grid.ImagePlane,
sicd.Grid.Type,
sicd.Grid.TimeCOAPoly,
sicd.Grid.Row,
sicd.Grid.Col)
grid.derive_direction_params(sicd.ImageData, populate=True)
# Basic validity
assert grid._basic_validity_check()
# Resolution abbreviation checks
expected_abbr = int(100 * (abs(sicd.Grid.Row.ImpRespWid) *
abs(sicd.Grid.Col.ImpRespWid))**0.5)
assert grid.get_resolution_abbreviation() == '{0:04d}'.format(expected_abbr)
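    # The abbreviation is the geometric mean of the row and column impulse
    # response widths, scaled by 100 and zero-padded to four digits.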
grid.Row.ImpRespWid = None
grid.Col.ImpRespWid = None
assert grid.get_resolution_abbreviation() == '0000'
grid.Row.ImpRespWid = 100
grid.Col.ImpRespWid = 100
assert grid.get_resolution_abbreviation() == '9999'
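    # Missing widths fall back to the minimum '0000' code, and values that
    # would need more than four digits are clamped to '9999'.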
# Create a new row type with minimum input and WgtType defined
new_row = Grid.DirParamType(ImpRespBW=0.88798408351600244,
WgtType=Grid.WgtTypeType(WindowName='UNIFORM'))
# Define the weight function, so we can get the slant plane area
new_row.define_weight_function(weight_size=512, populate=True)
assert np.all(new_row.WgtFunct == 1.0)
grid.Row = new_row
grid.Col = new_row
area = grid.get_slant_plane_area()
assert isinstance(area, float)
def test_grid_wgttype(kwargs):
"""Test Grid.WgtTypeType class"""
name = 'UNIFORM'
params = {'fake_params': 'this_fake_str',
'fake_params1': 'another_fake_str'}
# Check basic WgtTypeType instantiation
weight = Grid.WgtTypeType(WindowName=name, Parameters=params, **kwargs)
assert weight._xml_ns == kwargs['_xml_ns']
assert weight._xml_ns_key == kwargs['_xml_ns_key']
# get_parameter_value checks
assert weight.get_parameter_value('fake_params1') == params['fake_params1']
# Passing None for value returns first parameter
assert weight.get_parameter_value(None) == params['fake_params']
# No parameters
weight1 = Grid.WgtTypeType(WindowName=name)
assert weight1.get_parameter_value('fake_params') is None
node_str = '''
<WgtType>
<WindowName>Taylor</WindowName>
<Parameter name="nbar">5</Parameter>
<Parameter name="sll_db">-35.0</Parameter>
<Parameter name="osf">1.2763784887678868</Parameter>
</WgtType>
'''
weight = Grid.WgtTypeType()
node, ns = parse_xml_from_string(node_str)
weight1 = weight.from_node(node, ns)
assert isinstance(weight1, Grid.WgtTypeType)
assert weight1.WindowName == 'Taylor'
assert weight1.Parameters.get('nbar') == '5'
assert weight1.Parameters.get('sll_db') == '-35.0'
assert weight1.Parameters.get('osf') == '1.2763784887678868'
| 6,197 | 36.792683 | 90 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/conftest.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import pathlib
import pytest
from sarpy.io.complex.sicd_elements import SICD
@pytest.fixture()
def sicd():
xml_file = pathlib.Path(pathlib.Path.cwd(), 'tests/data/example.sicd.xml')
structure = SICD.SICDType().from_xml_file(xml_file)
return structure
@pytest.fixture()
def rma_sicd():
xml_file = pathlib.Path(pathlib.Path.cwd(), 'tests/data/example.sicd.rma.xml')
structure = SICD.SICDType().from_xml_file(xml_file)
return structure
@pytest.fixture()
def kwargs():
return {'_xml_ns': 'ns', '_xml_ns_key': 'key'}
@pytest.fixture()
def tol():
return 1e-8
| 693 | 17.756757 | 82 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_radiometric.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
from sarpy.io.complex.sicd_elements import Radiometric
def test_radiometric(sicd, rma_sicd, kwargs):
noise_level = Radiometric.NoiseLevelType_()
assert noise_level.NoiseLevelType is None
assert noise_level.NoisePoly is None
noise_level = Radiometric.NoiseLevelType_('ABSOLUTE')
assert noise_level.NoiseLevelType == 'ABSOLUTE'
assert noise_level.NoisePoly is None
noise_level = Radiometric.NoiseLevelType_(None, rma_sicd.Radiometric.NoiseLevel.NoisePoly, **kwargs)
assert noise_level.NoiseLevelType == 'RELATIVE'
noise_level = Radiometric.NoiseLevelType_(None, sicd.Radiometric.NoiseLevel.NoisePoly, **kwargs)
assert noise_level._xml_ns == kwargs['_xml_ns']
assert noise_level._xml_ns_key == kwargs['_xml_ns_key']
assert noise_level.NoiseLevelType == 'ABSOLUTE'
assert noise_level.NoisePoly == sicd.Radiometric.NoiseLevel.NoisePoly
radio_type = Radiometric.RadiometricType()
assert radio_type.NoiseLevel is None
assert radio_type.RCSSFPoly is None
assert radio_type.SigmaZeroSFPoly is None
assert radio_type.BetaZeroSFPoly is None
assert radio_type.GammaZeroSFPoly is None
radio_type = Radiometric.RadiometricType(noise_level, sicd.Radiometric.RCSSFPoly)
assert radio_type.NoiseLevel == noise_level
assert radio_type.RCSSFPoly == sicd.Radiometric.RCSSFPoly
radio_type._derive_parameters(sicd.Grid, sicd.SCPCOA)
assert radio_type.SigmaZeroSFPoly is not None
assert radio_type.BetaZeroSFPoly is not None
assert radio_type.GammaZeroSFPoly is not None
radio_type = Radiometric.RadiometricType(noise_level)
assert radio_type.NoiseLevel == noise_level
assert radio_type.RCSSFPoly is None
assert radio_type.SigmaZeroSFPoly is None
assert radio_type.BetaZeroSFPoly is None
assert radio_type.GammaZeroSFPoly is None
radio_type._derive_parameters(sicd.Grid, sicd.SCPCOA)
assert radio_type.NoiseLevel == noise_level
assert radio_type.RCSSFPoly is None
assert radio_type.SigmaZeroSFPoly is None
assert radio_type.BetaZeroSFPoly is None
assert radio_type.GammaZeroSFPoly is None
| 2,223 | 38.714286 | 104 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_sicd.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import copy
import re
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import RgAzComp
def test_sicd_smoke_tests(sicd, rma_sicd, tol):
assert sicd.is_valid()
sicd_copy = sicd.copy()
assert sicd_copy.is_valid()
sicd_copy.NITF = None
assert sicd_copy.NITF == {}
sicd_copy.NITF = {'test': 'TEST'}
assert sicd_copy.NITF == {'test': 'TEST'}
assert sicd_copy.ImageFormType == 'PFA'
scp_ecf = [6378138., 0., 0.]
scp_llh = [.01, 0., 0.]
sicd_copy.update_scp(scp_ecf)
assert sicd_copy.GeoData.SCP.ECF.X == 6378138.
sicd_copy.update_scp(scp_llh, 'LLH')
assert sicd_copy.GeoData.SCP.LLH.Lat == 0.01
    # Check the nothing-to-do path
sicd_copy.define_geo_image_corners()
sicd_copy.derive()
assert sicd_copy.get_transmit_band_name() == 'X'
assert sicd_copy.get_processed_polarization_abbreviation() == 'VV'
assert sicd_copy.get_processed_polarization() == 'V:V'
# Take the RMA path through derive
rma_sicd.derive()
rma_sicd.RadarCollection.RefFreqIndex = 1
rma_sicd.apply_reference_frequency(10000)
assert np.all(rma_sicd.RadarCollection.TxFrequency.get_array() ==
sicd.RadarCollection.TxFrequency.get_array()+10000)
row_res, col_res = sicd_copy.get_ground_resolution()
assert row_res is not None
assert col_res is not None
assert sicd_copy.can_project_coordinates()
assert sicd_copy.coa_projection is None
sicd_copy.define_coa_projection(override=True)
assert sicd_copy.coa_projection is not None
scp_pixel, _, _ = sicd_copy.project_ground_to_image([sicd_copy.GeoData.SCP.ECF.X,
sicd_copy.GeoData.SCP.ECF.Y,
sicd_copy.GeoData.SCP.ECF.Z])
assert scp_pixel[0] == pytest.approx(sicd_copy.ImageData.SCPPixel.Row, abs=tol)
assert scp_pixel[1] == pytest.approx(sicd_copy.ImageData.SCPPixel.Col, abs=tol)
scp_pixel, _, _ = sicd_copy.project_ground_to_image_geo([sicd_copy.GeoData.SCP.LLH.Lat,
sicd_copy.GeoData.SCP.LLH.Lon,
sicd_copy.GeoData.SCP.LLH.HAE])
assert scp_pixel[0] == pytest.approx(sicd_copy.ImageData.SCPPixel.Row, abs=tol)
assert scp_pixel[1] == pytest.approx(sicd_copy.ImageData.SCPPixel.Col, abs=tol)
scp_ecef = sicd_copy.project_image_to_ground([sicd_copy.ImageData.SCPPixel.Row, sicd_copy.ImageData.SCPPixel.Col],
projection_type='PLANE')
assert scp_ecef == pytest.approx([sicd_copy.GeoData.SCP.ECF.X,
sicd_copy.GeoData.SCP.ECF.Y,
sicd_copy.GeoData.SCP.ECF.Z], abs=tol)
scp_geo = sicd_copy.project_image_to_ground_geo([sicd_copy.ImageData.SCPPixel.Row, sicd_copy.ImageData.SCPPixel.Col])
assert scp_geo == pytest.approx([sicd_copy.GeoData.SCP.LLH.Lat,
sicd_copy.GeoData.SCP.LLH.Lon,
sicd_copy.GeoData.SCP.LLH.HAE], abs=tol)
sicd_copy.populate_rniirs()
assert sicd_copy.CollectionInfo.Parameters['INFORMATION_DENSITY'] is not None
assert sicd_copy.CollectionInfo.Parameters['PREDICTED_RNIIRS'] is not None
name = sicd_copy.get_suggested_name()
assert name == 'SyntheticCore_001_184124_SL0099L_00N000E_001X___SVV_0101_SPY'
version = sicd_copy.version_required()
assert version == (1, 1, 0)
details = sicd_copy.get_des_details()
assert details['DESSHSI'] == 'SICD Volume 1 Design & Implementation Description Document'
assert details['DESSHSV'] == '1.3.0'
assert details['DESSHSD'] == '2022-11-30T00:00:00Z'
assert details['DESSHTN'] == 'urn:SICD:1.3.0'
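    # Note: the DES details advertise the current SICD 1.3.0 schema even
    # though version_required() above reports that the populated fields only
    # need 1.1.0; the two are apparently independent.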
xml_bytes = sicd.to_xml_bytes()
assert isinstance(xml_bytes, bytes)
xml_string = sicd.to_xml_string()
assert isinstance(xml_string, str)
sicd1 = sicd.from_xml_string(xml_string)
assert sicd1.is_valid()
sicd2, out_row_bounds, out_col_bounds = sicd.create_subset_structure()
assert sicd2.ImageData.FirstRow == sicd.ImageData.FirstRow
assert sicd2.ImageData.NumRows == sicd.ImageData.NumRows
assert sicd2.ImageData.FirstCol == sicd.ImageData.FirstCol
assert sicd2.ImageData.NumCols == sicd.ImageData.NumCols
assert out_row_bounds == (0, sicd.ImageData.NumRows)
assert out_col_bounds == (0, sicd.ImageData.NumCols)
min_max_vals = (10, 100)
sicd2, out_row_bounds, out_col_bounds = sicd.create_subset_structure(row_bounds=min_max_vals,
column_bounds=min_max_vals)
assert sicd2.ImageData.FirstRow == sicd.ImageData.FirstRow + min_max_vals[0]
assert sicd2.ImageData.NumRows == min_max_vals[1] - min_max_vals[0]
assert sicd2.ImageData.FirstCol == sicd.ImageData.FirstCol + min_max_vals[0]
assert sicd2.ImageData.NumCols == min_max_vals[1] - min_max_vals[0]
assert out_row_bounds == min_max_vals
assert out_col_bounds == min_max_vals
def test_nitf_setter_failures(sicd):
a_list = ['test', 'TEST']
with pytest.raises(TypeError, match=f'data must be dictionary instance. Received {type(a_list)}'):
sicd.NITF = a_list
def test_update_scp_failures(sicd):
bad_scp = [0, 0, 0, 0]
with pytest.raises(TypeError, match='point must be an numpy.ndarray'):
sicd.update_scp({})
with pytest.raises(ValueError, match='point must be a one-dimensional, 3 element array'):
sicd.update_scp(bad_scp)
def test_smoke_test_derive_with_rgazcomp(sicd):
sicd.ImageFormation.ImageFormAlgo = 'RGAZCOMP'
sicd.derive()
def test_define_geo_image_corners(sicd):
sicd.GeoData = None
sicd.define_geo_image_corners()
assert sicd.GeoData is not None
def test_define_geo_valid_data(sicd):
sicd.GeoData.ValidData = None
sicd.define_geo_valid_data()
assert sicd.GeoData.ValidData is not None
sicd.GeoData = None
sicd.define_geo_valid_data()
    # Nothing-to-be-done path: GeoData is absent
assert sicd.GeoData is None
def test_missing_image_formation(sicd):
sicd.ImageFormation = None
assert sicd.get_transmit_band_name() == 'UN'
assert sicd.get_processed_polarization_abbreviation() == 'UN'
assert sicd.get_processed_polarization() == 'UN'
def test_apply_reference_frequency_errors(sicd):
with pytest.raises(ValueError, match='RadarCollection.RefFreqIndex is not defined. '
'The reference frequency should not be applied.'):
sicd.apply_reference_frequency(10000)
sicd.RadarCollection.RefFreqIndex = 1
sicd.apply_reference_frequency(10000)
sicd.RadarCollection = None
with pytest.raises(ValueError, match='RadarCollection is not defined. The reference frequency cannot be applied.'):
sicd.apply_reference_frequency(10000)
def test_create_subset_structure_errors(sicd):
min_max_vals = (10, 100000)
with pytest.raises(ValueError, match=re.escape(f'row bounds ({min_max_vals[0]}, {min_max_vals[1]}) '
f'are not sensible for NumRows {sicd.ImageData.NumRows}')):
sicd.create_subset_structure(row_bounds=min_max_vals)
min_max_vals = (100000, 100)
with pytest.raises(ValueError, match=re.escape(f'column bounds ({min_max_vals[0]}, {min_max_vals[1]}) '
f'are not sensible for NumCols {sicd.ImageData.NumCols}')):
sicd.create_subset_structure(column_bounds=min_max_vals)
def test_can_project_coordinates_geo1(sicd, caplog):
sicd.GeoData.SCP = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because GeoData.SCP is not populated' in caplog.text
def test_can_project_coordinates_geo2(sicd, caplog):
sicd.GeoData = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because GeoData is not populated' in caplog.text
def test_can_project_coordinates_image1(sicd, caplog):
sicd.ImageData.SCPPixel = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because ImageData.SCPPixel is not populated' in caplog.text
def test_can_project_coordinates_image2(sicd, caplog):
sicd.ImageData.FirstCol = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because ImageData.FirstCol is not populated' in caplog.text
def test_can_project_coordinates_image3(sicd, caplog):
sicd.ImageData.FirstRow = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because ImageData.FirstRow is not populated' in caplog.text
def test_can_project_coordinates_image4(sicd, caplog):
sicd.ImageData = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because ImageData is not populated' in caplog.text
def test_can_project_coordinates_pos1(sicd, caplog):
sicd.Position.ARPPoly = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Position.ARPPoly is not populated' in caplog.text
def test_can_project_coordinates_pos2(sicd, caplog):
sicd.Position = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Position is not populated' in caplog.text
def test_can_project_coordinates_grid1(sicd, caplog):
sicd.Grid.Type = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Grid.Type is not populated' in caplog.text
def test_can_project_coordinates_grid2(sicd, caplog):
sicd.Grid.Col.SS = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Grid.Col.SS is not populated' in caplog.text
def test_can_project_coordinates_grid3(sicd, caplog):
sicd.Grid.Col = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Grid.Col is not populated' in caplog.text
def test_can_project_coordinates_grid4(sicd, caplog):
sicd.Grid.Row.SS = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Grid.Row.SS is not populated' in caplog.text
def test_can_project_coordinates_grid5(sicd, caplog):
sicd.Grid.Row = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Grid.Row is not populated' in caplog.text
def test_can_project_coordinates_grid6(sicd, caplog):
sicd.Grid.TimeCOAPoly = None
sicd.can_project_coordinates()
assert 'Formulating a projection may be inaccurate, because Grid.TimeCOAPoly is not populated' in caplog.text
def test_can_project_coordinates_grid7(sicd, caplog):
sicd.Grid = None
sicd.can_project_coordinates()
assert 'Formulating a projection is not feasible because Grid is not populated' in caplog.text
def test_can_project_coordinates_grid8(sicd, caplog):
sicd.Grid.Type = 'PLANE'
sicd.Grid.Row.UVectECF = None
sicd.can_project_coordinates()
assert 'UVectECF parameter of Grid.Row or Grid.Col is not populated' in caplog.text
def test_can_project_coordinates_grid9(sicd, caplog):
sicd.Grid.Type = 'BADTYPE'
sicd.can_project_coordinates()
assert 'Unhandled Grid.Type' in caplog.text
def test_can_project_coordinates_pfa1(sicd, caplog):
sicd.PFA.SpatialFreqSFPoly = None
sicd.can_project_coordinates()
assert 'the PFA.SpatialFreqSFPoly parameter is not populated' in caplog.text
def test_can_project_coordinates_pfa2(sicd, caplog):
sicd.PFA.PolarAngPoly = None
sicd.can_project_coordinates()
assert 'but the PFA.PolarAngPoly parameter is not populated' in caplog.text
def test_can_project_coordinates_pfa3(sicd, caplog):
sicd.PFA = None
sicd.can_project_coordinates()
assert 'but the PFA parameter is not populated' in caplog.text
def test_can_project_coordinates_if1(sicd, caplog):
sicd.Grid.Type = 'RGAZIM'
sicd.ImageFormation.ImageFormAlgo = None
sicd.can_project_coordinates()
assert 'ImageFormation.ImageFormAlgo is not populated' in caplog.text
def test_can_project_coordinates_if2(sicd, caplog):
sicd.ImageFormation = None
sicd.can_project_coordinates()
assert 'ImageFormation is not populated' in caplog.text
def test_can_project_coordinates_if3(sicd, caplog):
sicd.ImageFormation.ImageFormAlgo = 'PFACOMP'
sicd.can_project_coordinates()
assert 'got unhandled ImageFormation.ImageFormAlgo' in caplog.text
def test_can_project_coordinates_rgazcomp(sicd, caplog):
sicd.ImageFormation.ImageFormAlgo = 'RGAZCOMP'
rg_az_comp_type = RgAzComp.RgAzCompType()
rg_az_comp_type._derive_parameters(sicd.Grid, sicd.Timeline, sicd.SCPCOA)
sicd.RgAzComp = rg_az_comp_type
sicd.RgAzComp.AzSF = None
sicd.can_project_coordinates()
assert 'RgAzComp.AzSF parameter is not populated' in caplog.text
sicd.RgAzComp = None
sicd.can_project_coordinates()
assert 'RgAzComp parameter is not populated' in caplog.text
def test_can_project_coordinates_rma(rma_sicd, caplog):
bad_rma_sicd = copy.copy(rma_sicd)
bad_rma_sicd.Grid.Type = 'RGZERO'
bad_rma_sicd.RMA.INCA = None
bad_rma_sicd.can_project_coordinates()
assert 'but the RMA.INCA parameter is not populated' in caplog.text
| 13,569 | 37.660969 | 121 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_utils.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import copy
from sarpy.io.complex.sicd_elements import utils
from sarpy.io.complex.sicd_elements import validation_checks
def test_validation_checks(sicd):
"""Smoke test with PFA SICD"""
validation_checks.detailed_validation_checks(sicd)
def test_validation_checks_with_rma(rma_sicd):
"""Smoke test with RMA SICD"""
validation_checks.detailed_validation_checks(rma_sicd)
def test_utils(sicd, rma_sicd, caplog):
"""Check sicd_elements utility functions"""
assert utils.is_same_sensor(sicd, rma_sicd)
assert utils.is_same_sensor(sicd, sicd)
assert utils.is_same_start_time(sicd, rma_sicd)
assert utils.is_same_start_time(sicd, sicd)
assert not utils.is_same_size(sicd, rma_sicd)
assert utils.is_same_size(sicd, sicd)
assert utils.is_same_duration(sicd, rma_sicd)
assert utils.is_same_duration(sicd, sicd)
assert utils.is_same_scp(sicd, rma_sicd)
assert utils.is_same_scp(sicd, sicd)
assert not utils.is_same_band(sicd, rma_sicd)
assert utils.is_same_band(sicd, sicd)
assert not utils.is_general_match(sicd, rma_sicd)
assert utils.is_general_match(sicd, sicd)
pol = utils.polstring_version_required(None)
assert pol == (1, 1, 0)
pol = utils.polstring_version_required('V:V:H')
assert 'Expected polarization string of length 2, but populated as `3`' in caplog.text
assert pol is None
pol = utils.polstring_version_required('V')
assert 'Expected polarization string of length 2, but populated as `1`' in caplog.text
assert pol is None
pol = utils.polstring_version_required('S:V')
assert pol == (1, 3, 0)
pol = utils.polstring_version_required('H:X')
assert pol == (1, 3, 0)
pol = utils.polstring_version_required('V:RHC')
assert pol == (1, 2, 1)
pol = utils.polstring_version_required('LHC:H')
assert pol == (1, 2, 1)
pol = utils.polstring_version_required('V:H')
assert pol == (1, 1, 0)
pol = utils.polstring_version_required('OTHER:H')
assert pol == (1, 3, 0)
pol = utils.polstring_version_required('H:OTHERpol')
assert pol == (1, 3, 0)
# Must have both ImageFormation and RadarCollection
freq = utils._get_center_frequency(None, sicd.ImageFormation)
assert freq is None
freq = utils._get_center_frequency(sicd.RadarCollection, None)
assert freq is None
# Use a copy to change RefFreqIndex value
radar_collection = copy.copy(sicd.RadarCollection)
radar_collection.RefFreqIndex = 10
freq = utils._get_center_frequency(radar_collection, sicd.ImageFormation)
assert freq is None
radar_collection.RefFreqIndex = None
freq = utils._get_center_frequency(radar_collection, sicd.ImageFormation)
assert freq == sicd.ImageFormation.TxFrequencyProc.center_frequency
| 2,882 | 33.73494 | 90 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_imageformation.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import re
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import ImageFormation
MIN_FREQ = 9.0e9
MAX_FREQ = 10.0e9
def test_image_formation_rcvchanproc(kwargs):
# Basic smoke test
rcv_chan_proc = ImageFormation.RcvChanProcType(
NumChanProc=1, PRFScaleFactor=1.2, ChanIndices=[1, 2], **kwargs
)
assert rcv_chan_proc._xml_ns == kwargs["_xml_ns"]
assert rcv_chan_proc._xml_ns_key == kwargs["_xml_ns_key"]
assert rcv_chan_proc.NumChanProc == 1
assert rcv_chan_proc.PRFScaleFactor == 1.2
assert rcv_chan_proc.ChanIndices == [1, 2]
def test_image_formation_txfreq(caplog, tol, kwargs):
tx_freq = ImageFormation.TxFrequencyProcType(
MinProc=MIN_FREQ, MaxProc=MAX_FREQ, **kwargs
)
assert tx_freq.center_frequency == pytest.approx(
np.mean([MIN_FREQ, MAX_FREQ]), abs=tol
)
assert tx_freq.bandwidth == pytest.approx(MAX_FREQ - MIN_FREQ, abs=tol)
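    # With MinProc/MaxProc spanning 9.0-10.0 GHz the center is 9.5 GHz, which
    # lies in X band, hence the band name checked below.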
assert tx_freq._basic_validity_check()
assert tx_freq.get_band_name() == "X"
tx_freq_array = tx_freq.get_array()
assert np.all(tx_freq_array == np.array([MIN_FREQ, MAX_FREQ]))
# Test from_array paths
assert tx_freq.from_array(None) is None
tx_freq1 = tx_freq.from_array(tx_freq_array)
assert tx_freq1.MinProc == tx_freq.MinProc
assert tx_freq1.MaxProc == tx_freq.MaxProc
with pytest.raises(
ValueError,
match=re.escape("Expected array to be of length 2, and received [1]"),
):
tx_freq.from_array([1])
with pytest.raises(
ValueError,
match="Expected array to be numpy.ndarray, list, or tuple, got <class 'dict'>",
):
tx_freq.from_array({"1": 1})
tx_freq._apply_reference_frequency(100000)
assert tx_freq.MinProc == MIN_FREQ + 100000
assert tx_freq.MaxProc == MAX_FREQ + 100000
tx_freq = ImageFormation.TxFrequencyProcType(MinProc=None, MaxProc=MAX_FREQ)
assert tx_freq.center_frequency is None
assert tx_freq.bandwidth is None
# Test invalid inputs path
tx_freq = ImageFormation.TxFrequencyProcType(MinProc=MAX_FREQ, MaxProc=MIN_FREQ)
assert not tx_freq._basic_validity_check()
assert (
f"Invalid frequency bounds MinProc ({tx_freq.MinProc}) > MaxProc ({tx_freq.MaxProc})"
in caplog.text
)
def test_image_formation_processing(kwargs):
# Basic smoke test
proc_type = ImageFormation.ProcessingType(Type="PFA", Applied="True", **kwargs)
assert proc_type._xml_ns == kwargs["_xml_ns"]
assert proc_type._xml_ns_key == kwargs["_xml_ns_key"]
assert proc_type.Type == "PFA"
assert proc_type.Applied is True
def test_image_formation_distortion(kwargs):
# Basic smoke test
distortion = ImageFormation.DistortionType(
A=1.0,
F1=complex(1, 2),
Q1=complex(3, 4),
Q2=complex(5, 6),
F2=complex(7, 8),
Q3=complex(9, 1),
Q4=complex(1, 3),
**kwargs,
)
assert distortion._xml_ns == kwargs["_xml_ns"]
assert distortion._xml_ns_key == kwargs["_xml_ns_key"]
def test_image_formation_polcal(kwargs):
# Basic smoke test
distortion = ImageFormation.DistortionType(
A=1.0,
F1=complex(1, 2),
Q1=complex(3, 4),
Q2=complex(5, 6),
F2=complex(7, 8),
Q3=complex(9, 1),
Q4=complex(1, 3),
)
pol_cal_type = ImageFormation.PolarizationCalibrationType(
DistortCorrectApplied=True, Distortion=distortion, **kwargs
)
assert pol_cal_type._xml_ns == kwargs["_xml_ns"]
assert pol_cal_type._xml_ns_key == kwargs["_xml_ns_key"]
def test_image_formation(sicd, caplog, kwargs):
# Test basic validity paths
image_form_type = ImageFormation.ImageFormationType(
RcvChanProc=sicd.ImageFormation.RcvChanProc,
TxRcvPolarizationProc=sicd.ImageFormation.TxRcvPolarizationProc,
TStartProc=sicd.ImageFormation.TStartProc,
TEndProc=sicd.ImageFormation.TEndProc,
TxFrequencyProc=sicd.ImageFormation.TxFrequencyProc,
ImageFormAlgo=sicd.ImageFormation.ImageFormAlgo,
STBeamComp=sicd.ImageFormation.STBeamComp,
ImageBeamComp=sicd.ImageFormation.ImageBeamComp,
AzAutofocus=sicd.ImageFormation.AzAutofocus,
RgAutofocus=sicd.ImageFormation.RgAutofocus,
**kwargs,
)
assert image_form_type._xml_ns == kwargs["_xml_ns"]
assert image_form_type._xml_ns_key == kwargs["_xml_ns_key"]
assert image_form_type._basic_validity_check()
image_form_type.TStartProc = image_form_type.TEndProc + 1
image_form_type._basic_validity_check()
assert (
f"Invalid time processing bounds TStartProc ({image_form_type.TStartProc}) > TEndProc ({image_form_type.TEndProc})"
in caplog.text
)
# Test derive TxFrequencyProc paths
image_form_type = ImageFormation.ImageFormationType(
RcvChanProc=sicd.ImageFormation.RcvChanProc,
TxRcvPolarizationProc=sicd.ImageFormation.TxRcvPolarizationProc,
TStartProc=sicd.ImageFormation.TStartProc,
TEndProc=sicd.ImageFormation.TEndProc,
TxFrequencyProc=None,
ImageFormAlgo=sicd.ImageFormation.ImageFormAlgo,
STBeamComp=sicd.ImageFormation.STBeamComp,
ImageBeamComp=sicd.ImageFormation.ImageBeamComp,
AzAutofocus=sicd.ImageFormation.AzAutofocus,
RgAutofocus=sicd.ImageFormation.RgAutofocus,
)
image_form_type._derive_tx_frequency_proc(sicd.RadarCollection)
assert image_form_type.TxFrequencyProc is not None
image_form_type = ImageFormation.ImageFormationType(
RcvChanProc=sicd.ImageFormation.RcvChanProc,
TxRcvPolarizationProc=sicd.ImageFormation.TxRcvPolarizationProc,
TStartProc=sicd.ImageFormation.TStartProc,
TEndProc=sicd.ImageFormation.TEndProc,
TxFrequencyProc=sicd.ImageFormation.TxFrequencyProc,
ImageFormAlgo=sicd.ImageFormation.ImageFormAlgo,
STBeamComp=sicd.ImageFormation.STBeamComp,
ImageBeamComp=sicd.ImageFormation.ImageBeamComp,
AzAutofocus=sicd.ImageFormation.AzAutofocus,
RgAutofocus=sicd.ImageFormation.RgAutofocus,
)
image_form_type.TxFrequencyProc.MinProc = None
image_form_type._derive_tx_frequency_proc(sicd.RadarCollection)
assert (
image_form_type.TxFrequencyProc.MinProc == sicd.RadarCollection.TxFrequency.Min
)
image_form_type.TxFrequencyProc.MaxProc = None
image_form_type._derive_tx_frequency_proc(sicd.RadarCollection)
assert (
image_form_type.TxFrequencyProc.MaxProc == sicd.RadarCollection.TxFrequency.Max
)
image_form_type._apply_reference_frequency(100000)
assert (
image_form_type.TxFrequencyProc.MinProc
== sicd.RadarCollection.TxFrequency.Min + 100000
)
assert (
image_form_type.TxFrequencyProc.MaxProc
== sicd.RadarCollection.TxFrequency.Max + 100000
)
assert (
image_form_type.get_polarization() == sicd.ImageFormation.TxRcvPolarizationProc
)
# Test paths through get_polarization_abbreviation
assert image_form_type.get_polarization_abbreviation() == "VV"
image_form_type.TxRcvPolarizationProc = "OTHER"
assert image_form_type.get_polarization_abbreviation() == "UN"
# Test paths through get_transmit_band_name
assert image_form_type.get_transmit_band_name() == "X"
image_form_type.TxFrequencyProc = None
assert image_form_type.get_transmit_band_name() == "UN"
assert image_form_type.version_required() == (1, 1, 0)
# Force the TxFrequencyProc from list path
image_form_type = ImageFormation.ImageFormationType(
RcvChanProc=sicd.ImageFormation.RcvChanProc,
TxRcvPolarizationProc=sicd.ImageFormation.TxRcvPolarizationProc,
TStartProc=sicd.ImageFormation.TStartProc,
TEndProc=sicd.ImageFormation.TEndProc,
TxFrequencyProc=[MIN_FREQ, MAX_FREQ],
ImageFormAlgo=sicd.ImageFormation.ImageFormAlgo,
STBeamComp=sicd.ImageFormation.STBeamComp,
ImageBeamComp=sicd.ImageFormation.ImageBeamComp,
AzAutofocus=sicd.ImageFormation.AzAutofocus,
RgAutofocus=sicd.ImageFormation.RgAutofocus,
)
assert image_form_type.TxFrequencyProc.MinProc == MIN_FREQ
assert image_form_type.TxFrequencyProc.MaxProc == MAX_FREQ
| 8,383 | 35.611354 | 123 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_matchinfo.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
from sarpy.io.complex.sicd_elements import MatchInfo
from sarpy.io.xml.base import parse_xml_from_string
def test_matchinfo(kwargs):
match_coll_type = MatchInfo.MatchCollectionType(CoreName='TEST', **kwargs)
assert match_coll_type._xml_ns == kwargs['_xml_ns']
assert match_coll_type._xml_ns_key == kwargs['_xml_ns_key']
match_type = MatchInfo.MatchType(TypeID='TEST', **kwargs)
assert match_type._xml_ns == kwargs['_xml_ns']
assert match_type._xml_ns_key == kwargs['_xml_ns_key']
assert match_type.NumMatchCollections == 0
match_type = MatchInfo.MatchType(TypeID='TEST', MatchCollections=[match_coll_type])
assert match_type.NumMatchCollections == 1
match_info_type = MatchInfo.MatchInfoType(MatchTypes=None, **kwargs)
assert match_info_type._xml_ns == kwargs['_xml_ns']
assert match_info_type._xml_ns_key == kwargs['_xml_ns_key']
assert match_info_type.NumMatchTypes == 0
match_info_type = MatchInfo.MatchInfoType(MatchTypes=[match_type], **kwargs)
assert match_info_type.NumMatchTypes == 1
match_node_str = '''
<MatchInfo>
<NumMatchTypes>1</NumMatchTypes>
<MatchType>
<TypeID>COHERENT</TypeID>
<NumMatchCollections>0</NumMatchCollections>
</MatchType>
</MatchInfo>
'''
node, ns = parse_xml_from_string(match_node_str)
match_info_type1 = match_info_type.from_node(node, ns)
assert match_info_type1.NumMatchTypes == 1
assert match_info_type1.MatchTypes[0].TypeID == 'COHERENT'
assert match_info_type1.MatchTypes[0].NumMatchCollections == 0
| 1,715 | 37.133333 | 87 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_pfa.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import blocks
from sarpy.io.complex.sicd_elements import PFA
def test_pfa(sicd, tol, kwargs):
"""Test PFA classes"""
stdeskew = PFA.STDeskewType()
assert isinstance(stdeskew, PFA.STDeskewType)
made_up_poly = blocks.Poly2DType([[10, 5], [8, 3], [1, 0.5]])
stdeskew = PFA.STDeskewType(False, made_up_poly, **kwargs)
assert stdeskew.Applied is False
assert stdeskew.STDSPhasePoly == made_up_poly
assert stdeskew._xml_ns == kwargs['_xml_ns']
assert stdeskew._xml_ns_key == kwargs['_xml_ns_key']
# Nominal instantiation
pfa_nom = PFA.PFAType(sicd.PFA.FPN,
sicd.PFA.IPN,
sicd.PFA.PolarAngRefTime,
sicd.PFA.PolarAngPoly,
sicd.PFA.SpatialFreqSFPoly,
sicd.PFA.Krg1,
sicd.PFA.Krg2,
sicd.PFA.Kaz1,
sicd.PFA.Kaz2,
stdeskew,
**kwargs)
assert isinstance(pfa_nom, PFA.PFAType)
assert pfa_nom._xml_ns == kwargs['_xml_ns']
assert pfa_nom._xml_ns_key == kwargs['_xml_ns_key']
assert pfa_nom._basic_validity_check()
assert pfa_nom._check_polar_ang_ref()
    # No PolarAngPoly path
pfa_no_pap = PFA.PFAType(sicd.PFA.FPN,
sicd.PFA.IPN,
sicd.PFA.PolarAngRefTime,
None,
sicd.PFA.SpatialFreqSFPoly,
sicd.PFA.Krg1,
sicd.PFA.Krg2,
sicd.PFA.Kaz1,
sicd.PFA.Kaz2,
stdeskew,
**kwargs)
assert pfa_no_pap._check_polar_ang_ref()
# Populate empty PFAType with sicd components after instantiation
pfa_empty = PFA.PFAType()
pfa_empty._derive_parameters(sicd.Grid, sicd.SCPCOA, sicd.GeoData, sicd.Position, sicd.Timeline)
assert pfa_empty.PolarAngRefTime == pytest.approx(sicd.SCPCOA.SCPTime, abs=tol)
assert isinstance(pfa_empty.IPN, blocks.XYZType)
assert isinstance(pfa_empty.FPN, blocks.XYZType)
assert isinstance(pfa_empty.PolarAngPoly, blocks.Poly1DType)
assert isinstance(pfa_empty.SpatialFreqSFPoly, blocks.Poly1DType)
assert isinstance(pfa_empty.Krg1, float)
assert isinstance(pfa_empty.Krg2, float)
assert isinstance(pfa_empty.Kaz1, float)
assert isinstance(pfa_empty.Kaz2, float)
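    # Each of the spatial-frequency quantities above depends on the scene
    # geometry, so the GeoData-free derivation below can only fill
    # PolarAngRefTime.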
# Try it without GeoData
pfa_empty_no_geo = PFA.PFAType()
pfa_empty_no_geo._derive_parameters(sicd.Grid, sicd.SCPCOA, None, sicd.Position, sicd.Timeline)
assert pfa_empty_no_geo.PolarAngRefTime == pytest.approx(sicd.SCPCOA.SCPTime, abs=tol)
assert pfa_empty_no_geo.IPN is None
assert pfa_empty_no_geo.FPN is None
assert pfa_empty_no_geo.PolarAngPoly is None
assert pfa_empty_no_geo.SpatialFreqSFPoly is None
assert pfa_empty_no_geo.Krg1 is None
assert pfa_empty_no_geo.Krg2 is None
assert pfa_empty_no_geo.Kaz1 is None
assert pfa_empty_no_geo.Kaz2 is None
# Without FPN to test that path
pfa_no_fpn = PFA.PFAType(None,
sicd.PFA.IPN,
sicd.PFA.PolarAngRefTime,
sicd.PFA.PolarAngPoly,
sicd.PFA.SpatialFreqSFPoly,
sicd.PFA.Krg1,
sicd.PFA.Krg2,
sicd.PFA.Kaz1,
sicd.PFA.Kaz2,
stdeskew)
assert pfa_no_fpn.pfa_polar_coords(sicd.Position,
sicd.GeoData.SCP.ECF[:],
0.0) == (None, None)
assert pfa_no_fpn.pfa_polar_coords(sicd.Position,
sicd.GeoData.SCP.ECF[:],
np.array([6378137.0, 0])) == (None, None)
| 4,175 | 40.76 | 100 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_radarcollection.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import RadarCollection
def test_radarcollection_getbandname():
assert RadarCollection.get_band_name(None) == 'UN'
assert RadarCollection.get_band_name(3.5e6) == 'HF'
assert RadarCollection.get_band_name(3.5e7) == 'VHF'
assert RadarCollection.get_band_name(3.5e8) == 'UHF'
assert RadarCollection.get_band_name(1.5e9) == 'L'
assert RadarCollection.get_band_name(2.5e9) == 'S'
assert RadarCollection.get_band_name(4.5e9) == 'C'
assert RadarCollection.get_band_name(8.5e9) == 'X'
assert RadarCollection.get_band_name(1.5e10) == 'KU'
assert RadarCollection.get_band_name(2.5e10) == 'K'
assert RadarCollection.get_band_name(3.0e10) == 'KA'
assert RadarCollection.get_band_name(6.0e10) == 'V'
assert RadarCollection.get_band_name(1.0e11) == 'W'
assert RadarCollection.get_band_name(2.0e11) == 'MM'
assert RadarCollection.get_band_name(5.0e11) == 'UN'
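# A minimal standalone sketch of the letter-band lookup exercised above, done
# as a threshold bisection over the IEEE band edges implied by those cases.
# This is an illustration only, not sarpy's implementation; the edge values
# are assumptions inferred from the assertions above.
def test_band_name_threshold_sketch():
    import bisect
    edges = [3.0e7, 3.0e8, 1.0e9, 2.0e9, 4.0e9, 8.0e9, 1.2e10,
             1.8e10, 2.7e10, 4.0e10, 7.5e10, 1.1e11, 3.0e11]
    names = ['HF', 'VHF', 'UHF', 'L', 'S', 'C', 'X', 'KU', 'K',
             'KA', 'V', 'W', 'MM', 'UN']
    def band_name(frequency):
        # bisect_right counts how many edges lie at or below the frequency
        return names[bisect.bisect_right(edges, frequency)]
    assert band_name(8.5e9) == RadarCollection.get_band_name(8.5e9)
    assert band_name(5.0e11) == RadarCollection.get_band_name(5.0e11)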
def test_radarcollection_txfreqtype(sicd, kwargs, caplog):
bad_tx_freq = RadarCollection.TxFrequencyType(None,
sicd.RadarCollection.TxFrequency.Max,
**kwargs)
assert bad_tx_freq.center_frequency is None
bad_tx_freq = RadarCollection.TxFrequencyType(sicd.RadarCollection.TxFrequency.Max,
sicd.RadarCollection.TxFrequency.Min,
**kwargs)
assert not bad_tx_freq._basic_validity_check()
assert 'Invalid frequency bounds Min ({}) > Max ({})'.format(bad_tx_freq.Min, bad_tx_freq.Max) in caplog.text
tx_freq = RadarCollection.TxFrequencyType(sicd.RadarCollection.TxFrequency.Min,
sicd.RadarCollection.TxFrequency.Max,
**kwargs)
assert tx_freq._xml_ns == kwargs['_xml_ns']
assert tx_freq._xml_ns_key == kwargs['_xml_ns_key']
assert tx_freq.Min == sicd.RadarCollection.TxFrequency.Min
assert tx_freq.Max == sicd.RadarCollection.TxFrequency.Max
assert tx_freq.center_frequency == 0.5 * (tx_freq.Min + tx_freq.Max)
tx_freq._apply_reference_frequency(1000)
assert tx_freq.Min == sicd.RadarCollection.TxFrequency.Min + 1000
assert tx_freq.Max == sicd.RadarCollection.TxFrequency.Max + 1000
assert tx_freq._basic_validity_check()
assert tx_freq.get_band_abbreviation() == 'X__'
tx_freq_arr = tx_freq.get_array()
assert np.all(tx_freq_arr == np.array([tx_freq.Min, tx_freq.Max]))
assert tx_freq.from_array(None) is None
tx_freq1 = tx_freq.from_array(tx_freq_arr)
assert tx_freq1.Min == tx_freq.Min
assert tx_freq1.Max == tx_freq.Max
with pytest.raises(ValueError, match='Expected array to be of length 2, and received 1'):
tx_freq.from_array([tx_freq_arr[0]])
with pytest.raises(ValueError, match='Expected array to be numpy.ndarray, list, or tuple'):
tx_freq.from_array(tx_freq)
wf_input = [(1.0, 2.0, None), (1.0, None, 2.0), (None, 1.0, 2.0)]
@pytest.mark.parametrize("tx_pulse_len, tx_rf_bw, tx_fm_rate", wf_input)
def test_radarcollection_waveformparamtype(tx_pulse_len, tx_rf_bw, tx_fm_rate):
wf_params = RadarCollection.WaveformParametersType(RcvDemodType='STRETCH', RcvFMRate=1.0)
assert wf_params.RcvFMRate == 1.0
assert wf_params.RcvDemodType == 'STRETCH'
wf_params.RcvFMRate = None
assert wf_params.RcvFMRate is None
wf_params.RcvFMRate = 12.0
assert wf_params.RcvFMRate == 12.0
assert wf_params._basic_validity_check()
wf_params = RadarCollection.WaveformParametersType(RcvDemodType='CHIRP', RcvFMRate=None)
assert wf_params.RcvFMRate == 0.0
assert wf_params.RcvDemodType == 'CHIRP'
wf_params = RadarCollection.WaveformParametersType(RcvDemodType='cHiRp', RcvFMRate=None)
assert wf_params.RcvFMRate is None
tx_freq_start = 3.0
rcv_freq_start = 2.0
wf_params = RadarCollection.WaveformParametersType(TxPulseLength=tx_pulse_len,
TxRFBandwidth=tx_rf_bw,
TxFreqStart=tx_freq_start,
TxFMRate=tx_fm_rate,
RcvFreqStart=rcv_freq_start)
wf_params.derive()
    assert all(v is not None for v in
               (wf_params.TxRFBandwidth, wf_params.TxFMRate, wf_params.TxPulseLength))
ref_freq = 10.5
wf_params._apply_reference_frequency(ref_freq)
assert wf_params.TxFreqStart == tx_freq_start + ref_freq
assert wf_params.RcvFreqStart == rcv_freq_start + ref_freq
def test_radarcollection_txsteptype(kwargs):
tx_step = RadarCollection.TxStepType(1, 'V', 1, **kwargs)
assert tx_step._xml_ns == kwargs['_xml_ns']
assert tx_step._xml_ns_key == kwargs['_xml_ns_key']
def test_radarcollection_chanparameterstype(kwargs):
chan_params = RadarCollection.ChanParametersType(None, 1, 1)
assert chan_params.get_transmit_polarization() is None
chan_params = RadarCollection.ChanParametersType('OTHER', 1, 1)
assert chan_params.get_transmit_polarization() == 'OTHER'
chan_params = RadarCollection.ChanParametersType('V:H', 1, 1, **kwargs)
assert chan_params._xml_ns == kwargs['_xml_ns']
assert chan_params._xml_ns_key == kwargs['_xml_ns_key']
assert chan_params.get_transmit_polarization() == 'V'
assert chan_params.version_required() == (1, 1, 0)
def test_radarcollection_segmentarrayelement(kwargs):
seg_arr_elem = RadarCollection.SegmentArrayElement(0, 0, 2000, 5000, 'AA', 1, **kwargs)
assert seg_arr_elem._xml_ns == kwargs['_xml_ns']
assert seg_arr_elem._xml_ns_key == kwargs['_xml_ns_key']
def test_radarcollection_referenceplanetype(sicd, kwargs):
seg_arr_elem1 = RadarCollection.SegmentArrayElement(0, 0, 500, 1501, 'XY', 1)
seg_arr_elem2 = RadarCollection.SegmentArrayElement(501, 0, 1301, 1501, 'XZ', 2)
ref_plane = RadarCollection.ReferencePlaneType(sicd.RadarCollection.Area.Plane.RefPt,
sicd.RadarCollection.Area.Plane.XDir,
sicd.RadarCollection.Area.Plane.YDir,
[seg_arr_elem1, seg_arr_elem2],
'D',
**kwargs)
assert ref_plane._xml_ns == kwargs['_xml_ns']
assert ref_plane._xml_ns_key == kwargs['_xml_ns_key']
corners = ref_plane.get_ecf_corner_array()
assert np.all(corners is not None)
area = RadarCollection.AreaType(Corner=None, Plane=ref_plane)
assert area.Corner is not None
def test_radarcollection_getpolabbr():
chan_params1 = RadarCollection.ChanParametersType('V:V', 1, 1)
chan_params2 = RadarCollection.ChanParametersType('S:H', 2, 2)
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=None)
assert radar_collection.get_polarization_abbreviation() == 'U'
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=chan_params1)
assert radar_collection.get_polarization_abbreviation() == 'S'
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=[chan_params1, chan_params2])
assert radar_collection.get_polarization_abbreviation() == 'D'
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=[chan_params1, chan_params2, chan_params1])
assert radar_collection.get_polarization_abbreviation() == 'T'
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=[chan_params1, chan_params2, chan_params1, chan_params2])
assert radar_collection.get_polarization_abbreviation() == 'Q'
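    # These cases indicate that the abbreviation tracks the receive-channel
    # count rather than the set of distinct polarizations: one channel is
    # 'S'(ingle), two 'D'(ual), three 'T'(riple), four 'Q'(uad), and
    # 'U'(nknown) when no channels are populated.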
def test_radarcollection_derive(sicd):
wf_params = RadarCollection.WaveformParametersType(TxPulseLength=1.0,
TxRFBandwidth=2.0,
TxFreqStart=3.0,
TxFMRate=4.0,
RcvDemodType='STRETCH',
RcvWindowLength=5.0,
ADCSampleRate=6.0,
RcvIFBandwidth=7.0,
RcvFreqStart=0.0,
RcvFMRate=1.0,
index=1)
area = RadarCollection.AreaType(Plane=sicd.RadarCollection.Area.Plane)
tx_step1 = RadarCollection.TxStepType(1, 'V', 1)
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=sicd.RadarCollection.RcvChannels,
Area=area,
Waveform=wf_params,
TxSequence=[tx_step1])
radar_collection.derive()
assert radar_collection.TxPolarization == 'V'
tx_step2 = RadarCollection.TxStepType(2, 'H', 2)
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=sicd.RadarCollection.RcvChannels,
Area=area,
Waveform=wf_params,
TxSequence=[tx_step1, tx_step2])
radar_collection.derive()
assert radar_collection.TxPolarization == 'SEQUENCE'
chan_params1 = RadarCollection.ChanParametersType('V:V', 1, 1)
chan_params2 = RadarCollection.ChanParametersType('S:H', 2, 2)
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=[chan_params1, chan_params2],
Area=area,
Waveform=wf_params)
radar_collection.derive()
assert radar_collection.TxPolarization == 'SEQUENCE'
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=chan_params1,
Area=area,
Waveform=wf_params)
radar_collection.derive()
assert radar_collection.TxPolarization == 'V'
def test_radarcollection_version(sicd):
chan_params1 = RadarCollection.ChanParametersType('V:V', 1, 1)
chan_params2 = RadarCollection.ChanParametersType('S:H', 2, 2)
# Check SICD version requirements based on RcvChannels
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=None)
assert radar_collection.version_required() == (1, 1, 0)
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=[chan_params1, chan_params2])
assert radar_collection.version_required() == (1, 3, 0)
def test_radarcollection_smoketest(sicd, kwargs):
params = {'fake_params': 'this_fake_str',
'fake_params1': 'another_fake_str'}
ref_freq_idx = 0
radar_collection = RadarCollection.RadarCollectionType(sicd.RadarCollection.TxFrequency,
ref_freq_idx,
sicd.RadarCollection.Waveform,
sicd.RadarCollection.TxPolarization,
RadarCollection.TxStepType(TxPolarization='V'),
sicd.RadarCollection.RcvChannels,
sicd.RadarCollection.Area,
params,
**kwargs)
assert radar_collection._xml_ns == kwargs['_xml_ns']
assert radar_collection._xml_ns_key == kwargs['_xml_ns_key']
def test_radarcollection_private(sicd, caplog):
wf_params = RadarCollection.WaveformParametersType(TxPulseLength=1.0,
TxRFBandwidth=2.0,
TxFreqStart=3.0,
TxFMRate=4.0,
RcvDemodType='STRETCH',
RcvWindowLength=5.0,
ADCSampleRate=6.0,
RcvIFBandwidth=7.0,
RcvFreqStart=0.0,
RcvFMRate=1.0,
index=1)
area = RadarCollection.AreaType(Plane=sicd.RadarCollection.Area.Plane)
chan_params1 = RadarCollection.ChanParametersType('V:V', 1, 1)
wf_params.TxFreqStart = None
wf_params.TxRFBandwidth = None
tx_freq = RadarCollection.TxFrequencyType(sicd.RadarCollection.TxFrequency.Min,
sicd.RadarCollection.TxFrequency.Max)
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=chan_params1,
TxFrequency=tx_freq,
Area=area,
Waveform=wf_params)
radar_collection._derive_wf_params()
assert radar_collection.Waveform[0].TxFreqStart == sicd.RadarCollection.TxFrequency.Min
assert radar_collection.Waveform[0].TxRFBandwidth == sicd.RadarCollection.TxFrequency.Max - sicd.RadarCollection.TxFrequency.Min
ref_freq = 10000
radar_collection._apply_reference_frequency(ref_freq)
assert radar_collection.TxFrequency.Min == sicd.RadarCollection.TxFrequency.Min + ref_freq
assert radar_collection.TxFrequency.Max == sicd.RadarCollection.TxFrequency.Max + ref_freq
assert radar_collection.Waveform[0].TxFreqStart == sicd.RadarCollection.TxFrequency.Min + ref_freq
assert radar_collection._check_frequency()
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=chan_params1,
TxFrequency=tx_freq,
Area=area,
Waveform=wf_params)
assert radar_collection._check_frequency()
radar_collection.TxFrequency.Min *= -1
assert not radar_collection._check_frequency()
assert "TxFrequency.Min is negative, but RefFreqIndex is not populated." in caplog.text
caplog.clear()
radar_collection.RefFreqIndex = 10
assert radar_collection._check_frequency()
assert radar_collection._check_tx_sequence()
radar_collection.TxPolarization = 'SEQUENCE'
radar_collection.TxSequence = None
assert not radar_collection._check_tx_sequence()
assert 'TxPolarization is populated as "SEQUENCE", but TxSequence is not populated.' in caplog.text
caplog.clear()
tx_step1 = RadarCollection.TxStepType(1, 'V', 1)
tx_step2 = RadarCollection.TxStepType(2, 'H', 2)
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=chan_params1,
Area=area,
Waveform=wf_params,
TxSequence=[tx_step1, tx_step2])
radar_collection.TxPolarization = 'V'
assert not radar_collection._check_tx_sequence()
assert 'TxSequence is populated, but TxPolarization is populated as {}'.format(radar_collection.TxPolarization) in caplog.text
caplog.clear()
radar_collection = RadarCollection.RadarCollectionType(RcvChannels=chan_params1,
Area=area,
Waveform=wf_params,
TxSequence=[tx_step1, tx_step1])
radar_collection.TxPolarization = 'SEQUENCE'
assert not radar_collection._check_tx_sequence()
assert 'TxSequence is populated, but the only unique TxPolarization' in caplog.text
caplog.clear()
radar_collection._basic_validity_check()
| 16,641 | 50.364198 | 132 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_timeline.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
from sarpy.io.complex.sicd_elements import Timeline
def test_timeline(sicd, kwargs):
def get_ipp_set(ipp, idx, **kwargs):
return Timeline.IPPSetType(ipp.TStart,
ipp.TEnd,
ipp.IPPStart,
ipp.IPPEnd,
ipp.IPPPoly,
idx,
**kwargs)
ippset1 = get_ipp_set(sicd.Timeline.IPP[0], 1, **kwargs)
assert ippset1.TStart == sicd.Timeline.IPP[0].TStart
assert ippset1.TEnd == sicd.Timeline.IPP[0].TEnd
assert ippset1.IPPStart == sicd.Timeline.IPP[0].IPPStart
assert ippset1.IPPEnd == sicd.Timeline.IPP[0].IPPEnd
assert ippset1.IPPPoly == sicd.Timeline.IPP[0].IPPPoly
assert ippset1._basic_validity_check()
ippset2 = get_ipp_set(sicd.Timeline.IPP[1], 2, **kwargs)
ipp_list = [ippset1, ippset2]
timeline = Timeline.TimelineType(sicd.Timeline.CollectStart,
sicd.Timeline.CollectDuration,
ipp_list)
assert timeline.CollectStart == sicd.Timeline.CollectStart
assert timeline.CollectDuration == sicd.Timeline.CollectDuration
for idx in np.arange(len(timeline.IPP)):
assert timeline.IPP[idx].TStart == ipp_list[idx].TStart
assert timeline.IPP[idx].TEnd == ipp_list[idx].TEnd
assert timeline.IPP[idx].IPPStart == ipp_list[idx].IPPStart
assert timeline.IPP[idx].IPPEnd == ipp_list[idx].IPPEnd
assert timeline.IPP[idx].IPPPoly == ipp_list[idx].IPPPoly
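    # CollectEnd is derived by adding CollectDuration (seconds) to
    # CollectStart as a whole-microsecond numpy timedelta.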
assert timeline.CollectEnd == timeline.CollectStart + np.timedelta64(int(timeline.CollectDuration*1e6), 'us')
assert timeline._check_ipp_consecutive()
assert timeline._check_ipp_times()
assert timeline._basic_validity_check()
    # Check that the IPP-consecutive check bails out with a single IPP set
timeline = Timeline.TimelineType(sicd.Timeline.CollectStart,
sicd.Timeline.CollectDuration,
ipp_list[0])
assert timeline._check_ipp_consecutive()
def test_timeline_start_end_mismatches(sicd, kwargs, caplog):
    # Swap TStart and TEnd along with IPPStart and IPPEnd
ipp0 = sicd.Timeline.IPP[0]
bad_ippset = Timeline.IPPSetType(ipp0.TEnd,
ipp0.TStart,
ipp0.IPPEnd,
ipp0.IPPStart,
ipp0.IPPPoly,
1,
**kwargs)
bad_ippset._basic_validity_check()
assert 'TStart ({}) >= TEnd ({})'.format(ipp0.TEnd, ipp0.TStart) in caplog.text
assert 'IPPStart ({}) >= IPPEnd ({})'.format(ipp0.IPPEnd, ipp0.IPPStart) in caplog.text
# CollectEnd is None with no CollectStart
bad_timeline = Timeline.TimelineType(None, sicd.Timeline.CollectDuration, bad_ippset)
assert bad_timeline.CollectEnd is None
def test_timeline_negative_tstart(sicd, kwargs, caplog):
# Negative TStart
ipp0 = sicd.Timeline.IPP[0]
bad_ippset = Timeline.IPPSetType(-ipp0.TStart,
ipp0.TEnd,
ipp0.IPPStart,
ipp0.IPPEnd,
ipp0.IPPPoly,
1,
**kwargs)
bad_timeline = Timeline.TimelineType(None, sicd.Timeline.CollectDuration, bad_ippset)
bad_timeline._check_ipp_times()
assert 'IPP entry 0 has negative TStart' in caplog.text
def test_timeline_bad_tend(sicd, kwargs, caplog):
# TEnd too large
ipp0 = sicd.Timeline.IPP[0]
bad_ippset = Timeline.IPPSetType(ipp0.TStart,
sicd.Timeline.CollectDuration+1,
ipp0.IPPStart,
ipp0.IPPEnd,
ipp0.IPPPoly,
1,
**kwargs)
bad_timeline = Timeline.TimelineType(None, sicd.Timeline.CollectDuration, bad_ippset)
bad_timeline._check_ipp_times()
assert 'appreciably larger than CollectDuration' in caplog.text
def test_timeline_unset_ipp(sicd):
# Check times is True with no IPP set
bad_timeline = Timeline.TimelineType(sicd.Timeline.CollectStart, sicd.Timeline.CollectDuration, None)
assert bad_timeline._check_ipp_times()
| 4,681 | 41.563636 | 113 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_imagecreation.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
from sarpy.io.complex.sicd_elements import ImageCreation
def test_imagecreationtype(kwargs):
# Smoke test
image_creation_type = ImageCreation.ImageCreationType(
Application="Fake IFP",
DateTime=np.datetime64("2023-06-23"),
Site="Fake site",
Profile="Fake profile",
)
assert image_creation_type.Application == "Fake IFP"
assert image_creation_type.DateTime == np.datetime64("2023-06-23")
assert image_creation_type.Site == "Fake site"
assert image_creation_type.Profile == "Fake profile"
assert not hasattr(image_creation_type, "_xml_ns")
assert not hasattr(image_creation_type, "_xml_ns_key")
# Init with kwargs
image_creation_type = ImageCreation.ImageCreationType(
Application="Fake IFP",
DateTime=np.datetime64("today"),
Site="Fake site",
Profile="Fake profile",
**kwargs
)
assert image_creation_type._xml_ns == kwargs["_xml_ns"]
assert image_creation_type._xml_ns_key == kwargs["_xml_ns_key"]
| 1,144 | 30.805556 | 70 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_collectioninfo.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
from sarpy.io.complex.sicd_elements import CollectionInfo
def test_collectioninfo_radarmodetype(kwargs):
radar_mode_type = CollectionInfo.RadarModeType(ModeType="SPOTLIGHT", ModeID="SL")
assert radar_mode_type.ModeType == "SPOTLIGHT"
assert radar_mode_type.ModeID == "SL"
assert not hasattr(radar_mode_type, "_xml_ns")
assert not hasattr(radar_mode_type, "_xml_ns_key")
# Init with kwargs
radar_mode_type = CollectionInfo.RadarModeType(
ModeType="SPOTLIGHT", ModeID="SL", **kwargs
)
assert radar_mode_type._xml_ns == kwargs["_xml_ns"]
assert radar_mode_type._xml_ns_key == kwargs["_xml_ns_key"]
radar_mode_type = CollectionInfo.RadarModeType(ModeType="SPOTLIGHT")
assert radar_mode_type.get_mode_abbreviation() == "SL"
radar_mode_type = CollectionInfo.RadarModeType(ModeType="STRIPMAP")
assert radar_mode_type.get_mode_abbreviation() == "ST"
radar_mode_type = CollectionInfo.RadarModeType(ModeType="DYNAMIC STRIPMAP")
assert radar_mode_type.get_mode_abbreviation() == "DS"
def test_collectioninfo_collinfotype(sicd, kwargs):
collection_info_type = CollectionInfo.CollectionInfoType(
CollectorName=sicd.CollectionInfo.CollectorName,
IlluminatorName="FAKE_ILLUMINATOR",
CoreName=sicd.CollectionInfo.CoreName,
CollectType=sicd.CollectionInfo.CollectType,
RadarMode=sicd.CollectionInfo.RadarMode,
Classification=sicd.CollectionInfo.Classification,
CountryCodes=["FAKE_CC"],
Parameters={"FAKE": "PARAMETERS"},
)
assert collection_info_type.CollectorName == sicd.CollectionInfo.CollectorName
assert collection_info_type.IlluminatorName == "FAKE_ILLUMINATOR"
assert collection_info_type.CoreName == sicd.CollectionInfo.CoreName
assert collection_info_type.CollectType == sicd.CollectionInfo.CollectType
assert collection_info_type.RadarMode == sicd.CollectionInfo.RadarMode
assert collection_info_type.Classification == sicd.CollectionInfo.Classification
assert collection_info_type.CountryCodes == ["FAKE_CC"]
assert collection_info_type.Parameters["FAKE"] == "PARAMETERS"
assert not hasattr(collection_info_type, "_xml_ns")
assert not hasattr(collection_info_type, "_xml_ns_key")
# Init with kwargs
collection_info_type = CollectionInfo.CollectionInfoType(
CollectorName=sicd.CollectionInfo.CollectorName,
CoreName=sicd.CollectionInfo.CoreName,
RadarMode=sicd.CollectionInfo.RadarMode,
Classification=sicd.CollectionInfo.Classification,
**kwargs
)
assert collection_info_type._xml_ns == kwargs["_xml_ns"]
assert collection_info_type._xml_ns_key == kwargs["_xml_ns_key"]
| 2,812 | 43.650794 | 85 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_errorstatistics.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
from sarpy.io.complex.sicd_elements import blocks
from sarpy.io.complex.sicd_elements import ErrorStatistics
def test_errorstatistics(kwargs):
scp_error_type = ErrorStatistics.CompositeSCPErrorType(Rg=1.0, Az=2.0, RgAz=3.0, **kwargs)
assert scp_error_type._xml_ns == kwargs['_xml_ns']
assert scp_error_type._xml_ns_key == kwargs['_xml_ns_key']
corr_coefs_type = ErrorStatistics.CorrCoefsType(P1P2=1.0, P1P3=2.0, P1V1=3.0, P1V2=4.0, P1V3=5.0,
P2P3=5.0, P2V1=4.0, P2V2=3.0, P2V3=2.0, P3V1=1.0,
P3V2=1.0, P3V3=2.0, V1V2=3.0, V1V3=4.0, V2V3=5.0,
**kwargs)
assert corr_coefs_type._xml_ns == kwargs['_xml_ns']
assert corr_coefs_type._xml_ns_key == kwargs['_xml_ns_key']
pos_vel_err_type = ErrorStatistics.PosVelErrType(Frame='ECF', P1=1.0, P2=2.0, P3=3.0,
V1=3.0, V2=2.0, V3=1.0, **kwargs)
assert pos_vel_err_type._xml_ns == kwargs['_xml_ns']
assert pos_vel_err_type._xml_ns_key == kwargs['_xml_ns_key']
radar_sensor_error_type = ErrorStatistics.RadarSensorErrorType(RangeBias=1.0, **kwargs)
assert radar_sensor_error_type._xml_ns == kwargs['_xml_ns']
assert radar_sensor_error_type._xml_ns_key == kwargs['_xml_ns_key']
tropo_error_type = ErrorStatistics.TropoErrorType(**kwargs)
assert tropo_error_type._xml_ns == kwargs['_xml_ns']
assert tropo_error_type._xml_ns_key == kwargs['_xml_ns_key']
iono_error_type = ErrorStatistics.IonoErrorType(IonoRgRgRateCC=1.0, **kwargs)
assert iono_error_type._xml_ns == kwargs['_xml_ns']
assert iono_error_type._xml_ns_key == kwargs['_xml_ns_key']
error_comp_type = ErrorStatistics.ErrorComponentsType(PosVelErr=pos_vel_err_type,
RadarSensor=radar_sensor_error_type,
**kwargs)
assert error_comp_type._xml_ns == kwargs['_xml_ns']
assert error_comp_type._xml_ns_key == kwargs['_xml_ns_key']
unmodeled_decorr_type = ErrorStatistics.UnmodeledDecorrType(Xrow=blocks.ErrorDecorrFuncType(CorrCoefZero=0.0, DecorrRate=2.0),
Ycol=blocks.ErrorDecorrFuncType(CorrCoefZero=0.0, DecorrRate=4.0),
**kwargs)
assert unmodeled_decorr_type._xml_ns == kwargs['_xml_ns']
assert unmodeled_decorr_type._xml_ns_key == kwargs['_xml_ns_key']
unmodeled_type = ErrorStatistics.UnmodeledType(Xrow=1.0, Ycol=2.0, XrowYcol=3.0, **kwargs)
assert unmodeled_type._xml_ns == kwargs['_xml_ns']
assert unmodeled_type._xml_ns_key == kwargs['_xml_ns_key']
error_stats_type = ErrorStatistics.ErrorStatisticsType(**kwargs)
assert error_stats_type._xml_ns == kwargs['_xml_ns']
assert error_stats_type._xml_ns_key == kwargs['_xml_ns_key']
assert error_stats_type.version_required() == (1, 1, 0)
error_stats_type = ErrorStatistics.ErrorStatisticsType(Unmodeled=unmodeled_type)
assert error_stats_type.version_required() == (1, 3, 0)
| 3,326 | 51.809524 | 131 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_position.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import copy
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import Position
from sarpy.io.complex.sicd_elements import SCPCOA
@pytest.fixture()
def position(sicd, kwargs):
return Position.PositionType(
ARPPoly=sicd.Position.ARPPoly,
GRPPoly=sicd.Position.GRPPoly,
TxAPCPoly=sicd.Position.TxAPCPoly,
RcvAPC=sicd.Position.RcvAPC,
**kwargs,
)
def test_position_positiontype(sicd, position, kwargs):
# Smoke test
assert position._xml_ns == kwargs["_xml_ns"]
assert position._xml_ns_key == kwargs["_xml_ns_key"]
assert position.ARPPoly == sicd.Position.ARPPoly
assert position.GRPPoly == sicd.Position.GRPPoly
assert position.TxAPCPoly == sicd.Position.TxAPCPoly
assert position.RcvAPC == sicd.Position.RcvAPC
def test_position_derivearppoly(sicd, position):
scpcoa = SCPCOA.SCPCOAType(
SCPTime=sicd.SCPCOA.SCPTime,
ARPPos=sicd.SCPCOA.ARPPos,
ARPVel=sicd.SCPCOA.ARPVel,
ARPAcc=None,
SideOfTrack=sicd.SCPCOA.SideOfTrack,
SlantRange=sicd.SCPCOA.SlantRange,
GroundRange=sicd.SCPCOA.GroundRange,
DopplerConeAng=sicd.SCPCOA.DopplerConeAng,
GrazeAng=sicd.SCPCOA.GrazeAng,
IncidenceAng=sicd.SCPCOA.IncidenceAng,
TwistAng=sicd.SCPCOA.TwistAng,
SlopeAng=sicd.SCPCOA.SlopeAng,
AzimAng=sicd.SCPCOA.AzimAng,
LayoverAng=sicd.SCPCOA.LayoverAng,
)
arp_poly = copy.copy(position.ARPPoly)
    # Do-nothing path: ARPPoly is already populated, so it is left unchanged
position._derive_arp_poly(SCPCOA=scpcoa)
assert np.all(position.ARPPoly.X.Coefs == arp_poly.X.Coefs)
assert np.all(position.ARPPoly.Y.Coefs == arp_poly.Y.Coefs)
assert np.all(position.ARPPoly.Z.Coefs == arp_poly.Z.Coefs)
    # Another do-nothing path: no SCPCOA provided
position.ARPPoly = None
position._derive_arp_poly(SCPCOA=None)
assert position.ARPPoly is None
position._derive_arp_poly(SCPCOA=scpcoa)
assert position.ARPPoly is not None
    assert position.ARPPoly.X.order1 == 2
    assert position.ARPPoly.Y.order1 == 2
    assert position.ARPPoly.Z.order1 == 2
def test_position_validitycheck(position, caplog):
assert position._basic_validity_check()
# Force the error condition
position.ARPPoly.X.Coefs = position.ARPPoly.X.Coefs[0:1]
assert not position._basic_validity_check()
assert "ARPPoly should be order at least 1 in each component" in caplog.text
| 2,501 | 30.275 | 80 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_imagedata.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import ImageData
def test_imagedata(sicd, kwargs, caplog):
image_type = ImageData.FullImageType()
assert image_type.NumRows is None
assert image_type.NumCols is None
image_type = image_type.from_array([sicd.ImageData.NumRows, sicd.ImageData.NumCols])
assert image_type.NumRows == sicd.ImageData.NumRows
assert image_type.NumCols == sicd.ImageData.NumCols
with pytest.raises(ValueError, match='Expected array to be of length 2, and received 1'):
image_type.from_array([sicd.ImageData.NumRows])
with pytest.raises(ValueError, match='Expected array to be numpy.ndarray, list, or tuple'):
image_type.from_array(image_type)
image_type1 = ImageData.FullImageType(sicd.ImageData.NumRows, sicd.ImageData.NumCols, **kwargs)
assert image_type1._xml_ns == kwargs['_xml_ns']
assert image_type1._xml_ns_key == kwargs['_xml_ns_key']
assert image_type1.NumRows == sicd.ImageData.NumRows
assert image_type1.NumCols == sicd.ImageData.NumCols
image_array = image_type.get_array()
assert np.all(image_array == np.array([sicd.ImageData.NumRows, sicd.ImageData.NumCols]))
amp_table = np.ones((256, 256))
image_data = ImageData.ImageDataType('AMP8I_PHS8I',
None,
sicd.ImageData.NumRows,
sicd.ImageData.NumCols,
sicd.ImageData.FirstRow,
sicd.ImageData.FirstCol,
sicd.ImageData.FullImage,
sicd.ImageData.SCPPixel,
sicd.ImageData.ValidData,
**kwargs)
assert image_data._xml_ns == kwargs['_xml_ns']
assert image_data._xml_ns_key == kwargs['_xml_ns_key']
assert image_data.get_pixel_size() == 2
assert not image_data._basic_validity_check()
assert "We have `PixelType='AMP8I_PHS8I'` and `AmpTable` is not defined for ImageDataType" in caplog.text
image_data = ImageData.ImageDataType('RE32F_IM32F',
amp_table,
sicd.ImageData.NumRows,
sicd.ImageData.NumCols,
sicd.ImageData.FirstRow,
sicd.ImageData.FirstCol,
sicd.ImageData.FullImage,
sicd.ImageData.SCPPixel,
sicd.ImageData.ValidData,
**kwargs)
assert image_data.get_pixel_size() == 8
assert not image_data._basic_validity_check()
assert "We have `PixelType != 'AMP8I_PHS8I'` and `AmpTable` is defined for ImageDataType" in caplog.text
image_data = ImageData.ImageDataType('RE32F_IM32F',
None,
sicd.ImageData.NumRows,
sicd.ImageData.NumCols,
sicd.ImageData.FirstRow,
sicd.ImageData.FirstCol,
sicd.ImageData.FullImage,
sicd.ImageData.SCPPixel,
sicd.ImageData.ValidData,
**kwargs)
assert image_data._basic_validity_check()
assert image_data._check_valid_data()
valid_vertex_data = image_data.get_valid_vertex_data()
assert len(valid_vertex_data) == len(sicd.ImageData.ValidData)
full_vertex_data = image_data.get_full_vertex_data()
assert np.all(full_vertex_data == np.array([[0, 0],
[0, image_data.NumCols - 1],
[image_data.NumRows - 1, image_data.NumCols - 1],
[image_data.NumRows - 1, 0]]))
image_data.PixelType = 'RE16I_IM16I'
assert image_data.get_pixel_size() == 4
| 4,378 | 47.120879 | 109 | py |
sarpy | sarpy-master/tests/io/complex/sicd_elements/test_sicd_elements_scpcoa.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import copy
import re
import numpy as np
import pytest
from sarpy.io.complex.sicd_elements import SCPCOA
@pytest.fixture()
def scpcoa(sicd, kwargs):
return SCPCOA.SCPCOAType(
SCPTime=sicd.SCPCOA.SCPTime,
ARPPos=sicd.SCPCOA.ARPPos,
ARPVel=sicd.SCPCOA.ARPVel,
ARPAcc=sicd.SCPCOA.ARPAcc,
SideOfTrack=sicd.SCPCOA.SideOfTrack,
SlantRange=sicd.SCPCOA.SlantRange,
GroundRange=sicd.SCPCOA.GroundRange,
DopplerConeAng=sicd.SCPCOA.DopplerConeAng,
GrazeAng=sicd.SCPCOA.GrazeAng,
IncidenceAng=sicd.SCPCOA.IncidenceAng,
TwistAng=sicd.SCPCOA.TwistAng,
SlopeAng=sicd.SCPCOA.SlopeAng,
AzimAng=sicd.SCPCOA.AzimAng,
LayoverAng=sicd.SCPCOA.LayoverAng,
**kwargs,
)
def test_scpcoa_geometrycalculator(sicd, caplog):
# Smoke test
geom_calc = SCPCOA.GeometryCalculator(
SCP=sicd.GeoData.SCP.ECF.get_array(),
ARPPos=sicd.SCPCOA.ARPPos.get_array(),
ARPVel=sicd.SCPCOA.ARPVel.get_array(),
)
bad_vector = np.asarray([1e-7, 0, 0])
assert np.allclose(geom_calc._make_unit(bad_vector), [1, 0, 0])
assert f"The input vector to be normalized has norm" in caplog.text
rov = geom_calc.ROV
assert rov is not None
sot = geom_calc.SideOfTrack
assert sot == "L"
slant_range = geom_calc.SlantRange
assert slant_range > 0.0
ground_range = geom_calc.GroundRange
assert ground_range > 0.0
dca = geom_calc.DopplerConeAng
assert 0.0 <= dca <= 180.0
graze = geom_calc.GrazeAng
assert 0.0 <= graze <= 90.0
incidence = geom_calc.IncidenceAng
assert 0.0 <= incidence <= 90.0
graze, incidence = geom_calc.get_graze_and_incidence()
assert 0.0 <= graze <= 90.0
assert 0.0 <= incidence <= 90.0
twist = geom_calc.TwistAng
assert -90.0 <= twist <= 90.0
squint = geom_calc.SquintAngle
assert -90.0 <= squint <= 90.0
slope = geom_calc.SlopeAng
assert 0.0 < slope <= 90.0
azim = geom_calc.AzimAng
assert 0.0 <= azim <= 360.0
layover = geom_calc.LayoverAng
assert 0.0 <= layover <= 360.0
layover = geom_calc.get_layover()
assert np.all(layover is not None)
shadow = geom_calc.get_shadow()
assert np.all(shadow is not None)
def test_scpcoa(scpcoa, kwargs):
# Smoke test
assert scpcoa._xml_ns == kwargs["_xml_ns"]
assert scpcoa._xml_ns_key == kwargs["_xml_ns_key"]
def test_scpcoa_look(scpcoa):
assert scpcoa.look is not None
scpcoa.SideOfTrack = None
assert scpcoa.look is None
def test_scpcoa_rov(sicd, scpcoa):
assert scpcoa.ROV is None
scpcoa._derive_geometry_parameters(GeoData=sicd.GeoData, overwrite=True)
assert scpcoa.ROV is not None
def test_scpcoa_thetadot(sicd, scpcoa):
assert scpcoa.ThetaDot is None
scpcoa._derive_geometry_parameters(GeoData=sicd.GeoData, overwrite=True)
assert scpcoa.ThetaDot is not None
def test_scpcoa_multipathground(scpcoa):
assert scpcoa.MultipathGround is not None
scpcoa.GrazeAng = None
assert scpcoa.MultipathGround is None
def test_scpcoa_multipath(scpcoa):
assert scpcoa.Multipath is not None
scpcoa.AzimAng = None
assert scpcoa.Multipath is None
def test_scpcoa_shadow(sicd, scpcoa):
assert scpcoa.Shadow is None
scpcoa._derive_geometry_parameters(GeoData=sicd.GeoData, overwrite=True)
assert scpcoa.Shadow is not None
def test_scpcoa_shadowmagnitude(sicd, scpcoa):
assert scpcoa.ShadowMagnitude is None
scpcoa._derive_geometry_parameters(GeoData=sicd.GeoData, overwrite=True)
assert scpcoa.ShadowMagnitude is not None
def test_scpcoa_squint(sicd, scpcoa):
assert scpcoa.Squint is None
scpcoa._derive_geometry_parameters(GeoData=sicd.GeoData, overwrite=True)
assert scpcoa.Squint is not None
def test_scpcoa_layovermagnitude(sicd, scpcoa):
assert scpcoa.LayoverMagnitude is None
scpcoa._derive_geometry_parameters(GeoData=sicd.GeoData, overwrite=True)
assert scpcoa.LayoverMagnitude is not None
def test_scpcoa_derivescptime(sicd, scpcoa, tol):
scp_time = scpcoa.SCPTime
scpcoa.SCPTime = 0.0
# Do nothing path
scpcoa._derive_scp_time(Grid=None)
assert scpcoa.SCPTime == 0.0
# Another do nothing path
scpcoa._derive_scp_time(Grid=sicd.Grid)
assert scpcoa.SCPTime == 0.0
scpcoa._derive_scp_time(Grid=sicd.Grid, overwrite=True)
assert scpcoa.SCPTime == pytest.approx(scp_time, abs=tol)
def test_scpcoa_deriveposition(sicd, scpcoa, tol):
arp_pos = scpcoa.ARPPos.get_array()
arp_vel = scpcoa.ARPVel.get_array()
arp_acc = scpcoa.ARPAcc.get_array()
scpcoa.ARPPos = None
scpcoa.ARPVel = None
scpcoa.ARPAcc = None
# Do nothing path
scpcoa._derive_position(Position=None)
    assert scpcoa.ARPPos is None
    assert scpcoa.ARPVel is None
    assert scpcoa.ARPAcc is None
scpcoa._derive_position(Position=sicd.Position, overwrite=True)
assert np.all(scpcoa.ARPPos.get_array() == pytest.approx(arp_pos, abs=tol))
assert np.all(scpcoa.ARPVel.get_array() == pytest.approx(arp_vel, abs=tol))
assert np.all(scpcoa.ARPAcc.get_array() == pytest.approx(arp_acc, abs=tol))
def test_scpcoa_derivegeometry(sicd, scpcoa, tol):
scpcoa_copy = copy.copy(scpcoa)
scpcoa_copy._ROV = None
scpcoa_copy.SideOfTrack = None
scpcoa_copy.SlantRange = None
scpcoa_copy.GroundRange = None
scpcoa_copy.DopplerConeAng = None
scpcoa_copy.GrazeAng = None
scpcoa_copy.IncidenceAng = None
scpcoa_copy.TwistAng = None
scpcoa_copy._squint = None
scpcoa_copy.SlopeAng = None
scpcoa_copy.AzimAng = None
scpcoa_copy.LayoverAng = None
# Do nothing path
scpcoa_copy._derive_geometry_parameters(GeoData=None)
    assert scpcoa_copy._ROV is None
    assert scpcoa_copy.SideOfTrack is None
    assert scpcoa_copy.SlantRange is None
    assert scpcoa_copy.GroundRange is None
    assert scpcoa_copy.DopplerConeAng is None
    assert scpcoa_copy.GrazeAng is None
    assert scpcoa_copy.IncidenceAng is None
    assert scpcoa_copy.TwistAng is None
    assert scpcoa_copy._squint is None
    assert scpcoa_copy.SlopeAng is None
    assert scpcoa_copy.AzimAng is None
    assert scpcoa_copy.LayoverAng is None
scpcoa_copy._derive_geometry_parameters(GeoData=sicd.GeoData, overwrite=True)
assert scpcoa_copy._ROV is not None
assert scpcoa_copy.SideOfTrack == pytest.approx(scpcoa.SideOfTrack, abs=tol)
assert scpcoa_copy.SlantRange == pytest.approx(scpcoa.SlantRange, abs=tol)
assert scpcoa_copy.GroundRange == pytest.approx(scpcoa.GroundRange, abs=tol)
assert scpcoa_copy.DopplerConeAng == pytest.approx(scpcoa.DopplerConeAng, abs=tol)
assert scpcoa_copy.GrazeAng == pytest.approx(scpcoa.GrazeAng, abs=tol)
assert scpcoa_copy.IncidenceAng == pytest.approx(scpcoa.IncidenceAng, abs=tol)
assert scpcoa_copy.TwistAng == pytest.approx(scpcoa.TwistAng, abs=tol)
assert scpcoa_copy._squint is not None
assert scpcoa_copy.SlopeAng == pytest.approx(scpcoa.SlopeAng, abs=tol)
assert scpcoa_copy.AzimAng == pytest.approx(scpcoa.AzimAng, abs=tol)
assert scpcoa_copy.LayoverAng == pytest.approx(scpcoa.LayoverAng, abs=tol)
def test_scpcoa_rederive(sicd, scpcoa):
# Smoke test
scpcoa.rederive(Grid=sicd.Grid, Position=sicd.Position, GeoData=sicd.GeoData)
def test_scpcoa_checkvalues(sicd, scpcoa):
# Do nothing path
assert scpcoa.check_values(GeoData=None)
# Smoke test
assert scpcoa.check_values(GeoData=sicd.GeoData)
def test_scpcoa_checkvalues_error1(sicd, scpcoa, caplog):
scpcoa.SideOfTrack = "R"
assert not scpcoa.check_values(GeoData=sicd.GeoData)
assert "SideOfTrack is expected to be L, and is populated as R" in caplog.text
def test_scpcoa_checkvalues_error2(sicd, scpcoa, caplog):
scpcoa.SlantRange = 1000000.0
scpcoa.GroundRange = 1000000.0
assert not scpcoa.check_values(GeoData=sicd.GeoData)
assert (
f"attribute SlantRange is expected to have value {np.round(sicd.SCPCOA.SlantRange, 10)}, but is populated as 1000000.0"
in caplog.text
)
assert (
f"attribute GroundRange is expected to have value {np.round(sicd.SCPCOA.GroundRange, 10)}, but is populated as 1000000.0"
in caplog.text
)
def test_scpcoa_checkvalues_error3(sicd, scpcoa, caplog):
scpcoa.DopplerConeAng = 360.0
scpcoa.GrazeAng = 360.0
scpcoa.IncidenceAng = 360.0
scpcoa.TwistAng = 360.0
scpcoa.SlopeAng = 360.0
scpcoa.AzimAng = 360.0
scpcoa.LayoverAng = 360.0
assert not scpcoa.check_values(GeoData=sicd.GeoData)
assert re.search(
r"attribute DopplerConeAng is expected to have value (\d+\.\d+), but is populated as 360.0",
caplog.text,
)
assert re.search(
r"attribute GrazeAng is expected to have value (\d+\.\d+), but is populated as 360.0",
caplog.text,
)
assert re.search(
r"attribute IncidenceAng is expected to have value (\d+\.\d+), but is populated as 360.0",
caplog.text,
)
assert re.search(
r"attribute TwistAng is expected to have value (\d+\.\d+), but is populated as 360.0",
caplog.text,
)
assert re.search(
r"attribute SlopeAng is expected to have value (\d+\.\d+), but is populated as 360.0",
caplog.text,
)
assert re.search(
r"attribute AzimAng is expected to have value (\d+\.\d+), but is populated as 360.0",
caplog.text,
)
assert re.search(
r"attribute LayoverAng is expected to have value (\d+\.\d+), but is populated as 360.0",
caplog.text,
)
| 9,720 | 32.177474 | 129 | py |
sarpy | sarpy-master/tests/io/DEM/test_geotiff1deg_list.py | """
These test functions exercise the GeoTIFF1DegList class, which contains methods used to determine
which GeoTIFF files are needed to cover a specified geodetic bounding box. These tests create dummy,
temporary DEM files on the fly, so there is no need to provide any actual DEM files.
"""
import logging
import pathlib
import re
import tempfile
import pytest
from sarpy.io.DEM.geotiff1deg import GeoTIFF1DegList
# SW corner (degrees) of valid DEM pixels
MIN_LAT = -3
MIN_LON = -2
# NE corner (degrees) of valid DEM pixels
MAX_LAT = 2
MAX_LON = 4
logging.basicConfig(level=logging.WARNING)
def infer_filename_format(root_dir_path):
"""
This is a helper function used to generate a dem_filename_pattern string without explicit
knowledge of the DEM filenames. It assumes the Lat/Lon is encoded in the filename using
    a string like this: {NS}{abslat:02}{EW}{abslon:03}.
"""
lat_lon_regex = '(n|s)([0-8][0-9])(e|w)((0[0-9][0-9])|(1[0-7][0-9])|180)'
lat_lon_munge = [{'fmt_str': '{ns}{abslat:02}{ew}{abslon:03}', 'regex': lat_lon_regex},
{'fmt_str': '{NS}{abslat:02}{EW}{abslon:03}', 'regex': lat_lon_regex.upper()}]
filename_formats = []
tiff_filenames = [str(f) for f in root_dir_path.glob("**/*DEM.tif")]
if len(tiff_filenames) == 0:
raise FileNotFoundError(f"Could not find any TIFF files in ({str(root_dir_path)}).")
for tiff_filename in tiff_filenames:
munged_filename = tiff_filename
for munge in lat_lon_munge:
munged_filename = re.sub(munge['regex'], munge['fmt_str'], munged_filename)
if munged_filename == tiff_filename:
raise ValueError(f"Could not find a Lat/Lon substring in filename ({tiff_filename}).")
if munged_filename not in filename_formats:
filename_formats.append(munged_filename)
fmt_ref = list(filename_formats[0])
for tst_str in filename_formats[1:]:
fmt_tst = list(tst_str)
if len(fmt_ref) != len(fmt_tst):
raise ValueError('Format string lengths do not match')
for i, (c0, c1) in enumerate(zip(fmt_ref, fmt_tst)):
if c0 != c1:
fmt_ref[i] = '?'
filename_format = ''.join(fmt_ref)
return filename_format
@pytest.fixture(scope='module')
def dem_file_path():
"""
Create a directory of empty files that satisfy the DEM naming convention.
"""
ver_choice = ('01', '02')
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = pathlib.Path(temp_dir)
for lat in range(MIN_LAT, MAX_LAT):
ns = 's' if lat < 0 else 'n'
for lon in range(MIN_LON, MAX_LON):
ew = 'w' if lon < 0 else 'e'
# Make files that span the prime meridian and the equator
stem = f"tdt_{ns}{abs(lat):02}{ew}{abs(lon):03}_{ver_choice[0]}"
filename = temp_path / f"{stem}" / "DEM" / f"{stem.upper()}_DEM.tif"
filename.parent.mkdir(parents=True, exist_ok=True)
filename.touch()
# Make files that span the anti-meridian and the equator
lon2 = lon + 180
lon2 = (lon2 + 180) % 360 - 180
ew = 'w' if lon2 < 0 else 'e'
stem = f"tdt_{ns}{abs(lat):02}{ew}{abs(lon2):03}_{ver_choice[1]}"
filename = temp_path / f"{stem}" / "DEM" / f"{stem.upper()}_DEM.tif"
filename.parent.mkdir(parents=True, exist_ok=True)
filename.touch()
yield temp_path
def test_filename_from_lat_lon():
obj = GeoTIFF1DegList('dummy_format')
# Test fully specified filename_format
filename_format = "Test_{lat:02d}_{lon:03d}_{abslat:02d}_{abslon:03d}_{ns:1s}{NS:1s}_{ew:1s}{EW:1s}"
filename = obj.filename_from_lat_lon(-1, -2, filename_format)
assert filename == "Test_-1_-02_01_002_sS_wW"
# Test fully specified filename_format with an extra {*}
filename_format = "Test_{lat:02d}_{lon:03d}_{abslat:02d}_{abslon:03d}_{ns:1s}{NS:1s}_{ew:1s}{EW:1s}_{bad}"
filename = obj.filename_from_lat_lon(-1, -2, filename_format)
assert filename == "Test_-1_-02_01_002_sS_wW_{bad}"
# Test filename_format where field width is omitted for single character fields
filename_format = "Test_{lat:02d}_{lon:03d}_{abslat:02d}_{abslon:03d}_{ns}{NS}_{ew}{EW}"
filename = obj.filename_from_lat_lon(-1, -2, filename_format)
assert filename == "Test_-1_-02_01_002_sS_wW"
def test_find_dem_files(dem_file_path):
filename_format = infer_filename_format(dem_file_path)
obj = GeoTIFF1DegList(filename_format)
filenames = obj.find_dem_files(MIN_LAT - 1, MIN_LON - 1)
assert len(filenames) == 0
filenames = obj.find_dem_files(MIN_LAT, MIN_LON)
expected_filename = obj.filename_from_lat_lon(MIN_LAT, MIN_LON, filename_format).replace('?', '1')
assert len(filenames) == 1 and filenames[0].endswith(expected_filename)
filenames = obj.find_dem_files(MIN_LAT + 0.5, MIN_LON + 0.5)
assert len(filenames) == 1 and filenames[0] == expected_filename
filenames = obj.find_dem_files(MAX_LAT, MAX_LON)
expected_filename = obj.filename_from_lat_lon(MAX_LAT-1, MAX_LON-1, filename_format).replace('?', '1')
assert len(filenames) == 1 and filenames[0] == expected_filename
filenames = obj.find_dem_files(MIN_LAT+1, MIN_LON)
assert len(filenames) == 2
filenames = obj.find_dem_files(MIN_LAT, MIN_LON+1)
assert len(filenames) == 2
filenames = obj.find_dem_files(MIN_LAT+1, MIN_LON + 1)
assert len(filenames) == 4
def test_file_list(dem_file_path):
filename_format = infer_filename_format(dem_file_path)
obj = GeoTIFF1DegList(filename_format)
# Zero files
lat_lon_box = [MIN_LAT - 10, MIN_LAT - 9, MIN_LON - 10, MIN_LON - 9]
filenames = obj.get_file_list(lat_lon_box)
assert len(filenames) == 0
# Single file
lat_lon_box = [MIN_LAT + 0.3, MIN_LAT + 0.6, MIN_LON + 0.2, MIN_LON + 0.5]
filenames = obj.get_file_list(lat_lon_box)
assert len(filenames) == 1
# All files near the prime meridian
lat_lon_box = [MIN_LAT + 0.3, MAX_LAT - 0.5, MIN_LON + 0.2, MAX_LON - 0.5]
filenames = obj.get_file_list(lat_lon_box)
assert len(filenames) == 30
# All files near the anti-meridian
lat_lon_box = [MIN_LAT + 0.3, MAX_LAT - 0.5, 180 + MIN_LON + 0.2, MAX_LON - 180 - 0.5]
filenames = obj.get_file_list(lat_lon_box)
assert len(filenames) == 30
# 360 degrees of longitude
lat_lon_box = [MIN_LAT + 0.3, MAX_LAT - 0.5, 0.1, -0.1]
filenames = obj.get_file_list(lat_lon_box)
assert len(filenames) == 60
def test_exceptions(dem_file_path, caplog):
filename_format = infer_filename_format(dem_file_path)
obj = GeoTIFF1DegList(filename_format)
with pytest.raises(ValueError) as info:
obj.find_dem_files(999, 999)
msgs = str(info.value).split('\n')
assert len(msgs) == 2
assert info.match("The latitude value must be between \\[-90, \\+90\\]")
assert info.match("The longitude value must be between \\[-180, \\+180\\)")
with pytest.raises(ValueError) as info:
obj.get_file_list([999, 999, 999, 999])
msgs = str(info.value).split('\n')
assert len(msgs) == 4
assert info.match("The minimum latitude value must be between \\[-90, \\+90\\]")
assert info.match("The maximum latitude value must be between \\[-90, \\+90\\]")
assert info.match("The minimum longitude value must be between \\[-180, \\+180\\)")
assert info.match("The maximum longitude value must be between \\[-180, \\+180\\)")
caplog.clear()
caplog.set_level(logging.WARNING)
obj.get_file_list([45.1, 45.3, 90.1, 90.3])
assert caplog.text.startswith("WARNING sarpy.io.DEM.geotiff1deg:geotiff1deg.py")
assert "Missing expected DEM file for tile with lower left lat/lon corner (45.0, 90.0)" in caplog.text
obj = GeoTIFF1DegList(filename_format, missing_error=True)
with pytest.raises(ValueError,
match="^Missing expected DEM file for tile with lower left lat/lon corner \\(45.0, 90.0\\)"):
obj.get_file_list([45.1, 45.3, 90.1, 90.3])
| 8,182 | 37.238318 | 116 | py |
sarpy | sarpy-master/tests/io/DEM/test_geoid.py | import time
import os
import logging
import numpy
import json
import unittest
import sarpy.io.DEM.geoid as geoid
from tests import parse_file_entry
test_file = None
geoid_files = []
this_loc = os.path.abspath(__file__)
file_reference = os.path.join(os.path.split(this_loc)[0], 'geoid.json') # specifies file locations
if os.path.isfile(file_reference):
with open(file_reference, 'r') as fi:
the_files = json.load(fi)
test_file = parse_file_entry(the_files.get('test_file', None))
for entry in the_files.get('geoid_files', []):
the_file = parse_file_entry(entry)
if the_file is not None:
geoid_files.append(the_file)
def generic_geoid_test(instance, test_file, geoid_file):
assert isinstance(instance, unittest.TestCase)
_, gname = os.path.split(geoid_file)
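    # Select the geoid-height column of the test file that matches this geoid model:
    # column 2 for EGM84, column 3 for EGM96, otherwise (assumed EGM2008) column 4.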
if gname.lower().startswith('egm84'):
zcol = 2
elif gname.lower().startswith('egm96'):
zcol = 3
else:
zcol = 4
with open(test_file, 'r') as fi:
lins = fi.read().splitlines()
lats = numpy.zeros((len(lins),), dtype=numpy.float64)
lons = numpy.zeros((len(lins),), dtype=numpy.float64)
zs = numpy.zeros((len(lins),), dtype=numpy.float64)
for i, lin in enumerate(lins):
slin = lin.strip().split()
lats[i] = float(slin[0])
lons[i] = float(slin[1])
zs[i] = float(slin[zcol])
logging.info('number of test points {}'.format(lats.size))
with instance.subTest(msg="Load geoid file"):
start = time.time()
gh = geoid.GeoidHeight(file_name=geoid_file)
logging.info('time loading geoid file {}'.format(time.time() - start))
recs = 10
# small linear test
with instance.subTest(msg='Small linear interpolation test'):
start = time.time()
zs1 = gh.get(lats[:recs], lons[:recs], cubic=False)
logging.info('linear - time {}, diff {}'.format(time.time() - start, zs1 - zs[:recs]))
# small cubic test
with instance.subTest(msg='Small cubic interpolation test'):
start = time.time()
zs1 = gh.get(lats[:recs], lons[:recs], cubic=True)
logging.info('cubic - time {}, diff {}'.format(time.time() - start, zs1 - zs[:recs]))
# full linear test
with instance.subTest(msg="Full linear interpolation test"):
start = time.time()
zs1 = gh.get(lats, lons, cubic=False)
diff = numpy.abs(zs1 - zs)
max_diff = numpy.max(diff)
mean_diff = numpy.mean(diff)
logging.info('linear - time {}, max diff - {}, mean diff - {}'.format(time.time() - start, max_diff, mean_diff))
instance.assertLessEqual(max_diff, 2, msg="Max difference should be less than 2 meters")
instance.assertLessEqual(mean_diff, 0.5, msg="Mean difference should be (much) less than 0.5 meters.")
# full cubic test
with instance.subTest(msg="Full cubic interpolation test"):
start = time.time()
zs1 = gh.get(lats, lons, cubic=True)
diff = numpy.abs(zs1 - zs)
max_diff = numpy.max(diff)
mean_diff = numpy.mean(diff)
logging.info('cubic - time {}, max diff - {}, mean diff - {}'.format(time.time() - start, max_diff, mean_diff))
instance.assertLessEqual(max_diff, 2, msg="Max difference should be less than 2 meters")
instance.assertLessEqual(mean_diff, 0.5, msg="Mean difference should be (much) less than 0.5 meters.")
class TestGeoidHeight(unittest.TestCase):
@unittest.skipIf(test_file is None or len(geoid_files) == 0, 'No test file or geoid files found')
def test_geoid_height(self):
for fil in geoid_files:
generic_geoid_test(self, test_file, fil)
| 3,694 | 38.308511 | 120 | py |
sarpy | sarpy-master/tests/io/DEM/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/tests/io/DEM/test_geotiff1deg_reader.py | """
These test functions will exercise the GeoTIFF1DegInterpolator class which reads DEM data files and interpolates
data points as needed. Most of these tests use fabricated data, so the GeoTIFF1DegInterpolator class is moderately
well tests without providing external DEM data. However, when available, real DEM data is used for some tests.
The location of the real DEM data files and real Geoid data files are defined by the parameters:
GEOTIFF_ROOT_PATH - A pathlib.Path to the root directory containing DEM data GeoTIFF files
Low resolution GeoTIFF data files can be downloaded from here:
https://download.geoservice.dlr.de/TDM90/
High resolution GeoTIFF data files are typically restricted, but more information can be found here:
https://data.europa.eu/data/datasets/5eecdf4c-de57-4624-99e9-60086b032aea?locale=en
The Geoid model files are available in either ZIP or BZ2 format from here:
https://sourceforge.net/projects/geographiclib/files/geoids-distrib/
If real DEM files and/or real Geoid files are not available then tests that require these files will be skipped.
"""
import json
import logging
import os
import pathlib
import re
import tempfile
import numpy as np
from PIL import Image
import pytest
from sarpy.io.DEM.geotiff1deg import GeoTIFF1DegInterpolator
SRC_FILE_PATH = pathlib.Path(__file__).parent
parent_path = os.environ.get('SARPY_TEST_PATH', None)
if parent_path == 'NONE':
parent_path = None
if parent_path is None:
GEOTIFF_ROOT_PATH = SRC_FILE_PATH / "dem_data"
if parent_path is not None:
parent_path = pathlib.Path(os.path.expanduser(parent_path))
GEOTIFF_ROOT_PATH = pathlib.Path(parent_path, "dem")
# The geoid.json file, used by test_geoid, is reused here to define the location of the geoid files.
geoid_json_path = SRC_FILE_PATH / "geoid.json"
geoid_file_info = json.loads(geoid_json_path.read_text())
geoid_path_type = geoid_file_info['geoid_files'][-1]['path_type']
geoid_path_sufx = geoid_file_info['geoid_files'][-1]['path']
if parent_path is None:
GEOID_FILE_PATH = SRC_FILE_PATH / geoid_path_sufx if geoid_path_type == 'relative' else pathlib.Path(geoid_path_sufx)
elif parent_path:
    GEOID_FILE_PATH = (parent_path / geoid_path_sufx if geoid_path_type == 'relative'
                       else pathlib.Path(geoid_path_sufx))
NUM_LATS_DUMMY = 201
NUM_LONS_DUMMY = 101
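# Dummy DEM heights are a simple linear function of (lat, lon) so that
# interpolated values are predictable and easy to verify in the tests below.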
def lat_lon_to_dummy_height(lat, lon):
return lat + 1000 * lon
def lat_lon_from_filename(filename):
m = re.search('(N|S)([0-8][0-9])(E|W)((0[0-9][0-9])|(1[0-7][0-9])|180)', filename.upper())
if m is None:
raise ValueError(f"Could not find a Lat/Lon substring in filename ({filename}).")
lat_sgn = 1 if m.group(1) == 'N' else -1
lat_abs = int(m.group(2))
lon_sgn = 1 if m.group(3) == 'E' else -1
lon_abs = int(m.group(4))
return lat_sgn * lat_abs, lon_sgn * lon_abs
def infer_filename_format(root_dir_path):
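    """
    Infer a dem_filename_pattern string from the GeoTIFF files found under root_dir_path
    (duplicated from test_geotiff1deg_list.py).
    """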
lat_lon_regex = '(n|s)([0-8][0-9])(e|w)((0[0-9][0-9])|(1[0-7][0-9])|180)'
lat_lon_munge = [{'fmt_str': '{ns}{abslat:02}{ew}{abslon:03}', 'regex': lat_lon_regex},
{'fmt_str': '{NS}{abslat:02}{EW}{abslon:03}', 'regex': lat_lon_regex.upper()}]
filename_formats = []
tiff_filenames = [str(f) for f in root_dir_path.glob("**/*DEM.tif")]
if len(tiff_filenames) == 0:
raise FileNotFoundError(f"Could not find any TIFF files in ({str(root_dir_path)}).")
for tiff_filename in tiff_filenames:
munged_filename = tiff_filename
for munge in lat_lon_munge:
munged_filename = re.sub(munge['regex'], munge['fmt_str'], munged_filename)
if munged_filename == tiff_filename:
raise ValueError(f"Could not find a Lat/Lon substring in filename ({tiff_filename}).")
if munged_filename not in filename_formats:
filename_formats.append(munged_filename)
fmt_ref = list(filename_formats[0])
for tst_str in filename_formats[1:]:
fmt_tst = list(tst_str)
if len(fmt_ref) != len(fmt_tst):
raise ValueError('Format string lengths do not match')
for i, (c0, c1) in enumerate(zip(fmt_ref, fmt_tst)):
if c0 != c1:
fmt_ref[i] = '?'
filename_format = ''.join(fmt_ref)
return filename_format
def dummy_pil_image_open(filename, ref_surface):
"""
This function is intended to be a monkeypatch target for PIL.Image.open(). It returns a PIL.Image
object of dummy DEM values. The tile's SW corner Lat/Lon value is extracted from the filename.
The DEM height values are a linear function of the Lat/Lon value so that interpolated
values of the DEM are predictable and easily compared to the expected result. The filename
is assumed to be in "high_res" format.
"""
lat, lon = lat_lon_from_filename(str(filename))
lat_values = np.linspace(lat+1, lat, NUM_LATS_DUMMY) # Increasing axis-0 index is decreasing latitude
lon_values = np.linspace(lon, lon+1, NUM_LONS_DUMMY) # Increasing axis-1 index is increasing longitude
lon_mat, lat_mat = np.meshgrid(lon_values, lat_values)
heights = lat_lon_to_dummy_height(lat_mat, lon_mat)
im = Image.fromarray(heights.astype(np.float64))
im.tag = {256: (NUM_LONS_DUMMY,), # ImageWidth
257: (NUM_LATS_DUMMY,), # ImageLength
34737: (f"Dummy: {ref_surface}",)} # GeoAsciiParamsTag
return im
@pytest.fixture(scope='module')
def dummy_dem_file_path_high_res():
dataset = 'high_res'
filename_format = ["tdt_{ns}{abslat:02}{ew}{abslon:03}_{ver:2s}", "DEM",
"TDT_{NS}{abslat:02}{EW}{abslon:03}_{ver:2s}_DEM.tif"]
with tempfile.TemporaryDirectory() as temp_dir:
root_path = pathlib.Path(temp_dir) / dataset
dummy_dem_file_path(root_path, filename_format)
yield root_path
@pytest.fixture(scope='module')
def dummy_dem_file_path_low_res():
dataset = 'low_res'
filename_format = ["TDM1_DEM__30_{NS:1s}{abslat:02}{EW:1s}{abslon:03}_V{ver:2s}_C", "DEM",
"TDM1_DEM__30_{NS:1s}{abslat:02}{EW:1s}{abslon:03}_DEM.tif"]
with tempfile.TemporaryDirectory() as temp_dir:
root_path = pathlib.Path(temp_dir) / dataset
dummy_dem_file_path(root_path, filename_format)
yield root_path
def dummy_dem_file_path(root_path, filename_format):
"""
Create a directory of empty files that satisfy the DEM naming convention.
"""
min_lat = -1
max_lat = +2
min_lon = -1
max_lon = +2
ver_choice = ('01', '02')
for lat in range(min_lat, max_lat):
for lon in range(min_lon, max_lon):
anti_lon = (lon + 360) % 360 - 180
# Make empty files that span the prime meridian and the equator,
# then make more empty files that span the anti-meridian and the equator.
for xlon, ver in zip([lon, anti_lon], ver_choice):
pars = {"abslat": int(abs(np.floor(lat))), "abslon": int(abs(np.floor(xlon))),
"ns": 's' if lat < 0 else 'n', "ew": 'w' if xlon < 0 else 'e',
"NS": 'S' if lat < 0 else 'N', "EW": 'W' if xlon < 0 else 'E', "ver": ver}
filename = root_path / os.path.join(*filename_format).format_map(pars)
filename.parent.mkdir(parents=True, exist_ok=True)
filename.touch()
def lat_lon_bounds(root_dir_path):
"""
Determine the min/max lat/lon covered by DEM files in a specified directory.
"""
filenames = list(root_dir_path.glob("**/*DEM.tif"))
lats = np.zeros(len(filenames))
lons = np.zeros(len(filenames))
for i, filename in enumerate(filenames):
lats[i], lons[i] = lat_lon_from_filename(str(filename))
sw_lat = np.min(lats)
ne_lat = np.max(lats) + 1
sw_lon = np.min(lons)
ne_lon = np.max(lons) + 1
return [sw_lat, ne_lat, sw_lon, ne_lon]
def test_setter_getter(dummy_dem_file_path_high_res):
obj = GeoTIFF1DegInterpolator('/dummy', interp_method="smarty")
assert obj.interp_method == "smarty"
obj.interp_method = "dummy"
assert obj.interp_method == "dummy"
@pytest.mark.parametrize("dataset", [
pytest.param("high_res", marks=pytest.mark.skipif(not (GEOTIFF_ROOT_PATH / "high_res").exists(),
reason="GeoTIFF test data does not exist.")),
pytest.param("low_res", marks=pytest.mark.skipif(not (GEOTIFF_ROOT_PATH / "low_res").exists(),
reason="GeoTIFF test data does not exist."))])
def test_get_elevation_native(dataset):
root_dir_path = GEOTIFF_ROOT_PATH / dataset
sw_lat, ne_lat, sw_lon, ne_lon = lat_lon_bounds(root_dir_path)
filename_format = infer_filename_format(root_dir_path)
obj = GeoTIFF1DegInterpolator(filename_format)
num_points = 8
    d_offset = 1 / 18111  # Offset used to avoid exact Lat/Lon samples, which occur every 1/9000 degree
# All test points are outside valid DEM area
lats = np.linspace(sw_lat - 0.9, sw_lat - 0.1, num_points)
lons = np.linspace(sw_lon - 0.9, sw_lon - 0.1, num_points)
hgts = obj.get_elevation_native(lats, lons)
assert np.all(np.equal(hgts, np.zeros(num_points)))
# All test points are inside valid DEM area and are inside a single tile
lats = np.linspace(sw_lat + d_offset, sw_lat + (1 - d_offset), num_points)
lons = np.linspace(sw_lon + d_offset, sw_lon + (1 - d_offset), num_points)
hgts = obj.get_elevation_native(lats, lons)
assert not np.all(np.equal(hgts, np.zeros(num_points)))
# Test all point are inside valid DEM area and span several tiles
lats = np.linspace(sw_lat + d_offset, ne_lat - d_offset, num_points)
lons = np.linspace(sw_lon + d_offset, ne_lon - d_offset, num_points)
hght = obj.get_elevation_native(lats, lons)
assert not np.all(np.equal(hght, np.zeros(num_points)))
# Test scale lat / lon arguments
hght = obj.get_elevation_native(lats[0], lons[0])
assert not np.all(np.equal(hght, np.zeros(1)))
def test_get_elevation_native_dummy(dummy_dem_file_path_high_res, monkeypatch):
monkeypatch.setattr(Image, 'open', lambda filename: dummy_pil_image_open(filename, "EGM2008"))
sw_lat = -1
ne_lat = 2
sw_lon = -1
ne_lon = 2
filename_format = infer_filename_format(dummy_dem_file_path_high_res)
obj = GeoTIFF1DegInterpolator(filename_format)
num_points = 8
d_offset = 1 / 18000
lats = np.linspace(sw_lat + d_offset, ne_lat - d_offset, num_points)
lons = np.linspace(sw_lon + d_offset, ne_lon/2 - d_offset, num_points)
hght = obj.get_elevation_native(lats, lons)
expected_hght = lat_lon_to_dummy_height(lats, lons)
assert np.allclose(hght, expected_hght)
def test_get_min_max_native_dummy(dummy_dem_file_path_high_res, monkeypatch):
monkeypatch.setattr(Image, 'open', lambda filename: dummy_pil_image_open(filename, "EGM2008"))
sw_lat = -1
ne_lat = 2
sw_lon = -1
ne_lon = 2
lat_ss = 1 / (NUM_LATS_DUMMY - 1)
lon_ss = 1 / (NUM_LONS_DUMMY - 1)
def assert_result_good(pars, box):
def lt_or_close(lower, upper):
return lower < upper or np.isclose(lower, upper)
assert pars['box'] == box
assert lt_or_close(box[0], pars['min']['lat']) and lt_or_close(pars['min']['lat'], box[0] + lat_ss)
assert lt_or_close(box[2], pars['min']['lon']) and lt_or_close(pars['min']['lon'], box[2] + lon_ss)
assert (lt_or_close(lat_lon_to_dummy_height(box[0], box[2]), pars['min']['height']) and
lt_or_close(pars['min']['height'], lat_lon_to_dummy_height(box[0] + lat_ss, box[2] + lon_ss)))
assert lt_or_close(box[1] - lat_ss, pars['max']['lat']) and lt_or_close(pars['max']['lat'], box[1])
assert lt_or_close(box[3] - lon_ss, pars['max']['lon']) and lt_or_close(pars['max']['lon'], box[3])
assert (lt_or_close(lat_lon_to_dummy_height(box[1] - lat_ss, box[3] - lon_ss), pars['max']['height']) and
lt_or_close(pars['max']['height'], lat_lon_to_dummy_height(box[1], box[3] + lon_ss)))
filename_format = infer_filename_format(dummy_dem_file_path_high_res)
obj = GeoTIFF1DegInterpolator(filename_format)
# Test bounding box outside the DEM tiles
lat_lon_bounding_box = [sw_lat-10, ne_lat - 10, sw_lon - 10, ne_lon - 10]
result = obj.get_min_max_native(lat_lon_bounding_box)
expected_result = {'box': lat_lon_bounding_box,
'min': {'lat': lat_lon_bounding_box[0], 'lon': lat_lon_bounding_box[2], 'height': 0.0},
'max': {'lat': lat_lon_bounding_box[0], 'lon': lat_lon_bounding_box[2], 'height': 0.0}}
assert result == expected_result
# All test points are inside valid DEM area and are inside a single tile
lat_min = sw_lat + 0.111111
lat_max = sw_lat + 0.811111
lon_min = sw_lon + 0.211111
lon_max = sw_lon + 0.711111
result = obj.get_min_max_native([lat_min, lat_max, lon_min, lon_max])
assert_result_good(result, [lat_min, lat_max, lon_min, lon_max])
# Test all point are inside valid DEM area and span several tiles
lat_min = sw_lat + 0.111
lat_max = sw_lat + 1.811
lon_min = sw_lon + 0.211
lon_max = sw_lon + 1.711
result = obj.get_min_max_native([lat_min, lat_max, lon_min, lon_max])
assert_result_good(result, [lat_min, lat_max, lon_min, lon_max])
# Exercise the bounding_box_cache
result = obj.get_min_max_native([lat_min, lat_max, lon_min, lon_max])
assert_result_good(result, [lat_min, lat_max, lon_min, lon_max])
@pytest.mark.parametrize("ref_surface", [
pytest.param("EGM2008", marks=pytest.mark.skipif(not GEOID_FILE_PATH.exists(), reason="Geoid data does not exist")),
pytest.param("WGS84", marks=pytest.mark.skipif(not GEOID_FILE_PATH.exists(), reason="Geoid data does not exist"))])
def test_get_elevation_hae_geoid(ref_surface, dummy_dem_file_path_high_res, monkeypatch):
if ref_surface == "EGM2008":
monkeypatch.setattr(Image, 'open', lambda filename: dummy_pil_image_open(filename, "EGM2008"))
else:
monkeypatch.setattr(Image, 'open', lambda filename: dummy_pil_image_open(filename, "WGS84"))
sw_lat = -1
ne_lat = 2
sw_lon = -1
ne_lon = 2
filename_format = infer_filename_format(dummy_dem_file_path_high_res)
obj = GeoTIFF1DegInterpolator(filename_format,
ref_surface=ref_surface,
geoid_path=str(GEOID_FILE_PATH.parent.parent))
num_points = 8
d_offset = 1 / 18000
lats = np.linspace(sw_lat + d_offset, ne_lat - d_offset, num_points)
lons = np.linspace(sw_lon + d_offset, ne_lon/2 - d_offset, num_points)
hght_wgs84 = obj.get_elevation_hae(lats, lons)
hght_geoid = obj.get_elevation_geoid(lats, lons)
assert np.all(np.abs(hght_wgs84 - hght_geoid) > 0)
min_hae = obj.get_min_hae([lats[0], lats[-1], lons[0], lons[-1]])
max_hae = obj.get_max_hae([lats[0], lats[-1], lons[0], lons[-1]])
min_geoid = obj.get_min_geoid([lats[0], lats[-1], lons[0], lons[-1]])
max_geoid = obj.get_max_geoid([lats[0], lats[-1], lons[0], lons[-1]])
    assert np.all(np.abs(min_hae - min_geoid) > 0)
    assert np.all(np.abs(max_hae - max_geoid) > 0)
# Test that the default value of ref_surface is "EGM2008"
obj2 = GeoTIFF1DegInterpolator(filename_format, geoid_path=str(GEOID_FILE_PATH))
hght_geoid2 = obj2.get_elevation_geoid(lats, lons)
    if ref_surface == "EGM2008":
        assert np.all(hght_geoid2 == hght_geoid)
    else:
        assert not np.all(hght_geoid2 == hght_geoid)
def test_exceptions(dummy_dem_file_path_high_res, monkeypatch, caplog):
filename_format = infer_filename_format(dummy_dem_file_path_high_res)
obj = GeoTIFF1DegInterpolator(filename_format)
with pytest.raises(ValueError, match="^The lat and lon arrays are not the same shape\\."):
obj.get_elevation_native([1, 2, 3], [1, 2])
monkeypatch.setattr(Image, 'open', lambda filename: dummy_pil_image_open(filename, "WGS84"))
obj = GeoTIFF1DegInterpolator(filename_format, ref_surface="WGS84")
with pytest.raises(ValueError,
match="^The geoid_dir parameter was not defined so geoid calculations are disabled\\."):
obj.get_elevation_geoid(1, 1)
caplog.clear()
caplog.set_level(logging.WARNING)
obj = GeoTIFF1DegInterpolator(filename_format, ref_surface="EGM2008")
obj.get_elevation_geoid(1, 1)
assert caplog.text.startswith("WARNING sarpy.io.DEM.geotiff1deg:geotiff1deg.py")
assert "The GeoAsciiParamsTag tag implies that the reference surface is WGS84" in caplog.text
monkeypatch.setattr(Image, 'open', lambda filename: dummy_pil_image_open(filename, "EGM2008"))
obj = GeoTIFF1DegInterpolator(filename_format)
with pytest.raises(ValueError,
match="^The geoid_dir parameter was not defined so geoid calculations are disabled\\."):
obj.get_elevation_hae(1, 1)
caplog.clear()
caplog.set_level(logging.WARNING)
obj = GeoTIFF1DegInterpolator(filename_format, ref_surface="WGS84")
obj.get_elevation_native(1, 1)
assert caplog.text.startswith("WARNING sarpy.io.DEM.geotiff1deg:geotiff1deg.py")
assert "The GeoAsciiParamsTag tag implies that the reference surface is EGM2008" in caplog.text
monkeypatch.setattr(Image, 'open', lambda filename: dummy_pil_image_open(filename, "Unknown"))
obj = GeoTIFF1DegInterpolator(filename_format, ref_surface="Unknown")
with pytest.raises(ValueError, match="^The reference surface is UNKNOWN, which is not supported"):
obj.get_elevation_geoid(1, 1)
with pytest.raises(ValueError, match="^The reference surface is UNKNOWN, which is not supported"):
obj.get_elevation_hae(1, 1)
| 17,833 | 43.253102 | 121 | py |
sarpy | sarpy-master/tests/consistency/test_consistency.py | #
# Copyright 2020-2021 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import itertools
import pytest
import sarpy.consistency.consistency as con
class DummyConsistency(con.ConsistencyChecker):
"""A ConsistencyChecker used for unit testing and code coverage"""
def __init__(self):
super(DummyConsistency, self).__init__()
def check_need_pass(self):
with self.need('need pass'):
assert True
def check_need_fail(self):
with self.need('need fail'):
assert False
def check_need_both(self):
with self.need('need pass'):
assert True
with self.need('need fail'):
assert False
def check_need_fail_nodetails(self):
with self.need():
assert False
def check_pre_need_pass(self):
with self.precondition():
assert True
with self.need('need pass'):
assert True
def check_nopre_need_pass(self):
with self.precondition():
assert False
with self.need('need pass'):
assert True
def check_want_pass(self):
with self.want('want pass'):
assert True
def check_want_fail(self):
with self.want('want fail'):
assert False
def check_pre_want_pass(self):
with self.precondition():
assert True
with self.want('want pass'):
assert True
def check_nopre_want_pass(self):
with self.precondition():
assert False
with self.want('want pass'):
assert True
def check_exception(self):
raise ValueError
@pytest.fixture
def dummycon():
"""Fixture which initializes a DummyConsistency object
Yields
------
DummyConsistency object
"""
import ast
import os
import _pytest.assertion.rewrite
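    # Re-compile this module's source with pytest's assertion rewriting applied,
    # presumably so the bare asserts inside the need/want contexts yield detailed
    # failure information when DummyConsistency.check() executes them.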
base, _ = os.path.splitext(__file__) # python2 can return the '*.pyc' file
with open(base + '.py', 'r') as fd:
source = fd.read()
tree = ast.parse(source)
try:
_pytest.assertion.rewrite.rewrite_asserts(tree)
    except TypeError:  # newer pytest versions of rewrite_asserts require the source argument
_pytest.assertion.rewrite.rewrite_asserts(tree, source)
co = compile(tree, __file__, 'exec', dont_inherit=True)
ns = {}
exec(co, ns)
cover_con = ns['DummyConsistency']()
yield cover_con
def test_all(dummycon, capsys):
dummycon.check()
assert len(dummycon.all()) == 11
assert len(dummycon.failures()) == 5
num_checks_by_part = [len(x) for x in (dummycon.passes(), dummycon.skips(), dummycon.failures())]
assert all(x > 0 for x in num_checks_by_part)
assert sum(num_checks_by_part) == len(dummycon.all())
failures = dummycon.failures()
details = itertools.chain.from_iterable([value['details'] for value in failures.values()])
passed = [item for item in details if item['passed']]
assert passed
failures = dummycon.failures(omit_passed_sub=True)
details = itertools.chain.from_iterable([value['details'] for value in failures.values()])
passed = [item for item in details if item['passed']]
assert not passed
dummycon.print_result()
captured = capsys.readouterr()
assert '\x1b' in captured.out
dummycon.print_result(color=False)
captured2 = capsys.readouterr()
assert '\x1b' not in captured2.out
dummycon.print_result(include_passed_checks=True, skip_detail=True, fail_detail=True, pass_detail=True)
captured3 = capsys.readouterr()
assert 'Skip' in captured3.out
assert 'check_nopre_want_pass' in captured3.out
assert 'check_want_pass' in captured3.out
def test_one(dummycon):
dummycon.check('check_need_pass')
assert not dummycon.failures()
def test_multiple(dummycon):
dummycon.check(['check_need_pass', 'check_need_fail'])
assert set(dummycon.failures()) == {'check_need_fail'}
assert set(dummycon.all()).difference(dummycon.failures()) == {'check_need_pass'}
def test_check_with_ignore_pattern(dummycon):
# all checks must start with check_
dummycon.check(ignore_patterns=['check_'])
assert set(dummycon.all()) == set()
@pytest.mark.parametrize('should_ignore', [True, False])
def test_check_with_ignore_specific(dummycon, should_ignore):
test_name = 'check_exception'
ignore_patterns = [test_name] if should_ignore else []
dummycon.check(ignore_patterns=ignore_patterns)
was_tested = (test_name in dummycon.all())
assert was_tested != should_ignore
def test_invalid(dummycon):
with pytest.raises(ValueError):
dummycon.check('this_does_not_exist')
def test_approx():
apx = con.Approx(10.0, atol=.1, rtol=0)
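    # Approx(10.0, atol=0.1) treats any value within 10.0 ± 0.1 as equal,
    # so the ordering comparisons below are tolerant as well.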
assert apx == 10.0
assert apx == 10.01
assert not apx != 10.01
assert apx > 10.01
assert apx >= 10.01
assert apx >= 0
assert not apx <= 0
assert apx < 10.01
assert apx <= 10.01
assert repr(apx) == "10.0 ± 0.1"
| 4,970 | 27.405714 | 107 | py |
sarpy | sarpy-master/tests/consistency/test_cphd_consistency.py | #
# Copyright 2020-2021 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import copy
import importlib.util
import os
import re
import shutil
import tempfile
import xml.etree.ElementTree as ET
from lxml import etree
import numpy as np
import pytest
from sarpy.consistency.cphd_consistency import main, CphdConsistency, \
get_by_id, read_header, strip_namespace
import sarpy.io.phase_history.cphd1_elements.Dwell as sarpy_dwell
TEST_FILE_NAMES = {
'simple': 'spotlight_example.cphd',
'bistatic': 'bistatic.cphd',
}
TEST_FILE_PATHS = {}
TEST_FILE_ROOT = os.environ.get('SARPY_TEST_PATH', None)
if TEST_FILE_ROOT is not None:
for name_key, path_value in TEST_FILE_NAMES.items():
the_file = os.path.join(TEST_FILE_ROOT, 'cphd', path_value)
if os.path.isfile(the_file):
TEST_FILE_PATHS[name_key] = the_file
HAVE_NETWORKX = importlib.util.find_spec('networkx') is not None
HAVE_SHAPELY = importlib.util.find_spec('shapely') is not None
@pytest.fixture(scope='module')
def good_cphd():
file_path = TEST_FILE_PATHS.get('simple', None)
if file_path is None:
pytest.skip('simple cphd test file not found')
else:
return file_path
@pytest.fixture(scope='module')
def bistatic_cphd():
file_path = TEST_FILE_PATHS.get('bistatic', None)
if file_path is None:
pytest.skip('bistatic cphd test file not found')
else:
return file_path
def make_elem(tag, text=None, children=None, namespace=None, attributes=None, **attrib):
"""
Creates described element.
Creates the Element with tag name, text, and attributes given. Attributes
can be specified as either a dictionary or keyword arguments.
Parameters
----------
tag : str
A string that will become the tag name.
text : None|str|float|int
A string that will become the text in the element. (Default: ``None``)
    children : None|list of lxml.etree.ElementTree.Element
The children elements. (Default: ``None``)
namespace : str
The string containing the namespace. (Default: ``None``)
attributes : dict
A dictionary mapping attribute names to values. (Default: ``None``)
**attrib
Keyword arguments that map to attributes. (Default: ``None``)
Returns
-------
lxml.etree.ElementTree.Element
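
    Examples
    --------
    A minimal sketch (hypothetical tag name and namespace):
    >>> elem = make_elem('NumVectors', 128, namespace='urn:example')
    >>> etree.tostring(elem)
    b'<NumVectors xmlns="urn:example">128</NumVectors>'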
"""
if attributes is None:
attributes = {}
if text is not None:
if isinstance(text, bool):
text = str(text).lower()
if not isinstance(text, str):
text = repr(text)
attrib = copy.copy(attrib)
attrib.update(attributes)
attrib = {key: str(value) for key, value in attrib.items()}
if namespace is not None:
tag = '{{{namespace}}}{tag}'.format(namespace=namespace, tag=tag)
retval = etree.Element(tag, attrib)
if text is not None:
retval.text = str(text)
if children is not None:
retval.extend([child for child in children if child is not None])
return retval
@pytest.fixture
def tmpdir():
dirname = tempfile.mkdtemp()
yield dirname
shutil.rmtree(dirname)
def _read_xml_str(cphd_path):
with open(cphd_path, 'rb') as fid:
header = read_header(fid)
fid.seek(header['XML_BLOCK_BYTE_OFFSET'], 0)
xml_block_size = header['XML_BLOCK_SIZE']
return fid.read(xml_block_size).decode()
@pytest.fixture(scope='module')
def good_xml_str(good_cphd):
return _read_xml_str(good_cphd)
@pytest.fixture
def good_xml(good_xml_str):
good_xml_root = etree.fromstring(good_xml_str)
good_xml_root_no_ns = strip_namespace(etree.fromstring(good_xml_str))
yield {'with_ns': good_xml_root, 'without_ns': good_xml_root_no_ns,
'nsmap': {'ns': re.match(r'\{(.*)\}', good_xml_root.tag).group(1)}}
@pytest.fixture
def good_header(good_cphd):
with open(good_cphd, 'rb') as fid:
return read_header(fid)
def remove_nodes(*nodes):
for node in nodes:
node.getparent().remove(node)
def copy_xml(elem):
return etree.fromstring(etree.tostring(elem))
def test_from_file_cphd(good_cphd):
cphdcon = CphdConsistency.from_file(str(good_cphd), check_signal_data=True)
assert isinstance(cphdcon, CphdConsistency)
cphdcon.check()
assert len(cphdcon.failures()) == 0
def test_from_file_xml(good_xml_str, tmpdir):
xml_file = os.path.join(tmpdir, 'cphd.xml')
with open(xml_file, 'w') as fid:
fid.write(good_xml_str)
cphdcon = CphdConsistency.from_file(str(xml_file), check_signal_data=False)
assert isinstance(cphdcon, CphdConsistency)
cphdcon.check()
assert len(cphdcon.failures()) == 0
def test_main(good_cphd, good_xml_str, tmpdir):
assert not main([str(good_cphd), '--signal-data'])
assert not main([str(good_cphd)])
xml_file = os.path.join(tmpdir, 'cphd.xml')
with open(xml_file, 'w') as fid:
fid.write(good_xml_str)
assert not main([str(xml_file), '-v'])
def test_main_with_ignore(good_xml, tmpdir):
good_xml['with_ns'].find('./ns:Global/ns:SGN', namespaces=good_xml['nsmap']).text += '1'
slightly_bad_xml = os.path.join(tmpdir, 'slightly_bad.xml')
etree.ElementTree(good_xml['with_ns']).write(str(slightly_bad_xml))
assert main([slightly_bad_xml])
assert not main([slightly_bad_xml, '--ignore', 'check_against_schema'])
def test_main_schema_args(good_cphd):
good_schema = CphdConsistency.from_file(good_cphd).schema
assert main([str(good_cphd), '--schema', str(good_cphd)]) # fails with bogus schema
assert not main([str(good_cphd), '--schema', good_schema]) # pass with actual schema
assert not main([str(good_cphd), '--schema', str(good_cphd), '--noschema']) # skips schema
@pytest.mark.parametrize('cphd_file', TEST_FILE_PATHS.values())
def test_main_each_file(cphd_file):
assert not main([cphd_file])
def test_check_file_type_header(good_cphd, tmpdir):
bad_cphd = os.path.join(tmpdir, 'bad.cphd')
with open(good_cphd, 'rb') as orig_file, open(bad_cphd, 'wb') as out_file:
orig_header = orig_file.readline()
orig_version_length = len(orig_header) - len('CPHD/') - 1
assert orig_version_length > 3
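        # Overwrite the tail of the version string with junk, keeping the header length unchanged.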
out_file.write(f"CPHD/1.0{'Q' * (orig_version_length - 3)}\n".encode())
shutil.copyfileobj(orig_file, out_file)
cphd_con = CphdConsistency.from_file(bad_cphd)
cphd_con.check('check_file_type_header')
assert cphd_con.failures()
def test_schema_available(good_xml_str):
xml_str_with_unknown_ns = re.sub(r'<CPHD xmlns="[^"]+">', '<CPHD xmlns="bad_ns">', good_xml_str)
root_elem = etree.fromstring(xml_str_with_unknown_ns)
cphd_con = CphdConsistency(root_elem, pvps={}, header=None, filename=None)
cphd_con.check('check_against_schema')
assert cphd_con.failures()
def test_xml_schema_error(good_xml):
bad_xml = copy_xml(good_xml['with_ns'])
remove_nodes(*bad_xml.xpath('./ns:Global/ns:DomainType', namespaces=good_xml['nsmap']))
cphd_con = CphdConsistency(
bad_xml, pvps={}, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_against_schema')
assert len(cphd_con.failures()) > 0
def test_check_unconnected_ids_severed_node(good_xml, good_header):
bad_xml = copy_xml(good_xml['without_ns'])
bad_xml.find('./Dwell/CODTime/Identifier').text += '-make-bad'
cphd_con = CphdConsistency(
bad_xml, pvps={}, header=good_header, filename=None, check_signal_data=False)
cphd_con.check('check_unconnected_ids')
assert (len(cphd_con.failures()) > 0) == HAVE_NETWORKX
def test_check_unconnected_ids_extra_node(good_xml, good_header):
bad_xml = copy_xml(good_xml['without_ns'])
first_acf = bad_xml.find('./Antenna/AntCoordFrame')
extra_acf = copy.deepcopy(first_acf)
extra_acf.find('./Identifier').text += '_superfluous'
first_acf.getparent().append(extra_acf)
cphd_con = CphdConsistency(
bad_xml, pvps={}, header=good_header, filename=None, check_signal_data=False)
cphd_con.check('check_unconnected_ids')
assert (len(cphd_con.failures()) > 0) == HAVE_NETWORKX
def test_check_classification_and_release_info_error(good_xml, good_header):
bad_xml = copy_xml(good_xml['without_ns'])
bad_xml.find('./CollectionID/ReleaseInfo').text += '-make-bad'
cphd_con = CphdConsistency(
bad_xml, pvps={}, header=good_header, filename=None, check_signal_data=False)
cphd_con.check('check_classification_and_release_info')
assert len(cphd_con.failures()) > 0
def test_error_in_check(good_xml):
bad_xml = copy_xml(good_xml['with_ns'])
remove_nodes(*bad_xml.xpath('./ns:Channel/ns:Parameters/ns:DwellTimes/ns:CODId', namespaces=good_xml['nsmap']))
cphd_con = CphdConsistency(
bad_xml, pvps={}, header=None, filename=None, check_signal_data=False)
tocheck = []
for chan_id in bad_xml.findall('./ns:Data/ns:Channel/ns:Identifier', namespaces=good_xml['nsmap']):
tocheck.append('check_channel_dwell_exist_{}'.format(chan_id.text))
cphd_con.check(tocheck)
assert len(cphd_con.failures()) > 0
def test_polygon_size_error(good_xml):
bad_xml = copy_xml(good_xml['with_ns'])
ia_polygon_node = bad_xml.find('./ns:SceneCoordinates/ns:ImageArea/ns:Polygon', namespaces=good_xml['nsmap'])
ia_polygon_node.attrib['size'] = "12345678890"
cphd_con = CphdConsistency(
bad_xml, pvps={}, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_global_imagearea_polygon')
assert (len(cphd_con.failures()) > 0) == HAVE_SHAPELY
def test_polygon_winding_error(good_xml):
bad_xml = copy_xml(good_xml['with_ns'])
ia_polygon_node = bad_xml.find('./ns:SceneCoordinates/ns:ImageArea/ns:Polygon', namespaces=good_xml['nsmap'])
size = int(ia_polygon_node.attrib['size'])
# Reverse the order of the vertices
for vertex in ia_polygon_node:
vertex.attrib['index'] = str(size - int(vertex.attrib['index']) + 1)
cphd_con = CphdConsistency(
bad_xml, pvps={}, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_global_imagearea_polygon')
assert (len(cphd_con.failures()) > 0) == HAVE_SHAPELY
@pytest.fixture
def xml_with_signal_normal(good_xml):
root = copy_xml(good_xml['with_ns'])
pvps = {}
for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
pvps[chan_id] = np.ones(num_vect, dtype=[('SIGNAL', 'i8')])
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
namespaces=good_xml['nsmap'])[0]
chan_param_node.append(make_elem('SignalNormal', 'true', namespace=good_xml['nsmap']['ns']))
return pvps, root, good_xml['nsmap']
def test_signalnormal(xml_with_signal_normal):
pvps, root, nsmap = xml_with_signal_normal
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_signalnormal_{}'.format(key) for key in pvps.keys()]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
def test_signalnormal_bad_pvp(xml_with_signal_normal):
pvps, root, nsmap = xml_with_signal_normal
for idx, pvp in enumerate(pvps.values()):
pvp['SIGNAL'][idx] = 0
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_signalnormal_{}'.format(key) for key in pvps.keys()]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == len(pvps)
for norm_node in root.findall('./ns:Channel/ns:Parameters/ns:SignalNormal', namespaces=nsmap):
norm_node.text = 'false'
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
no_sig_pvp = {name: np.zeros(pvp.shape, dtype=[('notsignal', 'i8')]) for name, pvp in pvps.items()}
cphd_con = CphdConsistency(
root, pvps=no_sig_pvp, header=None, filename=None, check_signal_data=False)
cphd_con.check(tocheck)
assert len(cphd_con.failures()) > 0
@pytest.fixture
def xml_without_fxfixed(good_xml):
root = copy_xml(good_xml['with_ns'])
pvps = {}
for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
pvps[chan_id] = np.zeros(num_vect, dtype=[('FX1', 'f8'), ('FX2', 'f8')])
pvps[chan_id]['FX1'] = np.linspace(1.0, 1.1, num_vect)
pvps[chan_id]['FX2'] = np.linspace(2.0, 2.2, num_vect)
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
namespaces=good_xml['nsmap'])[0]
chan_param_node.find('./ns:FXFixed', namespaces=good_xml['nsmap']).text = 'false'
root.find('./ns:Channel/ns:FXFixedCPHD', namespaces=good_xml['nsmap']).text = 'false'
return pvps, root, good_xml['nsmap']
def test_fxfixed(xml_without_fxfixed):
pvps, root, nsmap = xml_without_fxfixed
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_fxfixed_{}'.format(key) for key in pvps.keys()]
tocheck.append('check_file_fxfixed')
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
@pytest.fixture
def xml_without_toafixed(good_xml):
root = copy_xml(good_xml['with_ns'])
pvps = {}
for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
pvps[chan_id] = np.zeros(num_vect, dtype=[('TOA1', 'f8'), ('TOA2', 'f8')])
pvps[chan_id]['TOA1'] = np.linspace(1.0, 1.1, num_vect)
pvps[chan_id]['TOA2'] = np.linspace(2.0, 2.2, num_vect)
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
namespaces=good_xml['nsmap'])[0]
chan_param_node.find('./ns:TOAFixed', namespaces=good_xml['nsmap']).text = 'false'
root.find('./ns:Channel/ns:TOAFixedCPHD', namespaces=good_xml['nsmap']).text = 'false'
return pvps, root, good_xml['nsmap']
def test_channel_toafixed(xml_without_toafixed):
pvps, root, nsmap = xml_without_toafixed
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_toafixed_{}'.format(key) for key in pvps.keys()]
tocheck.append('check_file_toafixed')
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
@pytest.fixture
def xml_without_srpfixed(good_xml):
root = copy_xml(good_xml['with_ns'])
pvps = {}
for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
pvps[chan_id] = np.zeros(num_vect, dtype=[('SRPPos', 'f8', 3)])
pvps[chan_id]['SRPPos'][:, 0] = np.linspace(1.0, 10, num_vect)
pvps[chan_id]['SRPPos'][:, 1] = np.linspace(2.0, 20, num_vect)
pvps[chan_id]['SRPPos'][:, 2] = np.linspace(3.0, 30, num_vect)
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
namespaces=good_xml['nsmap'])[0]
chan_param_node.find('./ns:SRPFixed', namespaces=good_xml['nsmap']).text = 'false'
root.find('./ns:Channel/ns:SRPFixedCPHD', namespaces=good_xml['nsmap']).text = 'false'
return pvps, root, good_xml['nsmap']
def test_channel_srpfixed(xml_without_srpfixed):
pvps, root, nsmap = xml_without_srpfixed
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_srpfixed_{}'.format(key) for key in pvps.keys()]
tocheck.append('check_file_srpfixed')
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
@pytest.fixture
def xml_with_txrcv(good_xml):
root = copy_xml(good_xml['with_ns'])
root.append(make_elem('TxRcv', namespace=good_xml['nsmap']['ns'], children=[
make_elem('NumTxWFs', 2, namespace=good_xml['nsmap']['ns']),
make_elem('TxWFParameters', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Identifier', 'wf_unit_test_1', namespace=good_xml['nsmap']['ns']),
]),
make_elem('TxWFParameters', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Identifier', 'wf_unit_test_2', namespace=good_xml['nsmap']['ns']),
]),
make_elem('NumRcvs', 2, namespace=good_xml['nsmap']['ns']),
make_elem('RcvParameters', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Identifier', 'rcv_unit_test_1', namespace=good_xml['nsmap']['ns']),
]),
make_elem('RcvParameters', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Identifier', 'rcv_unit_test_2', namespace=good_xml['nsmap']['ns']),
])
]))
chan_param_node = root.xpath('./ns:Channel/ns:Parameters',
namespaces=good_xml['nsmap'])[0]
chan_param_node.append(make_elem('TxRcv', namespace=good_xml['nsmap']['ns'], children=[
make_elem('TxWFId', 'wf_unit_test_1', namespace=good_xml['nsmap']['ns']),
make_elem('TxWFId', 'wf_unit_test_2', namespace=good_xml['nsmap']['ns']),
make_elem('RcvId', 'rcv_unit_test_1', namespace=good_xml['nsmap']['ns']),
make_elem('RcvId', 'rcv_unit_test_2', namespace=good_xml['nsmap']['ns']),
]))
chan_ids = [chan_param_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])]
return chan_ids, root, good_xml['nsmap']
def test_txrcv(xml_with_txrcv):
chan_ids, root, nsmap = xml_with_txrcv
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_txrcv_exist_{}'.format(key) for key in chan_ids]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
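# Minimal lxml sketch of the cross-reference the TxRcv checks depend on:
# every Channel/Parameters/TxRcv/TxWFId must resolve to a
# TxRcv/TxWFParameters/Identifier (and RcvId to RcvParameters likewise).
# The path strings assume the same 'ns' prefix mapping the fixtures use;
# this hypothetical helper is not the checker's actual code path.
def _sketch_txwf_ids_resolve(root, nsmap):
    defined = {node.text for node in root.findall(
        './ns:TxRcv/ns:TxWFParameters/ns:Identifier', namespaces=nsmap)}
    used = {node.text for node in root.findall(
        './ns:Channel/ns:Parameters/ns:TxRcv/ns:TxWFId', namespaces=nsmap)}
    return used <= defined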
def test_txrcv_bad_txwfid(xml_with_txrcv):
chan_ids, root, nsmap = xml_with_txrcv
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_ids[0]),
namespaces=nsmap)[0]
chan_param_node.xpath('./ns:TxRcv/ns:TxWFId', namespaces=nsmap)[-1].text = 'missing'
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_txrcv_exist_{}'.format(key) for key in chan_ids]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) > 0
def test_antenna_bad_acf_count(good_xml):
root = copy_xml(good_xml['with_ns'])
antenna_node = root.find('./ns:Antenna', namespaces=good_xml['nsmap'])
antenna_node.xpath('./ns:NumACFs', namespaces=good_xml['nsmap'])[-1].text += '2'
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_antenna')
assert len(cphd_con.failures()) > 0
def test_antenna_bad_apc_count(good_xml):
root = copy_xml(good_xml['with_ns'])
antenna_node = root.find('./ns:Antenna', namespaces=good_xml['nsmap'])
antenna_node.xpath('./ns:NumAPCs', namespaces=good_xml['nsmap'])[-1].text += '2'
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_antenna')
assert len(cphd_con.failures()) > 0
def test_antenna_bad_antpats_count(good_xml):
root = copy_xml(good_xml['with_ns'])
antenna_node = root.find('./ns:Antenna', namespaces=good_xml['nsmap'])
antenna_node.xpath('./ns:NumAntPats', namespaces=good_xml['nsmap'])[-1].text += '2'
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_antenna')
assert len(cphd_con.failures()) > 0
def test_antenna_non_matching_acfids(good_xml):
root = copy_xml(good_xml['with_ns'])
antenna_node = root.find('./ns:Antenna', namespaces=good_xml['nsmap'])
antenna_node.xpath('./ns:AntPhaseCenter/ns:ACFId', namespaces=good_xml['nsmap'])[-1].text += '_wrong'
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_antenna')
assert len(cphd_con.failures())
def test_txrcv_bad_rcvid(xml_with_txrcv):
chan_ids, root, nsmap = xml_with_txrcv
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_ids[0]),
namespaces=nsmap)[0]
chan_param_node.xpath('./ns:TxRcv/ns:RcvId', namespaces=nsmap)[-1].text = 'missing'
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_txrcv_exist_{}'.format(key) for key in chan_ids]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) > 0
def test_txrcv_missing_channel_node(xml_with_txrcv):
chan_ids, root, nsmap = xml_with_txrcv
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_ids[0]),
namespaces=nsmap)[0]
remove_nodes(*chan_param_node.findall('./ns:TxRcv', nsmap))
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_txrcv_ids_in_channel')
assert len(cphd_con.failures()) > 0
@pytest.fixture
def xml_with_fxbwnoise(good_xml):
root = copy_xml(good_xml['with_ns'])
pvps = {}
for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
pvps[chan_id] = np.zeros(num_vect, dtype=[('FXN1', 'f8'), ('FXN2', 'f8')])
pvps[chan_id]['FXN1'] = np.linspace(1, 2, num_vect)
pvps[chan_id]['FXN2'] = pvps[chan_id]['FXN1'] * 1.1
pvps[chan_id]['FXN1'][10] = np.nan
pvps[chan_id]['FXN2'][10] = np.nan
chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
namespaces=good_xml['nsmap'])[0]
chan_param_node.append(make_elem('FxBWNoise', 1.2, namespace=good_xml['nsmap']['ns']))
return pvps, root, good_xml['nsmap']
def test_fxbwnoise(xml_with_fxbwnoise):
pvps, root, nsmap = xml_with_fxbwnoise
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_fxbwnoise_{}'.format(key) for key in pvps.keys()]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
def test_fxbwnoise_bad_domain(xml_with_fxbwnoise):
pvps, root, nsmap = xml_with_fxbwnoise
root.find('./ns:Global/ns:DomainType', namespaces=nsmap).text = 'TOA'
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_fxbwnoise_{}'.format(key) for key in pvps.keys()]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) > 0
def test_fxbwnoise_bad_value(xml_with_fxbwnoise):
pvps, root, nsmap = xml_with_fxbwnoise
chan_id = list(pvps.keys())[-1]
pvps[chan_id]['FXN1'][0] = 0.5
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
tocheck = ['check_channel_fxbwnoise_{}'.format(key) for key in pvps.keys()]
cphd_con.check(tocheck)
assert len(cphd_con.failures()) > 0
def test_geoinfo_polygons(good_xml):
root = copy_xml(good_xml['with_ns'])
root.append(make_elem('GeoInfo', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Polygon', size='3', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Lat', 0.0, namespace=good_xml['nsmap']['ns']),
make_elem('Lon', 0.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Lat', 1.0, namespace=good_xml['nsmap']['ns']),
make_elem('Lon', 0.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Lat', 1.0, namespace=good_xml['nsmap']['ns']),
make_elem('Lon', 1.0, namespace=good_xml['nsmap']['ns']),
]),
])
]))
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_geoinfo_polygons')
assert len(cphd_con.failures()) == 0
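# The "bad order" variant below reverses the vertex winding.  A hedged sketch
# of the underlying orientation test is the shoelace signed area: its sign
# flips when the winding flips.  Which sign the standard requires depends on
# the axis convention, so only the sign computation is sketched here; the
# real check is delegated to shapely when it is installed (HAVE_SHAPELY).
def _sketch_signed_area(vertices):
    area = 0.0
    for (x1, y1), (x2, y2) in zip(vertices, vertices[1:] + vertices[:1]):
        area += x1 * y2 - x2 * y1
    return 0.5 * area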
def test_geoinfo_polygons_bad_order(good_xml):
root = copy_xml(good_xml['with_ns'])
root.append(make_elem('GeoInfo', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Polygon', size='3', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Lat', 0.0, namespace=good_xml['nsmap']['ns']),
make_elem('Lon', 0.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Lat', 0.0, namespace=good_xml['nsmap']['ns']),
make_elem('Lon', 1.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Lat', 1.0, namespace=good_xml['nsmap']['ns']),
make_elem('Lon', 1.0, namespace=good_xml['nsmap']['ns']),
]),
])
]))
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_geoinfo_polygons')
assert (len(cphd_con.failures()) > 0) == HAVE_SHAPELY
@pytest.fixture
def xml_with_channel_imagearea(good_xml):
root = copy_xml(good_xml['with_ns'])
for chan_param_node in root.xpath('./ns:Channel/ns:Parameters', namespaces=good_xml['nsmap']):
chan_param_node.append(make_elem('ImageArea', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X1Y1', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', -50, namespace=good_xml['nsmap']['ns']),
make_elem('Y', -50, namespace=good_xml['nsmap']['ns']),
]),
make_elem('X2Y2', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 50, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 50, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Polygon', size='4', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', -50.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 50.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 50.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='4', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', -50.0, namespace=good_xml['nsmap']['ns']),
]),
])
]))
return root, good_xml['nsmap']
def test_channel_image_area(xml_with_channel_imagearea):
root, nsmap = xml_with_channel_imagearea
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
tocheck = []
for chan_id in root.findall('./ns:Data/ns:Channel/ns:Identifier', namespaces=nsmap):
tocheck.append('check_channel_imagearea_x1y1_{}'.format(chan_id.text))
tocheck.append('check_channel_imagearea_polygon_{}'.format(chan_id.text))
cphd_con.check(tocheck)
assert len(cphd_con.failures()) == 0
def test_check_imagearea_x1y1_x2y2(good_cphd):
cphd_con = CphdConsistency.from_file(str(good_cphd))
scene_imagearea = cphd_con.xml.find('./SceneCoordinates/ImageArea')
scene_imagearea.find('./X1Y1/X').text = scene_imagearea.findtext('./X2Y2/X')
cphd_con.check('check_imagearea_x1y1_x2y2')
assert cphd_con.failures()
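# Hedged sketch of the corner rule this test (and the channel variant below)
# violates: X1Y1 is required to be the strictly smaller corner, so
# X1 < X2 and Y1 < Y2; copying X2 into X1 above must therefore fail.
def _sketch_corners_valid(x1y1, x2y2):
    return x1y1[0] < x2y2[0] and x1y1[1] < x2y2[1]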
def test_check_channel_imagearea_x1y1(xml_with_channel_imagearea):
root, nsmap = xml_with_channel_imagearea
channel_imagearea = root.find('./{*}Channel/{*}Parameters/{*}ImageArea')
channel_imagearea.find('./{*}X1Y1/{*}Y').text = channel_imagearea.findtext('./{*}X2Y2/{*}Y')
cphd_con = CphdConsistency(root, pvps=None, header=None, filename=None)
cphd_con.check('check_channel_imagearea_x1y1', allow_prefix=True)
assert cphd_con.failures()
@pytest.fixture
def xml_with_extendedarea(good_xml):
root = copy_xml(good_xml['with_ns'])
scene = root.find('./ns:SceneCoordinates', namespaces=good_xml['nsmap'])
scene.append(make_elem('ExtendedArea', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X1Y1', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', -1000, namespace=good_xml['nsmap']['ns']),
make_elem('Y', -1000, namespace=good_xml['nsmap']['ns']),
]),
make_elem('X2Y2', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 1000, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 1000, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Polygon', size='4', namespace=good_xml['nsmap']['ns'], children=[
make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', -1000.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 1000.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 1000.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
]),
make_elem('Vertex', index='4', namespace=good_xml['nsmap']['ns'], children=[
make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
make_elem('Y', -1000.0, namespace=good_xml['nsmap']['ns']),
]),
])
]))
return root, good_xml['nsmap']
def test_extended_imagearea(xml_with_extendedarea):
root, nsmap = xml_with_extendedarea
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check(['check_extended_imagearea_polygon', 'check_extended_imagearea_x1y1_x2y2'])
assert len(cphd_con.failures()) == 0
def test_check_extended_imagearea_x1y1_x2y2(xml_with_extendedarea):
root, nsmap = xml_with_extendedarea
x1 = root.findtext('./ns:SceneCoordinates/ns:ExtendedArea/ns:X1Y1/ns:X', namespaces=nsmap)
root.find('./ns:SceneCoordinates/ns:ExtendedArea/ns:X2Y2/ns:X', namespaces=nsmap).text = x1
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_extended_imagearea_x1y1_x2y2')
assert cphd_con.failures()
def test_extended_imagearea_polygon_bad_extent(xml_with_extendedarea):
root, nsmap = xml_with_extendedarea
root.find('./ns:SceneCoordinates/ns:ExtendedArea/ns:X2Y2/ns:X', namespaces=nsmap).text = '2000'
cphd_con = CphdConsistency(
root, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_extended_imagearea_polygon')
assert (len(cphd_con.failures()) > 0) == HAVE_SHAPELY
def test_antenna_missing_channel_node(good_xml):
bad_xml = copy_xml(good_xml['with_ns'])
remove_nodes(*bad_xml.xpath('./ns:Channel/ns:Parameters/ns:Antenna', namespaces=good_xml['nsmap']))
cphd_con = CphdConsistency(
bad_xml, pvps=None, header=None, filename=None, check_signal_data=False)
cphd_con.check('check_antenna_ids_in_channel')
assert len(cphd_con.failures()) > 0
def test_refgeom_bad_root(good_cphd):
cphd_con = CphdConsistency.from_file(
good_cphd, check_signal_data=False)
bad_node = cphd_con.xml.find('./ReferenceGeometry/SRPCODTime')
bad_node.text = '24' + bad_node.text
cphd_con.check('check_refgeom_root')
assert len(cphd_con.failures()) > 0
def test_refgeom_bad_monostatic(good_cphd):
cphd_con = CphdConsistency.from_file(
good_cphd, check_signal_data=False)
bad_node = cphd_con.xml.find('./ReferenceGeometry/Monostatic/AzimuthAngle')
bad_node.text = str((float(bad_node.text) + 3) % 360)
cphd_con.check('check_refgeom_monostatic')
assert len(cphd_con.failures()) > 0
def test_refgeom_bad_bistatic(bistatic_cphd):
cphd_con = CphdConsistency.from_file(
bistatic_cphd, check_signal_data=False)
bad_node = cphd_con.xml.find('./ReferenceGeometry/Bistatic/RcvPlatform/SlantRange')
bad_node.text = '2' + bad_node.text
cphd_con.check('check_refgeom_bistatic')
assert len(cphd_con.failures()) > 0
def test_check_identifier_uniqueness(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd)
dwelltime = cphd_con.xml.find('./Dwell/DwellTime')
dwelltime.getparent().append(copy.deepcopy(dwelltime))
cphd_con.check('check_identifier_uniqueness')
assert cphd_con.failures()
def _invalidate_order(xml):
poly_2d = xml.find('./Dwell/CODTime/CODTimePoly')
poly_2d.find('./Coef').set('exponent1', '1' + poly_2d.get('order1'))
def _invalidate_coef_uniqueness(xml):
poly_2d = xml.find('./Dwell/DwellTime/DwellTimePoly')
poly_2d.append(copy.deepcopy(poly_2d.find('./Coef')))
@pytest.mark.parametrize('invalidate_func', [_invalidate_order, _invalidate_coef_uniqueness])
def test_check_polynomials(invalidate_func, good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd)
invalidate_func(cphd_con.xml)
cphd_con.check('check_polynomials')
assert cphd_con.failures()
def test_check_channel_normal_signal_pvp(xml_with_signal_normal):
pvps, root, nsmap = xml_with_signal_normal
cphd_con = CphdConsistency(
root, pvps=pvps, header=None, filename=None, check_signal_data=False)
channel_pvps = next(iter(cphd_con.pvps.values()))
channel_pvps['SIGNAL'][:] = 0
cphd_con.check(ignore_patterns=['check_(?!channel_normal_signal_pvp.+)'])
assert cphd_con.failures()
channel_pvps['SIGNAL'][::2] = 1
cphd_con.check(ignore_patterns=['check_(?!channel_normal_signal_pvp.+)'])
assert not cphd_con.failures()
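# The ignore_patterns idiom above relies on a regex negative lookahead to
# skip every check *except* the one under test.  A self-contained demo:
def _sketch_ignore_pattern_demo():
    import re
    pattern = re.compile('check_(?!channel_normal_signal_pvp.+)')
    assert pattern.match('check_antenna') is not None  # matched, so ignored
    assert pattern.match('check_channel_normal_signal_pvp_1') is None  # kept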
def _fxn_with_toa_domain(xml):
xml.find('./Global/DomainType').text = 'TOA'
fx1 = xml.find('./PVP/FX1')
for name in ('FXN1', 'FXN2'):
if xml.find(f'./PVP/{name}') is None:
new_elem = copy.deepcopy(fx1)
new_elem.tag = name
fx1.getparent().append(new_elem)
def _fxn1_only(xml):
xml.find('./Global/DomainType').text = 'FX'
fx1 = xml.find('./PVP/FX1')
remove_nodes(*xml.findall('./PVP/FXN1'), *xml.findall('./PVP/FXN2'))
new_elem = copy.deepcopy(fx1)
new_elem.tag = 'FXN1'
fx1.getparent().append(new_elem)
@pytest.mark.parametrize('invalidate_func', [_fxn_with_toa_domain, _fxn1_only])
def test_check_optional_pvps_fx(invalidate_func, good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd)
invalidate_func(cphd_con.xml)
cphd_con.check('check_optional_pvps_fx')
assert cphd_con.failures()
def test_check_optional_pvps_toa(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd)
toa1 = cphd_con.xml.find('./PVP/TOA1')
remove_nodes(*cphd_con.xml.findall('./PVP/TOAE1'), *cphd_con.xml.findall('./PVP/TOAE2'))
new_elem = copy.deepcopy(toa1)
new_elem.tag = 'TOAE1'
toa1.getparent().append(new_elem)
cphd_con.check('check_optional_pvps_toa')
assert cphd_con.failures()
@pytest.fixture
def dataset_with_toaextsaved(good_xml):
root = copy_xml(good_xml['with_ns'])
pvps = {}
min_toae1 = -1.1
max_toae2 = 2.2
toaextsaved = max_toae2 - min_toae1
for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
pvps[chan_id] = np.zeros(num_vect, dtype=[('TOAE1', 'f8'), ('TOAE2', 'f8')])
pvps[chan_id]['TOAE1'] = np.linspace(min_toae1, min_toae1 / 2, num_vect)
pvps[chan_id]['TOAE2'] = np.linspace(max_toae2, max_toae2 / 2, num_vect)
chan_param_node = root.find(f'./ns:Channel/ns:Parameters[ns:Identifier="{chan_id}"]',
namespaces=good_xml['nsmap'])
remove_nodes(*chan_param_node.findall('./ns:TOAExtended/ns:TOAExtSaved', namespaces=good_xml['nsmap']))
new_elem = make_elem('TOAExtended', namespace=good_xml['nsmap']['ns'], children=[
make_elem('TOAExtSaved', text=str(toaextsaved), namespace=good_xml['nsmap']['ns'])])
chan_param_node.find('./ns:TOASaved', namespaces=good_xml['nsmap']).addnext(new_elem)
toa1 = root.find('./ns:PVP/ns:TOA1', namespaces=good_xml['nsmap'])
for parameter in ('TOAE1', 'TOAE2'):
remove_nodes(*root.findall(f'./ns:PVP/ns:{parameter}', namespaces=good_xml['nsmap']))
new_elem = copy.deepcopy(toa1)
new_elem.tag = etree.QName(new_elem, parameter)
toa1.getparent().append(new_elem)
return pvps, root, good_xml['nsmap']
def test_check_channel_toaextsaved(dataset_with_toaextsaved):
pvps, root, nsmap = dataset_with_toaextsaved
cphd_con = CphdConsistency(root, pvps=pvps, header=None, filename=None)
cphd_con.check(ignore_patterns=['check_(?!channel_toaextsaved.+)'])
assert cphd_con.passes()
assert not cphd_con.failures()
def test_check_channel_toaextsaved_no_toae1(dataset_with_toaextsaved):
pvps, root, nsmap = dataset_with_toaextsaved
cphd_con = CphdConsistency(root, pvps=pvps, header=None, filename=None)
remove_nodes(*cphd_con.xml.findall('./PVP/TOAE1'))
cphd_con.pvps = {k: v[[x for x in v.dtype.names if x != 'TOAE1']] for k, v in pvps.items()}
cphd_con.check(ignore_patterns=['check_(?!channel_toaextsaved.+)'])
assert cphd_con.failures()
def test_channel_fx_osr(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd, check_signal_data=True)
cphd_con = copy.deepcopy(cphd_con)
channel_pvps = next(iter(cphd_con.pvps.values()))
channel_pvps['TOA2'][0] = channel_pvps['TOA1'][0] + 1.0 / channel_pvps['SCSS'][0]
cphd_con.check('check_channel_fx_osr', allow_prefix=True)
assert cphd_con.failures()
assert not cphd_con.skips()
cphd_con.check('check_channel_toa_osr', allow_prefix=True)
assert cphd_con.skips()
def test_channel_toa_osr(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd, check_signal_data=True)
cphd_con = copy.deepcopy(cphd_con)
cphd_con.xml.find('./Global/DomainType').text = 'TOA'
for channel_pvps in cphd_con.pvps.values():
needed_ss = 1.0 / (1.25 * (channel_pvps['FX2'] - channel_pvps['FX1']))
channel_pvps['SCSS'][:] = needed_ss
cphd_con.check('check_channel_toa_osr', allow_prefix=True)
assert not cphd_con.failures()
assert not cphd_con.skips()
channel_pvps['FX2'][0] = channel_pvps['FX1'][0] + 1.0 / channel_pvps['SCSS'][0]
cphd_con.check('check_channel_toa_osr', allow_prefix=True)
assert cphd_con.failures()
assert not cphd_con.skips()
cphd_con.check('check_channel_fx_osr', allow_prefix=True)
assert cphd_con.skips()
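# Hedged sketch of the oversample-ratio arithmetic behind both OSR tests:
# SCSS is the per-vector sample spacing in the collection domain, and the
# ratio is 1 / (SCSS * span), where span is the extent in the dual domain
# (TOA2 - TOA1 for FX-domain data, FX2 - FX1 for TOA-domain data).  The
# fixtures above build OSR = 1.25 data and then force OSR = 1.0 on a single
# vector; the minimum the checker actually enforces comes from the CPHD
# spec, not from this sketch.
def _sketch_osr(scss, dual_domain_span):
    return 1.0 / (scss * dual_domain_span)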
def test_check_channel_afdop(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd, check_signal_data=True)
cphd_con = copy.deepcopy(cphd_con)
channel_pvps = next(iter(cphd_con.pvps.values()))
channel_pvps['aFDOP'][0] += 100
cphd_con.check(ignore_patterns=['check_(?!channel_afdop.+)'])
assert cphd_con.failures()
assert not cphd_con.skips()
channel_pvps['aFDOP'][:] = 0
cphd_con.check(ignore_patterns=['check_(?!channel_afdop.+)'])
assert cphd_con.skips() # skips check if all zeroes
def test_check_channel_afrr1_afrr2_relative(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd, check_signal_data=True)
cphd_con = copy.deepcopy(cphd_con)
channel_pvps = next(iter(cphd_con.pvps.values()))
channel_pvps['FX1'][0] *= 2
channel_pvps['FX2'][0] *= 2
cphd_con.check(ignore_patterns=['check_(?!channel_afrr1_afrr2_relative.+)'])
assert cphd_con.failures()
@pytest.mark.parametrize('parameter', ['aFRR1', 'aFRR2'])
def test_check_channel_afrrs(good_cphd, parameter):
cphd_con = CphdConsistency.from_file(good_cphd, check_signal_data=True)
cphd_con = copy.deepcopy(cphd_con)
channel_id = cphd_con.xml.findtext('./Data/Channel/Identifier')
tx_wf_ids = cphd_con.xml.findall(f'./Channel/Parameters[Identifier="{channel_id}"]/TxRcv/TxWFId')
assert len(tx_wf_ids) == 1 # test construction assumes only one tx_wf
tx_wf_parameters = get_by_id(cphd_con.xml, './TxRcv/TxWFParameters', tx_wf_ids[0].text)
restored_tx_wf_parameters = copy.deepcopy(tx_wf_parameters)
restored_tx_wf_parameters.find('./Identifier').text += '_restored'
tx_fm_rate = tx_wf_parameters.find('./LFMRate')
tx_fm_rate.text = str((float(tx_fm_rate.text) + 1e6) * 10)
cphd_con.check(f'check_channel_{parameter.lower()}_{channel_id}')
assert cphd_con.failures()
# add back the original LFMRate and see that it passes
restored_tx_wf_id = copy.deepcopy(tx_wf_ids[0])
restored_tx_wf_id.text = restored_tx_wf_parameters.findtext('./Identifier')
tx_wf_ids[0].addnext(restored_tx_wf_id)
tx_wf_parameters.addnext(restored_tx_wf_parameters)
num_tx_wfs = tx_wf_parameters.getparent().find('./NumTxWFs')
num_tx_wfs.text = str(int(num_tx_wfs.text) + 1)
cphd_con = CphdConsistency(cphd_con.xml, cphd_con.pvps, cphd_con.header, cphd_con.filename,
cphd_con.schema, cphd_con.check_signal_data)
cphd_con.check(f'check_channel_{parameter.lower()}_{channel_id}')
assert not cphd_con.failures()
assert cphd_con.passes()
# skips if micro parameter is all zero
cphd_con.pvps[channel_id][parameter][:] = 0
cphd_con.check(f'check_channel_{parameter.lower()}_{channel_id}')
assert not cphd_con.failures()
assert not cphd_con.passes()
assert cphd_con.skips()
def test_check_channel_identifier_uniqueness(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd, check_signal_data=False)
tx_wf_id = cphd_con.xml.find('./Channel/Parameters/TxRcv/TxWFId')
tx_wf_id.addnext(copy.deepcopy(tx_wf_id))
cphd_con = CphdConsistency(cphd_con.xml, cphd_con.pvps, cphd_con.header, cphd_con.filename,
cphd_con.schema, cphd_con.check_signal_data)
cphd_con.check(ignore_patterns=['check_(?!channel_identifier_uniqueness.+)'])
assert cphd_con.failures()
def test_check_channel_rcv_sample_rate(good_cphd):
cphd_con = CphdConsistency.from_file(good_cphd, check_signal_data=False)
for rcv_rate in cphd_con.xml.findall('./TxRcv/RcvParameters/SampleRate'):
rcv_rate.text = '0'
cphd_con.check(ignore_patterns=['check_(?!channel_rcv_sample_rate.+)'])
assert cphd_con.failures()
def test_check_channel_dwell_polys(good_cphd):
cphd_con = CphdConsistency.from_file(str(good_cphd))
    bad_xml = ET.fromstring(ET.tostring(cphd_con.xml))  # CODTimeType serializes via stdlib xml.etree nodes, so convert from lxml
global_txtime2 = float(bad_xml.findtext('./Global/Timeline/TxTime2'))
cod_id = bad_xml.findtext('./Channel/Parameters/DwellTimes/CODId')
codtime_elem = get_by_id(bad_xml, './Dwell/CODTime', cod_id)
codtime_elem.remove(codtime_elem.find('./CODTimePoly'))
bad_cod_time = sarpy_dwell.CODTimeType(Identifier=cod_id, CODTimePoly=[[global_txtime2 + 1]])
codtime_elem.append(bad_cod_time.CODTimePoly.to_node(ET.ElementTree(bad_xml), 'CODTimePoly'))
    bad_xml = etree.fromstring(ET.tostring(bad_xml))  # CphdConsistency needs lxml (xpath/getparent), so convert back
cphd_con = CphdConsistency(bad_xml, cphd_con.pvps, cphd_con.header, cphd_con.filename,
cphd_con.schema, cphd_con.check_signal_data)
cphd_con.check(ignore_patterns=['check_(?!channel_dwell_polys.+)'])
assert cphd_con.failures()
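# The double round-trip above moves the tree between the two XML stacks:
# the to_node serialization works with stdlib xml.etree elements, while
# CphdConsistency wants lxml (for xpath and getparent).  A self-contained
# sketch of the same conversion:
def _sketch_et_lxml_roundtrip():
    import xml.etree.ElementTree as ET
    from lxml import etree
    stdlib_node = ET.fromstring('<a><b/></a>')
    lxml_node = etree.fromstring(ET.tostring(stdlib_node))
    assert lxml_node.find('./b').getparent() is lxml_node  # lxml-only API
    return lxml_node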
| 46,073 | 40.620596 | 115 | py |
sarpy | sarpy-master/tests/geometry/test_geocoords.py | import pytest
import numpy
from sarpy.geometry import geocoords
EQUATORIAL_RADIUS = 6378137
POLAR_RADIUS = 6356752.314245179
TOLERANCE = 1e-8
numpy.random.seed(314159)  # seeds the legacy global RNG; the tests below draw from numpy.random.default_rng(), which this seed does not affect
@pytest.fixture(scope='module')
def input():
llh = numpy.array([[0, 0, 0], [0, 180, 0], [90, 0, 0], [-90, 0, 0], [0, 90, 0]], dtype='float64')
ecf = numpy.array([[EQUATORIAL_RADIUS, 0, 0],
[-EQUATORIAL_RADIUS, 0, 0],
[0, 0, POLAR_RADIUS],
[0, 0, -POLAR_RADIUS],
[0, EQUATORIAL_RADIUS, 0]], dtype='float64')
ned = numpy.array([[0, 0, 0],
[0, 0, EQUATORIAL_RADIUS*2],
[POLAR_RADIUS, 0, EQUATORIAL_RADIUS],
[-POLAR_RADIUS, 0, EQUATORIAL_RADIUS],
[0, EQUATORIAL_RADIUS, EQUATORIAL_RADIUS]], dtype='float64')
enu = numpy.array([[0, 0, 0],
[0, 0, -EQUATORIAL_RADIUS*2],
[0, POLAR_RADIUS, -EQUATORIAL_RADIUS],
[0, -POLAR_RADIUS, -EQUATORIAL_RADIUS],
[EQUATORIAL_RADIUS, 0, -EQUATORIAL_RADIUS]], dtype='float64')
orp = ecf[0, :]
return {"llh": llh, "ecf": ecf, "ned": ned, "enu": enu, "orp": orp}
def test_ecf_to_geodetic(input):
out = geocoords.ecf_to_geodetic(input['ecf'][0, :])
# basic value check
assert out == pytest.approx(input['llh'][0, :], abs=TOLERANCE)
out2 = geocoords.ecf_to_geodetic(input['ecf'])
# 2d value check
assert out2 == pytest.approx(input['llh'], abs=TOLERANCE)
# check (lon, lat, hae) order
out3 = geocoords.ecf_to_geodetic(input['ecf'], ordering='longlat')
assert out3 == pytest.approx(input['llh'][:, [1, 0, 2]], abs=TOLERANCE)
# error check
with pytest.raises(ValueError):
geocoords.ecf_to_geodetic(numpy.arange(4))
def test_geodetic_to_ecf(input):
out = geocoords.geodetic_to_ecf(input['llh'][0, :])
# basic value check
assert out == pytest.approx(input['ecf'][0, :], abs=TOLERANCE)
out2 = geocoords.geodetic_to_ecf(input['llh'])
assert out2 == pytest.approx(input['ecf'], abs=TOLERANCE)
# check (lon, lat, hae) order
out3 = geocoords.geodetic_to_ecf(input['llh'][:, [1, 0, 2]], ordering='longlat')
assert out3 == pytest.approx(input['ecf'], abs=TOLERANCE)
# error check
with pytest.raises(ValueError):
geocoords.geodetic_to_ecf(numpy.arange(4))
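# For reference, the textbook WGS-84 geodetic -> ECF closed form that the
# fixture values satisfy (a sketch written from the standard formula, not
# lifted from sarpy's internals): with e2 the first eccentricity squared and
# N(lat) the prime-vertical radius of curvature,
#   X = (N + h) * cos(lat) * cos(lon)
#   Y = (N + h) * cos(lat) * sin(lon)
#   Z = (N * (1 - e2) + h) * sin(lat)
def _sketch_geodetic_to_ecf(lat_deg, lon_deg, h):
    lat, lon = numpy.deg2rad(lat_deg), numpy.deg2rad(lon_deg)
    e2 = 1.0 - (POLAR_RADIUS / EQUATORIAL_RADIUS) ** 2
    big_n = EQUATORIAL_RADIUS / numpy.sqrt(1.0 - e2 * numpy.sin(lat) ** 2)
    return numpy.array([(big_n + h) * numpy.cos(lat) * numpy.cos(lon),
                        (big_n + h) * numpy.cos(lat) * numpy.sin(lon),
                        (big_n * (1.0 - e2) + h) * numpy.sin(lat)])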
def test_values_both_ways():
shp = (8, 5)
rand_llh = numpy.empty(shp + (3, ), dtype=numpy.float64)
rng = numpy.random.default_rng()
rand_llh[:, :, 0] = 180*(rng.random(shp) - 0.5)
rand_llh[:, :, 1] = 360*(rng.random(shp) - 0.5)
rand_llh[:, :, 2] = 1e5*rng.random(shp)
rand_ecf = geocoords.geodetic_to_ecf(rand_llh)
rand_llh2 = geocoords.ecf_to_geodetic(rand_ecf)
rand_ecf2 = geocoords.geodetic_to_ecf(rand_llh2)
# llh match
assert rand_llh == pytest.approx(rand_llh2, abs=TOLERANCE)
# ecf match
assert rand_ecf == pytest.approx(rand_ecf2, abs=TOLERANCE)
def test_ecf_to_ned(input):
out = geocoords.ecf_to_ned(input['ecf'][0, :], input['orp'])
assert numpy.all(out == 0)
out = geocoords.ecf_to_ned(input['ecf'], input['orp'])
assert out == pytest.approx(input['ned'], abs=TOLERANCE)
# orp is a list
out = geocoords.ecf_to_ned(input['ecf'], [EQUATORIAL_RADIUS, 0, 0])
assert out == pytest.approx(input['ned'], abs=TOLERANCE)
# absolute_coords not default
out = geocoords.ecf_to_ned(input['ecf'][0, :], input['orp'], absolute_coords=False)
assert out == pytest.approx([0, 0, -EQUATORIAL_RADIUS], abs=TOLERANCE)
# orp is of length 3
with pytest.raises(ValueError):
orp1 = numpy.append(input['orp'], 0)
out = geocoords.ecf_to_ned(input['ecf'][0, :], orp1)
def test_ned_to_ecf(input):
# input is list instead of array
out = geocoords.ned_to_ecf(input['ned'][0, :].tolist(), input['orp'])
assert out == pytest.approx(input['ecf'][0, :], abs=TOLERANCE)
out = geocoords.ned_to_ecf(input['ned'], input['orp'])
assert out == pytest.approx(input['ecf'], abs=TOLERANCE)
# orp is a list
out = geocoords.ned_to_ecf(input['ned'], [EQUATORIAL_RADIUS, 0, 0])
assert out == pytest.approx(input['ecf'], abs=TOLERANCE)
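# Hedged sketch of the rotation behind the ECF <-> NED pairs above: at an
# origin with geodetic latitude/longitude (lat, lon), the rows below are the
# north, east and down unit vectors expressed in ECF, so a relative vector
# transforms as ned = R @ (ecf - orp).
def _sketch_ned_rotation(lat_deg, lon_deg):
    lat, lon = numpy.deg2rad(lat_deg), numpy.deg2rad(lon_deg)
    sphi, cphi = numpy.sin(lat), numpy.cos(lat)
    slam, clam = numpy.sin(lon), numpy.cos(lon)
    return numpy.array([[-sphi * clam, -sphi * slam, cphi],
                        [-slam, clam, 0.0],
                        [-cphi * clam, -cphi * slam, -sphi]])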
def test_ecf_to_ned_roundtrip(input):
shp = (8, 5)
rand_ecf = numpy.empty(shp + (3, ), dtype=numpy.float64)
rng = numpy.random.default_rng()
rand_ecf[:, :, 0] = EQUATORIAL_RADIUS*(rng.random(shp) - 0.5)
rand_ecf[:, :, 1] = EQUATORIAL_RADIUS*(rng.random(shp) - 0.5)
rand_ecf[:, :, 2] = POLAR_RADIUS*(rng.random(shp) - 0.5)
rand_ned = geocoords.ecf_to_ned(rand_ecf, input['orp'])
rand_ecf2 = geocoords.ned_to_ecf(rand_ned, input['orp'])
rand_ned2 = geocoords.ecf_to_ned(rand_ecf2, input['orp'])
# ecf match
assert rand_ecf == pytest.approx(rand_ecf2, abs=TOLERANCE)
# ned match
assert rand_ned == pytest.approx(rand_ned2, abs=TOLERANCE)
def test_ecf_to_enu(input):
out = geocoords.ecf_to_enu(input['ecf'][0, :], input['orp'])
assert numpy.all(out == 0)
out = geocoords.ecf_to_enu(input['ecf'], input['orp'])
assert out == pytest.approx(input['enu'], abs=TOLERANCE)
# orp is a list
out = geocoords.ecf_to_enu(input['ecf'], [EQUATORIAL_RADIUS, 0, 0])
assert out == pytest.approx(input['enu'], abs=TOLERANCE)
# absolute_coords not default
out = geocoords.ecf_to_enu(input['ecf'][0, :], input['orp'], absolute_coords=False)
assert out == pytest.approx([0, 0, EQUATORIAL_RADIUS], abs=TOLERANCE)
# orp is of length 3
with pytest.raises(ValueError):
orp1 = numpy.append(input['orp'], 0)
out = geocoords.ecf_to_enu(input['ecf'][0, :], orp1)
def test_enu_to_ecf(input):
out = geocoords.enu_to_ecf(input['enu'][0, :], input['orp'])
assert out == pytest.approx(input['ecf'][0, :], abs=TOLERANCE)
out = geocoords.enu_to_ecf(input['enu'], input['orp'])
assert out == pytest.approx(input['ecf'], abs=TOLERANCE)
# orp is a list
out = geocoords.enu_to_ecf(input['enu'], [EQUATORIAL_RADIUS, 0, 0])
assert out == pytest.approx(input['ecf'], abs=TOLERANCE)
def test_ecf_to_enu_roundtrip(input):
shp = (8, 5)
rand_ecf = numpy.empty(shp + (3, ), dtype=numpy.float64)
rng = numpy.random.default_rng()
rand_ecf[:, :, 0] = EQUATORIAL_RADIUS*(rng.random(shp) - 0.5)
rand_ecf[:, :, 1] = EQUATORIAL_RADIUS*(rng.random(shp) - 0.5)
rand_ecf[:, :, 2] = POLAR_RADIUS*(rng.random(shp) - 0.5)
rand_enu = geocoords.ecf_to_enu(rand_ecf, input['orp'])
rand_ecf2 = geocoords.enu_to_ecf(rand_enu, input['orp'])
rand_enu2 = geocoords.ecf_to_enu(rand_ecf2, input['orp'])
# ecf match
assert rand_ecf == pytest.approx(rand_ecf2, abs=TOLERANCE)
# enu match
assert rand_enu == pytest.approx(rand_enu2, abs=TOLERANCE)
def test_wgs84_norm(input):
wgs84_norm = geocoords.wgs_84_norm(input['ecf'])
expected = numpy.array([[1., 0., 0.],
[-1., 0., 0.],
[0., 0., 1.],
[0., 0., -1.],
[0., 1., 0.]])
assert wgs84_norm == pytest.approx(expected, abs=TOLERANCE)
| 7,290 | 34.565854 | 101 | py |
sarpy | sarpy-master/tests/geometry/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/tests/geometry/test_point_projection.py | #
# Copyright 2023 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import pathlib
import numpy as np
import pytest
from sarpy.geometry import point_projection
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.product.sidd2_elements.SIDD import SIDDType
from sarpy.io.DEM.DEM import DEMInterpolator
TOLERANCE = 1e-8
@pytest.fixture(scope='module')
def sicd():
xml_file = pathlib.Path(pathlib.Path.cwd(), 'tests/data/example.sicd.xml')
structure = SICDType().from_xml_file(xml_file)
scp_pixel = [structure.ImageData.SCPPixel.Row,
structure.ImageData.SCPPixel.Col]
scp_ecf = [structure.GeoData.SCP.ECF.X,
structure.GeoData.SCP.ECF.Y,
structure.GeoData.SCP.ECF.Z]
scp_llh = [structure.GeoData.SCP.LLH.Lat,
structure.GeoData.SCP.LLH.Lon,
structure.GeoData.SCP.LLH.HAE]
return {'structure': structure, 'scp_pixel': scp_pixel, 'scp_ecf': scp_ecf, 'scp_llh': scp_llh}
@pytest.fixture(scope='module')
def sidd():
xml_file = pathlib.Path(pathlib.Path.cwd(), 'tests/data/example.sidd.xml')
return SIDDType().from_xml_file(xml_file)
def test_image_to_ground_plane(sicd):
# project scp pixel (PLANE)
scp_ecef1 = point_projection.image_to_ground(sicd['scp_pixel'],
sicd['structure'],
projection_type='PLANE')
assert scp_ecef1 == pytest.approx(sicd['scp_ecf'], abs=TOLERANCE)
scp_ecef2 = point_projection.image_to_ground([sicd['scp_pixel'], sicd['scp_pixel']],
sicd['structure'],
block_size=1,
projection_type='PLANE')
assert np.all(np.abs(scp_ecef2[0] - scp_ecef1) < TOLERANCE)
assert np.all(np.abs(scp_ecef2[1] - scp_ecef1) < TOLERANCE)
# 2-dim gref
gref = np.array([[sicd['structure'].GeoData.SCP.ECF.X],
[sicd['structure'].GeoData.SCP.ECF.Y],
[sicd['structure'].GeoData.SCP.ECF.Z]])
scp_ecef3 = point_projection.image_to_ground(sicd['scp_pixel'],
sicd['structure'],
gref=gref,
projection_type='PLANE')
assert scp_ecef3 == pytest.approx(sicd['scp_ecf'], abs=TOLERANCE)
# 2-dim ugpn
ugpn = gref
scp_ecef4 = point_projection.image_to_ground(sicd['scp_pixel'],
sicd['structure'],
ugpn=ugpn,
projection_type='PLANE')
assert scp_ecef4 == pytest.approx(sicd['scp_ecf'], abs=TOLERANCE)
def test_image_to_ground_hae(sicd, caplog):
# project scp pixel (HAE)
scp_ecef1 = point_projection.image_to_ground(sicd['scp_pixel'],
sicd['structure'],
projection_type='HAE')
assert scp_ecef1 == pytest.approx(sicd['scp_ecf'], abs=TOLERANCE)
scp_ecef2 = point_projection.image_to_ground([sicd['scp_pixel'], sicd['scp_pixel']],
sicd['structure'],
block_size=1,
projection_type='HAE')
assert np.all(np.abs(scp_ecef2[0] - scp_ecef1) < TOLERANCE)
assert np.all(np.abs(scp_ecef2[1] - scp_ecef1) < TOLERANCE)
# error max_iterations < 1
point_projection.image_to_ground(sicd['scp_pixel'],
sicd['structure'],
projection_type='HAE',
max_iterations=0)
assert 'max_iterations must be a positive integer' in caplog.text
# error max_iterations > 100
point_projection.image_to_ground(sicd['scp_pixel'],
sicd['structure'],
projection_type='HAE',
max_iterations=101)
assert 'maximum allowed max_iterations is 100' in caplog.text
def test_image_to_ground_errors(sicd):
# invalid im_points (empty)
with pytest.raises(ValueError, match="final dimension of im_points must have length 2"):
point_projection.image_to_ground([], sicd['structure'], projection_type='PLANE')
# invalid im_points (None)
with pytest.raises(ValueError, match="The argument cannot be None"):
point_projection.image_to_ground(None, sicd['structure'], projection_type='PLANE')
# invalid projection_type
with pytest.raises(ValueError, match="Got unrecognized projection type INVALID_PLANE"):
point_projection.image_to_ground(sicd['scp_pixel'], sicd['structure'], projection_type='INVALID_PLANE')
# invalid gref
with pytest.raises(ValueError, match="gref must have three elements"):
point_projection.image_to_ground(sicd['scp_pixel'], sicd['structure'], gref=[0.0, 0.0], projection_type='PLANE')
# invalid ugpn
with pytest.raises(ValueError, match="ugpn must have three elements"):
point_projection.image_to_ground(sicd['scp_pixel'], sicd['structure'], ugpn=[0.0, 0.0], projection_type='PLANE')
# dem_interpolator is none
with pytest.raises(ValueError, match="dem_interpolator is None"):
point_projection.image_to_ground(sicd['scp_pixel'], sicd['structure'], projection_type='DEM')
# dem_interpolator is not DEMInterpolator type
with pytest.raises(TypeError, match="dem_interpolator is of unsupported type"):
point_projection.image_to_ground(sicd['scp_pixel'], sicd['structure'], projection_type='DEM',
dem_interpolator=sicd['scp_pixel'])
def test_image_to_ground_geo(sicd):
# project scp pixel to geodetic
scp_geo = point_projection.image_to_ground_geo(sicd['scp_pixel'], sicd['structure'])
assert scp_geo == pytest.approx(sicd['scp_llh'], abs=TOLERANCE)
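# A hedged sketch of the relationship this test leans on: image_to_ground_geo
# is, in essence, the geodetic-output composition of the ECF projection,
# roughly ecf_to_geodetic(image_to_ground(...)).  Illustration only; the
# real function also handles blocks, ordering and keyword pass-through.
def _sketch_image_to_ground_geo(im_points, structure):
    from sarpy.geometry.geocoords import ecf_to_geodetic
    return ecf_to_geodetic(point_projection.image_to_ground(im_points, structure))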
def test_image_to_ground_dem(sicd):
interp = DEMInterpolator()
with pytest.raises(NotImplementedError):
point_projection.image_to_ground(sicd['scp_pixel'],
sicd['structure'],
projection_type='DEM',
dem_interpolator=interp)
def test_ground_to_image(sicd):
# project scp ecef to pixel
scp_pixel1 = point_projection.ground_to_image(sicd['scp_ecf'], sicd['structure'])
assert scp_pixel1[0] == pytest.approx(sicd['scp_pixel'], abs=TOLERANCE)
assert scp_pixel1[1] == pytest.approx(0.0, abs=TOLERANCE)
scp_pixel2 = point_projection.ground_to_image([sicd['scp_ecf'], sicd['scp_ecf']], sicd['structure'], block_size=1)
assert np.all(np.abs(scp_pixel2[0][0] - scp_pixel1[0]) < TOLERANCE)
assert np.all(np.abs(scp_pixel2[0][1] - scp_pixel1[0]) < TOLERANCE)
def test_ground_to_image_geo(sicd):
# project scp pixel to geodetic
scp_pixel1 = point_projection.ground_to_image_geo(sicd['scp_llh'], sicd['structure'])
assert scp_pixel1[0] == pytest.approx(sicd['scp_pixel'], abs=TOLERANCE)
assert scp_pixel1[1] == pytest.approx(0.0, abs=TOLERANCE)
def test_image_to_ground_sidd(sidd, caplog):
# project SIDD reference point pixel
ref_point = [sidd.Measurement.PlaneProjection.ReferencePoint.Point.Row,
sidd.Measurement.PlaneProjection.ReferencePoint.Point.Col]
scp_ecef1 = point_projection.image_to_ground(ref_point, sidd)
sidd_ecef = sidd.Measurement.PlaneProjection.ReferencePoint.ECEF
assert sidd_ecef.X == pytest.approx(scp_ecef1[0], abs=TOLERANCE)
assert sidd_ecef.Y == pytest.approx(scp_ecef1[1], abs=TOLERANCE)
assert sidd_ecef.Z == pytest.approx(scp_ecef1[2], abs=TOLERANCE)
# force path through _get_outward_norm
scp_ecef2 = point_projection.image_to_ground(ref_point, sidd, projection_type='PLANE', ugpn=None)
assert scp_ecef2[0] == pytest.approx(sidd_ecef.X, abs=TOLERANCE)
assert scp_ecef2[1] == pytest.approx(sidd_ecef.Y, abs=TOLERANCE)
assert scp_ecef2[2] == pytest.approx(sidd_ecef.Z, abs=TOLERANCE)
point_projection.image_to_ground(ref_point, sidd, tolerance=1e-13)
assert 'minimum allowed tolerance is 1e-12' in caplog.text
def test_ground_to_image_sidd(sidd, caplog):
# project reference point ecef to pixel
ref_point = [sidd.Measurement.PlaneProjection.ReferencePoint.ECEF.X,
sidd.Measurement.PlaneProjection.ReferencePoint.ECEF.Y,
sidd.Measurement.PlaneProjection.ReferencePoint.ECEF.Z]
scp_pixel1 = point_projection.ground_to_image(ref_point, sidd)
sidd_rowcol = sidd.Measurement.PlaneProjection.ReferencePoint.Point
assert sidd_rowcol.Row == pytest.approx(scp_pixel1[0][0], abs=TOLERANCE)
assert sidd_rowcol.Col == pytest.approx(scp_pixel1[0][1], abs=TOLERANCE)
assert scp_pixel1[1] == pytest.approx(0.0, abs=TOLERANCE)
point_projection.ground_to_image(ref_point, sidd, tolerance=1e-13)
assert 'minimum allowed tolerance is 1e-12' in caplog.text
def test_coa_projection(sicd, sidd):
# smoke test
proj = point_projection.COAProjection.from_sicd(sicd['structure'])
assert proj.delta_arp is not None
assert proj.delta_varp is not None
assert proj.range_bias is not None
assert proj.delta_range is not None
# force path through RIC_ECF code (SICD)
proj = point_projection.COAProjection.from_sicd(sicd['structure'], adj_params_frame='RIC_ECF')
assert proj.delta_arp is not None
# force path through RIC_ECF code (SIDD)
proj = point_projection.COAProjection.from_sidd(sidd, adj_params_frame='RIC_ECF')
assert proj.delta_arp is not None
method_projection = point_projection._get_sicd_type_specific_projection(sicd['structure'])
with pytest.raises(TypeError, match="time_coa_poly must be a Poly2DType instance"):
point_projection.COAProjection(sicd['structure'].Position.ARPPoly,
sicd['structure'].Position.ARPPoly,
method_projection)
with pytest.raises(TypeError, match="arp_poly must be an XYZPolyType instance"):
point_projection.COAProjection(sicd['structure'].Grid.TimeCOAPoly,
sicd['structure'].Grid.TimeCOAPoly,
method_projection)
with pytest.raises(TypeError, match="method_projection must be callable"):
point_projection.COAProjection(sicd['structure'].Grid.TimeCOAPoly,
sicd['structure'].Position.ARPPoly,
'WRONG')
def test_validate_coords_error(sicd):
# ECF coordinates must have length 3
coords = np.array([[sicd['structure'].GeoData.SCP.ECF.X],
[sicd['structure'].GeoData.SCP.ECF.Y],
[sicd['structure'].GeoData.SCP.ECF.Z],
[sicd['structure'].GeoData.SCP.ECF.Z]])
with pytest.raises(ValueError, match="final dimension of coords must have length 3"):
point_projection._validate_coords(coords)
def test_validate_adjustment_param_error(sicd):
# ECF coordinates must have length 3 and passed as an array
coords = [[sicd['structure'].GeoData.SCP.ECF.X],
[sicd['structure'].GeoData.SCP.ECF.Y],
[sicd['structure'].GeoData.SCP.ECF.Z],
[sicd['structure'].GeoData.SCP.ECF.Z]]
with pytest.raises(ValueError, match=r"position must have shape \(3, \). Got \(4, 1\)"):
point_projection._validate_adj_param(coords, 'position')
coords = np.array(coords)
with pytest.raises(ValueError, match=r"position must have shape \(3, \). Got \(4, 1\)"):
point_projection._validate_adj_param(coords, 'position')
| 12,005 | 46.082353 | 120 | py |
sarpy | sarpy-master/tests/geometry/test_latlon.py | #
# Copyright 2022 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
import pytest
from sarpy.geometry import latlon
def test_string():
ll_str = latlon.string(33.92527777777778, 'lat') # float input
assert ll_str == '33°55\'31"N'
ll_str = latlon.string(33, 'lat')
assert ll_str == '33°00\'00"N'
ll_str = latlon.string(33.92527777777778, 'lat', num_units=1)
assert ll_str == '33.92528°N'
ll_str = latlon.string(33.92527777777778, 'lat', num_units=2)
assert ll_str == "33°56'N"
ll_str = latlon.string(33.92527777777778, 'lat', num_units=3)
assert ll_str == '33°55\'31"N'
ll_str = latlon.string(33.92527777777778, 'lat', include_symbols=False)
assert ll_str == '335531N'
ll_str = latlon.string(33.92527777777778, 'lat', signed=True)
assert ll_str == '+33°55\'31"'
ll_str = latlon.string(-33.92527777777778, 'lat')
assert ll_str == '33°55\'31"S'
ll_str = latlon.string(np.array([33.0, 55.0, 31.0]), 'lat') # array input
assert ll_str == '33°55\'31"N'
ll_str = latlon.string([33.0, 55.0, 31.0], 'lat') # list input
assert ll_str == '33°55\'31"N'
ll_str = latlon.string((33.0, 55.0, 31.0), 'lat') # tuple input
assert ll_str == '33°55\'31"N'
ll_str = latlon.string(133.92527777777778, 'lon')
assert ll_str == '133°55\'31"E'
ll_str = latlon.string(493.92527777777778, 'lon')
assert ll_str == '133°55\'31"E'
ll_str = latlon.string(-133.92527777777778, 'lon')
assert ll_str == '133°55\'31"W'
ll_str = latlon.string([33.0, 55.0, 60.0], 'lat') # seconds == 60
assert ll_str == '33°56\'00"N'
ll_str = latlon.string([33.0, 59.0, 60.0], 'lat') # seconds rollover to minutes == 60
assert ll_str == '34°00\'00"N'
ll_str = latlon.string(33.0, 'lat', padded=False)
assert ll_str == '33°0\'0"N'
def test_dms():
deg, min, sec = latlon.dms(33.92527777777778)
assert deg == 33
assert min == 55
assert 31 == pytest.approx(sec, abs=1e-10)
deg, min, sec = latlon.dms(-33.92527777777778)
assert deg == -33
assert min == 55
assert 31 == pytest.approx(sec, abs=1e-10)
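# Worked arithmetic for the dms conversion checked above:
# 33.92527777777778 deg -> 33 whole degrees; 0.92527777... * 60 =
# 55.51666... -> 55 whole minutes; 0.51666... * 60 = 31.0 seconds.  A
# sign-preserving sketch (sign carried on the degrees, as the asserts show):
def _sketch_dms(decimal_degrees):
    sign = -1 if decimal_degrees < 0 else 1
    mag = abs(decimal_degrees)
    deg = int(mag)
    minutes = int((mag - deg) * 60)
    seconds = (mag - deg - minutes / 60.0) * 3600.0
    return sign * deg, minutes, seconds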
def test_num():
ll_dec = latlon.num('33:55:31')
assert 33.9252777778 == pytest.approx(ll_dec, abs=1e-10)
ll_dec = latlon.num('33:55:31W')
assert -33.9252777778 == pytest.approx(ll_dec, abs=1e-10)
ll_dec = latlon.num('1335531')
assert 133.9252777778 == pytest.approx(ll_dec, abs=1e-10)
ll_dec = latlon.num('360:00:01')
assert np.isnan(ll_dec)
ll_dec = latlon.num('-180:00:01')
assert np.isnan(ll_dec)
ll_dec = latlon.num('33:55:31:21')
assert np.isnan(ll_dec)
with pytest.raises(ValueError):
ll_dec = latlon.num(33.92527777777778)
| 2,772 | 29.811111 | 90 | py |
sarpy | sarpy-master/tests/geometry/test_geometry_elements.py | #
# Copyright 2022 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import numpy as np
import pytest
from sarpy.geometry import geometry_elements
@pytest.fixture(scope='module')
def test_elements():
point = [33.447899, -112.097254]
point_json = {
"type": "Point",
"coordinates": point
}
line_string = [
[33.447899, -112.097254],
[33.448364, -112.072789]
]
line_string_json = {
"type": "LineString",
"coordinates": line_string
}
poly = [
[33.846868, -112.269723],
[33.831174, -111.637440],
[33.244145, -111.669740],
[33.237457, -112.261898],
[33.846868, -112.269723]
]
poly_json = {
"type": "Polygon",
"coordinates": [poly]
}
return {"point": point,
"point_json": point_json,
"line_string": line_string,
"line_string_json": line_string_json,
"poly": poly,
"poly_json": poly_json}
def test_feature(test_elements):
# test Feature class
feature = geometry_elements.Feature()
assert feature.uid is not None
assert feature.geometry is None
feature.geometry = geometry_elements.Geometry().from_dict(test_elements['point_json'])
assert np.all(feature.geometry.coordinates == test_elements['point'])
feature.properties = 'my properties'
assert feature.properties == 'my properties'
feature_dict = feature.to_dict()
feature1 = geometry_elements.Feature().from_dict(feature_dict)
assert np.all(feature1.geometry.coordinates == test_elements['point'])
assert feature1.properties == 'my properties'
feature2 = feature.replicate()
assert np.all(feature2.geometry.coordinates == test_elements['point'])
assert feature2.properties == 'my properties'
# test FeatureCollection class
feature_collection = geometry_elements.FeatureCollection([feature, feature1, feature2])
assert len(feature_collection.features) == 3
    feature_collection.features = [feature2]  # the features setter appends to the collection rather than replacing it
    assert len(feature_collection.features) == 4
idx = feature_collection.get_integer_index(feature2.uid)
assert idx == 3
feature_collection_dict = feature_collection.to_dict()
feature_collection1 = geometry_elements.FeatureCollection().from_dict(feature_collection_dict)
assert len(feature_collection1) == 4
feature_collection.add_feature(feature1)
assert len(feature_collection.features) == 5
feature_collection2 = feature_collection.replicate()
assert len(feature_collection2.features) == 5
feature_collection3 = feature_collection1.replicate()
assert len(feature_collection3.features) == 4
def test_geometry_collection(test_elements):
geom = geometry_elements.GeometryCollection([test_elements['point_json'],
test_elements['line_string_json'],
test_elements['poly_json']])
# check properties
assert len(geom.collection) == 3
assert len(geom.geometries) == 3
# check setter
geom.geometries = []
assert len(geom.collection) == 0
geom.geometries = [test_elements['point_json']]
assert len(geom.collection) == 1
# check setter error conditions
with pytest.raises(TypeError, match='geometries must be None or a list of Geometry objects'):
geom.geometries = test_elements['point_json']
with pytest.raises(TypeError, match='geometries must be a list of Geometry objects'):
geom.geometries = [test_elements['point_json'], 10]
# check bounding box
geom.geometries = [test_elements['poly_json']]
coord_arr = np.asarray(test_elements['poly'])
truth_box = [min(coord_arr[:, 0]),
min(coord_arr[:, 1]),
max(coord_arr[:, 0]),
max(coord_arr[:, 1])]
box = geom.get_bbox()
assert box == truth_box
# check to/from dict
geom.geometries = [test_elements['point_json'], test_elements['line_string_json'], test_elements['poly_json']]
geom_dict = geom.to_dict()
geom1 = geometry_elements.GeometryCollection.from_dict(geom_dict)
assert dir(geom) == dir(geom1)
assert np.all(geom.geometries[0].coordinates == geom1.geometries[0].coordinates)
assert np.all(geom.geometries[1].get_coordinate_list() ==
geom1.geometries[1].get_coordinate_list())
assert np.all(geom.geometries[2].get_coordinate_list() ==
geom1.geometries[2].get_coordinate_list())
# check assemble from collection
point = geometry_elements.Point(coordinates=test_elements['point'])
line_string = geometry_elements.LineString(coordinates=test_elements['line_string'])
geom = geometry_elements.GeometryCollection([test_elements['point_json'], test_elements['line_string_json'], test_elements['poly_json']])
geom1 = geometry_elements.GeometryCollection()
geom1 = geom1.assemble_from_collection(point, line_string, geom)
list_of_geometries = geom1.geometries
assert len(list_of_geometries) == 5
assert list_of_geometries[0].get_coordinate_list() == test_elements['point']
assert list_of_geometries[1].get_coordinate_list() == test_elements['line_string']
assert list_of_geometries[2].get_coordinate_list() == test_elements['point']
assert list_of_geometries[3].get_coordinate_list() == test_elements['line_string']
assert list_of_geometries[4].get_coordinate_list()[0] == test_elements['poly']
# No args
geom2 = geom1.assemble_from_collection()
assert isinstance(geom2, geometry_elements.GeometryCollection)
def test_polygons():
outer_ring_coords = [[0, 0], [3, 0], [3, 3], [0, 3], [0, 0]]
inner_ring_coords = [[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]]
intersecting_ring_coords = [[0, 1], [1, 1], [1, 2], [0, 2], [0, 1]]
# check inner/outer ring
ring1 = geometry_elements.LinearRing(outer_ring_coords)
ring2 = geometry_elements.LinearRing(inner_ring_coords)
ring3 = geometry_elements.LinearRing(intersecting_ring_coords)
assert np.all(ring1.get_coordinate_list() == outer_ring_coords)
assert np.all(ring2.get_coordinate_list() == inner_ring_coords)
assert np.all(ring3.get_coordinate_list() == intersecting_ring_coords)
test_poly = geometry_elements.Polygon([ring1, ring2])
# check inner/outer ring
outer_ring = test_poly.outer_ring
assert np.all(outer_ring.get_coordinate_list() == outer_ring_coords)
inner_rings = test_poly.inner_rings
assert np.all(inner_rings[0].get_coordinate_list() == inner_ring_coords)
# check intersection by adding inner ring
assert not test_poly.self_intersection()
test_poly.add_inner_ring(intersecting_ring_coords)
assert test_poly.self_intersection()
# check to/from dict
polygon_dict = test_poly.to_dict()
test_polygon1 = geometry_elements.Polygon().from_dict(polygon_dict)
assert test_polygon1.outer_ring.get_coordinate_list() == outer_ring_coords
inner_rings = test_polygon1.inner_rings
assert inner_rings[0].get_coordinate_list() == inner_ring_coords[::-1]
assert inner_rings[1].get_coordinate_list() == intersecting_ring_coords[::-1]
# check get perimeter
ring1_perimeter = ring1.get_perimeter()
ring2_perimeter = ring2.get_perimeter()
ring3_perimeter = ring3.get_perimeter()
poly_perimeter = test_poly.get_perimeter()
assert poly_perimeter == ring1_perimeter + ring2_perimeter + ring3_perimeter
# check set outer ring
test_polygon1 = geometry_elements.Polygon([ring1])
assert test_polygon1.get_area() == 9
test_polygon1.set_outer_ring([[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]])
assert test_polygon1.get_area() == 100
# check centroid
test_polygon1 = geometry_elements.Polygon()
assert test_polygon1.get_centroid() is None
test_polygon1 = geometry_elements.Polygon([outer_ring_coords])
assert np.all(test_polygon1.get_centroid() == [1.5, 1.5])
# check contain coordinates
test_polygon1 = geometry_elements.Polygon([outer_ring_coords])
assert np.all(test_polygon1.contain_coordinates([1], [1]))
assert np.all(test_polygon1.contain_coordinates([1, 2, 3], [0, 1, 2]))
assert not np.all(test_polygon1.contain_coordinates([4], [4]))
# check grid contained
test_polygon1 = geometry_elements.Polygon([outer_ring_coords])
# grid_y must be monotonically increasing
with pytest.raises(ValueError, match='grid_y must be monotonically increasing'):
test_polygon1.grid_contained(np.array([1, 2, 3, 4, 5]), np.array([1, 1, 1]))
# grid_x must be monotonically increasing
with pytest.raises(ValueError, match='grid_x must be monotonically increasing'):
test_polygon1.grid_contained(np.array([1, 1, 1, 1]), np.array([1, 2, 3, 4, 5]))
# grid_x and grid_y must be one dimensional
with pytest.raises(ValueError, match='grid_x and grid_y must be one dimensional'):
test_polygon1.grid_contained(np.array([[1, 2, 3], [3, 4, 5]]), np.array([1, 2, 3]))
assert np.all(test_polygon1.grid_contained(np.array([1, 2, 3]), np.array([0, 1, 2])))
contained = test_polygon1.grid_contained(np.array([0, 1, 2, 3, 4]), np.array([0, 1, 2, 3, 4]))
for i, val in enumerate(contained):
for j in range(len(val)):
if i <= 3 and j <= 3:
assert val[j]
else:
assert not val[j]
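# The get_area assertions above follow the shoelace formula.  For the 3x3
# outer ring the edge terms x1*y2 - x2*y1 are 0, 9, 9 and 0, summing to 18,
# half of which is the area 9; the 10x10 replacement ring gives 100.  A
# hedged sketch over a closed ring (first vertex repeated at the end):
def _sketch_shoelace_area(ring):
    area = 0.0
    for (x1, y1), (x2, y2) in zip(ring, ring[1:]):
        area += x1 * y2 - x2 * y1
    return 0.5 * abs(area)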
def test_multi_polygon(test_elements):
poly_coords1 = [[0, 0], [3, 0], [3, 3], [0, 3], [0, 0]]
poly_coords2 = [[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]]
poly_coords3 = [[1, 2], [2, 2], [2, 3], [1, 3], [1, 2]]
test_poly1 = geometry_elements.Polygon([poly_coords1])
test_poly2 = geometry_elements.Polygon([poly_coords2])
test_poly3 = geometry_elements.Polygon([poly_coords3])
# check instantiation and properties
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2, test_poly3])
assert len(multi_poly.collection) == 3
assert len(multi_poly.polygons) == 3
# check polygon setter
multi_poly = geometry_elements.MultiPolygon()
assert multi_poly.polygons is None
multi_poly.polygons = [test_poly1, test_poly2, test_poly3]
assert len(multi_poly.polygons) == 3
# check bounding box
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
truth_box = [0, 0, 3, 3]
box = multi_poly.get_bbox()
assert box == truth_box
# check to/from dict
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
multi_poly_dict = multi_poly.to_dict()
multi_poly1 = geometry_elements.MultiPolygon().from_dict(multi_poly_dict)
assert multi_poly1.get_coordinate_list()[0] == test_poly1.get_coordinate_list()
assert multi_poly1.get_coordinate_list()[1] == test_poly2.get_coordinate_list()
# check get perimeter
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
poly1_perimeter = test_poly1.get_perimeter()
poly2_perimeter = test_poly2.get_perimeter()
assert multi_poly.get_perimeter() == poly1_perimeter + poly2_perimeter
# check contain coordinates
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
assert np.all(multi_poly.contain_coordinates([1], [1]))
assert np.all(multi_poly.contain_coordinates([1, 2, 3], [0, 1, 2]))
assert not np.all(multi_poly.contain_coordinates([4], [4]))
# check grid contained
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
# grid_y must be monotonically increasing
with pytest.raises(ValueError, match='grid_y must be monotonically increasing'):
multi_poly.grid_contained(np.array([1, 2, 3, 4, 5]), np.array([1, 1, 1]))
# grid_x must be monotonically increasing
with pytest.raises(ValueError, match='grid_x must be monotonically increasing'):
multi_poly.grid_contained(np.array([1, 1, 1, 1]), np.array([1, 2, 3, 4, 5]))
# grid_x and grid_y must be one dimensional
with pytest.raises(ValueError, match='grid_x and grid_y must be one dimensional'):
multi_poly.grid_contained(np.array([[1, 2, 3], [3, 4, 5]]), np.array([1, 2, 3]))
assert np.all(multi_poly.grid_contained(np.array([1, 2, 3]), np.array([0, 1, 2])))
contained = multi_poly.grid_contained(np.array([0, 1, 2, 3, 4]), np.array([0, 1, 2, 3, 4]))
for i, val in enumerate(contained):
for j in range(len(val)):
if i <= 3 and j <= 3:
assert val[j]
else:
assert not val[j]
# check get_minimum_distance
multi_poly = geometry_elements.MultiPolygon()
assert multi_poly.get_minimum_distance([0, 0]) == float('inf')
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
assert multi_poly.get_minimum_distance([0, 0]) == 0.0
assert multi_poly.get_minimum_distance([0, 4]) == 1.0
assert multi_poly.get_minimum_distance([4, 0]) == 1.0
assert multi_poly.get_minimum_distance([4, 4]) == pytest.approx(np.sqrt(2), abs=1e-8)
# check assemble from collection
ring = geometry_elements.LinearRing(poly_coords1)
poly = geometry_elements.Polygon([poly_coords2])
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
geom_coll = geometry_elements.GeometryCollection([test_elements['poly_json']])
multi_poly1 = geometry_elements.MultiPolygon()
multi_poly1 = multi_poly1.assemble_from_collection(ring, poly, multi_poly, geom_coll)
assert multi_poly1.polygons[0].get_coordinate_list()[0] == poly_coords1
assert multi_poly1.polygons[1].get_coordinate_list()[0] == poly_coords2
assert multi_poly1.polygons[2].get_coordinate_list()[0] == poly_coords1
assert multi_poly1.polygons[3].get_coordinate_list()[0] == poly_coords2
assert multi_poly1.polygons[4].get_coordinate_list()[0] == test_elements['poly']
# No args
multi_poly2 = multi_poly1.assemble_from_collection()
assert isinstance(multi_poly2, geometry_elements.MultiPolygon)
def test_point(test_elements):
# test Point class
point = geometry_elements.Point(coordinates=test_elements['point'])
assert np.all(point.coordinates == test_elements['point'])
point.coordinates = [0.0, 0.0]
assert np.all(point.coordinates == [0.0, 0.0])
# Coordinates must be a one-dimensional array
with pytest.raises(ValueError, match='coordinates must be a one-dimensional array'):
point.coordinates = np.zeros((2, 3))
# Coordinates must have between 2 and 4 entries
with pytest.raises(ValueError, match='coordinates must have between 2 and 4 entries'):
point.coordinates = [0.0]
# Coordinates must have between 2 and 4 entries
with pytest.raises(ValueError, match='coordinates must have between 2 and 4 entries'):
point.coordinates = [0.0, 0.0, 0.0, 0.0, 0.0]
point.coordinates = test_elements['point']
assert np.all(point.get_bbox() == test_elements['point'] + test_elements['point'])
point.coordinates = test_elements['point']
assert point.get_coordinate_list() == test_elements['point']
point_dict = point.to_dict()
point1 = geometry_elements.Point().from_dict(point_dict)
assert point.get_coordinate_list() == point1.get_coordinate_list()
point = geometry_elements.Point(coordinates=[1, 1])
point1 = [0, 0]
assert point.get_minimum_distance(point1) == pytest.approx(np.sqrt(2), abs=1e-8)
def test_multi_point(test_elements):
# test MultiPoint class
multi_point = geometry_elements.MultiPoint()
assert multi_point.get_coordinate_list() is None
multi_point = geometry_elements.MultiPoint(coordinates=test_elements['poly'])
assert np.all(multi_point.get_coordinate_list() == test_elements['poly'])
multi_point1 = geometry_elements.MultiPoint(coordinates=multi_point)
assert np.all(multi_point.get_coordinate_list() == multi_point1.get_coordinate_list())
assert len(multi_point1.collection) == len(test_elements['poly'])
assert len(multi_point1.points) == len(test_elements['poly'])
multi_point1.points = test_elements['line_string']
assert np.all(multi_point1.get_coordinate_list() == test_elements['line_string'])
assert len(multi_point1.collection) == len(test_elements['line_string'])
multi_point_dict = multi_point.to_dict()
multi_point1 = geometry_elements.MultiPoint().from_dict(multi_point_dict)
assert multi_point.get_coordinate_list() == multi_point1.get_coordinate_list()
# test assemble from collection
point = geometry_elements.Point(coordinates=test_elements['point'])
multi_point1 = geometry_elements.MultiPoint()
multi_point1 = multi_point1.assemble_from_collection(multi_point, point)
truth_coords = test_elements['poly']
truth_coords.append(test_elements['point'])
assert multi_point1.get_coordinate_list() == truth_coords
# No args
multi_point2 = multi_point1.assemble_from_collection()
assert isinstance(multi_point2, geometry_elements.MultiPoint)
# test bounding box
coord_arr = np.asarray(test_elements['poly'])
truth_box = [min(coord_arr[:, 0]),
min(coord_arr[:, 1]),
max(coord_arr[:, 0]),
max(coord_arr[:, 1])]
box = multi_point1.get_bbox()
assert box == truth_box
multi_point2 = geometry_elements.MultiPoint()
assert multi_point2.get_bbox() is None
def test_line_string(test_elements):
# test LineString class
line_string = geometry_elements.LineString(coordinates=test_elements['line_string'])
assert np.all(line_string.coordinates == test_elements['line_string'])
line_string1 = geometry_elements.LineString(coordinates=line_string)
assert np.all(line_string1.coordinates == line_string.coordinates)
line_string.coordinates = [[0.0, 0.0], [1.0, 1.0]]
assert np.all(line_string.coordinates == [[0.0, 0.0], [1.0, 1.0]])
# Coordinates must be a two-dimensional array
with pytest.raises(ValueError, match='coordinates must be a two-dimensional array'):
line_string.coordinates = np.zeros((2, 3, 2))
# The second dimension of coordinates must have between 2 and 4 entries
with pytest.raises(ValueError, match='The second dimension of coordinates must have between 2 and 4 entries'):
line_string.coordinates = [[0.0]]
# The second dimension of coordinates must have between 2 and 4 entries
with pytest.raises(ValueError, match='The second dimension of coordinates must have between 2 and 4 entries'):
line_string.coordinates = [[0.0, 0.0, 0.0, 0.0, 0.0]]
# check intersection
line_string = geometry_elements.LineString()
line_string.coordinates = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
assert not line_string.self_intersection()
line_string.coordinates = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]
assert line_string.self_intersection()
# check bounding box
line_string = geometry_elements.LineString(coordinates=test_elements['line_string'])
coord_arr = np.asarray(test_elements['line_string'])
truth_box = [min(coord_arr[:, 0]),
min(coord_arr[:, 1]),
max(coord_arr[:, 0]),
max(coord_arr[:, 1])]
box = line_string.get_bbox()
assert box == truth_box
# check coordinate list getter
coord_list = line_string.get_coordinate_list()
assert coord_list == test_elements['line_string']
# check to/from dict
line_string_dict = line_string.to_dict()
line_string1 = geometry_elements.LineString().from_dict(line_string_dict)
assert line_string.get_coordinate_list() == line_string1.get_coordinate_list()
# check get length
line_string1.coordinates = [[0.0, 0.0], [1.0, 1.0]]
assert line_string1.get_length() == pytest.approx(np.sqrt(2), abs=1e-8)
# check get minimum distance
line_string1.coordinates = [[0.0, 0.0], [1.0, 1.0]]
test_point = [0.0, 0.0]
assert line_string1.get_minimum_distance(test_point) == 0.0
test_point = [1.0, 0.0]
assert line_string1.get_minimum_distance(test_point) == pytest.approx(1.0 / np.sqrt(2), abs=1e-8)
def test_multi_line_string(test_elements):
# test instantiate with a list of lists
multi_line_string = geometry_elements.MultiLineString(coordinates=[test_elements['poly'], test_elements['poly']])
assert len(multi_line_string.lines) == 2
assert len(multi_line_string.lines[0].coordinates) == len(test_elements['poly'])
assert len(multi_line_string.lines[1].coordinates) == len(test_elements['poly'])
# test instantiate with a MultiLineString
multi_line_string1 = geometry_elements.MultiLineString(coordinates=multi_line_string)
assert len(multi_line_string1.lines) == 2
assert len(multi_line_string1.lines[0].coordinates) == len(test_elements['poly'])
assert len(multi_line_string1.lines[1].coordinates) == len(test_elements['poly'])
# check line setter
multi_line_string1.lines = None
assert multi_line_string1.lines is None
multi_line_string1.lines = [test_elements['poly'], test_elements['poly']]
assert len(multi_line_string1.lines) == 2
assert len(multi_line_string1.lines[0].coordinates) == len(test_elements['poly'])
assert len(multi_line_string1.lines[1].coordinates) == len(test_elements['poly'])
# Coordinates must be a two-dimensional array
with pytest.raises(ValueError, match='coordinates must be a two-dimensional array'):
multi_line_string1.lines = test_elements['poly']
# check bounding box
poly_coords1 = [[0, 0], [3, 0], [3, 3], [0, 3], [0, 0]]
extended_poly_coords = [[0, 3], [3, 3], [3, 4], [0, 4], [0, 3]]
bb_line_string = geometry_elements.MultiLineString(coordinates=[poly_coords1, extended_poly_coords])
truth_box = [0, 0, 3, 4]
box = bb_line_string.get_bbox()
assert box == truth_box
# check coordinate list getter
coord_list = multi_line_string.get_coordinate_list()
assert coord_list[0] == test_elements['poly']
assert coord_list[1] == test_elements['poly']
# check to/from dict
multi_line_string_dict = multi_line_string.to_dict()
multi_line_string1 = geometry_elements.MultiLineString().from_dict(multi_line_string_dict)
assert multi_line_string.get_coordinate_list() == multi_line_string1.get_coordinate_list()
# check get minimum distance
multi_line_string1.lines = [[[0.0, 0.0], [2.0, 0.0]],
[[0.0, 2.0], [2.0, 2.0]]]
test_point = [0.0, 1.0]
assert multi_line_string1.get_minimum_distance(test_point) == 1.0
test_point = [1.0, 0.0]
assert multi_line_string1.get_minimum_distance(test_point) == 0
# check assemble from collection
multi_line_string1 = multi_line_string1.assemble_from_collection()
assert isinstance(multi_line_string1, geometry_elements.MultiLineString)
line_string = geometry_elements.LineString(coordinates=test_elements['line_string'])
multi_line_string1 = geometry_elements.MultiLineString()
multi_line_string1 = multi_line_string1.assemble_from_collection(multi_line_string, line_string)
assert multi_line_string1.lines[0].get_coordinate_list() == test_elements['poly']
assert multi_line_string1.lines[1].get_coordinate_list() == test_elements['poly']
assert multi_line_string1.lines[2].get_coordinate_list() == test_elements['line_string']
def test_linear_ring():
outer_ring_coords = [[0, 0], [3, 0], [3, 3], [0, 3], [0, 0]]
# check init and get_coordinate_list
ring1 = geometry_elements.LinearRing()
assert isinstance(ring1, geometry_elements.LinearRing)
assert ring1.get_coordinate_list() is None
ring2 = geometry_elements.LinearRing(outer_ring_coords)
ring3 = geometry_elements.LinearRing(ring2)
assert ring2.get_coordinate_list() == ring3.get_coordinate_list()
# check reverse_orientation (no coordinates)
ring1.reverse_orientation()
assert ring1.get_coordinate_list() is None
# check reverse_orientation (with coordinates)
ring2.reverse_orientation()
assert np.all(np.array(ring2.get_coordinate_list()) == np.array(outer_ring_coords)[::-1, :])
# check bounding_box
truth_box = np.array([[0., 3.], [0., 3.]])
box = ring2.bounding_box
assert np.all(box == truth_box)
ring2.coordinates = None
assert ring2.get_coordinate_list() is None
with pytest.raises(ValueError, match='coordinates must be two-dimensional'):
ring2.coordinates = outer_ring_coords[0]
with pytest.raises(ValueError, match='coordinates must have between 2 and 4 entries'):
ring2.coordinates = [[0], [1]]
with pytest.raises(ValueError, match='coordinates must have between 2 and 4 entries'):
ring2.coordinates = [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]
def test_basic_assemble(test_elements):
poly_coords1 = [[0, 0], [3, 0], [3, 3], [0, 3], [0, 0]]
poly_coords2 = [[1, 1], [2, 1], [2, 2], [1, 2], [1, 1]]
test_poly1 = geometry_elements.Polygon([poly_coords1])
test_poly2 = geometry_elements.Polygon([poly_coords2])
multi_point = geometry_elements.MultiPoint(coordinates=test_elements['poly'])
multi_line_string = geometry_elements.MultiLineString(coordinates=[test_elements['poly'], test_elements['poly']])
multi_poly = geometry_elements.MultiPolygon([test_poly1, test_poly2])
collective_type = geometry_elements.basic_assemble_from_collection(multi_point)
assert collective_type.type == 'MultiPoint'
collective_type = geometry_elements.basic_assemble_from_collection(multi_line_string)
assert collective_type.type == 'MultiLineString'
collective_type = geometry_elements.basic_assemble_from_collection(multi_poly)
assert collective_type.type == 'MultiPolygon'
collective_type = geometry_elements.basic_assemble_from_collection(multi_point, multi_line_string, multi_poly)
assert collective_type.type == 'GeometryCollection'
| 25,941 | 42.021559 | 141 | py |
sarpy | sarpy-master/tests/visualization/test_remap.py | #
# Copyright 2022 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import collections
import unittest
import numpy as np
from sarpy.visualization import remap
try:
import matplotlib.pyplot as plt
MATPLOTLIB_AVAILABLE = True
except ImportError:
MATPLOTLIB_AVAILABLE = False
class NoOpRemap(remap.RemapFunction):
def __init__(self):
super().__init__(override_name="noop")
def raw_call(self, data, **kwargs):
return data
class TestRemap(unittest.TestCase):
def test_clip_cast(self):
data = np.asarray([-100000, -128, -10, 0, 10, 127, 100000], dtype=np.float64)
result = remap.clip_cast(data, np.int8)
np.testing.assert_array_almost_equal(result, [-128, -128, -10, 0, 10, 127, 127])
result = remap.clip_cast(data, np.int8, -100, 100)
np.testing.assert_array_almost_equal(result, [-100, -100, -10, 0, 10, 100, 100])
result = remap.clip_cast(data, np.int8, -500, 500)
np.testing.assert_array_almost_equal(result, [-128, -128, -10, 0, 10, 127, 127])
result = remap.clip_cast(data, np.int16)
np.testing.assert_array_almost_equal(result, [-32768, -128, -10, 0, 10, 127, 32767])
def test_amplitude_to_density_zeros(self):
data = np.zeros(100, dtype=np.complex64)
result = remap.amplitude_to_density(data.copy())
np.testing.assert_array_equal(data, result)
def test_amplitude_to_density(self):
data = np.arange(100, dtype=np.complex64)
result = remap.amplitude_to_density(data)
self.assertTrue(np.all(np.isfinite(result)))
with self.assertRaises(ValueError):
remap.amplitude_to_density(data, dmin=-1)
with self.assertRaises(ValueError):
remap.amplitude_to_density(data, dmin=255)
with self.assertRaises(ValueError):
remap.amplitude_to_density(data, mmult=0)
def test_RemapFunction(self):
rf = remap.RemapFunction()
self.assertEqual(rf.bit_depth, 8)
self.assertEqual(rf.dimension, 0)
self.assertEqual(rf.output_dtype, np.dtype(np.uint8))
self.assertTrue(rf.are_global_parameters_set)
self.assertTrue(isinstance(rf.name, str))
self.assertGreater(len(rf.name), 0)
with self.assertRaises(NotImplementedError):
rf(np.arange(10))
with self.assertRaises(NotImplementedError):
rf.calculate_global_parameters_from_reader(None)
rf = remap.RemapFunction(override_name="unit_test_remap")
self.assertEqual(rf.name, "unit_test_remap")
with self.assertRaises(ValueError):
rf = remap.RemapFunction(override_name=123)
with self.assertRaises(ValueError):
rf = remap.RemapFunction(dimension=10)
for bit_depth in [8.0, 16, 32]:
rf = remap.RemapFunction(bit_depth=bit_depth)
self.assertEqual(rf.output_dtype.kind, "u")
self.assertEqual(rf.output_dtype.itemsize, bit_depth // 8)
for bit_depth in [0, 15.0, 64]:
with self.assertRaises(ValueError):
remap.RemapFunction(bit_depth=bit_depth)
# Check that casting and clipping occurs
data = np.linspace(-1000, 1000)
remapped = NoOpRemap()(data)
self.assertEqual(remapped.dtype, np.dtype(np.uint8))
np.testing.assert_array_equal(remapped, remap.clip_cast(data))
def test_MonochromaticRemap(self):
mr = remap.MonochromaticRemap()
self.assertEqual(mr.bit_depth, 8)
self.assertEqual(mr.max_output_value, 255)
mr = remap.MonochromaticRemap(bit_depth=16)
self.assertEqual(mr.max_output_value, (1 << 16) - 1)
with self.assertRaises(ValueError):
mr = remap.MonochromaticRemap(bit_depth=16, max_output_value=1 << 16)
with self.assertRaises(ValueError):
mr = remap.MonochromaticRemap(bit_depth=16, max_output_value=0)
with self.assertRaises(NotImplementedError):
mr.calculate_global_parameters_from_reader(None)
with self.assertRaises(NotImplementedError):
mr(np.random.uniform(100))
def test_Density(self):
with self.assertRaises(ValueError):
remap.Density(dmin=-1)
with self.assertRaises(ValueError):
remap.Density(dmin=256)
with self.assertRaises(ValueError):
remap.Density(mmult=0.9)
data = np.random.lognormal(size=1000).astype(np.complex128)
dr = remap.Density()
nominal = dr(data)
self.assertEqual(nominal.dtype, np.uint8)
double = dr(data * 2)
np.testing.assert_array_equal(nominal, double)
self.assertFalse(dr.are_global_parameters_set)
self.assertTrue(remap.Density(data_mean=1).are_global_parameters_set)
def test_Linear(self):
data = np.linspace(2, 1, 512)
lr = remap.Linear()
self.assertFalse(lr.are_global_parameters_set)
self.assertTrue(remap.Linear(min_value=1, max_value=2).are_global_parameters_set)
nominal = lr(data)
self.assertGreaterEqual(np.count_nonzero(nominal), data.size - 3)
self.assertEqual(nominal.dtype, np.uint8)
self.assertEqual(nominal.min(), 0)
self.assertEqual(nominal.max(), 255)
double = lr(data * 2)
np.testing.assert_array_equal(nominal, double)
lr.min_value = data.mean()
with_min = lr(data)
self.assertEqual(np.count_nonzero(with_min), data.size / 2 - 1)
with self.assertRaises(ValueError):
lr.min_value = np.inf
lr.min_value = None
self.assertGreaterEqual(np.count_nonzero(lr(data)), data.size - 3)
lr.max_value = data.mean()
with_max = lr(data)
self.assertEqual(np.sum(with_max == 255), np.sum(data >= data.mean()))
with self.assertRaises(ValueError):
lr.max_value = np.inf
lr.max_value = None
np.testing.assert_array_equal(lr(data), nominal)
double = lr(data * 2)
np.testing.assert_array_equal(nominal, double)
def test_Logarithmic(self):
data = np.linspace(2, 1, 512)
lr = remap.Logarithmic()
self.assertFalse(lr.are_global_parameters_set)
self.assertTrue(remap.Logarithmic(min_value=1, max_value=2).are_global_parameters_set)
nominal = lr(data)
self.assertEqual(nominal.dtype, np.uint8)
self.assertEqual(nominal.min(), 0)
self.assertEqual(nominal.max(), 255)
lr.max_value = 1.5
with_max = lr(data)
self.assertEqual(nominal.min(), 0)
np.testing.assert_array_equal(with_max[:256], 255)
np.testing.assert_array_less(with_max[256:], 255)
lr.max_value = None
lr.min_value = 1.5
with_max = lr(data)
self.assertEqual(nominal.max(), 255)
np.testing.assert_array_equal(with_max[256:], 0)
def test_Logarithmic_const(self):
data = np.full(1000, 1.0, dtype=np.complex64)
lr = remap.Logarithmic()
np.testing.assert_array_equal(lr(data), 0)
def test_PEDF(self):
self.assertFalse(remap.PEDF().are_global_parameters_set)
self.assertTrue(remap.PEDF(data_mean=0.5).are_global_parameters_set)
data = np.random.lognormal(size=1000).astype(np.complex128)
pedf = remap.PEDF()(data)
self.assertEqual(pedf.dtype, np.uint8)
self.assertEqual(pedf.min(), 0)
def test_NRL(self):
self.assertFalse(remap.NRL().are_global_parameters_set)
self.assertTrue(remap.NRL(stats=(0, 2, 1)).are_global_parameters_set)
data = np.random.lognormal(size=1000).astype(np.complex128)
nrl = remap.NRL()(data)
self.assertEqual(nrl.dtype, np.uint8)
self.assertEqual(nrl.min(), 0)
self.assertEqual(nrl.max(), 255)
with self.assertRaises(ValueError):
remap.NRL(percentile=0)
with self.assertRaises(ValueError):
remap.NRL(percentile=100)
with self.assertRaises(ValueError):
remap.NRL(max_output_value=100, knee=101)
def test_NRL_near_const(self):
data = np.full(1000, 1.0, dtype=np.complex64)
data[-1] += 1e-6
with self.assertLogs('sarpy.visualization.remap', level='WARNING') as lc:
nrl = remap.NRL()(data)
self.assertTrue(any('at least significantly constant' in msg for msg in lc.output))
expected = np.zeros(data.size, dtype=np.uint8)
expected[-1] = 255
np.testing.assert_array_equal(nrl, expected)
def test_NRL_inf(self):
data = np.full(1000, np.inf, dtype=np.complex64)
nrl = remap.NRL()(data)
np.testing.assert_array_equal(nrl, 0)
def test_MonoRemaps(self):
data = np.random.lognormal(size=1000).astype(np.complex128)
adata = np.abs(data)
nominal = remap.Density()(data)
brighter = remap.Brighter()(data)
darker = remap.Darker()(data)
self.assertEqual(nominal.dtype, np.uint8)
self.assertEqual(brighter.dtype, np.uint8)
self.assertEqual(darker.dtype, np.uint8)
self.assertTrue(np.all(brighter >= nominal))
self.assertGreater(np.mean(brighter), np.mean(nominal))
self.assertTrue(np.all(darker <= nominal))
self.assertLess(np.mean(darker), np.mean(nominal))
hc = remap.High_Contrast()(data)
self.assertEqual(hc.dtype, np.uint8)
self.assertGreater(np.sum(hc == 0), np.sum(nominal == 0))
self.assertGreater(np.sum(hc == 255), np.sum(nominal == 255))
linear = remap.Linear()(data)
self.assertEqual(linear.dtype, np.uint8)
self.assertAlmostEqual(max(linear / adata), 255 / max(adata))
log = remap.Logarithmic()(data)
self.assertEqual(log.dtype, np.uint8)
self.assertTrue(np.all(linear <= log))
nrl = remap.NRL()(data)
self.assertTrue(np.all(nrl >= linear))
def test_LUTRemap(self):
data = np.concatenate((np.arange(256), np.arange(256)[::-1]))
lut = np.zeros((256, 3), dtype=np.uint8)
lut[:, 0] = np.arange(256)
lut[:, 1] = np.arange(256)[::-1]
lut[:, 2] = np.roll(np.arange(256), 128)
lutr = remap.LUT8bit(mono_remap=remap.Linear(), lookup_table=lut)
result = lutr(data)
np.testing.assert_array_equal(result.shape, data.shape + (3,))
np.testing.assert_array_equal(result[:256], lut)
np.testing.assert_array_equal(result[256:], lut[::-1])
class NonMono(remap.RemapFunction):
pass
with self.assertRaises(ValueError):
remap.LUT8bit(mono_remap=NonMono, lookup_table=lut)
@unittest.skipIf(not MATPLOTLIB_AVAILABLE, "matplotlib not available")
def test_LUTRemap_matplotlib(self):
data = np.arange(0, 512, 2)[::-1]
lutr = remap.LUT8bit(mono_remap=remap.Linear(), lookup_table="binary")
result = lutr(data)
np.testing.assert_array_equal(result.shape, data.shape + (3,))
np.testing.assert_array_equal(result[:, 0], result[:, 1])
np.testing.assert_array_equal(result[:, 0], result[:, 2])
self.assertTrue(np.all(np.abs(result[:, 0] - np.arange(256)) <= 1))
def test_remap_names(self):
remap._DEFAULTS_REGISTERED = False
remap._REMAP_DICT = collections.OrderedDict()
default_names = remap.get_remap_names()
self.assertIn("nrl", default_names)
self.assertIn("density", default_names)
self.assertIn("high_contrast", default_names)
self.assertIn("brighter", default_names)
self.assertIn("darker", default_names)
self.assertIn("linear", default_names)
self.assertIn("log", default_names)
self.assertIn("pedf", default_names)
if MATPLOTLIB_AVAILABLE:
self.assertIn("viridis", default_names)
self.assertIn("magma", default_names)
self.assertIn("rainbow", default_names)
self.assertIn("bone", default_names)
noop_remap = NoOpRemap()
remap.register_remap(noop_remap)
updated_names = remap.get_remap_names()
self.assertEqual(set(updated_names) - set(default_names), {"noop"})
with self.assertRaises(TypeError):
remap.register_remap(lambda x: x)
another_noop_remap = NoOpRemap()
remap.register_remap(another_noop_remap)
self.assertIs(noop_remap, remap.get_registered_remap("noop")) # didn't overwrite
remap.register_remap(another_noop_remap, overwrite=True)
self.assertIs(another_noop_remap, remap.get_registered_remap("noop")) # did overwrite
remap._DEFAULTS_REGISTERED = False
remap._REMAP_DICT = collections.OrderedDict()
def test_get_registered_remap(self):
with self.assertRaises(KeyError):
remap.get_registered_remap("__fake__")
self.assertEqual(remap.get_registered_remap("__fake__", "default"), "default")
def test_get_remap_list(self):
remap_list = remap.get_remap_list()
self.assertSetEqual(set(item[0] for item in remap_list), set(remap.get_remap_names()))
self.assertIsInstance(dict(remap_list)["density"], remap.Density)
def test_flat_interface(self):
data = np.random.lognormal(size=1000).astype(np.complex128)
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.density(data), remap.Density()(data))
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.brighter(data), remap.Brighter()(data))
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.darker(data), remap.Darker()(data))
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.high_contrast(data), remap.High_Contrast()(data))
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.linear(data), remap.Linear()(data))
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.log(data), remap.Logarithmic()(data))
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.pedf(data), remap.PEDF()(data))
with self.assertWarns(DeprecationWarning):
np.testing.assert_array_equal(remap.nrl(data), remap.NRL()(data))
| 14,497 | 39.954802 | 97 | py |
sarpy | sarpy-master/tests/visualization/test_cphd_kmz_product_creation.py | import os
import xml.etree.ElementTree
import zipfile
import pytest
import sarpy.io.phase_history
import sarpy.visualization.cphd_kmz_product_creation as cphd_kmz
TEST_FILE_NAMES = {
'simple': 'spotlight_example.cphd',
}
TEST_FILE_PATHS = {}
TEST_FILE_ROOT = os.environ.get('SARPY_TEST_PATH', None)
if TEST_FILE_ROOT is not None:
for name_key, path_value in TEST_FILE_NAMES.items():
the_file = os.path.join(TEST_FILE_ROOT, 'cphd', path_value)
if os.path.isfile(the_file):
TEST_FILE_PATHS[name_key] = the_file
@pytest.fixture(scope='module')
def cphd_file():
file_path = TEST_FILE_PATHS.get('simple', None)
if file_path is None:
pytest.skip('simple cphd test file not found')
else:
return file_path
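# To run these tests locally, point SARPY_TEST_PATH at a directory that
# contains the CPHD sample, e.g. (hypothetical location):
#   export SARPY_TEST_PATH=/data/sarpy_test_files
# so that /data/sarpy_test_files/cphd/spotlight_example.cphd exists.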
def test_create_kmz(cphd_file, tmp_path):
reader = sarpy.io.phase_history.open(cphd_file)
file_stem = 'the_file_stem'
cphd_kmz.cphd_create_kmz_view(reader, tmp_path, file_stem=file_stem)
assert len(list(tmp_path.glob('**/*'))) == 1
produced_file = next(tmp_path.glob(file_stem + '*.kmz'))
with zipfile.ZipFile(produced_file, 'r') as kmz:
assert set(kmz.namelist()) == {'doc.kml'}
with kmz.open('doc.kml') as kml_fd:
tree = xml.etree.ElementTree.parse(kml_fd)
assert tree.getroot().tag == '{http://www.opengis.net/kml/2.2}kml'
| 1,364 | 28.042553 | 78 | py |
sarpy | sarpy-master/tests/visualization/__init__.py | 0 | 0 | 0 | py | |
sarpy | sarpy-master/sarpy/__details__.py |
__classification__ = 'UNCLASSIFIED'
_post_identifier = '' # this is the public release version
| 97 | 23.5 | 59 | py |
sarpy | sarpy-master/sarpy/compliance.py |
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class SarpyError(Exception):
"""A custom base exception class"""
pass
def bytes_to_string(bytes_in, encoding='utf-8'):
"""
Ensure that the input value is mapped to a string, decoding bytes if necessary.
Parameters
----------
bytes_in : bytes
encoding : str
The encoding to apply, if necessary.
Returns
-------
str
"""
if isinstance(bytes_in, str):
return bytes_in
if not isinstance(bytes_in, bytes):
raise TypeError('Input is required to be a str or bytes instance. Got type {}'.format(type(bytes_in)))
return bytes_in.decode(encoding, errors='ignore')
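# Example usage (a minimal sketch; inputs other than str/bytes raise TypeError):
#   bytes_to_string(b'hello') -> 'hello'
#   bytes_to_string('hello') -> 'hello'
#   bytes_to_string(b'caf\xc3\xa9') -> 'café'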
| 669 | 19.30303 | 92 | py |
sarpy | sarpy-master/sarpy/__init__.py | from .__about__ import *
import logging
__all__ = ['__version__',
'__classification__', '__author__', '__url__', '__email__',
'__title__', '__summary__',
'__license__', '__copyright__']
# establish logging paradigm
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
| 322 | 22.071429 | 70 | py |
sarpy | sarpy-master/sarpy/__about__.py | # MIT License
#
# Copyright (c) 2020 National Geospatial-Intelligence Agency
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__all__ = ['__version__',
'__classification__', '__author__', '__url__', '__email__',
'__title__', '__summary__',
'__license__', '__copyright__']
from sarpy.__details__ import __classification__, _post_identifier
_version_number = '1.3.53'
__version__ = _version_number + _post_identifier
__author__ = "National Geospatial-Intelligence Agency"
__url__ = "https://github.com/ngageoint/sarpy"
__email__ = "Wade.C.Schwartzkopf@nga.mil"
__title__ = "sarpy"
__summary__ = "Python tools for reading, writing, and simple processing of complex SAR data and other " \
"associated data."
__license__ = "MIT License"
__copyright__ = "2020 {}".format(__author__)
| 1,841 | 39.043478 | 105 | py |
sarpy | sarpy-master/sarpy/io/kml.py | """
Functionality for exporting certain data elements to a kml document
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import zipfile
import logging
import os
import numpy
from typing import Union, List
from uuid import uuid4
from io import BytesIO
from xml.dom import minidom
from sarpy.geometry.geocoords import geodetic_to_ecf, ecf_to_geodetic
try:
# noinspection PyPackageRequirements
import PIL
# noinspection PyPackageRequirements
import PIL.Image
except ImportError:
PIL = None
logger = logging.getLogger(__name__)
#################
# default values
_DEFAULT_ICON = 'http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png'
class Document(object):
"""
The main kml document container, and zip archive if the output file is
of type kmz. *This is intended to be used as a context manager.*
"""
__slots__ = ('_doc', '_document', '_archive', '_file', '_closed')
def __init__(self, file_name=None, **params):
"""
Parameters
----------
file_name : str|zipfile.ZipFile|file like
The output location or buffer to which to write the kml/other objects
params
The parameters dictionary for file creation.
"""
self._file = None
self._archive = None
self._closed = False
self._set_file(file_name)
self._doc = minidom.Document()
kml = self._doc.createElement('kml')
self._doc.appendChild(kml)
kml.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')
kml.setAttribute('xmlns:gx', 'http://www.google.com/kml/ext/2.2')
kml.setAttribute('xmlns:kml', 'http://www.opengis.net/kml/2.2')
kml.setAttribute('xmlns:atom', 'http://www.w3.org/2005/Atom')
self._document = self.add_container(kml, 'Document', **params)
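# A minimal usage sketch (the output name is hypothetical); on exit the kml is
# serialized and the archive, if any, is closed:
#
#     with Document('overview.kmz', name='overview') as doc:
#         doc.add_default_style()
#         doc.add_point('-77.0366,38.8977', name='example point', styleUrl='#defaultStyle')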
def __str__(self):
xml = self._doc.toprettyxml(encoding='utf-8')
if not isinstance(xml, str):
return xml.decode('utf-8')
else:
return xml
def _set_file(self, file_name):
if isinstance(file_name, str):
fext = os.path.splitext(file_name)[1]
if fext not in ['.kml', '.kmz']:
logger.warning('file extension should be one of .kml or .kmz, got {}. This will be treated as a kml file.'.format(fext))
if fext == '.kmz':
self._archive = zipfile.ZipFile(file_name, 'w', zipfile.ZIP_DEFLATED)
else:
self._file = open(file_name, 'w')
elif isinstance(file_name, zipfile.ZipFile):
self._archive = file_name
elif hasattr(file_name, 'write'):
self._file = file_name
else:
raise TypeError('file_name must be a file path, file-like object, or a zipfile.Zipfile instance')
def close(self):
if self._closed:
return
if self._file is not None:
self._file.write(str(self))
self._file.close()
else:
self.write_string_to_archive('doc.kml', str(self))
self._archive.close()
self._closed = True
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type is None:
self.close()
else:
logger.error(
'The kml/kmz file writer generated an exception during processing.\n\t'
'Any generated file may be only partially generated and/or corrupt.')
# The exception is re-raised automatically, since __exit__ does not return True.
def write_file_to_archive(self, archive_path, file_path):
"""
Copy the given file into the kmz archive at the given archive location.
Parameters
----------
archive_path : str
The location in the archive.
file_path : str
The file location on the file system.
Returns
-------
None
"""
if self._archive is None:
raise ValueError('No archive defined.')
self._archive.write(file_path, archive_path, zipfile.ZIP_DEFLATED)
def write_string_to_archive(self, archive_path, val):
"""
Write the given string/bytes into the kmz archive at the given location.
Parameters
----------
archive_path : str
val : bytes|str
Returns
-------
None
"""
if self._archive is None:
raise ValueError('No archive defined.')
self._archive.writestr(zipfile.ZipInfo(archive_path), val, zipfile.ZIP_DEFLATED)
def write_image_to_archive(self, archive_path, val, img_format='PNG'):
"""
Write the given PIL image into the kmz archive at the given location.
Parameters
----------
archive_path : str
val : PIL.Image.Image
img_format : str
Returns
-------
None
"""
imbuf = BytesIO()
val.save(imbuf, img_format)
self.write_string_to_archive(archive_path, imbuf.getvalue())
imbuf.close()
# xml node creation elements
def _create_new_node(self, par, tag):
# type: (Union[None, minidom.Element], str) -> minidom.Element
nod = self._doc.createElement(tag)
if par is None:
self._document.appendChild(nod)
else:
par.appendChild(nod)
return nod
def _add_text_node(self, par, tag, value):
# type: (Union[None, minidom.Element], str, str) -> Union[None, minidom.Element]
if value is None:
return None
nod = self._doc.createElement(tag)
if isinstance(value, str):
nod.appendChild(self._doc.createTextNode(value))
else:
nod.appendChild(self._doc.createTextNode(str(value)))
par.appendChild(nod)
return nod
def _add_cdata_node(self, par, tag, value):
# type: (Union[None, minidom.Element], str, str) -> Union[None, minidom.Element]
if value is None:
return None
nod = self._doc.createElement(tag)
if isinstance(value, str):
nod.appendChild(self._doc.createCDATASection(value))
else:
nod.appendChild(self._doc.createCDATASection(str(value)))
par.appendChild(nod)
return nod
def _add_conditional_text_node(self, par, tag, params, default=None):
# type: (Union[None, minidom.Element], str, dict, Union[None, str]) -> minidom.Element
return self._add_text_node(par, tag, params.get(tag, default))
def _add_conditional_cdata_node(self, par, tag, params, default=None):
# type: (Union[None, minidom.Element], str, dict, Union[None, str]) -> minidom.Element
return self._add_cdata_node(par, tag, params.get(tag, default))
# basic kml container creation
def add_container(self, par=None, the_type='Placemark', **params):
"""
For creation of Document, Folder, or Placemark container.
Parameters
----------
par : None|minidom.Element
the_type : str
One of "Placemark", "Folder". The type "Document" can only be used once,
for the top level construct.
params
The dictionary of parameters
Returns
-------
minidom.Element
"""
if the_type not in ("Placemark", "Folder", "Document"):
raise ValueError('the_type must be one of ("Placemark", "Folder", "Document")')
container = self._create_new_node(par, the_type)
if 'id' in params:
container.setAttribute('id', params['id'])
for opt in ['name', 'Snippet', 'styleUrl']:
self._add_conditional_text_node(container, opt, params)
self._add_conditional_cdata_node(container, 'description', params)
# extended data
if ('schemaUrl' in params) and ('ExtendedData' in params):
self._add_extended_data(container, **params)
if ('beginTime' in params) or ('endTime' in params):
ts = self._create_new_node(container, 'TimeSpan')
self._add_text_node(ts, 'begin', params.get('beginTime', None))
self._add_text_node(ts, 'end', params.get('endTime', None))
elif 'when' in params:
ts = self._create_new_node(container, 'TimeStamp')
self._add_text_node(ts, 'when', params['when'])
# visibility is independent of the time elements above
if 'visibility' in params:
if isinstance(params['visibility'], str):
visibility = params['visibility']
else:
visibility = '1' if params['visibility'] else '0'
self._add_text_node(container, 'visibility', visibility)
return container
# Styles
def add_style_map(self, style_id, high_id, low_id):
"""
Creates a styleMap from two given style ids.
Parameters
----------
style_id : str
high_id : str
low_id : str
Returns
-------
None
"""
sm = self._create_new_node(None, 'StyleMap')
sm.setAttribute('id', style_id)
pair1 = self._create_new_node(sm, 'Pair')
self._add_text_node(pair1, 'key', 'normal')
self._add_text_node(pair1, 'styleUrl', '#'+low_id)
pair2 = self._create_new_node(sm, 'Pair')
self._add_text_node(pair2, 'key', 'highlight')
self._add_text_node(pair2, 'styleUrl', '#'+high_id)
def add_style(self, style_id, **params):
"""
Creates a style for use in the document tree.
Parameters
----------
style_id : str
the style id string.
params
the dictionary of the parameters
Returns
-------
None
"""
sty = self._create_new_node(None, 'Style')
sty.setAttribute('id', style_id)
if 'line_style' in params:
self.add_line_style(None, sty, **params['line_style'])
if 'label_style' in params:
self.add_label_style(None, sty, **params['label_style'])
if 'list_style' in params:
self.add_list_style(None, sty, **params['list_style'])
if 'icon_style' in params:
self.add_icon_style(None, sty, **params['icon_style'])
if 'poly_style' in params:
self.add_poly_style(None, sty, **params['poly_style'])
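# A minimal sketch of a normal/highlight style pair (ids and colors are
# arbitrary; note that kml colors are ordered aabbggrr):
#
#     doc.add_style('my_high', line_style={'color': 'ff0000ff', 'width': '2.0'})
#     doc.add_style('my_low', line_style={'color': 'ff0000ff', 'width': '1.0'})
#     doc.add_style_map('myStyle', 'my_high', 'my_low')
#
# after which elements may reference styleUrl='#myStyle'.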
def add_line_style(self, style_id=None, par=None, **params):
"""
Add line style.
Parameters
----------
style_id : None|str
The id, which should not be set if this is a child of a style element.
par : None|minidom.Element
The parent node.
params
The parameters dictionary.
Returns
-------
None
"""
sty = self._create_new_node(par, 'LineStyle')
if style_id is not None:
sty.setAttribute('id', style_id)
self._add_conditional_text_node(sty, 'color', params, default='b0ff0000')
self._add_conditional_text_node(sty, 'width', params, default='1.0')
def add_list_style(self, style_id=None, par=None, **params):
"""
Add list style
Parameters
----------
style_id : None|str
The id, which should not be set if this is a child of a style element.
par : None|minidom.Element
The parent node.
params
The parameters dictionary.
Returns
-------
None
"""
sty = self._create_new_node(par, 'ListStyle')
if style_id is not None:
sty.setAttribute('id', style_id)
item_icon = self._create_new_node(sty, 'ItemIcon')
self._add_text_node(item_icon, 'href', params.get('icon_ref', _DEFAULT_ICON))
def add_label_style(self, style_id=None, par=None, **params):
"""
Add label style
Parameters
----------
style_id : None|str
The id, which should not be set if this is a child of a style element.
par : None|minidom.Element
The parent node.
params
The parameters dictionary.
Returns
-------
None
"""
sty = self._create_new_node(par, 'LabelStyle')
if style_id is not None:
sty.setAttribute('id', style_id)
self._add_conditional_text_node(sty, 'color', params, default='b0ff0000')
self._add_conditional_text_node(sty, 'scale', params, default='1.0')
def add_icon_style(self, style_id=None, par=None, **params):
"""
Add icon style.
Parameters
----------
style_id : None|str
The id, which should not be set if this is a child of a style element.
par : None|minidom.Element
The parent node.
params
The parameters dictionary.
Returns
-------
None
"""
sty = self._create_new_node(par, 'IconStyle')
if style_id is not None:
sty.setAttribute('id', style_id)
self._add_conditional_text_node(sty, 'color', params)
self._add_conditional_text_node(sty, 'scale', params)
icon = self._create_new_node(sty, 'Icon')
self._add_text_node(icon, 'href', params.get('icon_ref', _DEFAULT_ICON))
def add_poly_style(self, style_id=None, par=None, **params):
"""
Add poly style.
Parameters
----------
style_id : None|str
The id, which should not be set if this is a child of a style element.
par : None|minidom.Element
The parent node.
params
The parameters dictionary.
Returns
-------
None
"""
sty = self._create_new_node(par, 'PolyStyle')
if style_id is not None:
sty.setAttribute('id', style_id)
self._add_conditional_text_node(sty, 'color', params, default='80ff0000')
self._add_conditional_text_node(sty, 'fill', params)
self._add_conditional_text_node(sty, 'outline', params)
def add_default_style(self):
"""
Add default style
The style is created, and appended at root level. The corresponding styleUrl is '#defaultStyle'
"""
line = {'color': 'ff505050', 'width': '1.0'}
label = {'color': 'ffc0c0c0', 'scale': '1.0'}
icon = {'color': 'ffff5050', 'scale': '1.0'}
poly = {'color': '80ff5050'}
self.add_style(
'default_high',
line_style=line, label_style=label, icon_style=icon, poly_style=poly)
line['width'] = '0.75'
label['scale'] = '0.75'
icon['scale'] = '0.75'
self.add_style(
'default_low',
line_style=line, label_style=label, icon_style=icon, poly_style=poly)
self.add_style_map('defaultStyle', 'default_high', 'default_low')
def add_color_ramp(self, colors, high_size=1.0, low_size=0.5, icon_ref=None, name_stem='sty'):
"""
Adds collection of enumerated styles corresponding to provided array of colors.
Parameters
----------
colors : numpy.ndarray
numpy array of shape (N, 4) of 8-bit colors assumed to be RGBA
high_size : float
The highlighted size.
low_size : float
The regular (low lighted?) size.
icon_ref : str
The icon reference.
name_stem : str
The string representing the naming convention for the associated styles.
Returns
-------
None
"""
hline = {'width': 2*high_size}
hlabel = {'scale': high_size}
hicon = {'scale': high_size}
lline = {'width': 2*low_size}
llabel = {'scale': low_size}
licon = {'scale': low_size}
if icon_ref is not None:
hicon['icon_ref'] = icon_ref
licon['icon_ref'] = icon_ref
for i in range(colors.shape[0]):
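# KML colors are ordered aabbggrr, so reorder the RGBA input accordingly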
col = '{3:02x}{2:02x}{1:02x}{0:02x}'.format(*colors[i, :])
for di in [hline, hlabel, hicon, lline, llabel, licon]:
di['color'] = col
self.add_style(
'{0!s}{1:d}_high'.format(name_stem, i),
line_style=hline, label_style=hlabel, icon_style=hicon)
self.add_style(
'{0!s}{1:d}_low'.format(name_stem, i),
line_style=lline, label_style=llabel, icon_style=licon)
self.add_style_map(
'{0!s}{1:d}'.format(name_stem, i),
'{0!s}{1:d}_high'.format(name_stem, i),
'{0!s}{1:d}_low'.format(name_stem, i))
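# A sketch of building a ramp from a matplotlib colormap (assumes a recent
# matplotlib is installed; the colormap choice is arbitrary):
#
#     from matplotlib import colormaps
#     colors = (colormaps['viridis'](numpy.linspace(0, 1, 16))*255).astype(numpy.uint8)
#     doc.add_color_ramp(colors, name_stem='ramp')
#
# which registers style maps 'ramp0' through 'ramp15'.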
# extended data handling
def add_schema(self, schema_id, field_dict, short_name=None):
"""
For defining/using the extended data schema capability. **Note that this
is specifically part of the google earth extension of kml, and may not be generally
supported by anything except google earth.**
Parameters
----------
schema_id : str
the schema id - must be unique to the id collection document
field_dict : dict
dictionary where the key is field name. The corresponding value is a tuple of the form
`(type, displayName)`, where `displayName` can be `None`. The value of `type` is one of
the data types permitted:
* 'string'
* 'int
* 'uint'
* 'short'
* 'ushort'
* 'float'
* 'double'
* 'bool'
short_name : None|str
optional short name for display in the schema
Returns
-------
None
"""
types = ['string', 'int', 'uint', 'short', 'ushort', 'float', 'double', 'bool']
sch = self._create_new_node(None, 'Schema')
sch.setAttribute('id', schema_id)
if short_name is not None:
sch.setAttribute('name', short_name)
for name in field_dict:
sf = self._doc.createElement('SimpleField')
typ, dname = field_dict[name]
sf.setAttribute('name', name)
if typ in types:
sf.setAttribute('type', typ)
sch.appendChild(sf)
else:
logger.warning(
"Schema '{0!s}' has field '{1!s}' of unrecognized type '{2!s}',\n\t"
"which is being excluded.".format(schema_id, name, typ))
self._add_text_node(sf, 'displayName', dname)
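# A minimal sketch tying a schema to a placemark's extended data (the id and
# field names are arbitrary):
#
#     doc.add_schema('trackSchema', {'speed': ('double', 'Speed (m/s)')})
#     doc.add_container(name='obs', schemaUrl='#trackSchema',
#                       ExtendedData={'speed': 12.5})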
def _add_extended_data(self, par, **params):
"""
Adds ExtendedData (schema data) to the parent element. **Note that this
is specifically part of the google earth extension of kml, and may not be generally
supported by anything except google earth.**
Parameters
----------
par : minidom.Element
params
the parameters dictionary
Returns
-------
None
"""
extended_data = self._create_new_node(par, 'ExtendedData')
schema_data = self._create_new_node(extended_data, 'SchemaData')
schema_data.setAttribute('schemaUrl', params['schemaUrl'])
dat = params['ExtendedData']
keys = params.get('fieldOrder', sorted(dat.keys()))
for key in keys:
# check if data is iterable
if hasattr(dat[key], '__iter__'):
array_node = self._create_new_node(schema_data, 'gx:SimpleArrayData')
array_node.setAttribute('name', key)
for el in dat[key]:
self._add_text_node(array_node, 'gx:value', el)
else:
sid = self._add_text_node(schema_data, 'SimpleData', dat[key])
sid.setAttribute('name', key)
def add_screen_overlay(self, image_ref, par=None, **params):
"""
Adds ScreenOverlay object.
Parameters
----------
image_ref : str
Reference to appropriate image object, whether in the kmz archive or
an appropriate url.
par : None|minidom.Element
The parent node. Appended at root level if not provided.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
overlay = self._create_new_node(par, 'ScreenOverlay')
if 'id' in params:
overlay.setAttribute('id', params['id'])
for opt in ['name', 'Snippet', 'styleUrl', 'rotation']:
self._add_conditional_text_node(overlay, opt, params)
self._add_conditional_cdata_node(overlay, 'description', params)
# extended data
if ('schemaUrl' in params) and ('ExtendedData' in params):
self._add_extended_data(overlay, **params)
# overlay parameters
for opt in ['overlayXY', 'screenXY', 'size', 'rotationXY']:
olp = self._doc.createElement(opt)
good = True
for att in ['x', 'y', 'xunits', 'yunits']:
key = '{}:{}'.format(opt, att)
if key in params:
olp.setAttribute(att, params[key])
else:
logger.error(
'params is missing required key {},\n\t'
'so we are aborting screen overlay parameters construction. '
'This screen overlay will likely not render correctly.'.format(key))
good = False
if good:
overlay.appendChild(olp)
# icon
ic = self._create_new_node(overlay, 'Icon')
self._add_text_node(ic, 'href', image_ref)
return overlay
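# A sketch of anchoring an overlay image at the lower-left screen corner
# (the archive path is hypothetical; the rotationXY group is deliberately
# omitted and will simply be skipped with a logged error):
#
#     doc.add_screen_overlay(
#         'images/legend.png', name='legend',
#         **{'overlayXY:x': '0', 'overlayXY:y': '0',
#            'overlayXY:xunits': 'fraction', 'overlayXY:yunits': 'fraction',
#            'screenXY:x': '0', 'screenXY:y': '0',
#            'screenXY:xunits': 'fraction', 'screenXY:yunits': 'fraction',
#            'size:x': '-1', 'size:y': '-1',
#            'size:xunits': 'pixels', 'size:yunits': 'pixels'})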
# direct kml geometries
def add_multi_geometry(self, par=None, **params):
"""
Adds a MultiGeometry object. The MultiGeometry object is really just a container.
The user must continue adding the primitive Geometry constituents to this container or
nothing will actually get rendered.
Parameters
----------
par : None|minidom.Element
The parent node. If not given, then a Placemark is created.
params
The parameters dictionary
Returns
-------
minidom.Element
"""
if par is None:
par = self.add_container(**params)
multigeometry_node = self._create_new_node(par, 'MultiGeometry')
return multigeometry_node
def add_polygon(self, outer_coords, inner_coords=None, par=None, **params):
"""
Adds a Polygon element - a polygonal outer region, possibly with polygonal holes removed
Parameters
----------
outer_coords : str
comma/space delimited string of coordinates for the outerRing. Format of the string
:code:`'lon1,lat1,alt1 lon2,lat2,alt2 ...'` with the altitude values optional. If given, the altitude value
is in meters. The precise interpretation of altitude (relative to the ground, relative to sea level, etc.)
depends on the value of relevant tags passed down to the LinearRing objects, namely the values for the
params entries:
* 'extrude'
* 'tessellate'
* 'altitudeMode'
inner_coords : None|List[str]
If provided, the coordinates for inner rings.
par : None|minidom.Element
The parent node. If not given, then a Placemark is created.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
if par is None:
par = self.add_container(**params)
polygon_node = self._create_new_node(par, 'Polygon')
for opt in ['extrude', 'tessellate', 'altitudeMode']:
self._add_conditional_text_node(polygon_node, opt, params)
outer_ring_node = self._create_new_node(polygon_node, 'outerBoundaryIs')
self.add_linear_ring(outer_coords, outer_ring_node)
if inner_coords is not None:
for coords in inner_coords:
inner_ring = self._create_new_node(polygon_node, 'innerBoundaryIs')
self.add_linear_ring(coords, inner_ring)
return polygon_node
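# A sketch of building the coordinate string for a closed square (longitudes
# and latitudes are arbitrary):
#
#     corners = [(30.0, 10.0), (30.001, 10.0), (30.001, 10.001), (30.0, 10.001), (30.0, 10.0)]
#     coords = ' '.join('{0:0.8f},{1:0.8f}'.format(lon, lat) for lon, lat in corners)
#     doc.add_polygon(coords, name='square')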
def add_linear_ring(self, coords, par=None, **params):
"""
Adds a LinearRing element (closed linear path).
Parameters
----------
coords : str
comma/space delimited string of coordinates for the ring. Format of the string
:code:`'lon1,lat1,alt1 lon2,lat2,alt2 ...'` with the altitude values optional. If given, the altitude value
is in meters. The precise interpretation of altitude (relative to the ground, relative to sea level, etc.)
depends on the value of relevant tags passed down to the LinearRing objects, namely the values for the
params entries:
* 'extrude'
* 'tessellate'
* 'altitudeMode'
par : None|minidom.Element
The parent node. If not given, then a Placemark is created.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
if par is None:
par = self.add_container(**params)
linear_ring = self._create_new_node(par, 'LinearRing')
for opt in ['extrude', 'tessellate', 'altitudeMode']:
self._add_conditional_text_node(linear_ring, opt, params)
self._add_text_node(linear_ring, 'coordinates', coords)
return linear_ring
def add_line_string(self, coords, par=None, **params):
"""
Adds a LineString element (linear path).
Parameters
----------
coords : str
comma/space delimited string of coordinates for the path. Format of the string
:code:`'lon1,lat1,alt1 lon2,lat2,alt2 ...'` with the altitude values optional. If given, the altitude value
is in meters. The precise interpretation of altitude (relative to the ground, relative to sea level, etc.)
depends on the value of relevant tags passed down to the LineString object, namely the values for the
params entries:
* 'extrude'
* 'tessellate'
* 'altitudeMode'
par : None|minidom.Element
The parent node. If not given, then a Placemark is created.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
if par is None:
par = self.add_container(**params)
line_string = self._create_new_node(par, 'LineString')
for opt in ['extrude', 'tessellate', 'altitudeMode']:
self._add_conditional_text_node(line_string, opt, params)
self._add_text_node(line_string, 'coordinates', coords)
return line_string
def add_point(self, coords, par=None, **params):
"""
Adds a Point object.
Parameters
----------
coords : str
comma delimited string of coordinates for the point. Format of the string
:code:`'lon1,lat1,alt1'` with the altitude value optional. If given, the altitude value
is in meters. The precise interpretation of altitude (relative to the ground, relative to sea level, etc.)
depends on the value of relevant tags passed down to the Point object, namely the values for the
params entries:
* 'extrude'
* 'tessellate'
* 'altitudeMode'
par : None|minidom.Element
The parent node. If not given, then a Placemark is created.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
if par is None:
par = self.add_container(**params)
point = self._create_new_node(par, 'Point')
for opt in ['extrude', 'tessellate', 'altitudeMode']:
self._add_conditional_text_node(point, opt, params)
self._add_text_node(point, 'coordinates', coords)
return point
def add_gx_multitrack(self, par=None, **params):
"""
Adds a MultiTrack from the gx namespace. This is only a container, much like
a MultiGeometry object, which requires the addition of gx:Track objects. **Note that this
is specifically part of the google earth extension of kml, and may not be generally
supported by anything except google earth.**
Parameters
----------
par : None|minidom.Element
The parent node. If not given, then a Placemark is created.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
if par is None:
par = self.add_container(**params)
gx_multitrack = self._create_new_node(par, 'gx:MultiTrack')
for opt in ['gx:interpolate', 'extrude', 'tessellate', 'altitudeMode']:
self._add_conditional_text_node(gx_multitrack, opt, params)
return gx_multitrack
def add_gx_track(self, coords, whens, angles=None, par=None, **params):
"""
Adds a Track from the gx namespace. **Note that this
is specifically part of the google earth extension of kml, and may not be generally
supported by anything except google earth.**
Parameters
----------
coords : List[str]
list of comma delimited string of coordinates. Format of each string entry: 'lon1,lat1,alt1'
with altitude values optional. If given, the altitude value is in meters. The precise
interpretation of altitude (relative to the ground, relative to sea level, etc.) depends on
the value of relevant tags passed down to the LinearRing objects, namely the values for the
params entries:
* 'extrude'
* 'tessellate'
* 'altitudeMode'
whens : List[str]
list of iso-formatted time strings - entries matching coords
angles : None|List[str]
None or list of heading (rotation) angles for the icon. If None, then Google Earth
infers from the path.
par : None|minidom.Element
The parent node. If not given, then a Placemark is created.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
if par is None:
par = self.add_container(**params)
gx_track = self._create_new_node(par, 'gx:Track')
for opt in ['extrude', 'tessellate', 'altitudeMode']:
self._add_conditional_text_node(gx_track, opt, params)
for wh in whens:
self._add_text_node(gx_track, 'when', wh)
for coord in coords:
self._add_text_node(gx_track, 'gx:coord', coord)
if angles is not None:
for an in angles:
self._add_text_node(gx_track, 'gx:angles', '{} 0 0'.format(an))
if ('ExtendedData' in params) and ('schemaUrl' in params):
self._add_extended_data(gx_track, **params)
return gx_track
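# A sketch of a two-sample track (times and positions are arbitrary):
#
#     whens = ['2020-01-01T00:00:00Z', '2020-01-01T00:00:10Z']
#     coords = ['30.0,10.0,5000', '30.01,10.0,5000']
#     doc.add_gx_track(coords, whens, altitudeMode='absolute', name='track')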
def add_ground_overlay(self, image_ref, bounding_box=None, lat_lon_quad=None, par=None, **params):
"""
Adds GroundOverlay object, defined either from a bounding_box or a lat/lon
quadrilateral.
Parameters
----------
image_ref : str
Reference to appropriate image object, either in the kmz archive or
an appropriate url.
bounding_box : None|numpy.ndarray|tuple|list
list of the form `[latitude max, latitude min, longitude max, longitude min]`
lat_lon_quad : None|numpy.ndarray|list|tuple
list of the form [[latitude, longitude]], must have 4 entries. The orientation
is counter-clockwise from the lower-left image corner.
par : None|minidom.Element
The parent node. if not provided, then a Placemark object is created implicitly.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
if bounding_box is None and lat_lon_quad is None:
raise ValueError('Either bounding_box or lat_lon_quad must be defined.')
if bounding_box is not None and lat_lon_quad is not None:
raise ValueError('Both bounding_box and lat_lon_quad are provided, which is not sensible.')
if par is None:
par = self.add_container(**params)
overlay = self._create_new_node(par, 'GroundOverlay')
if 'id' in params:
overlay.setAttribute('id', params['id'])
for opt in ['name', 'Snippet', 'styleUrl', 'altitude', 'altitudeMode']:
self._add_conditional_text_node(overlay, opt, params)
self._add_conditional_cdata_node(overlay, 'description', params)
# extended data
if ('schemaUrl' in params) and ('ExtendedData' in params):
self._add_extended_data(overlay, **params)
# time parameters
if ('beginTime' in params) or ('endTime' in params):
ts = self._create_new_node(overlay, 'TimeSpan')
self._add_text_node(ts, 'begin', params.get('beginTime', None))
self._add_text_node(ts, 'end', params.get('endTime', None))
elif 'when' in params:
ts = self._create_new_node(overlay, 'TimeStamp')
self._add_conditional_text_node(ts, 'when', params)
if bounding_box is not None:
# latitude/longitude box parameters
ll = self._create_new_node(overlay, 'LatLonBox')
for cdir, num in zip(['north', 'south', 'east', 'west'], bounding_box):
self._add_text_node(ll, cdir, num)
self._add_conditional_text_node(ll, 'rotation', params)
elif lat_lon_quad is not None:
if len(lat_lon_quad) != 4:
raise ValueError('lat_lon_quad must have length 4.')
# latitude/longitude quad parameters
llq = self._create_new_node(overlay, 'gx:LatLonQuad')
coords = ''
for entry in lat_lon_quad:
if isinstance(entry, str):
coords += entry.strip() + ' '
elif len(entry) >= 2:
coords += '{0:0.8f},{1:0.8f} '.format(entry[1], entry[0])
else:
raise TypeError('Got unexpected entry type {}'.format(type(entry)))
self._add_text_node(llq, 'coordinates', coords.strip())
# icon
ic = self._create_new_node(overlay, 'Icon')
self._add_text_node(ic, 'href', image_ref)
return overlay
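    # Illustrative usage sketch (not from the original source): assuming a kmz
    # document instance `doc` and an image already present in the archive as
    # 'images/overlay.png', a north-up overlay could be added via
    #
    #     doc.add_ground_overlay(
    #         'images/overlay.png',
    #         bounding_box=[38.91, 38.89, -77.02, -77.04],  # [lat max, lat min, lon max, lon min]
    #         name='example overlay')
    #
    # Exactly one of bounding_box or lat_lon_quad must be supplied.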
# regionation tools for ground overlays
def _add_lod(self, par, **params):
"""
Adds a Level of Detail (LOD) element, which is explicitly a child of Region.
Parameters
----------
par : minidom.Element
params
Returns
-------
minidom.Element
"""
lod = self._create_new_node(par, 'Lod')
self._add_conditional_text_node(lod, 'minLodPixels', params, '128')
self._add_conditional_text_node(lod, 'maxLodPixels', params, '-1')
        self._add_conditional_text_node(lod, 'minFadeExtent', params, '0')
        self._add_conditional_text_node(lod, 'maxFadeExtent', params, '0')
return lod
def _add_lat_lon_alt_box(self, par, **params):
"""
Adds LatLonAltBox element, which is explicitly a child of Region.
Parameters
----------
par : minidom.Element
params
Returns
-------
minidom.Element
"""
box = self._create_new_node(par, 'LatLonAltBox')
for key in ['north', 'south', 'east', 'west', 'minAltitude', 'maxAltitude', 'altitudeMode']:
self._add_conditional_text_node(box, key, params)
return box
def add_region(self, par, **params):
"""
Adds a region element.
Parameters
----------
par : None|minidom.Element
params
Returns
-------
minidom.Element
"""
reg = self._create_new_node(par, 'Region')
self._add_lod(reg, **params)
self._add_lat_lon_alt_box(reg, **params)
return reg
def _add_ground_overlay_region_bbox(
self, image_name, fld, img, image_bounds, bounding_box,
nominal_image_size, img_format, depth_count=0, **params):
"""
        Helper function for creating a ground overlay region part.
Parameters
----------
image_name : str
The image name.
fld : minidom.Element
img : PIL.Image.Image
image_bounds : numpy.ndarray|list|tuple
Using PIL conventions, of the form `(col min, row min, col max, row max)`.
bounding_box : numpy.ndarray|tuple|list
Bounding box of the form `[latitude max, latitude min, longitude max, longitude min]`
nominal_image_size : int
img_format : str
        depth_count : int
            The depth of the recursion.
params
The parameters dictionary.
Returns
-------
None
"""
col_min, row_min, col_max, row_max = image_bounds
# determine how to resample this image
row_length = int(row_max - row_min)
col_length = int(col_max - col_min)
cont_recursion = True
if max(row_length, col_length) < 1.5*nominal_image_size:
cont_recursion = False
sample_rows = row_length
sample_cols = col_length
elif row_length >= col_length:
sample_rows = nominal_image_size
sample_cols = int(col_length*nominal_image_size/float(row_length))
else:
sample_cols = nominal_image_size
sample_rows = int(row_length*nominal_image_size/float(col_length))
archive_name = 'images/{}.{}'.format(image_name, img_format)
# resample our image
pil_box = tuple(int(el) for el in image_bounds)
this_img = img.crop(pil_box).resize((sample_cols, sample_rows), PIL.Image.ANTIALIAS)
self.write_image_to_archive(archive_name, this_img, img_format=img_format)
# create the ground overlay parameters
pars = {'name': image_name}
for key in ['beginTime', 'endTime', 'when']:
if key in params:
pars[key] = params[key]
# create the ground overlay
gnd_overlay = self.add_ground_overlay(archive_name, bounding_box=bounding_box, par=fld, **pars)
# add the region
pars = {}
if depth_count == 0:
# root level, no minimum size
pars['minLodPixels'] = 0
else:
pars['minLodPixels'] = 0.3*nominal_image_size
pars['minFadeExtent'] = 0.3*nominal_image_size
if cont_recursion:
pars['maxLodPixels'] = 1.75*nominal_image_size
pars['maxFadeExtent'] = 0.3*nominal_image_size
else:
# leaf, no maximum size
pars['maxLodPixels'] = -1
pars['north'] = bounding_box[0]
pars['south'] = bounding_box[1]
pars['east'] = bounding_box[2]
pars['west'] = bounding_box[3]
self.add_region(gnd_overlay, **pars)
if cont_recursion:
# create a list of [(start row, end row)]
if row_length > 1.5*nominal_image_size:
split_row = row_min + int(0.5*row_length)
                split_lat = bounding_box[0] + ((split_row - row_min)/float(row_length))*(bounding_box[1] - bounding_box[0])
row_sizes = [(row_min, split_row), (split_row, row_max)]
lats = [(bounding_box[0], split_lat), (split_lat, bounding_box[1])]
else:
row_sizes = [(row_min, row_max), ]
lats = [(bounding_box[0], bounding_box[1]), ]
if col_length > 1.5*nominal_image_size:
split_col = col_min + int(0.5*col_length)
                split_lon = bounding_box[2] + ((split_col - col_min)/float(col_length))*(bounding_box[3] - bounding_box[2])
col_sizes = [(col_min, split_col), (split_col, col_max)]
lons = [(bounding_box[2], split_lon), (split_lon, bounding_box[3])]
else:
col_sizes = [(col_min, col_max), ]
lons = [(bounding_box[2], bounding_box[3]), ]
count = 0
for row_bit, lat_bit in zip(row_sizes, lats):
for col_bit, lon_bit in zip(col_sizes, lons):
this_im_name = '{}_{}'.format(image_name, count)
                    this_im_bounds = (col_bit[0], row_bit[0], col_bit[1], row_bit[1])
this_bounding_box = lat_bit + lon_bit
self._add_ground_overlay_region_bbox(
this_im_name, fld, img, this_im_bounds, this_bounding_box,
nominal_image_size, img_format, depth_count=depth_count+1, **params)
count += 1
@staticmethod
def _split_lat_lon_quad(ll_quad, split_fractions):
"""
Helper method for recursively splitting the lat/lon quad box.
Parameters
----------
ll_quad : numpy.ndarray
split_fractions : list|tuple
Returns
-------
numpy.ndarray
"""
r1, r2, c1, c2 = split_fractions
# [0] corresponds to (max_row, 0)
# [1] corresponds to (max row, max_col)
# [2] corresponds to (0, max_col)
# [3] corresponds to (0, 0)
# do row split
# [0] = r2*[0] + (1-r2)*[3]
# [1] = r2*[1] + (1-r2)*[2]
# [2] = r1*[1] + (1-r1)*[2]
# [3] = r1*[0] + (1-r1)*[3]
row_split = numpy.array([
[r2, 0, 0, 1-r2],
[0, r2, 1-r2, 0],
[0, r1, 1-r1, 0],
[r1, 0, 0, 1-r1],
], dtype='float64')
# do column split
# [0] = (1-c1)*[0] + c1*[1]
# [1] = (1-c2)*[0] + c2*[1]
# [2] = c2*[2] + (1-c2)*[3]
# [3] = c1*[2] + (1-c1)*[3]
col_split = numpy.array([
[1-c1, c1, 0, 0],
[1-c2, c2, 0, 0],
[0, 0, c2, 1-c2],
[0, 0, c1, 1-c1], ], dtype='float64')
split = col_split.dot(row_split)
llh_temp = numpy.zeros((4, 3))
llh_temp[:, :2] = ll_quad
ecf_coords = geodetic_to_ecf(llh_temp)
split_ecf = split.dot(ecf_coords)
return ecf_to_geodetic(split_ecf)[:, :2]
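    # Sanity-check sketch: split_fractions = (0, 1, 0, 1) makes both mixing
    # matrices the identity, so the original quad is returned (up to the
    # geodetic/ECF round trip); (0, 0.5, 0, 1) recovers the quad covering the
    # first half of the rows over the full column extent.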
def _add_ground_overlay_region_quad(
self, image_name, fld, img, image_bounds, lat_lon_quad,
nominal_image_size, img_format, depth_count=0, **params):
"""
        Helper function for creating a ground overlay region part.
Parameters
----------
image_name : str
The image name.
fld : minidom.Element
img : PIL.Image.Image
image_bounds : numpy.ndarray|list|tuple
Using PIL conventions, of the form `(col min, row min, col max, row max)`.
lat_lon_quad : numpy.ndarray
list of the form [[latitude, longitude]], must have 4 entries.
nominal_image_size : int
img_format : str
        depth_count : int
            The depth of the recursion.
params
Returns
-------
None
"""
bounding_box = [
float(numpy.max(lat_lon_quad[:, 0])), float(numpy.min(lat_lon_quad[:, 0])),
float(numpy.max(lat_lon_quad[:, 1])), float(numpy.min(lat_lon_quad[:, 1]))]
col_min, row_min, col_max, row_max = image_bounds
# determine how to resample this image
row_length = int(row_max - row_min)
col_length = int(col_max - col_min)
cont_recursion = True
if max(row_length, col_length) <= 1.5*nominal_image_size:
cont_recursion = False
sample_rows = row_length
sample_cols = col_length
elif row_length >= col_length:
sample_rows = nominal_image_size
sample_cols = int(col_length*nominal_image_size/float(row_length))
else:
sample_cols = nominal_image_size
sample_rows = int(row_length*nominal_image_size/float(col_length))
logger.info(
'Processing ({}:{}, {}:{}) into a downsampled image\n\t'
'of size ({}, {})'.format(
row_min, row_max, col_min, col_max, sample_rows, sample_cols))
archive_name = 'images/{}.{}'.format(image_name, img_format)
pil_box = tuple(int(el) for el in image_bounds)
# resample our image
this_img = img.crop(pil_box).resize((sample_cols, sample_rows), PIL.Image.ANTIALIAS)
self.write_image_to_archive(archive_name, this_img, img_format=img_format)
# create the ground overlay parameters
pars = {'name': image_name}
for key in ['beginTime', 'endTime', 'when']:
if key in params:
pars[key] = params[key]
# create the ground overlay
gnd_overlay = self.add_ground_overlay(archive_name, lat_lon_quad=lat_lon_quad, par=fld, **pars)
# add the region
pars = {}
if depth_count == 0:
# root level, no minimum size
pars['minLodPixels'] = 0
else:
pars['minLodPixels'] = 0.3*nominal_image_size
pars['minFadeExtent'] = 0.3*nominal_image_size
if cont_recursion:
pars['maxLodPixels'] = 1.75*nominal_image_size
pars['maxFadeExtent'] = 0.3*nominal_image_size
else:
# leaf, no maximum size
pars['maxLodPixels'] = -1
pars['north'] = bounding_box[0]
pars['south'] = bounding_box[1]
pars['east'] = bounding_box[2]
pars['west'] = bounding_box[3]
self.add_region(gnd_overlay, **pars)
if cont_recursion:
if row_length >= 1.5*nominal_image_size:
split_row = row_min + int(0.5*row_length)
row_sizes = [(row_min, split_row), (split_row, row_max)]
else:
row_sizes = [(row_min, row_max), ]
if col_length >= 1.5*nominal_image_size:
split_col = col_min + int(0.5*col_length)
col_sizes = [(col_min, split_col), (split_col, col_max)]
else:
col_sizes = [(col_min, col_max), ]
count = 0
            for row_bit in row_sizes:
                for col_bit in col_sizes:
                    this_im_name = '{}_{}'.format(image_name, count)
                    this_im_bounds = (col_bit[0], row_bit[0], col_bit[1], row_bit[1])
                    split_fractions = [
                        (row_bit[0] - row_min)/float(row_length),
                        (row_bit[1] - row_min)/float(row_length),
                        (col_bit[0] - col_min)/float(col_length),
                        (col_bit[1] - col_min)/float(col_length)
                    ]
this_ll_quad = self._split_lat_lon_quad(lat_lon_quad, split_fractions)
self._add_ground_overlay_region_quad(
this_im_name, fld, img, this_im_bounds, this_ll_quad,
nominal_image_size, img_format, depth_count=depth_count+1,
**params)
count += 1
def add_regionated_ground_overlay(
self, img, par, bounding_box=None, lat_lon_quad=None, img_format='PNG',
nominal_image_size=1024, **params):
"""
Adds regionated GroundOverlay objects. This downsamples the image to a pyramid type
collection of smaller images, and defines the regions. **Requires viable archive.**
Parameters
----------
img : PIL.Image.Image
the image instance.
par : minidom.Element
the parent node, a folder object will be created and appended to par.
The overlays will be added below this folder.
bounding_box : None|numpy.ndarray
Follows the format for the argument in :func:`add_ground_overlay`.
        lat_lon_quad : None|numpy.ndarray
Follows the format for the argument in :func:`add_ground_overlay`.
img_format : str
            string representing a viable image format. The allowed options are:
* 'PNG' - (default) transparency; lossless; good compression
* 'TIFF' - supports transparency; lossless; poor compression
* 'JPEG' - no transparency; lossy; best compression
* 'GIF' - transparency; lossless; medium compression
The PIL library actually supports a much larger collection of image formats, but the
remaining formats are not good candidates for this application.
nominal_image_size : int
The nominal image size for splitting. A minimum of 512 will be enforced.
params
The parameters dictionary.
Returns
-------
minidom.Element
"""
nominal_image_size = int(nominal_image_size)
if nominal_image_size < 512:
nominal_image_size = 512
if self._archive is None:
raise ValueError('We must have a viable archive.')
if PIL is None:
raise ImportError(
'Optional dependency Pillow is required to use this functionality.')
if not isinstance(img, PIL.Image.Image):
            raise TypeError('img must be a PIL Image instance, got type {}.'.format(type(img)))
# validate ground overlay area arguments
if bounding_box is None and lat_lon_quad is None:
raise ValueError('Either bounding_box or lat_lon_quad must be defined.')
if bounding_box is not None and lat_lon_quad is not None:
            raise ValueError('Both bounding_box and lat_lon_quad are provided, which is not sensible.')
if lat_lon_quad is not None:
if not isinstance(lat_lon_quad, numpy.ndarray) or lat_lon_quad.ndim != 2 or \
lat_lon_quad.shape[0] != 4 or lat_lon_quad.shape[1] != 2:
raise TypeError('lat_lon_quad, if supplied, must be a numpy array of shape (4, 2).')
# create our folder object
fld = self.add_container(par, the_type='Folder', **params)
# get base name
base_img_name = '{}-image'.format(uuid4())
base_img_box = (0, 0, img.size[0], img.size[1])
if bounding_box is not None:
self._add_ground_overlay_region_bbox(
base_img_name, fld, img, base_img_box, bounding_box,
nominal_image_size, img_format, **params)
elif lat_lon_quad is not None:
self._add_ground_overlay_region_quad(
base_img_name, fld, img, base_img_box, lat_lon_quad,
nominal_image_size, img_format, **params)
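    # Illustrative usage sketch (hypothetical names): given a kmz document
    # `doc`, a parent node `par`, and a PIL image `img` of the scene,
    #
    #     doc.add_regionated_ground_overlay(
    #         img, par,
    #         bounding_box=[38.91, 38.89, -77.02, -77.04],
    #         img_format='PNG', nominal_image_size=1024)
    #
    # writes a pyramid of downsampled tiles into the archive under 'images/'
    # and attaches Region/Lod elements so detail streams in on demand.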
| 51,284 | 36.136133 | 136 | py |
sarpy | sarpy-master/sarpy/io/__init__.py |
__classification__ = "UNCLASSIFIED"
def open(file_name: str):
"""
Given a file, try to find and return the appropriate reader object.
Parameters
----------
file_name : str
Returns
-------
BaseReader
Raises
------
SarpyIOError
"""
from .complex.converter import open_complex
from .product.converter import open_product
from .phase_history.converter import open_phase_history
from .received.converter import open_received
from .general.converter import open_general
from .general.base import SarpyIOError
try:
return open_complex(file_name)
except SarpyIOError:
pass
try:
return open_product(file_name)
except SarpyIOError:
pass
try:
return open_phase_history(file_name)
except SarpyIOError:
pass
try:
return open_received(file_name)
except SarpyIOError:
pass
try:
return open_general(file_name)
except SarpyIOError:
pass
raise SarpyIOError(
'The format of file {} does not match any reader in the complex, '
'product, phase_history, received, or general modules.'.format(file_name))
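# Usage sketch (hypothetical path): this helper simply tries each family of
# openers in order and returns the first reader that succeeds.
#
#     import sarpy.io
#     reader = sarpy.io.open('/path/to/collect.ntf')  # hypothetical file
#     print(type(reader).__name__)  # concrete class depends on the detected format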
| 1,207 | 20.192982 | 82 | py |
sarpy | sarpy-master/sarpy/io/received/base.py | """
Base structures for received signal data readers and usage
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from typing import Union, Tuple, Sequence, Dict, Optional
import numpy
from sarpy.io.general.base import BaseReader
from sarpy.io.general.data_segment import DataSegment
from sarpy.io.received.crsd1_elements.CRSD import CRSDType as CRSDType1_0
class CRSDTypeReader(BaseReader):
"""
A class for ensuring common CRSD reading functionality.
**Updated in version 1.3.0** for reading changes.
"""
def __init__(self,
data_segment: Union[None, DataSegment, Sequence[DataSegment]],
crsd_meta: Union[None, CRSDType1_0],
close_segments: bool = True,
delete_files: Union[None, str, Sequence[str]] = None):
"""
Parameters
----------
data_segment : None|DataSegment|Sequence[DataSegment]
crsd_meta : None|CRSDType1_0
The CRSD metadata object
close_segments : bool
Call segment.close() for each data segment on reader.close()?
delete_files : None|Sequence[str]
Any temp files which should be cleaned up on reader.close()?
This will occur after closing segments.
"""
if crsd_meta is None:
self._crsd_meta = None
elif isinstance(crsd_meta, CRSDType1_0):
self._crsd_meta = crsd_meta
else:
raise TypeError(
'The crsd_meta must be of type CRSDType, got `{}`'.format(type(crsd_meta)))
BaseReader.__init__(
self, data_segment, reader_type='CRSD', close_segments=close_segments, delete_files=delete_files)
@property
def crsd_meta(self) -> Union[None, CRSDType1_0]:
"""
None|CRSDType1_0: the crsd meta_data.
"""
return self._crsd_meta
def read_support_array(self,
index: Union[int, str],
*ranges: Sequence[Union[None, int, Tuple[int, ...], slice]]) -> numpy.ndarray:
"""
Read the support array.
Parameters
----------
index : int|str
The support array integer index.
ranges : Sequence[None|int|Tuple[int, ...]|slice]
The slice definition appropriate for support array usage.
Returns
-------
numpy.ndarray
Raises
------
TypeError
If called on a reader which doesn't support this.
"""
raise NotImplementedError
def read_support_block(self) -> Dict[str, numpy.ndarray]:
"""
Reads the entirety of support block(s).
Returns
-------
Dict[str, numpy.ndarray]
Dictionary of `numpy.ndarray` containing the support arrays.
"""
raise NotImplementedError
def read_pvp_variable(
self,
variable: str,
index: Union[int, str],
the_range: Union[None, int, Tuple[int, ...], slice] = None) -> Optional[numpy.ndarray]:
"""
Read the vector parameter for the given `variable` and CRSD channel.
Parameters
----------
variable : str
index : int|str
The channel index or identifier.
the_range : None|int|Tuple[int, ...]|slice
The indices for the vector parameter. `None` returns all,
            an integer returns the single value at that location, otherwise
the input determines a slice.
Returns
-------
None|numpy.ndarray
This will return None if there is no such variable, otherwise the data.
"""
raise NotImplementedError
def read_pvp_array(
self,
index: Union[int, str],
the_range: Union[None, int, Tuple[int, ...], slice] = None) -> numpy.ndarray:
"""
Read the PVP array from the requested channel.
Parameters
----------
index : int|str
            The channel index (of crsd.Data.Channels list) or identifier.
the_range : None|int|Tuple[int, ...]|slice
The indices for the vector parameter. `None` returns all,
            an integer returns the single value at that location, otherwise
the input determines a slice.
Returns
-------
pvp_array : numpy.ndarray
"""
raise NotImplementedError
def read_pvp_block(self) -> Dict[str, numpy.ndarray]:
"""
Reads the entirety of the PVP block(s).
Returns
-------
Dict[str, numpy.ndarray]
Dictionary containing the PVP arrays.
"""
raise NotImplementedError
def read_signal_block(self) -> Dict[str, numpy.ndarray]:
"""
Reads the entirety of signal block(s), with data formatted as complex64
(after accounting for AmpSF).
Returns
-------
Dict[str, numpy.ndarray]
Dictionary of `numpy.ndarray` containing the signal arrays.
"""
raise NotImplementedError
def read_signal_block_raw(self) -> Dict[str, numpy.ndarray]:
"""
Reads the entirety of signal block(s), with data formatted in file
storage format (no converting to complex, no consideration of AmpSF).
Returns
-------
Dict[str, numpy.ndarray]
Dictionary of `numpy.ndarray` containing the signal arrays.
"""
raise NotImplementedError
| 5,562 | 29.07027 | 109 | py |
sarpy | sarpy-master/sarpy/io/received/converter.py | """
This module provides utilities for reading Compensated Received Signal Data.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import os
from typing import Callable
from sarpy.io.general.base import SarpyIOError, BaseReader, check_for_openers
from sarpy.io.received.base import CRSDTypeReader
###########
# Module variables
_openers = []
_parsed_openers = False
def register_opener(open_func: Callable) -> None:
"""
Provide a new opener.
Parameters
----------
open_func
This is required to be a function which takes a single argument (file name).
This function should return a sarpy.io.received.base.CRSDTypeReader instance
if the referenced file is viable for the underlying type, and None otherwise.
Returns
-------
None
"""
if not callable(open_func):
raise TypeError('open_func must be a callable')
if open_func not in _openers:
_openers.append(open_func)
def parse_openers() -> None:
"""
Automatically find the viable openers (i.e. :func:`is_a`) in the various modules.
"""
global _parsed_openers
if _parsed_openers:
return
_parsed_openers = True
check_for_openers('sarpy.io.received', register_opener)
def open_received(file_name: str) -> BaseReader:
"""
Given a file, try to find and return the appropriate reader object.
Parameters
----------
file_name : str
Returns
-------
CRSDTypeReader
Raises
------
SarpyIOError
"""
if not os.path.exists(file_name):
raise SarpyIOError('File {} does not exist.'.format(file_name))
# parse openers, if not already done
parse_openers()
    # see if we can find a reader through trial and error
for opener in _openers:
reader = opener(file_name)
if reader is not None:
return reader
# If for loop completes, no matching file format was found.
raise SarpyIOError('Unable to determine received image format.')
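# Usage sketch (hypothetical path):
#
#     from sarpy.io.received.converter import open_received
#     reader = open_received('/data/example.crsd')  # hypothetical file
#     print(reader.crsd_meta.Data.NumCRSDChannels)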
| 2,036 | 22.964706 | 86 | py |
sarpy | sarpy-master/sarpy/io/received/__init__.py |
__classification__ = 'UNCLASSIFIED'
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/io/received/crsd.py | """
Module for reading and writing CRSD version 1.0 files
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valyrie")
import logging
import os
from typing import Union, Tuple, List, Sequence, Dict, BinaryIO, Optional
from collections import OrderedDict
import numpy
from sarpy.io.general.utils import is_file_like
from sarpy.io.general.base import BaseReader, SarpyIOError
from sarpy.io.general.data_segment import DataSegment, NumpyMemmapSegment
from sarpy.io.general.slice_parsing import verify_subscript, verify_slice
from sarpy.io.phase_history.cphd import CPHDWritingDetails, CPHDWriter1, \
AmpScalingFunction
from sarpy.io.received.crsd1_elements.CRSD import CRSDType, CRSDHeader, \
CRSD_SECTION_TERMINATOR
from sarpy.io.received.base import CRSDTypeReader
from sarpy.io.received.crsd_schema import get_namespace, get_default_tuple
logger = logging.getLogger(__name__)
_unhandled_version_text = 'Got unhandled CRSD version number `{}`'
_missing_channel_identifier_text = 'Cannot find CRSD channel for identifier `{}`'
_index_range_text = 'index must be in the range `[0, {})`'
#########
# Object for parsing CRSD elements
class CRSDDetails(object):
"""
The basic CRSD element parser.
"""
__slots__ = (
'_file_name', '_file_object', '_close_after', '_crsd_version', '_crsd_header', '_crsd_meta')
def __init__(self, file_object: Union[str, BinaryIO]):
"""
Parameters
----------
file_object : str|BinaryIO
The path to or file like object referencing the CRSD file.
"""
self._crsd_version = None
self._crsd_header = None
self._crsd_meta = None
self._close_after = False
if isinstance(file_object, str):
if not os.path.exists(file_object) or not os.path.isfile(file_object):
raise SarpyIOError('path {} does not exist or is not a file'.format(file_object))
self._file_name = file_object
self._file_object = open(file_object, 'rb')
self._close_after = True
elif is_file_like(file_object):
self._file_object = file_object
if hasattr(file_object, 'name') and isinstance(file_object.name, str):
self._file_name = file_object.name
else:
self._file_name = '<file like object>'
self._close_after = False
else:
raise TypeError('Got unsupported input type {}'.format(type(file_object)))
self._file_object.seek(0, os.SEEK_SET)
head_bytes = self._file_object.read(10)
if not isinstance(head_bytes, bytes):
raise ValueError('Input file like object not open in bytes mode.')
if not head_bytes.startswith(b'CRSD'):
raise SarpyIOError('File {} does not appear to be a CRSD file.'.format(self.file_name))
self._extract_version()
self._extract_header()
self._extract_crsd()
@property
def file_name(self) -> str:
"""
str: The CRSD filename.
"""
return self._file_name
@property
def file_object(self) -> BinaryIO:
"""
BinaryIO: The binary file object
"""
return self._file_object
@property
def crsd_version(self) -> str:
"""
str: The CRSD version.
"""
return self._crsd_version
@property
def crsd_header(self) -> CRSDHeader:
"""
CRSDHeader: The CRSD header object
"""
return self._crsd_header
@property
def crsd_meta(self) -> CRSDType:
"""
CRSDType: The CRSD structure, which is version dependent.
"""
return self._crsd_meta
def _extract_version(self) -> None:
"""
Extract the version number from the file. This will advance the file
object to the end of the initial header line.
"""
self._file_object.seek(0, os.SEEK_SET)
head_line = self._file_object.readline().strip()
parts = head_line.split(b'/')
if len(parts) != 2:
raise ValueError('Cannot extract CRSD version number from line {}'.format(head_line))
if parts[0] != b'CRSD':
raise ValueError('"{}" does not conform to a CRSD file type header'.format(head_line))
crsd_version = parts[1].strip().decode('utf-8')
self._crsd_version = crsd_version
def _extract_header(self) -> None:
"""
        Extract the header from the file. The file object is assumed to be advanced
        to the header location. This will advance the file object to the end of
        the header section.
"""
if self.crsd_version.startswith('1.'):
self._crsd_header = CRSDHeader.from_file_object(self._file_object)
else:
raise ValueError(_unhandled_version_text.format(self.crsd_version))
def _extract_crsd(self) -> None:
"""
Extract and interpret the CRSD structure from the file.
"""
xml = self.get_crsd_bytes()
if self.crsd_version.startswith('1.'):
the_type = CRSDType
else:
raise ValueError(_unhandled_version_text.format(self.crsd_version))
self._crsd_meta = the_type.from_xml_string(xml)
def get_crsd_bytes(self) -> bytes:
"""
Extract the (uninterpreted) bytes representation of the CRSD structure.
Returns
-------
bytes
"""
header = self.crsd_header
if header is None:
raise ValueError('No crsd_header populated.')
if self.crsd_version.startswith('1.'):
assert isinstance(header, CRSDHeader)
# extract the xml data
self._file_object.seek(header.XML_BLOCK_BYTE_OFFSET, os.SEEK_SET)
xml = self._file_object.read(header.XML_BLOCK_SIZE)
else:
raise ValueError(_unhandled_version_text.format(self.crsd_version))
return xml
def __del__(self):
if self._close_after:
self._close_after = False
# noinspection PyBroadException
try:
self._file_object.close()
except Exception:
pass
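# Usage sketch for the parser (hypothetical path): CRSDDetails splits a file
# into its version string, header, and XML metadata without touching the
# binary blocks.
#
#     details = CRSDDetails('/data/example.crsd')  # hypothetical file
#     print(details.crsd_version)                  # e.g. '1.0'
#     print(details.crsd_header.XML_BLOCK_SIZE)
#     print(details.crsd_meta.Data.NumCRSDChannels)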
def _validate_crsd_details(
crsd_details: Union[str, CRSDDetails],
version: Union[None, str, Sequence[str]] = None) -> CRSDDetails:
"""
Validate the input argument.
Parameters
----------
crsd_details : str|CRSDDetails
version : None|str|Sequence[str]
Returns
-------
CRSDDetails
"""
if isinstance(crsd_details, str):
crsd_details = CRSDDetails(crsd_details)
if not isinstance(crsd_details, CRSDDetails):
        raise TypeError('crsd_details is required to be a file path to a CRSD file '
                        'or CRSDDetails, got type {}'.format(type(crsd_details)))
if version is not None:
        if isinstance(version, str):
            if not crsd_details.crsd_version.startswith(version):
                raise ValueError(
                    'This CRSD file is required to be version {},\n\t'
                    'got {}'.format(version, crsd_details.crsd_version))
        else:
            val = False
            for entry in version:
                if crsd_details.crsd_version.startswith(entry):
                    val = True
                    break
            if not val:
                raise ValueError(
                    'This CRSD file is required to be one of version {},\n\t'
                    'got {}'.format(version, crsd_details.crsd_version))
return crsd_details
class CRSDReader(CRSDTypeReader):
"""
The Abstract CRSD reader instance, which just selects the proper CRSD reader
class based on the CRSD version. Note that there is no __init__ method for
this class, and it would be skipped regardless. Ensure that you make a direct
call to the BaseReader.__init__() method when extending this class.
**Updated in version 1.3.0** for reading changes.
"""
__slots__ = ('_crsd_details', )
def __new__(cls, *args, **kwargs):
if len(args) == 0:
raise ValueError(
'The first argument of the constructor is required to be a file_path '
'or CRSDDetails instance.')
if is_file_like(args[0]):
raise ValueError('File like object input not supported for CRSD reading at this time.')
crsd_details = _validate_crsd_details(args[0])
if crsd_details.crsd_version.startswith('1.'):
return object.__new__(CRSDReader1)
else:
raise ValueError('Got unhandled CRSD version {}'.format(crsd_details.crsd_version))
@property
def crsd_details(self) -> CRSDDetails:
"""
CRSDDetails: The crsd details object.
"""
return self._crsd_details
@property
def crsd_version(self) -> str:
"""
str: The CRSD version.
"""
return self.crsd_details.crsd_version
@property
def crsd_header(self) -> CRSDHeader:
"""
CRSDHeader: The CRSD header object
"""
return self.crsd_details.crsd_header
@property
def file_name(self) -> str:
return self.crsd_details.file_name
def read_support_array(self,
index: Union[int, str],
*ranges: Sequence[Union[None, int, Tuple[int, ...], slice]]) -> numpy.ndarray:
raise NotImplementedError
def read_support_block(self) -> Dict[str, numpy.ndarray]:
raise NotImplementedError
def read_pvp_variable(
self,
variable: str,
index: Union[int, str],
the_range: Union[None, int, Tuple[int, ...], slice] = None) -> Optional[numpy.ndarray]:
raise NotImplementedError
def read_pvp_array(
self,
index: Union[int, str],
the_range: Union[None, int, Tuple[int, ...], slice] = None) -> numpy.ndarray:
raise NotImplementedError
def read_pvp_block(self) -> Dict[str, numpy.ndarray]:
raise NotImplementedError
def read_signal_block(self) -> Dict[str, numpy.ndarray]:
raise NotImplementedError
def read_signal_block_raw(self) -> Dict[str, numpy.ndarray]:
raise NotImplementedError
def close(self):
CRSDTypeReader.close(self)
if hasattr(self, '_crsd_details'):
if hasattr(self._crsd_details, 'close'):
self._crsd_details.close()
del self._crsd_details
class CRSDReader1(CRSDReader):
"""
The CRSD version 1 reader.
**Updated in version 1.3.0** for reading changes.
"""
_allowed_versions = ('1.0', )
def __new__(cls, *args, **kwargs):
# we must override here, to avoid recursion with
# the CRSDReader parent
return object.__new__(cls)
def __init__(
self,
crsd_details: Union[str, CRSDDetails]):
"""
Parameters
----------
crsd_details : str|CRSDDetails
"""
self._channel_map = None # type: Union[None, Dict[str, int]]
self._support_array_map = None # type: Union[None, Dict[str, int]]
self._pvp_memmap = None # type: Union[None, Dict[str, numpy.ndarray]]
self._support_array_memmap = None # type: Union[None, Dict[str, numpy.ndarray]]
self._crsd_details = _validate_crsd_details(crsd_details, version=self._allowed_versions)
CRSDTypeReader.__init__(self, None, self._crsd_details.crsd_meta)
        # set data segments after setting up the pvp information, because
        # we need the AmpSF to set up the format function for the data segment
self._create_pvp_memmaps()
self._create_support_array_memmaps()
data_segments = self._create_data_segments()
BaseReader.__init__(self, data_segments, reader_type='CRSD')
@property
def crsd_meta(self) -> CRSDType:
"""
CRSDType: the crsd meta_data.
"""
return self._crsd_meta
@property
def crsd_header(self) -> CRSDHeader:
"""
CRSDHeader: The CRSD header object.
"""
return self.crsd_details.crsd_header
def _create_data_segments(self) -> List[DataSegment]:
"""
Helper method for creating the various signal data segments.
Returns
-------
List[DataSegment]
"""
data_segments = []
data = self.crsd_meta.Data
sample_type = data.SignalArrayFormat
if sample_type == "CF8":
raw_dtype = numpy.dtype('>f4')
elif sample_type == "CI4":
raw_dtype = numpy.dtype('>i2')
elif sample_type == "CI2":
raw_dtype = numpy.dtype('>i1')
else:
raise ValueError('Got unhandled signal array format {}'.format(sample_type))
block_offset = self.crsd_header.SIGNAL_BLOCK_BYTE_OFFSET
for entry in data.Channels:
amp_sf = self.read_pvp_variable('AmpSF', entry.Identifier)
format_function = AmpScalingFunction(raw_dtype, amplitude_scaling=amp_sf)
raw_shape = (entry.NumVectors, entry.NumSamples, 2)
data_offset = entry.SignalArrayByteOffset
data_segments.append(
NumpyMemmapSegment(
self.crsd_details.file_object, block_offset+data_offset,
raw_dtype, raw_shape, formatted_dtype='complex64', formatted_shape=raw_shape[:2],
format_function=format_function, close_file=False))
return data_segments
def _create_pvp_memmaps(self) -> None:
"""
Helper method which creates the pvp mem_maps.
Returns
-------
None
"""
self._pvp_memmap = None
if self.crsd_meta.Data.Channels is None:
logger.error('No Data.Channels defined.')
return
if self.crsd_meta.PVP is None:
logger.error('No PVP object defined.')
return
pvp_dtype = self.crsd_meta.PVP.get_vector_dtype()
self._pvp_memmap = OrderedDict()
self._channel_map = OrderedDict()
for i, entry in enumerate(self.crsd_meta.Data.Channels):
self._channel_map[entry.Identifier] = i
offset = self.crsd_header.PVP_BLOCK_BYTE_OFFSET + entry.PVPArrayByteOffset
shape = (entry.NumVectors, )
self._pvp_memmap[entry.Identifier] = numpy.memmap(
self.crsd_details.file_name, dtype=pvp_dtype, mode='r', offset=offset, shape=shape)
def _create_support_array_memmaps(self) -> None:
"""
Helper method which creates the support array mem_maps.
Returns
-------
None
"""
if self.crsd_meta.Data.SupportArrays is None:
self._support_array_memmap = None
return
self._support_array_memmap = OrderedDict()
for i, entry in enumerate(self.crsd_meta.Data.SupportArrays):
# extract the support array metadata details
details = self.crsd_meta.SupportArray.find_support_array(entry.Identifier)
# determine array byte offset
offset = self.crsd_header.SUPPORT_BLOCK_BYTE_OFFSET + entry.ArrayByteOffset
# determine numpy dtype and depth of array
dtype, depth = details.get_numpy_format()
# set up the numpy memory map
shape = (entry.NumRows, entry.NumCols) if depth == 1 else (entry.NumRows, entry.NumCols, depth)
self._support_array_memmap[entry.Identifier] = numpy.memmap(
self.crsd_details.file_name, dtype=dtype, mode='r', offset=offset, shape=shape)
def _validate_index(self, index: Union[int, str]) -> int:
"""
Get corresponding integer index for CRSD channel.
Parameters
----------
index : int|str
Returns
-------
int
"""
crsd_meta = self.crsd_details.crsd_meta
if isinstance(index, str):
if index in self._channel_map:
return self._channel_map[index]
else:
raise KeyError(_missing_channel_identifier_text.format(index))
else:
int_index = int(index)
if not (0 <= int_index < crsd_meta.Data.NumCRSDChannels):
raise ValueError(_index_range_text.format(crsd_meta.Data.NumCRSDChannels))
return int_index
def _validate_index_key(self, index: Union[int, str]) -> str:
"""
        Gets the corresponding identifier for the CRSD channel.
Parameters
----------
index : int|str
Returns
-------
str
"""
crsd_meta = self.crsd_details.crsd_meta
if isinstance(index, str):
if index in self._channel_map:
return index
else:
raise KeyError(_missing_channel_identifier_text.format(index))
else:
int_index = int(index)
if not (0 <= int_index < crsd_meta.Data.NumCRSDChannels):
raise ValueError(_index_range_text.format(crsd_meta.Data.NumCRSDChannels))
return crsd_meta.Data.Channels[int_index].Identifier
def read_support_array(
self,
index: Union[int, str],
*ranges) -> numpy.ndarray:
# find the support array identifier
if isinstance(index, int):
the_entry = self.crsd_meta.Data.SupportArrays[index]
index = the_entry.Identifier
if not isinstance(index, str):
raise TypeError('Got unexpected type {} for identifier'.format(type(index)))
the_memmap = self._support_array_memmap[index]
if len(ranges) == 0:
return numpy.copy(the_memmap[:])
# noinspection PyTypeChecker
subscript = verify_subscript(ranges, the_memmap.shape)
return numpy.copy(the_memmap[subscript])
def read_support_block(self) -> Dict:
if self.crsd_meta.Data.SupportArrays:
return {
sa.Identifier: self.read_support_array(sa.Identifier)
for sa in self.crsd_meta.Data.SupportArrays}
else:
return {}
def read_pvp_variable(self, variable, index, the_range=None):
index_key = self._validate_index_key(index)
the_memmap = self._pvp_memmap[index_key]
the_slice = verify_slice(the_range, the_memmap.shape[0])
if variable in the_memmap.dtype.fields:
return numpy.copy(the_memmap[variable][the_slice])
else:
return None
def read_pvp_array(self, index, the_range=None):
index_key = self._validate_index_key(index)
the_memmap = self._pvp_memmap[index_key]
the_slice = verify_slice(the_range, the_memmap.shape[0])
return numpy.copy(the_memmap[the_slice])
def read_pvp_block(self) -> Dict[str, numpy.ndarray]:
return {chan.Identifier: self.read_pvp_array(chan.Identifier) for chan in self.crsd_meta.Data.Channels}
def read_signal_block(self) -> Dict[str, numpy.ndarray]:
return {chan.Identifier: numpy.copy(self.read(index=chan.Identifier)) for chan in self.crsd_meta.Data.Channels}
def read_signal_block_raw(self) -> Dict[str, numpy.ndarray]:
return {chan.Identifier: numpy.copy(self.read_raw(index=chan.Identifier)) for chan in self.crsd_meta.Data.Channels}
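    # Reading sketch (channel identifier and PVP field name are hypothetical):
    # per-vector parameters and signal samples are keyed by channel,
    #
    #     tx_time = reader.read_pvp_variable('TxTime', 'CH1')  # None if absent
    #     pvp = reader.read_pvp_array('CH1')     # full structured PVP array
    #     signal = reader.read_signal_block()    # {identifier: complex64 array}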
def read_chip(self,
*ranges: Sequence[Union[None, int, Tuple[int, ...], slice]],
index: Union[int, str] = 0,
squeeze: bool = True) -> numpy.ndarray:
"""
This is identical to :meth:`read`, and presented for backwards compatibility.
Parameters
----------
ranges : Sequence[Union[None, int, Tuple[int, ...], slice]]
index : int|str
squeeze : bool
Returns
-------
numpy.ndarray
See Also
--------
:meth:`read`.
"""
return self.__call__(*ranges, index=index, raw=False, squeeze=squeeze)
def read(self,
*ranges: Sequence[Union[None, int, Tuple[int, ...], slice]],
index: Union[int, str] = 0,
squeeze: bool = True) -> numpy.ndarray:
"""
Read formatted data from the given data segment. Note this is an alias to the
:meth:`__call__` called as
:code:`reader(*ranges, index=index, raw=False, squeeze=squeeze)`.
Parameters
----------
ranges : Sequence[Union[None, int, Tuple[int, ...], slice]]
The slice definition appropriate for `data_segment[index].read()` usage.
index : int|str
The data_segment index or channel identifier.
squeeze : bool
Squeeze length 1 dimensions out of the shape of the return array?
Returns
-------
numpy.ndarray
See Also
--------
See :meth:`sarpy.io.general.data_segment.DataSegment.read`.
"""
return self.__call__(*ranges, index=index, raw=False, squeeze=squeeze)
def read_raw(self,
*ranges: Sequence[Union[None, int, Tuple[int, ...], slice]],
index: Union[int, str] = 0,
squeeze: bool = True) -> numpy.ndarray:
"""
Read raw data from the given data segment. Note this is an alias to the
:meth:`__call__` called as
:code:`reader(*ranges, index=index, raw=True, squeeze=squeeze)`.
Parameters
----------
ranges : Sequence[Union[None, int, Tuple[int, ...], slice]]
The slice definition appropriate for `data_segment[index].read()` usage.
index : int|str
The data_segment index or crsd channel identifier.
squeeze : bool
Squeeze length 1 dimensions out of the shape of the return array?
Returns
-------
numpy.ndarray
See Also
--------
See :meth:`sarpy.io.general.data_segment.DataSegment.read_raw`.
"""
return self.__call__(*ranges, index=index, raw=True, squeeze=squeeze)
def __call__(self,
*ranges: Sequence[Union[None, int, slice]],
index: int = 0,
raw: bool = False,
squeeze: bool = True) -> numpy.ndarray:
index = self._validate_index(index)
return BaseReader.__call__(self, *ranges, index=index, raw=raw, squeeze=squeeze)
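    # Sliced read sketch: ranges follow the data segment slicing conventions,
    # and index may be the integer channel index or the identifier string,
    #
    #     chip = reader.read(slice(0, 128), slice(None), index=0)
    #     same = reader(slice(0, 128), slice(None), index=0, raw=False)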
def is_a(file_name: str) -> Optional[CRSDReader]:
"""
Tests whether a given file_name corresponds to a CRSD file. Returns a reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
CRSDReader1|None
Appropriate `CRSDReader` instance if CRSD file, `None` otherwise
"""
try:
crsd_details = CRSDDetails(file_name)
logger.info('File {} is determined to be a CRSD version {} file.'.format(file_name, crsd_details.crsd_version))
return CRSDReader(crsd_details)
except SarpyIOError:
# we don't want to catch parsing errors, for now?
return None
###########
# writer
class CRSDWritingDetails(CPHDWritingDetails):
@property
def header(self) -> CRSDHeader:
return self._header
def _set_header(self, check_older_version: bool):
if check_older_version:
use_version_tuple = self.meta.version_required()
else:
use_version_tuple = get_default_tuple()
use_version_string = '{}.{}.{}'.format(*use_version_tuple)
self._header = self.meta.make_file_header(use_version=use_version_string)
@property
def meta(self) -> CRSDType:
"""
        CRSDType: The metadata
"""
return self._meta
@meta.setter
def meta(self, value: CRSDType):
if self._meta is not None:
raise ValueError('meta is read only once initialized.')
if not isinstance(value, CRSDType):
raise TypeError('meta must be of type {}'.format(CRSDType))
self._meta = value
def write_header(
self,
file_object: BinaryIO,
overwrite: bool = False) -> None:
"""
        Write the header. The file object will be advanced to the end of the
        block, if writing occurs.
Parameters
----------
file_object : BinaryIO
overwrite : bool
Overwrite, if previously written?
Returns
-------
None
"""
if self._header_written and not overwrite:
return
file_object.write(self.header.to_string().encode())
file_object.write(CRSD_SECTION_TERMINATOR)
# write xml
file_object.seek(self.header.XML_BLOCK_BYTE_OFFSET, os.SEEK_SET)
file_object.write(self.meta.to_xml_bytes(urn=get_namespace(self.use_version)))
file_object.write(CRSD_SECTION_TERMINATOR)
self._header_written = True
class CRSDWriter1(CPHDWriter1):
"""
The CRSD version 1 writer.
**Updated in version 1.3.0** for writing changes.
"""
_writing_details_type = CRSDWritingDetails
def __init__(
self,
file_object: Union[str, BinaryIO],
meta: Optional[CRSDType] = None,
writing_details: Optional[CRSDWritingDetails] = None,
check_existence: bool = True):
"""
Parameters
----------
file_object : str|BinaryIO
meta : None|CRSDType
writing_details : None|CRSDWritingDetails
check_existence : bool
            Should we check if the given file already exists, and raise an exception if so?
"""
CPHDWriter1.__init__(
self, file_object, meta=meta, writing_details=writing_details,
check_existence=check_existence)
@property
def writing_details(self) -> CRSDWritingDetails:
return self._writing_details
@writing_details.setter
def writing_details(self, value: CRSDWritingDetails):
if self._writing_details is not None:
raise ValueError('writing_details is read-only')
if not isinstance(value, CRSDWritingDetails):
raise TypeError('writing_details must be of type {}'.format(CRSDWritingDetails))
self._writing_details = value
@property
def file_name(self) -> Optional[str]:
return self._file_name
@property
def meta(self) -> CRSDType:
"""
CRSDType: The metadata
"""
return self.writing_details.meta
| 26,665 | 31.759214 | 123 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/base.py |
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
DEFAULT_STRICT = False
FLOAT_FORMAT = '0.17E'
| 118 | 13.875 | 35 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/Antenna.py | """
The Antenna type definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from typing import List
from sarpy.io.xml.base import Serializable
from sarpy.io.xml.descriptors import FloatDescriptor, StringDescriptor, BooleanDescriptor, \
SerializableDescriptor, SerializableListDescriptor
from sarpy.io.complex.sicd_elements.blocks import Poly1DType
from sarpy.io.phase_history.cphd1_elements.Antenna import AntPhaseCenterType
from .base import DEFAULT_STRICT, FLOAT_FORMAT
class AntCoordFrameType(Serializable):
"""
Antenna coordinate frame (ACF) in which one or more phase centers may lie.
"""
_fields = ('Identifier', )
_required = _fields
# descriptors
Identifier = StringDescriptor(
'Identifier', _required, strict=DEFAULT_STRICT,
docstring='String that uniquely identifies this ACF.') # type: str
def __init__(self, Identifier=None, **kwargs):
"""
Parameters
----------
Identifier : str
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Identifier = Identifier
super(AntCoordFrameType, self).__init__(**kwargs)
class AntPatternType(Serializable):
"""
Parameter set that defines each one-way Antenna Pattern.
"""
_fields = (
'Identifier', 'FreqZero', 'EBFreqShift', 'MLFreqDilation',
'GainZero', 'GainBSPoly', 'ArrayGPId', 'ElementGPId')
_required = (
'Identifier', 'FreqZero', 'EBFreqShift', 'MLFreqDilation',
'ArrayGPId', 'ElementGPId')
_numeric_format = {'FreqZero': FLOAT_FORMAT, 'GainZero': FLOAT_FORMAT}
# descriptors
Identifier = StringDescriptor(
'Identifier', _required, strict=DEFAULT_STRICT,
docstring='String that uniquely identifies this Antenna Pattern') # type: str
FreqZero = FloatDescriptor(
'FreqZero', _required, strict=DEFAULT_STRICT,
docstring='The reference frequency value for which the patterns are computed.') # type: float
EBFreqShift = BooleanDescriptor(
'EBFreqShift', _required, strict=DEFAULT_STRICT,
docstring="Parameter indicating whether the electronic boresight shifts with "
"frequency.") # type: bool
MLFreqDilation = BooleanDescriptor(
'MLFreqDilation', _required, strict=DEFAULT_STRICT,
docstring="Parameter indicating the mainlobe (ML) width changes with "
"frequency.") # type: bool
GainZero = FloatDescriptor(
'GainZero', _required, strict=DEFAULT_STRICT,
docstring='The reference antenna gain at zero steering angle at the '
'reference frequency, measured in dB.') # type: float
GainBSPoly = SerializableDescriptor(
'GainBSPoly', Poly1DType, _required, strict=DEFAULT_STRICT,
docstring='Gain polynomial *(in dB)* as a function of frequency for boresight *(BS)* '
'at :math:`DCX=0, DCY=0`. '
'Frequency ratio :math:`(f-f0)/f0` is the input variable, and the constant '
'coefficient is always `0.0`.') # type: Poly1DType
ArrayGPId = StringDescriptor(
'ArrayGPId', _required, strict=DEFAULT_STRICT,
docstring='Support array identifier of the sampled gain/phase of the array '
'at ref frequency.') # type: str
ElementGPId = StringDescriptor(
'ElementGPId', _required, strict=DEFAULT_STRICT,
docstring='Support array identifier of the sampled gain/phase of the element '
'at ref frequency.') # type: str
def __init__(self, Identifier=None, FreqZero=None, EBFreqShift=None, MLFreqDilation=None,
GainZero=None, GainBSPoly=None, ArrayGPId=None, ElementGPId=None, **kwargs):
"""
Parameters
----------
Identifier : str
FreqZero : float
EBFreqShift : bool
MLFreqDilation : bool
GainZero : None|float
GainBSPoly : None|Poly1DType
ArrayGPId : str
ElementGPId : str
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Identifier = Identifier
self.FreqZero = FreqZero
self.EBFreqShift = EBFreqShift
self.MLFreqDilation = MLFreqDilation
self.GainZero = GainZero
self.GainBSPoly = GainBSPoly
self.ArrayGPId = ArrayGPId
self.ElementGPId = ElementGPId
super(AntPatternType, self).__init__(**kwargs)
class AntennaType(Serializable):
"""
Parameters that describe the transmit and receive antennas used to collect
the signal array(s).
"""
_fields = (
'NumACFs', 'NumAPCs', 'NumAntPats', 'AntCoordFrame', 'AntPhaseCenter', 'AntPattern')
_required = ('AntCoordFrame', 'AntPhaseCenter', 'AntPattern')
_collections_tags = {
'AntCoordFrame': {'array': False, 'child_tag': 'AntCoordFrame'},
'AntPhaseCenter': {'array': False, 'child_tag': 'AntPhaseCenter'},
'AntPattern': {'array': False, 'child_tag': 'AntPattern'}}
# descriptors
AntCoordFrame = SerializableListDescriptor(
'AntCoordFrame', AntCoordFrameType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Antenna coordinate frame (ACF) in which one or more phase centers'
' may lie.') # type: List[AntCoordFrameType]
AntPhaseCenter = SerializableListDescriptor(
'AntPhaseCenter', AntPhaseCenterType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe each Antenna Phase Center (APC). Parameter '
'set repeated for each APC.') # type: List[AntPhaseCenterType]
AntPattern = SerializableListDescriptor(
'AntPattern', AntPatternType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Parameter set that defines each one-way Antenna Pattern.') # type: List[AntPatternType]
def __init__(self, AntCoordFrame=None, AntPhaseCenter=None, AntPattern=None, **kwargs):
"""
Parameters
----------
AntCoordFrame : List[AntCoordFrameType]
AntPhaseCenter : List[AntPhaseCenterType]
AntPattern : List[AntPatternType]
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.AntCoordFrame = AntCoordFrame
self.AntPhaseCenter = AntPhaseCenter
self.AntPattern = AntPattern
super(AntennaType, self).__init__(**kwargs)
@property
def NumACFs(self):
"""
int: The number of antenna coordinate frame elements.
"""
if self.AntCoordFrame is None:
return 0
return len(self.AntCoordFrame)
@property
def NumAPCs(self):
"""
int: The number of antenna phase center elements.
"""
if self.AntPhaseCenter is None:
return 0
return len(self.AntPhaseCenter)
@property
def NumAntPats(self):
"""
int: The number of antenna pattern elements.
"""
if self.AntPattern is None:
return 0
return len(self.AntPattern)
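# Construction sketch (all identifiers hypothetical):
#
#     pattern = AntPatternType(
#         Identifier='PAT1', FreqZero=9.6e9, EBFreqShift=False,
#         MLFreqDilation=False, ArrayGPId='ARR_GP', ElementGPId='ELEM_GP')
#     antenna = AntennaType(
#         AntCoordFrame=[AntCoordFrameType(Identifier='ACF1')],
#         AntPhaseCenter=[...],  # AntPhaseCenterType instances
#         AntPattern=[pattern])
#     antenna.NumAntPats  # -> 1, derived from the list length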
| 7,511 | 36.373134 | 107 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/CRSD.py | """
The Compensated Received Signal Data 1.0 definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from xml.etree import ElementTree
from collections import OrderedDict
from typing import Union
import numpy
from sarpy.io.xml.base import Serializable, find_children, parse_xml_from_file, \
parse_xml_from_string
from sarpy.io.xml.descriptors import SerializableDescriptor, IntegerDescriptor, \
StringDescriptor
from .base import DEFAULT_STRICT
from .CollectionID import CollectionIDType
from .Global import GlobalType
from sarpy.io.phase_history.cphd1_elements.SceneCoordinates import SceneCoordinatesType
from .Data import DataType
from .Channel import ChannelType
from .PVP import PVPType
from sarpy.io.phase_history.cphd1_elements.SupportArray import SupportArrayType
from sarpy.io.phase_history.cphd1_elements.Dwell import DwellType
from .ReferenceGeometry import ReferenceGeometryType
from .Antenna import AntennaType
from .ErrorParameters import ErrorParametersType
from sarpy.io.phase_history.cphd1_elements.ProductInfo import ProductInfoType
from sarpy.io.phase_history.cphd1_elements.GeoInfo import GeoInfoType
from sarpy.io.complex.sicd_elements.MatchInfo import MatchInfoType
from sarpy.io.received.crsd_schema import get_urn_details, WRITABLE_VERSIONS, \
    get_namespace, get_default_tuple
#########
# Module variables
_CRSD_SPEC_DETAILS = {
key: {'namespace': get_namespace(key), 'details': get_urn_details(key)}
for key in WRITABLE_VERSIONS}
_CRSD_DEFAULT_TUPLE = get_default_tuple()
_CRSD_DEFAULT_VERSION = '{}.{}.{}'.format(*_CRSD_DEFAULT_TUPLE)
CRSD_SECTION_TERMINATOR = b'\f\n'
#########
# CRSD header object
def _parse_crsd_header_field(line):
"""
Parse the CRSD header field, or return `None` as a termination signal.
Parameters
----------
line : bytes
Returns
-------
None|(str, str)
"""
if line.startswith(CRSD_SECTION_TERMINATOR):
return None
parts = line.split(b' := ')
if len(parts) != 2:
raise ValueError('Cannot extract CRSD header value from line {}'.format(line))
fld = parts[0].strip().decode('utf-8')
val = parts[1].strip().decode('utf-8')
return fld, val
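# Illustrative examples:
#
#     _parse_crsd_header_field(b'XML_BLOCK_SIZE := 1024\n')  # -> ('XML_BLOCK_SIZE', '1024')
#     _parse_crsd_header_field(CRSD_SECTION_TERMINATOR)      # -> None (end of header)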
class CRSDHeaderBase(object):
_fields = ()
_required = ()
def __init__(self, **kwargs):
# abstract class
pass
@classmethod
def from_file_object(cls, fi):
"""
Extract the CRSD header object from a file opened in byte mode.
This file object is assumed to be at the correct location for the
CRSD header.
Parameters
----------
fi
The open file object, which will be progressively read.
Returns
-------
CRSDHeaderBase
"""
the_dict = {}
while True:
line = fi.readline()
res = _parse_crsd_header_field(line)
if res is None:
break
else:
fld, val = res
if fld not in cls._fields:
raise ValueError('Cannot extract CRSD header value from line {}'.format(line))
the_dict[fld] = val
return cls(**the_dict)
class CRSDHeader(CRSDHeaderBase):
_fields = (
'XML_BLOCK_SIZE', 'XML_BLOCK_BYTE_OFFSET', 'SUPPORT_BLOCK_SIZE', 'SUPPORT_BLOCK_BYTE_OFFSET',
'PVP_BLOCK_SIZE', 'PVP_BLOCK_BYTE_OFFSET', 'SIGNAL_BLOCK_SIZE', 'SIGNAL_BLOCK_BYTE_OFFSET',
'CLASSIFICATION', 'RELEASE_INFO')
_required = (
'XML_BLOCK_SIZE', 'XML_BLOCK_BYTE_OFFSET', 'PVP_BLOCK_SIZE', 'PVP_BLOCK_BYTE_OFFSET',
'SIGNAL_BLOCK_SIZE', 'SIGNAL_BLOCK_BYTE_OFFSET', 'CLASSIFICATION', 'RELEASE_INFO')
# descriptor
XML_BLOCK_SIZE = IntegerDescriptor(
'XML_BLOCK_SIZE', _required, strict=True,
docstring='Size of the XML instance that describes the product in bytes. '
'Size does NOT include the 2 bytes of the section terminator.') # type: int
XML_BLOCK_BYTE_OFFSET = IntegerDescriptor(
'XML_BLOCK_BYTE_OFFSET', _required, strict=True,
docstring='Offset to the first byte of the XML block in bytes.') # type: int
SUPPORT_BLOCK_SIZE = IntegerDescriptor(
'SUPPORT_BLOCK_SIZE', _required, strict=True,
docstring='Size of the Support block in bytes. Note - If the Support block is omitted, this '
'is not included.') # type: int
SUPPORT_BLOCK_BYTE_OFFSET = IntegerDescriptor(
'SUPPORT_BLOCK_BYTE_OFFSET', _required, strict=True,
docstring='Offset to the first byte of the Support block in bytes. Note - If the Support '
'block is omitted, this is not included.') # type: int
PVP_BLOCK_SIZE = IntegerDescriptor(
'PVP_BLOCK_SIZE', _required, strict=True,
docstring='Size of the PVP block in bytes.') # type: int
PVP_BLOCK_BYTE_OFFSET = IntegerDescriptor(
'PVP_BLOCK_BYTE_OFFSET', _required, strict=True,
docstring='Offset to the first byte of the PVP block in bytes.') # type: int
SIGNAL_BLOCK_SIZE = IntegerDescriptor(
'SIGNAL_BLOCK_SIZE', _required, strict=True,
docstring='Size of the Signal block in bytes.') # type: int
SIGNAL_BLOCK_BYTE_OFFSET = IntegerDescriptor(
'SIGNAL_BLOCK_BYTE_OFFSET', _required, strict=True,
docstring='Offset to the first byte of the Signal block in bytes.') # type: int
CLASSIFICATION = StringDescriptor(
'CLASSIFICATION', _required, strict=True, default_value='UNCLASSIFIED',
docstring='Product classification information that is human-readable.') # type: str
RELEASE_INFO = StringDescriptor(
'RELEASE_INFO', _required, strict=True, default_value='UNRESTRICTED',
docstring='Product release information that is human-readable.') # type: str
def __init__(self, XML_BLOCK_SIZE=None, XML_BLOCK_BYTE_OFFSET=None,
SUPPORT_BLOCK_SIZE=None, SUPPORT_BLOCK_BYTE_OFFSET=None,
PVP_BLOCK_SIZE=None, PVP_BLOCK_BYTE_OFFSET=None,
SIGNAL_BLOCK_SIZE=None, SIGNAL_BLOCK_BYTE_OFFSET=None,
CLASSIFICATION='UNCLASSIFIED', RELEASE_INFO='UNRESTRICTED',
use_version=None):
self.XML_BLOCK_SIZE = XML_BLOCK_SIZE
self.XML_BLOCK_BYTE_OFFSET = XML_BLOCK_BYTE_OFFSET
self.SUPPORT_BLOCK_SIZE = SUPPORT_BLOCK_SIZE
self.SUPPORT_BLOCK_BYTE_OFFSET = SUPPORT_BLOCK_BYTE_OFFSET
self.PVP_BLOCK_SIZE = PVP_BLOCK_SIZE
self.PVP_BLOCK_BYTE_OFFSET = PVP_BLOCK_BYTE_OFFSET
self.SIGNAL_BLOCK_SIZE = SIGNAL_BLOCK_SIZE
self.SIGNAL_BLOCK_BYTE_OFFSET = SIGNAL_BLOCK_BYTE_OFFSET
self.CLASSIFICATION = CLASSIFICATION
self.RELEASE_INFO = RELEASE_INFO
self._use_version = _CRSD_DEFAULT_VERSION if use_version is None else use_version
super(CRSDHeader, self).__init__()
@property
def use_version(self) -> str:
return self._use_version
def to_string(self):
"""
Forms a CRSD file header string (not including the section terminator) from populated attributes.
"""
return ('CRSD/{}\n'.format(self.use_version)
+ ''.join(["{} := {}\n".format(f, getattr(self, f))
for f in self._fields if getattr(self, f) is not None]))
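    # Rendering sketch (illustrative values): a populated header renders as the
    # version line followed by one `FIELD := value` line per populated field,
    #
    #     CRSD/<version>
    #     XML_BLOCK_SIZE := 1024
    #     XML_BLOCK_BYTE_OFFSET := 128
    #     ...
    #
    # The two-byte section terminator is appended by the writer, not here.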
class CRSDType(Serializable):
"""
The Compensated Received Signal Data definition.
"""
_fields = (
'CollectionID', 'Global', 'SceneCoordinates', 'Data', 'Channel', 'PVP',
'SupportArray', 'Dwell', 'ReferenceGeometry', 'Antenna',
'ErrorParameters', 'ProductInfo', 'GeoInfo', 'MatchInfo')
_required = (
'CollectionID', 'Global', 'Data', 'Channel', 'PVP', 'ReferenceGeometry')
_collections_tags = {'GeoInfo': {'array': 'False', 'child_tag': 'GeoInfo'}}
# descriptors
CollectionID = SerializableDescriptor(
'CollectionID', CollectionIDType, _required, strict=DEFAULT_STRICT,
docstring='General information about the collection.') # type: CollectionIDType
Global = SerializableDescriptor(
'Global', GlobalType, _required, strict=DEFAULT_STRICT,
docstring='Global parameters that apply to metadata components and CRSD '
'signal arrays.') # type: GlobalType
SceneCoordinates = SerializableDescriptor(
'SceneCoordinates', SceneCoordinatesType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that define geographic coordinates of the imaged '
'scene.') # type: Union[None, SceneCoordinatesType]
Data = SerializableDescriptor(
'Data', DataType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe binary data components contained in '
'the product.') # type: DataType
Channel = SerializableDescriptor(
'Channel', ChannelType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe the data channels contained in the '
'product.') # type: ChannelType
PVP = SerializableDescriptor(
'PVP', PVPType, _required, strict=DEFAULT_STRICT,
docstring='Structure used to specify the Per Vector Parameters provided for '
'each channel of a given product.') # type: PVPType
SupportArray = SerializableDescriptor(
'SupportArray', SupportArrayType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe the binary support array(s) content and '
'grid coordinates.') # type: Union[None, SupportArrayType]
Dwell = SerializableDescriptor(
'Dwell', DwellType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that specify the SAR dwell time supported by the signal '
'arrays contained in the CRSD product.') # type: Union[None, DwellType]
ReferenceGeometry = SerializableDescriptor(
'ReferenceGeometry', ReferenceGeometryType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe the collection geometry for the reference '
'vector of the reference channel.') # type: ReferenceGeometryType
Antenna = SerializableDescriptor(
'Antenna', AntennaType, _required, strict=DEFAULT_STRICT,
        docstring='Parameters that describe the transmit and receive antennas used '
                  'to collect the signal array(s).') # type: Union[None, AntennaType]
ErrorParameters = SerializableDescriptor(
'ErrorParameters', ErrorParametersType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe the statistics of errors in measured or estimated parameters '
'that describe the collection.') # type: Union[None, ErrorParametersType]
ProductInfo = SerializableDescriptor(
'ProductInfo', ProductInfoType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that provide general information about the CRSD product '
'and/or the derived products that may be created '
'from it.') # type: Union[None, ProductInfoType]
MatchInfo = SerializableDescriptor(
'MatchInfo', MatchInfoType, _required, strict=DEFAULT_STRICT,
docstring='Information about other collections that are matched to the collection from which '
'this CRSD product was generated.') # type: Union[None, MatchInfoType]
def __init__(self, CollectionID=None, Global=None, SceneCoordinates=None, Data=None,
Channel=None, PVP=None, SupportArray=None, Dwell=None, ReferenceGeometry=None,
Antenna=None, ErrorParameters=None, ProductInfo=None,
GeoInfo=None, MatchInfo=None, **kwargs):
"""
Parameters
----------
CollectionID : CollectionIDType
Global : GlobalType
SceneCoordinates : None|SceneCoordinatesType
Data : DataType
Channel : ChannelType
PVP : PVPType
SupportArray : None|SupportArrayType
Dwell : None|DwellType
ReferenceGeometry : ReferenceGeometryType
Antenna : None|AntennaType
ErrorParameters : None|ErrorParametersType
ProductInfo : None|ProductInfoType
GeoInfo : None|List[GeoInfoType]|GeoInfoType
MatchInfo : None|MatchInfoType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.CollectionID = CollectionID
self.Global = Global
self.SceneCoordinates = SceneCoordinates
self.Data = Data
self.Channel = Channel
self.PVP = PVP
self.SupportArray = SupportArray
self.Dwell = Dwell
self.ReferenceGeometry = ReferenceGeometry
self.Antenna = Antenna
self.ErrorParameters = ErrorParameters
self.ProductInfo = ProductInfo
self.MatchInfo = MatchInfo
self._GeoInfo = []
if GeoInfo is None:
pass
elif isinstance(GeoInfo, GeoInfoType):
self.addGeoInfo(GeoInfo)
elif isinstance(GeoInfo, (list, tuple)):
for el in GeoInfo:
self.addGeoInfo(el)
else:
raise ValueError('GeoInfo got unexpected type {}'.format(type(GeoInfo)))
super(CRSDType, self).__init__(**kwargs)
@property
def GeoInfo(self):
"""
List[GeoInfoType]: Parameters that describe a geographic feature.
"""
return self._GeoInfo
def getGeoInfo(self, key):
"""
Get GeoInfo(s) with name attribute == `key`.
Parameters
----------
key : str
Returns
-------
List[GeoInfoType]
"""
return [entry for entry in self._GeoInfo if entry.name == key]
def addGeoInfo(self, value):
"""
Add the given GeoInfo to the GeoInfo list.
Parameters
----------
value : GeoInfoType
Returns
-------
None
"""
if isinstance(value, ElementTree.Element):
gi_key = self._child_xml_ns_key.get('GeoInfo', self._xml_ns_key)
value = GeoInfoType.from_node(value, self._xml_ns, ns_key=gi_key)
elif isinstance(value, dict):
value = GeoInfoType.from_dict(value)
if isinstance(value, GeoInfoType):
self._GeoInfo.append(value)
else:
raise TypeError('Trying to set GeoInfo element with unexpected type {}'.format(type(value)))
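# Illustrative usage sketch (the feature name is hypothetical, not part of
# the module): given a populated CRSDType instance `crsd`,
# crsd.addGeoInfo(GeoInfoType(name='TARGET_AREA'))
# matches = crsd.getGeoInfo('TARGET_AREA') # -> list of GeoInfoType entries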
@classmethod
def from_node(cls, node, xml_ns, ns_key=None, kwargs=None):
if kwargs is None:
kwargs = OrderedDict()
gi_key = cls._child_xml_ns_key.get('GeoInfo', ns_key)
kwargs['GeoInfo'] = find_children(node, 'GeoInfo', xml_ns, gi_key)
return super(CRSDType, cls).from_node(node, xml_ns, ns_key=ns_key, kwargs=kwargs)
def to_node(self, doc, tag, ns_key=None, parent=None, check_validity=False, strict=DEFAULT_STRICT, exclude=()):
node = super(CRSDType, self).to_node(
doc, tag, ns_key=ns_key, parent=parent, check_validity=check_validity,
strict=strict, exclude=exclude+('GeoInfo', ))
# slap on the GeoInfo children
if self._GeoInfo is not None and len(self._GeoInfo) > 0:
for entry in self._GeoInfo:
entry.to_node(doc, 'GeoInfo', ns_key=ns_key, parent=node, strict=strict)
return node
def to_dict(self, check_validity=False, strict=DEFAULT_STRICT, exclude=()):
out = super(CRSDType, self).to_dict(
check_validity=check_validity, strict=strict, exclude=exclude+('GeoInfo', ))
# slap on the GeoInfo children
if len(self.GeoInfo) > 0:
out['GeoInfo'] = [entry.to_dict(
check_validity=check_validity, strict=strict) for entry in self._GeoInfo]
return out
def to_xml_bytes(self, urn=None, tag='CRSD', check_validity=False, strict=DEFAULT_STRICT):
if urn is None:
urn = get_namespace(_CRSD_DEFAULT_VERSION)
return super(CRSDType, self).to_xml_bytes(
urn=urn, tag=tag, check_validity=check_validity, strict=strict)
def to_xml_string(self, urn=None, tag='CRSD', check_validity=False, strict=DEFAULT_STRICT):
return self.to_xml_bytes(urn=urn, tag=tag, check_validity=check_validity, strict=strict).decode('utf-8')
def make_file_header(self, xml_offset=1024, use_version=None):
"""
Forms a CRSD file header consistent with the information in the Data and CollectionID nodes.
Parameters
----------
xml_offset : int, optional
Offset in bytes to the first byte of the XML block. If the provided value
is not large enough to account for the length of the file header
string, a larger value is chosen.
use_version : None|str
The CRSD version to use. If `None`, the module default version is used.
Returns
-------
header : CRSDHeader
"""
kwargs = OrderedDict()
kwargs['use_version'] = _CRSD_DEFAULT_VERSION if use_version is None else use_version
def _align(val):
align_to = 64
return int(numpy.ceil(float(val)/align_to)*align_to)
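# e.g. _align(1000) == 1024; block offsets are padded to 64-byte boundaries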
kwargs['XML_BLOCK_SIZE'] = len(self.to_xml_string())
kwargs['XML_BLOCK_BYTE_OFFSET'] = xml_offset
block_end = kwargs['XML_BLOCK_BYTE_OFFSET'] + kwargs['XML_BLOCK_SIZE'] + len(CRSD_SECTION_TERMINATOR)
if self.Data.NumSupportArrays > 0:
kwargs['SUPPORT_BLOCK_SIZE'] = self.Data.calculate_support_block_size()
kwargs['SUPPORT_BLOCK_BYTE_OFFSET'] = _align(block_end)
block_end = kwargs['SUPPORT_BLOCK_BYTE_OFFSET'] + kwargs['SUPPORT_BLOCK_SIZE']
kwargs['PVP_BLOCK_SIZE'] = self.Data.calculate_pvp_block_size()
kwargs['PVP_BLOCK_BYTE_OFFSET'] = _align(block_end)
block_end = kwargs['PVP_BLOCK_BYTE_OFFSET'] + kwargs['PVP_BLOCK_SIZE']
kwargs['SIGNAL_BLOCK_SIZE'] = self.Data.calculate_signal_block_size()
kwargs['SIGNAL_BLOCK_BYTE_OFFSET'] = _align(block_end)
kwargs['CLASSIFICATION'] = self.CollectionID.Classification
kwargs['RELEASE_INFO'] = self.CollectionID.ReleaseInfo
header = CRSDHeader(**kwargs)
header_str = header.to_string()
min_xml_offset = len(header_str) + len(CRSD_SECTION_TERMINATOR)
if kwargs['XML_BLOCK_BYTE_OFFSET'] < min_xml_offset:
header = self.make_file_header(xml_offset=_align(min_xml_offset + 32), use_version=use_version)
return header
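# Illustrative usage sketch (assumes `crsd` is a fully populated CRSDType):
# header = crsd.make_file_header()
# header_bytes = header.to_string().encode() + CRSD_SECTION_TERMINATOR
# The recursion above guarantees header.XML_BLOCK_BYTE_OFFSET >= len(header_bytes).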
def get_pvp_dtype(self):
"""
Gets the dtype for the corresponding PVP structured array. Note that they
must all have homogeneous dtype.
Returns
-------
numpy.dtype
This will be a compound dtype for a structured array.
"""
if self.PVP is None:
raise ValueError('No PVP defined.')
return self.PVP.get_vector_dtype()
@classmethod
def from_xml_file(cls, file_path):
"""
Construct the crsd object from a stand-alone xml file path.
Parameters
----------
file_path : str
Returns
-------
CRSDType
"""
root_node, xml_ns = parse_xml_from_file(file_path)
ns_key = 'default' if 'default' in xml_ns else None
return cls.from_node(root_node, xml_ns=xml_ns, ns_key=ns_key)
@classmethod
def from_xml_string(cls, xml_string):
"""
Construct the crsd object from an xml string.
Parameters
----------
xml_string : str|bytes
Returns
-------
CRSDType
"""
root_node, xml_ns = parse_xml_from_string(xml_string)
ns_key = 'default' if 'default' in xml_ns else None
return cls.from_node(root_node, xml_ns=xml_ns, ns_key=ns_key)
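# Illustrative round-trip sketch (the file path is hypothetical):
# crsd = CRSDType.from_xml_file('/path/to/crsd_metadata.xml')
# assert CRSDType.from_xml_string(crsd.to_xml_string()).CollectionID is not None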
def version_required(self):
"""
What CRSD version is required for valid support?
Returns
-------
tuple
"""
required = (1, 0, 0)
for fld in self._fields:
val = getattr(self, fld)
if val is not None and hasattr(val, 'version_required'):
required = max(required, val.version_required())
return required
| 20,462 | 38.888889 | 115 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/Data.py | """
The DataType definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from typing import List
from sarpy.io.xml.base import Serializable
from sarpy.io.xml.descriptors import StringDescriptor, StringEnumDescriptor, \
IntegerDescriptor, SerializableListDescriptor
from sarpy.io.phase_history.cphd1_elements.Data import SupportArraySizeType
from sarpy.io.phase_history.cphd1_elements.utils import binary_format_string_to_dtype
from .base import DEFAULT_STRICT
class ChannelSizeType(Serializable):
"""
Parameters that define the Channel signal array and PVP array size and location.
"""
_fields = ('Identifier', 'NumVectors', 'NumSamples', 'SignalArrayByteOffset', 'PVPArrayByteOffset')
_required = _fields
# descriptors
Identifier = StringDescriptor(
'Identifier', _required, strict=DEFAULT_STRICT,
docstring='String that uniquely identifies the CRSD channel (Ch_ID)'
' for which the data applies.') # type: str
NumVectors = IntegerDescriptor(
'NumVectors', _required, strict=DEFAULT_STRICT, bounds=(1, None),
docstring='Number of vectors in the signal array.') # type: int
NumSamples = IntegerDescriptor(
'NumSamples', _required, strict=DEFAULT_STRICT, bounds=(1, None),
docstring='Number of samples per vector in the signal array.') # type: int
SignalArrayByteOffset = IntegerDescriptor(
'SignalArrayByteOffset', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Signal Array offset from the start of the Signal block (in bytes) to the '
'start of the Signal Array for the channel.') # type: int
PVPArrayByteOffset = IntegerDescriptor(
'PVPArrayByteOffset', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='PVP Array offset from the start of the PVP block (in bytes) to the '
'start of the PVP Array for the channel.') # type: int
def __init__(self, Identifier=None, NumVectors=None, NumSamples=None, SignalArrayByteOffset=None,
PVPArrayByteOffset=None, **kwargs):
"""
Parameters
----------
Identifier : str
NumVectors : int
NumSamples : int
SignalArrayByteOffset : int
PVPArrayByteOffset : int
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Identifier = Identifier
self.NumVectors = NumVectors
self.NumSamples = NumSamples
self.SignalArrayByteOffset = SignalArrayByteOffset
self.PVPArrayByteOffset = PVPArrayByteOffset
super(ChannelSizeType, self).__init__(**kwargs)
class DataType(Serializable):
"""
Parameters that describe binary data components contained in the product.
"""
_fields = (
'SignalArrayFormat', 'NumBytesPVP', 'NumCRSDChannels',
'Channels', 'NumSupportArrays', 'SupportArrays')
_required = ('SignalArrayFormat', 'NumBytesPVP', 'Channels')
_collections_tags = {
'Channels': {'array': False, 'child_tag': 'Channel'},
'SupportArrays': {'array': False, 'child_tag': 'SupportArray'}}
# descriptors
SignalArrayFormat = StringEnumDescriptor(
'SignalArrayFormat', ('CI2', 'CI4', 'CF8'), _required, strict=DEFAULT_STRICT,
docstring="Signal Array sample binary format of the CRSD signal arrays, where"
"`CI2` denotes a 1 byte signed integer parameter, 2's complement format, and 2 Bytes Per Sample;"
"`CI4` denotes a 2 byte signed integer parameter, 2's complement format, and 4 Bytes Per Sample;"
"`CF8` denotes a 4 byte floating point parameter, and 8 Bytes Per Sample.") # type: str
NumBytesPVP = IntegerDescriptor(
'NumBytesPVP', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Number of bytes per set of Per Vector Parameters, where there is '
'one set of PVPs for each CRSD signal vector.') # type: int
Channels = SerializableListDescriptor(
'Channels', ChannelSizeType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Parameters that define the Channel signal array and PVP array size '
'and location.') # type: List[ChannelSizeType]
SupportArrays = SerializableListDescriptor(
'SupportArrays', SupportArraySizeType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Support Array size parameters. Branch repeated for each binary support array. '
'Support Array referenced by its unique Support Array '
'identifier.') # type: List[SupportArraySizeType]
def __init__(self, SignalArrayFormat=None, NumBytesPVP=None, Channels=None, SupportArrays=None, **kwargs):
"""
Parameters
----------
SignalArrayFormat : str
NumBytesPVP : int
Channels : List[ChannelSizeType]
SupportArrays : None|List[SupportArraySizeType]
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.SignalArrayFormat = SignalArrayFormat
self.NumBytesPVP = NumBytesPVP
self.Channels = Channels
self.SupportArrays = SupportArrays
super(DataType, self).__init__(**kwargs)
@property
def NumSupportArrays(self):
"""
int: The number of support arrays.
"""
if self.SupportArrays is None:
return 0
else:
return len(self.SupportArrays)
@property
def NumCRSDChannels(self):
"""
int: The number of CRSD channels.
"""
if self.Channels is None:
return 0
else:
return len(self.Channels)
def calculate_support_block_size(self):
"""
Calculates the size of the support block in bytes as described by the SupportArray fields.
"""
return sum([s.calculate_size() for s in self.SupportArrays])
def calculate_pvp_block_size(self):
"""
Calculates the size of the PVP block in bytes as described by the Data fields.
"""
return self.NumBytesPVP * sum([c.NumVectors for c in self.Channels])
def calculate_signal_block_size(self):
"""
Calculates the size of the signal block in bytes as described by the Data fields.
"""
num_bytes_per_sample = binary_format_string_to_dtype(self.SignalArrayFormat).itemsize
return num_bytes_per_sample * sum([c.NumVectors * c.NumSamples for c in self.Channels])
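# Worked example (values are illustrative): a single channel with NumVectors=100
# and NumSamples=2048 stored as SignalArrayFormat='CF8' (8 bytes per sample)
# yields a signal block of 100*2048*8 = 1638400 bytes.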
| 6,848 | 40.011976 | 115 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/ErrorParameters.py | """
The error parameters type definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from typing import Union
from sarpy.io.xml.base import Serializable, ParametersCollection
from sarpy.io.xml.descriptors import FloatDescriptor, SerializableDescriptor, \
ParametersDescriptor
from sarpy.io.complex.sicd_elements.blocks import ErrorDecorrFuncType
from sarpy.io.complex.sicd_elements.ErrorStatistics import PosVelErrType, TropoErrorType
from .base import DEFAULT_STRICT, FLOAT_FORMAT
class RadarSensorType(Serializable):
"""
Radar sensor error statistics.
"""
_fields = ('RangeBias', 'ClockFreqSF', 'RefTimeError', 'RangeBiasDecorr')
_required = ('RangeBias', )
_numeric_format = {'RangeBias': FLOAT_FORMAT, 'ClockFreqSF': FLOAT_FORMAT, 'RefTimeError': FLOAT_FORMAT}
# descriptors
RangeBias = FloatDescriptor(
'RangeBias', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Range bias error standard deviation.') # type: float
ClockFreqSF = FloatDescriptor(
'ClockFreqSF', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Payload clock frequency scale factor standard deviation, '
r'where :math:`SF = (\Delta f)/f_0`.') # type: float
RefTimeError = FloatDescriptor(
'RefTimeError', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Time error standard deviation, in seconds.') # type: float
RangeBiasDecorr = SerializableDescriptor(
'RangeBiasDecorr', ErrorDecorrFuncType, _required, strict=DEFAULT_STRICT,
docstring='Range Bias error decorrelation function.') # type: ErrorDecorrFuncType
def __init__(self, RangeBias=None, ClockFreqSF=None, RefTimeError=None,
RangeBiasDecorr=None, **kwargs):
"""
Parameters
----------
RangeBias : float
ClockFreqSF : None|float
RefTimeError : None|float
RangeBiasDecorr : None|ErrorDecorrFuncType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.RangeBias = RangeBias
self.ClockFreqSF = ClockFreqSF
self.RefTimeError = RefTimeError
self.RangeBiasDecorr = RangeBiasDecorr
super(RadarSensorType, self).__init__(**kwargs)
class IonoErrorType(Serializable):
"""
Ionosphere delay error statistics.
"""
_fields = ('IonoRangeVertical', 'IonoRangeRateVertical', 'IonoRgRgRateCC', 'IonoRangeVertDecorr')
_required = ('IonoRangeVertical', )
_numeric_format = {'IonoRangeVertical': FLOAT_FORMAT, 'IonoRangeRateVertical': FLOAT_FORMAT, 'IonoRgRgRateCC': FLOAT_FORMAT}
# descriptors
IonoRangeVertical = FloatDescriptor(
'IonoRangeVertical', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Ionosphere two-way delay error for normal incidence standard deviation. '
r'Expressed as a range error. :math:`(\Delta R) = (\Delta T) \cdot (c/2)`.') # type: float
IonoRangeRateVertical = FloatDescriptor(
'IonoRangeRateVertical', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Ionosphere two-way delay rate of change error for normal incidence standard deviation. '
r'Expressed as a range rate error. :math:`(\Delta \dot{R}) = (\Delta \dot{TD}_{Iono}) \cdot (c/2)`.') # type: float
IonoRgRgRateCC = FloatDescriptor(
'IonoRgRgRateCC', _required, strict=DEFAULT_STRICT, bounds=(-1, 1),
docstring='Ionosphere range error and range rate error correlation coefficient.') # type: float
IonoRangeVertDecorr = SerializableDescriptor(
'IonoRangeVertDecorr', ErrorDecorrFuncType, _required, strict=DEFAULT_STRICT,
docstring='Ionosphere range error decorrelation function.') # type: ErrorDecorrFuncType
def __init__(self, IonoRangeVertical=None, IonoRangeRateVertical=None,
IonoRgRgRateCC=None, IonoRangeVertDecorr=None, **kwargs):
"""
Parameters
----------
IonoRangeVertical : float
IonoRangeRateVertical : None|float
IonoRgRgRateCC : None|float
IonoRangeVertDecorr : None|ErrorDecorrFuncType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.IonoRangeVertical = IonoRangeVertical
self.IonoRangeRateVertical = IonoRangeRateVertical
self.IonoRgRgRateCC = IonoRgRgRateCC
self.IonoRangeVertDecorr = IonoRangeVertDecorr
super(IonoErrorType, self).__init__(**kwargs)
class BistaticRadarSensorType(Serializable):
"""
Error statistics for a single radar platform.
"""
_fields = ('ClockFreqSF', 'RefTimeError')
_required = tuple()
_numeric_format = {'ClockFreqSF': FLOAT_FORMAT, 'RefTimeError': FLOAT_FORMAT}
# descriptors
ClockFreqSF = FloatDescriptor(
'ClockFreqSF', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Payload clock frequency scale factor standard deviation, '
r'where :math:`SF = (\Delta f)/f_0`.') # type: float
RefTimeError = FloatDescriptor(
'RefTimeError', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Time error standard deviation, in seconds.') # type: float
def __init__(self, ClockFreqSF=None, RefTimeError=None, **kwargs):
"""
Parameters
----------
ClockFreqSF : None|float
RefTimeError : None|float
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.ClockFreqSF = ClockFreqSF
self.RefTimeError = RefTimeError
super(BistaticRadarSensorType, self).__init__(**kwargs)
class ReceiveSensorType(Serializable):
"""
Receive sensor error statistics.
"""
_fields = ('RcvDelayBias', 'ClockFreqSF', 'RefTimeError')
_required = ('RcvDelayBias', )
_numeric_format = {'ClockFreqSF': FLOAT_FORMAT, 'RefTimeError': FLOAT_FORMAT}
# descriptors
RcvDelayBias = FloatDescriptor(
'RcvDelayBias', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Receive path signal delay error bias that causes an error in the'
' estimated signal time of arrival at the Receive APC.') # type: float
ClockFreqSF = FloatDescriptor(
'ClockFreqSF', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Payload clock frequency scale factor standard deviation, '
r'where :math:`SF = (\Delta f)/f_0`.') # type: float
RefTimeError = FloatDescriptor(
'RefTimeError', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Time error standard deviation, in seconds.') # type: float
def __init__(self, RcvDelayBias=None, ClockFreqSF=None, RefTimeError=None, **kwargs):
"""
Parameters
----------
RcvDelayBias : float
ClockFreqSF : None|float
RefTimeError : None|float
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.RcvDelayBias = RcvDelayBias
self.ClockFreqSF = ClockFreqSF
self.RefTimeError = RefTimeError
super(ReceiveSensorType, self).__init__(**kwargs)
class MonostaticType(Serializable):
"""
Error parameters for monostatic collection.
"""
_fields = ('PosVelErr', 'RadarSensor', 'TropoError', 'IonoError', 'AddedParameters')
_required = ('PosVelErr', 'RadarSensor')
_collections_tags = {'AddedParameters': {'array': False, 'child_tag': 'Parameter'}}
# descriptors
PosVelErr = SerializableDescriptor(
'PosVelErr', PosVelErrType, _required, strict=DEFAULT_STRICT,
docstring='Position and velocity error statistics for the sensor '
'platform.') # type: PosVelErrType
RadarSensor = SerializableDescriptor(
'RadarSensor', RadarSensorType, _required, strict=DEFAULT_STRICT,
docstring='Radar sensor error statistics.') # type: RadarSensorType
TropoError = SerializableDescriptor(
'TropoError', TropoErrorType, _required, strict=DEFAULT_STRICT,
docstring='Troposphere delay error statistics.') # type: TropoErrorType
IonoError = SerializableDescriptor(
'IonoError', IonoErrorType, _required, strict=DEFAULT_STRICT,
docstring='Ionosphere delay error statistics.') # type: IonoErrorType
AddedParameters = ParametersDescriptor(
'AddedParameters', _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Additional error parameters.') # type: ParametersCollection
def __init__(self, PosVelErr=None, RadarSensor=None, TropoError=None, IonoError=None,
AddedParameters=None, **kwargs):
"""
Parameters
----------
PosVelErr : PosVelErrType
RadarSensor : RadarSensorType
TropoError : None|TropoErrorType
IonoError : None|IonoErrorType
AddedParameters : None|ParametersCollection|dict
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.PosVelErr = PosVelErr
self.RadarSensor = RadarSensor
self.TropoError = TropoError
self.IonoError = IonoError
self.AddedParameters = AddedParameters
super(MonostaticType, self).__init__(**kwargs)
class PlatformType(Serializable):
"""
Basic bistatic platform error type definition.
"""
_fields = ('PosVelErr', 'RadarSensor')
_required = _fields
# descriptors
PosVelErr = SerializableDescriptor(
'PosVelErr', PosVelErrType, _required, strict=DEFAULT_STRICT,
docstring='Position and velocity error statistics for the sensor '
'platform.') # type: PosVelErrType
RadarSensor = SerializableDescriptor(
'RadarSensor', BistaticRadarSensorType, _required, strict=DEFAULT_STRICT,
docstring='Platform sensor error statistics.') # type: BistaticRadarSensorType
def __init__(self, PosVelErr=None, RadarSensor=None, **kwargs):
"""
Parameters
----------
PosVelErr : PosVelErrType
RadarSensor : BistaticRadarSensorType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.PosVelErr = PosVelErr
self.RadarSensor = RadarSensor
super(PlatformType, self).__init__(**kwargs)
class BistaticType(Serializable):
"""
Error parameters for a bistatic collection.
"""
_fields = ('TxPlatform', 'RcvPlatform', 'AddedParameters')
_required = ('TxPlatform', 'RcvPlatform')
_collections_tags = {'AddedParameters': {'array': False, 'child_tag': 'Parameter'}}
# descriptors
TxPlatform = SerializableDescriptor(
'TxPlatform', PlatformType, _required, strict=DEFAULT_STRICT,
docstring='Error statistics for the transmit platform.') # type: PlatformType
RcvPlatform = SerializableDescriptor(
'RcvPlatform', PlatformType, _required, strict=DEFAULT_STRICT,
docstring='Error statistics for the receive platform.') # type: PlatformType
AddedParameters = ParametersDescriptor(
'AddedParameters', _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Additional error parameters.') # type: ParametersCollection
def __init__(self, TxPlatform=None, RcvPlatform=None, AddedParameters=None, **kwargs):
"""
Parameters
----------
TxPlatform : PlatformType
RcvPlatform : PlatformType
AddedParameters : None|ParametersCollection|dict
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.TxPlatform = TxPlatform
self.RcvPlatform = RcvPlatform
self.AddedParameters = AddedParameters
super(BistaticType, self).__init__(**kwargs)
class ReceiveOnlyType(Serializable):
"""
Parameters for the receive-only collect type.
"""
_fields = ('PosVelErr', 'ReceiveSensor', 'AddedParameters')
_required = ('PosVelErr', 'ReceiveSensor')
_collections_tags = {'AddedParameters': {'array': False, 'child_tag': 'Parameter'}}
# descriptors
PosVelErr = SerializableDescriptor(
'PosVelErr', PosVelErrType, _required, strict=DEFAULT_STRICT,
docstring='Position and velocity error statistics for the sensor '
'platform.') # type: PosVelErrType
ReceiveSensor = SerializableDescriptor(
'ReceiveSensor', ReceiveSensorType, _required, strict=DEFAULT_STRICT,
docstring='Receive sensor error statistics.') # type: ReceiveSensorType
AddedParameters = ParametersDescriptor(
'AddedParameters', _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Additional error parameters.') # type: ParametersCollection
def __init__(self, PosVelErr=None, ReceiveSensor=None, AddedParameters=None, **kwargs):
"""
Parameters
----------
PosVelErr : PosVelErrType
ReceiveSensor : ReceiveSensorType
AddedParameters : None|ParametersCollection|dict
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.PosVelErr = PosVelErr
self.ReceiveSensor = ReceiveSensor
self.AddedParameters = AddedParameters
super(ReceiveOnlyType, self).__init__(**kwargs)
class ErrorParametersType(Serializable):
"""
Parameters that describe the statistics of errors in measured or estimated
parameters that describe the collection.
"""
_fields = ('Monostatic', 'Bistatic', 'ReceiveOnly')
_required = ()
_choice = ({'required': True, 'collection': _fields}, )
# descriptors
Monostatic = SerializableDescriptor(
'Monostatic', MonostaticType, _required, strict=DEFAULT_STRICT,
docstring='The monostatic parameters.') # type: Union[None, MonostaticType]
Bistatic = SerializableDescriptor(
'Bistatic', BistaticType, _required, strict=DEFAULT_STRICT,
docstring='The bistatic parameters.') # type: Union[None, BistaticType]
ReceiveOnly = SerializableDescriptor(
'ReceiveOnly', ReceiveOnlyType, _required, strict=DEFAULT_STRICT,
docstring='The receive only parameters.') # type: Union[None, ReceiveOnlyType]
def __init__(self, Monostatic=None, Bistatic=None, ReceiveOnly=None, **kwargs):
"""
Parameters
----------
Monostatic : None|MonostaticType
Bistatic : None|BistaticType
ReceiveOnly : None|ReceiveOnlyType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Monostatic = Monostatic
self.Bistatic = Bistatic
self.ReceiveOnly = ReceiveOnly
super(ErrorParametersType, self).__init__(**kwargs)
| 15,839 | 38.6 | 128 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/CollectionID.py | """
The CollectionIDType definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from sarpy.io.xml.descriptors import StringDescriptor
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType
from .base import DEFAULT_STRICT
class CollectionIDType(CollectionInfoType):
"""
The CollectionID type definition.
"""
_fields = (
'CollectorName', 'IlluminatorName', 'CoreName', 'CollectType',
'RadarMode', 'Classification', 'ReleaseInfo', 'CountryCodes', 'Parameters')
_required = ('CollectorName', 'CoreName', 'CollectType', 'Classification', 'ReleaseInfo')
# descriptors
ReleaseInfo = StringDescriptor(
'ReleaseInfo', _required, strict=DEFAULT_STRICT, default_value='UNRESTRICTED',
docstring='The product release information.') # type: str
def __init__(self, CollectorName=None, IlluminatorName=None, CoreName=None, CollectType=None,
RadarMode=None, Classification="UNCLASSIFIED", ReleaseInfo='UNRESTRICTED',
CountryCodes=None, Parameters=None, **kwargs):
"""
Parameters
----------
CollectorName : str
IlluminatorName : str
CoreName : str
CollectType : str
RadarMode : RadarModeType
Classification : str
ReleaseInfo : str
CountryCodes : list|str
Parameters : ParametersCollection|dict
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.ReleaseInfo = ReleaseInfo
super(CollectionIDType, self).__init__(
CollectorName=CollectorName, IlluminatorName=IlluminatorName, CoreName=CoreName,
CollectType=CollectType, RadarMode=RadarMode, Classification=Classification,
CountryCodes=CountryCodes, Parameters=Parameters, **kwargs)
| 1,998 | 34.696429 | 97 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/PVP.py | """
The Per Vector parameters (PVP) definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from typing import Union, List
import numpy
from sarpy.io.xml.base import Serializable
from sarpy.io.xml.descriptors import IntegerDescriptor, SerializableDescriptor, \
SerializableListDescriptor
from sarpy.io.phase_history.cphd1_elements.PVP import PerVectorParameterI8, \
PerVectorParameterF8, PerVectorParameterXYZ, UserDefinedPVPType
from sarpy.io.phase_history.cphd1_elements.utils import binary_format_string_to_dtype, homogeneous_dtype
from .base import DEFAULT_STRICT
BYTES_PER_WORD = 8
class PerVectorParameterDCXY(Serializable):
_fields = ('Offset', 'Size', 'Format')
_required = ('Offset', )
# descriptors
Offset = IntegerDescriptor(
'Offset', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='The offset value.') # type: int
def __init__(self, Offset=None, **kwargs):
"""
Parameters
----------
Offset : int
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Offset = Offset
super(PerVectorParameterDCXY, self).__init__(**kwargs)
@property
def Size(self):
"""
int: The size of the vector, constant value 2 here.
"""
return 2
@property
def Format(self):
"""
str: The format of the vector data, constant value 'DCX=F8;DCY=F8;' here.
"""
return 'DCX=F8;DCY=F8;'
class PerVectorParameterTxLFM(Serializable):
_fields = ('Offset', 'Size', 'Format')
_required = ('Offset', )
# descriptors
Offset = IntegerDescriptor(
'Offset', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='The offset value.') # type: int
def __init__(self, Offset=None, **kwargs):
"""
Parameters
----------
Offset : int
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Offset = Offset
super(PerVectorParameterTxLFM, self).__init__(**kwargs)
@property
def Size(self):
"""
int: The size of the vector, constant value 3 here.
"""
return 3
@property
def Format(self):
"""
str: The format of the vector data, constant value 'PhiXC=F8;FxC=F8;FxRate=F8;' here.
"""
return 'PhiXC=F8;FxC=F8;FxRate=F8;'
class TxAntennaType(Serializable):
_fields = ('TxACX', 'TxACY', 'TxEB')
_required = _fields
# descriptors
TxACX = SerializableDescriptor(
'TxACX', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
TxACY = SerializableDescriptor(
'TxACY', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
TxEB = SerializableDescriptor(
'TxEB', PerVectorParameterDCXY, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterDCXY
def __init__(self, TxACX=None, TxACY=None, TxEB=None, **kwargs):
"""
Parameters
----------
TxACX : PerVectorParameterXYZ
TxACY : PerVectorParameterXYZ
TxEB : PerVectorParameterDCXY
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.TxACX = TxACX
self.TxACY = TxACY
self.TxEB = TxEB
super(TxAntennaType, self).__init__(**kwargs)
class RcvAntennaType(Serializable):
_fields = ('RcvACX', 'RcvACY', 'RcvEB')
_required = _fields
# descriptors
RcvACX = SerializableDescriptor(
'RcvACX', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
RcvACY = SerializableDescriptor(
'RcvACY', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
RcvEB = SerializableDescriptor(
'RcvEB', PerVectorParameterDCXY, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterDCXY
def __init__(self, RcvACX=None, RcvACY=None, RcvEB=None, **kwargs):
"""
Parameters
----------
RcvACX : PerVectorParameterXYZ
RcvACY : PerVectorParameterXYZ
RcvEB : PerVectorParameterDCXY
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.RcvACX = RcvACX
self.RcvACY = RcvACY
self.RcvEB = RcvEB
super(RcvAntennaType, self).__init__(**kwargs)
class TxPulseType(Serializable):
_fields = ('TxTime', 'TxPos', 'TxVel', 'FX1', 'FX2', 'TXmt', 'TxLFM', 'TxAntenna')
_required = ('TxTime', 'TxPos', 'TxVel', 'FX1', 'FX2', 'TXmt')
# descriptors
TxTime = SerializableDescriptor(
'TxTime', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
TxPos = SerializableDescriptor(
'TxPos', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
TxVel = SerializableDescriptor(
'TxVel', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
FX1 = SerializableDescriptor(
'FX1', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
FX2 = SerializableDescriptor(
'FX2', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
TXmt = SerializableDescriptor(
'TXmt', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
TxLFM = SerializableDescriptor(
'TxLFM', PerVectorParameterTxLFM, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, PerVectorParameterTxLFM]
TxAntenna = SerializableDescriptor(
'TxAntenna', TxAntennaType, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, TxAntennaType]
def __init__(self, TxTime=None, TxPos=None, TxVel=None,
FX1=None, FX2=None, TXmt=None, TxLFM=None,
TxAntenna=None, **kwargs):
"""
Parameters
----------
TxTime : PerVectorParameterF8
TxPos : PerVectorParameterXYZ
TxVel : PerVectorParameterXYZ
FX1 : PerVectorParameterF8
FX2 : PerVectorParameterF8
TXmt : PerVectorParameterF8
TxLFM : None|PerVectorParameterTxLFM
TxAntenna : None|TxAntennaType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.TxTime = TxTime
self.TxPos = TxPos
self.TxVel = TxVel
self.FX1 = FX1
self.FX2 = FX2
self.TXmt = TXmt
self.TxLFM = TxLFM
self.TxAntenna = TxAntenna
super(TxPulseType, self).__init__(**kwargs)
class PVPType(Serializable):
_fields = (
'RcvTime', 'RcvPos', 'RcvVel', 'RefPhi0', 'RefFreq', 'DFIC0',
'FICRate', 'FRCV1', 'FRCV2', 'DGRGC', 'SIGNAL', 'AmpSF',
'RcvAntenna', 'TxPulse', 'AddedPVP')
_required = (
'RcvTime', 'RcvPos', 'RcvVel', 'RefPhi0', 'RefFreq', 'DFIC0',
'FICRate', 'FRCV1', 'FRCV2')
_collections_tags = {'AddedPVP': {'array': False, 'child_tag': 'AddedPVP'}}
# descriptors
RcvTime = SerializableDescriptor(
'RcvTime', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
RcvPos = SerializableDescriptor(
'RcvPos', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
RcvVel = SerializableDescriptor(
'RcvVel', PerVectorParameterXYZ, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterXYZ
RefPhi0 = SerializableDescriptor(
'RefPhi0', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
RefFreq = SerializableDescriptor(
'RefFreq', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
DFIC0 = SerializableDescriptor(
'DFIC0', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
FICRate = SerializableDescriptor(
'FICRate', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
FRCV1 = SerializableDescriptor(
'FRCV1', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
FRCV2 = SerializableDescriptor(
'FRCV2', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: PerVectorParameterF8
DGRGC = SerializableDescriptor(
'DGRGC', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, PerVectorParameterF8]
SIGNAL = SerializableDescriptor(
'SIGNAL', PerVectorParameterI8, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, PerVectorParameterI8]
AmpSF = SerializableDescriptor(
'AmpSF', PerVectorParameterF8, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, PerVectorParameterF8]
RcvAntenna = SerializableDescriptor(
'RcvAntenna', RcvAntennaType, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, RcvAntennaType]
TxPulse = SerializableDescriptor(
'TxPulse', TxPulseType, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, TxPulseType]
AddedPVP = SerializableListDescriptor(
'AddedPVP', UserDefinedPVPType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='') # type: Union[None, List[UserDefinedPVPType]]
def __init__(self, RcvTime=None, RcvPos=None, RcvVel=None,
RefPhi0=None, RefFreq=None, DFIC0=None,
FICRate=None, FRCV1=None, FRCV2=None,
DGRGC=None, SIGNAL=None, AmpSF=None,
RcvAntenna=None, TxPulse=None,
AddedPVP=None, **kwargs):
"""
Parameters
----------
RcvTime : PerVectorParameterF8
RcvPos : PerVectorParameterXYZ
RcvVel : PerVectorParameterXYZ
RefPhi0 : PerVectorParameterF8
RefFreq : PerVectorParameterF8
DFIC0 : PerVectorParameterF8
FICRate : PerVectorParameterF8
FRCV1 : PerVectorParameterF8
FRCV2 : PerVectorParameterF8
DGRGC : None|PerVectorParameterF8
SIGNAL : None|PerVectorParameterI8
AmpSF : None|PerVectorParameterF8
RcvAntenna : None|RcvAntennaType
TxPulse : None|TxPulseType
AddedPVP : None|List[UserDefinedPVPType]
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.RcvTime = RcvTime
self.RcvPos = RcvPos
self.RcvVel = RcvVel
self.RefPhi0 = RefPhi0
self.RefFreq = RefFreq
self.DFIC0 = DFIC0
self.FICRate = FICRate
self.FRCV1 = FRCV1
self.FRCV2 = FRCV2
self.DGRGC = DGRGC
self.SIGNAL = SIGNAL
self.AmpSF = AmpSF
self.RcvAntenna = RcvAntenna
self.TxPulse = TxPulse
self.AddedPVP = AddedPVP
super(PVPType, self).__init__(**kwargs)
def get_size(self):
"""
Gets the size in bytes of each vector.
Returns
-------
int
"""
def get_num_words(obj):
sz = getattr(obj, 'Size', None) # leaf parameters define Size; container nodes do not
if sz is not None:
return sz
sz = 0
# noinspection PyProtectedMember
for fld in obj._fields:
fld_val = getattr(obj, fld)
if fld_val is not None:
if isinstance(fld_val, (list, tuple)): # e.g. the AddedPVP collection
for arr_val in fld_val:
sz += get_num_words(arr_val)
else:
sz += get_num_words(fld_val)
return sz
return get_num_words(self) * BYTES_PER_WORD
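# Worked example (assuming the usual 1-word F8 and 3-word XYZ parameters): a
# PVPType with only the nine required parameters populated spans
# 1+3+3+1+1+1+1+1+1 = 13 words, so get_size() == 13*BYTES_PER_WORD == 104 bytes.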
def get_offset_size_format(self, field):
"""
Get the Offset (in bytes), Size (in bytes) for the given field,
as well as the corresponding struct format string.
Parameters
----------
field : str
The desired field name.
Returns
-------
None|(int, int, str)
"""
def osf_tuple(val_in):
return val_in.Offset*BYTES_PER_WORD, val_in.Size*BYTES_PER_WORD, homogeneous_dtype(val_in.Format).char
# noinspection PyProtectedMember
if field in self._fields[:-1]:
val = getattr(self, field)
if val is None:
return None
return osf_tuple(val)
elif self.RcvAntenna and field in self.RcvAntenna._fields:
val = getattr(self.RcvAntenna, field)
if val is None:
return None
return osf_tuple(val)
elif self.TxPulse and field in self.TxPulse._fields:
val = getattr(self.TxPulse, field)
if val is None:
return None
return osf_tuple(val)
elif self.TxPulse and self.TxPulse.TxAntenna and field in self.TxPulse.TxAntenna._fields:
val = getattr(self.TxPulse.TxAntenna, field)
if val is None:
return None
return osf_tuple(val)
else:
if self.AddedPVP is None:
return None
for val in self.AddedPVP:
if field == val.Name:
return osf_tuple(val)
return None
def get_vector_dtype(self):
"""
Gets the dtype for the corresponding structured array for the full PVP set.
Returns
-------
numpy.dtype
This will be a compound dtype for a structured array.
"""
names = []
formats = []
offsets = []
for field in self._fields:
val = getattr(self, field)
if val is None:
continue
elif field == "AddedPVP":
for entry in val:
names.append(entry.Name)
formats.append(binary_format_string_to_dtype(entry.Format))
offsets.append(entry.Offset*BYTES_PER_WORD)
elif field == 'RcvAntenna' or field == 'TxPulse':
continue
else:
names.append(field)
formats.append(binary_format_string_to_dtype(val.Format))
offsets.append(val.Offset*BYTES_PER_WORD)
if self.RcvAntenna is not None:
# noinspection PyProtectedMember
for field in self.RcvAntenna._fields:
val = getattr(self.RcvAntenna, field)
if val is None:
continue
else:
names.append(field)
formats.append(binary_format_string_to_dtype(val.Format))
offsets.append(val.Offset*BYTES_PER_WORD)
if self.TxPulse is not None:
# noinspection PyProtectedMember
for field in self.TxPulse._fields:
val = getattr(self.TxPulse, field)
if val is None:
continue
elif field == 'TxAntenna':
continue
else:
names.append(field)
formats.append(binary_format_string_to_dtype(val.Format))
offsets.append(val.Offset*BYTES_PER_WORD)
if self.TxPulse.TxAntenna is not None:
# noinspection PyProtectedMember
for field in self.TxPulse.TxAntenna._fields:
val = getattr(self.TxPulse.TxAntenna, field)
if val is None:
continue
else:
names.append(field)
formats.append(binary_format_string_to_dtype(val.Format))
offsets.append(val.Offset*BYTES_PER_WORD)
return numpy.dtype({'names': names, 'formats': formats, 'offsets': offsets})
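# Illustrative usage sketch (`raw_pvp_bytes` is hypothetical): the returned
# dtype can back a structured array view of a raw PVP block, e.g.
# pvp_array = numpy.frombuffer(raw_pvp_bytes, dtype=pvp.get_vector_dtype())
# rcv_times = pvp_array['RcvTime']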
| 17,069 | 34.636743 | 114 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/Channel.py | """
The Channel definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from typing import Union, List
from sarpy.io.xml.base import Serializable, ParametersCollection
from sarpy.io.xml.descriptors import StringDescriptor, StringEnumDescriptor, \
IntegerDescriptor, SerializableListDescriptor, ParametersDescriptor, \
BooleanDescriptor, FloatDescriptor, SerializableDescriptor
from sarpy.io.phase_history.cphd1_elements.blocks import POLARIZATION_TYPE, AreaType
from sarpy.io.phase_history.cphd1_elements.Channel import DwellTimesType
from .base import DEFAULT_STRICT, FLOAT_FORMAT
class RcvAntennaType(Serializable):
"""
The receive antenna information.
"""
_fields = ('RcvAPCId', 'RcvAPATId')
_required = _fields
# descriptors
RcvAPCId = StringDescriptor(
'RcvAPCId', _required, strict=DEFAULT_STRICT,
docstring='Identifier for the Receive APC to be used to compute the receive antenna'
' pattern as a function of time for the channel (APC_ID).') # type: str
RcvAPATId = StringDescriptor(
'RcvAPATId', _required, strict=DEFAULT_STRICT,
docstring='Identifier for the Receive Antenna pattern to collect the signal data'
' (APAT_ID).') # type: str
def __init__(self, RcvAPCId=None, RcvAPATId=None, **kwargs):
"""
Parameters
----------
RcvAPCId : str
RcvAPATId : str
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.RcvAPCId = RcvAPCId
self.RcvAPATId = RcvAPATId
super(RcvAntennaType, self).__init__(**kwargs)
class SignalRefLevelType(Serializable):
"""
The signal power level information.
"""
_fields = ('PSCRSD', 'PRcvDensity')
_required = _fields
_numeric_format = {fld: FLOAT_FORMAT for fld in _fields}
# descriptors
PSCRSD = FloatDescriptor(
'PSCRSD', _required, strict=DEFAULT_STRICT,
docstring='Power level in the fast time signal vector for a CW tone at f = f_0_REF'
' and for f_IC(v,t) = f_0_REF.') # type: float
PRcvDensity = FloatDescriptor(
'PRcvDensity', _required, strict=DEFAULT_STRICT,
docstring='Receive power density per unit area for a CW tone at f = f_0_REF that results in'
' signal vector power PS_CRSD. Signal received from a far field source located'
' along the receive antenna mainlobe boresight at t = trs(v_CH_REF).') # type: float
def __init__(self, PSCRSD=None, PRcvDensity=None, **kwargs):
"""
Parameters
----------
PSCRSD : float
PRcvDensity : float
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.PSCRSD = PSCRSD
self.PRcvDensity = PRcvDensity
super(SignalRefLevelType, self).__init__(**kwargs)
class NoiseLevelType(Serializable):
"""
The thermal noise level information.
"""
_fields = ('PNCRSD', 'BNCRSD')
_required = _fields
_numeric_format = {fld: FLOAT_FORMAT for fld in _fields}
# descriptors
PNCRSD = FloatDescriptor(
'PNCRSD', _required, strict=DEFAULT_STRICT,
docstring='Noise power level in fast time signal vector for f_IC(v,t) = f_0(v_CH_REF).') # type: float
BNCRSD = FloatDescriptor(
'BNCRSD', _required, strict=DEFAULT_STRICT,
docstring='Noise Equivalent BW for the noise signal. Bandwidth BN_CRSD is expressed relative'
' to the fast time sample rate for the channel (fs).') # type: float
def __init__(self, PNCRSD=None, BNCRSD=None, **kwargs):
"""
Parameters
----------
PNCRSD : float
BNCRSD : float
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.PNCRSD = PNCRSD
self.BNCRSD = BNCRSD
super(NoiseLevelType, self).__init__(**kwargs)
class TxAntennaType(Serializable):
"""
The transmit antenna information.
"""
_fields = ('TxAPCId', 'TxAPATId')
_required = _fields
# descriptors
TxAPCId = StringDescriptor(
'TxAPCId', _required, strict=DEFAULT_STRICT,
docstring='Identifier of Transmit APC to be used to compute the transmit antenna'
' pattern as a function of time for the channel (APC_ID).') # type: str
TxAPATId = StringDescriptor(
'TxAPATId', _required, strict=DEFAULT_STRICT,
docstring='Identifier of the Transmit Antenna pattern used to form the channel'
' signal array (APAT_ID).') # type: str
def __init__(self, TxAPCId=None, TxAPATId=None, **kwargs):
"""
Parameters
----------
TxAPCId : str
TxAPATId : str
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.TxAPCId = TxAPCId
self.TxAPATId = TxAPATId
super(TxAntennaType, self).__init__(**kwargs)
class SARImagingType(Serializable):
"""
The SAR Imaging parameters.
"""
_fields = ('TxLFMFixed', 'TxPol', 'DwellTimes', 'TxAntenna', 'ImageArea')
_required = ('TxPol', 'DwellTimes')
# descriptors
TxLFMFixed = BooleanDescriptor(
'TxLFMFixed', _required, strict=DEFAULT_STRICT,
docstring='Flag to indicate the same transmit LFM waveform is used for all '
'pulses/vectors of the channel.') # type: Union[None, bool]
TxPol = StringEnumDescriptor(
'TxPol', POLARIZATION_TYPE, _required, strict=DEFAULT_STRICT,
docstring='Transmitted signal polarization for the channel.') # type: str
DwellTimes = SerializableDescriptor(
'DwellTimes', DwellTimesType, _required, strict=DEFAULT_STRICT,
docstring='COD Time and Dwell Time polynomials over the image area.') # type: DwellTimesType
TxAntenna = SerializableDescriptor(
'TxAntenna', TxAntennaType, _required, strict=DEFAULT_STRICT,
docstring='Antenna Phase Center and Antenna Pattern identifiers for the transmit antenna'
' used to illuminate the imaged area.') # type: Union[None, TxAntennaType]
ImageArea = SerializableDescriptor(
'ImageArea', AreaType, _required, strict=DEFAULT_STRICT,
docstring='SAR Image Area for the channel defined by a rectangle aligned with (IAX, IAY).'
' May be reduced by the optional polygon.') # type: Union[None, AreaType]
def __init__(self, TxLFMFixed=None, TxPol=None, DwellTimes=None, TxAntenna=None,
ImageArea=None, **kwargs):
"""
Parameters
----------
TxLFMFixed : None|bool
TxPol : PolarizationType
DwellTimes : DwellTimesType
TxAntenna : None|TxAntennaType
ImageArea : None|AreaType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.TxLFMFixed = TxLFMFixed
self.TxPol = TxPol
self.DwellTimes = DwellTimes
self.TxAntenna = TxAntenna
self.ImageArea = ImageArea
super(SARImagingType, self).__init__(**kwargs)
class ChannelParametersType(Serializable):
"""
The CRSD data channel parameters.
"""
_fields = (
'Identifier', 'RefVectorIndex', 'RefFreqFixed', 'FrcvFixed', 'DemodFixed',
'F0Ref', 'Fs', 'BWInst', 'RcvPol', 'SignalNormal', 'RcvAntenna',
'SignalRefLevel', 'NoiseLevel', 'AddedParameters', 'SARImaging')
_required = (
'Identifier', 'RefVectorIndex', 'RefFreqFixed', 'FrcvFixed', 'DemodFixed',
'F0Ref', 'Fs', 'BWInst', 'RcvPol')
_numeric_format = {
'F0Ref': FLOAT_FORMAT, 'Fs': FLOAT_FORMAT, 'BWInst': FLOAT_FORMAT}
_collections_tags = {
'AddedParameters': {'array': False, 'child_tag': 'AddedParameters'}}
# descriptors
Identifier = StringDescriptor(
'Identifier', _required, strict=DEFAULT_STRICT,
docstring='String that uniquely identifies this CRSD data channel.') # type: str
RefVectorIndex = IntegerDescriptor(
'RefVectorIndex', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Index of the reference vector for the channel.') # type: int
RefFreqFixed = BooleanDescriptor(
'RefFreqFixed', _required, strict=DEFAULT_STRICT,
docstring='Flag to indicate a constant reference frequency is used for'
' the channel.') # type: bool
FrcvFixed = BooleanDescriptor(
'FrcvFixed', _required, strict=DEFAULT_STRICT,
docstring='Flag to indicate a constant receive band is saved for the'
' channel.') # type: bool
DemodFixed = BooleanDescriptor(
'DemodFixed', _required, strict=DEFAULT_STRICT,
docstring='Flag to indicate a constant demodulation is used for the'
' channel.') # type: bool
F0Ref = FloatDescriptor(
'F0Ref', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Reference frequency for the reference signal vector.') # type: float
Fs = FloatDescriptor(
'Fs', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Fast time sample rate for the signal array.') # type: float
BWInst = FloatDescriptor(
'BWInst', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Nominal instantaneous bandwidth for the channel.') # type: float
RcvPol = StringEnumDescriptor(
'RcvPol', POLARIZATION_TYPE, _required, strict=DEFAULT_STRICT,
docstring='Receive polarization for the signal data processed to form the signal array.'
' Parameter describes the E-Field orientation of the signal.') # type: str
SignalNormal = BooleanDescriptor(
'SignalNormal', _required, strict=DEFAULT_STRICT,
docstring='Flag to indicate when all signal array vectors are normal.'
' Included if and only if the SIGNAL PVP is also included.') # type: Union[None, bool]
RcvAntenna = SerializableDescriptor(
'RcvAntenna', RcvAntennaType, _required, strict=DEFAULT_STRICT,
docstring='Antenna Phase Center and Antenna Pattern identifiers for the receive antenna'
' used to collect and form the signal array data.') # type: Union[None, RcvAntennaType]
SignalRefLevel = SerializableDescriptor(
'SignalRefLevel', SignalRefLevelType, _required, strict=DEFAULT_STRICT,
docstring='Signal power levels for a received CW signal with f = f_0_REF and polarization'
' matched to RcvPol of the channel.') # type: Union[None, SignalRefLevelType]
NoiseLevel = SerializableDescriptor(
'NoiseLevel', NoiseLevelType, _required, strict=DEFAULT_STRICT,
docstring='Thermal noise level in the CRSD signal vector for f_IC(v,t) ='
' f_0(v_CH_REF).') # type: Union[None, NoiseLevelType]
AddedParameters = ParametersDescriptor(
'AddedParameters', _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Additional free form parameters.') # type: Union[None, ParametersCollection]
SARImaging = SerializableDescriptor(
'SARImaging', SARImagingType, _required, strict=DEFAULT_STRICT,
docstring='Structure included for all SAR imaging collections.') # type: Union[None, SARImagingType]
def __init__(self, Identifier=None, RefVectorIndex=None, RefFreqFixed=None,
FrcvFixed=None, DemodFixed=None, F0Ref=None, Fs=None, BWInst=None,
RcvPol=None, SignalNormal=None, RcvAntenna=None, SignalRefLevel=None,
NoiseLevel=None, AddedParameters=None, SARImaging=None, **kwargs):
"""
Parameters
----------
Identifier : str
RefVectorIndex : int
RefFreqFixed : bool
FrcvFixed : bool
DemodFixed : bool
F0Ref : float
Fs : float
BWInst : float
RcvPol : str
SignalNormal : None|bool
RcvAntenna : None|RcvAntennaType
SignalRefLevel : None|SignalRefLevelType
NoiseLevel : None|NoiseLevelType
AddedParameters : None|ParametersCollection
SARImaging : None|SARImagingType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Identifier = Identifier
self.RefVectorIndex = RefVectorIndex
self.RefFreqFixed = RefFreqFixed
self.FrcvFixed = FrcvFixed
self.DemodFixed = DemodFixed
self.F0Ref = F0Ref
self.Fs = Fs
self.BWInst = BWInst
self.RcvPol = RcvPol
self.SignalNormal = SignalNormal
self.RcvAntenna = RcvAntenna
self.SignalRefLevel = SignalRefLevel
self.NoiseLevel = NoiseLevel
self.AddedParameters = AddedParameters
self.SARImaging = SARImaging
super(ChannelParametersType, self).__init__(**kwargs)
class ChannelType(Serializable):
"""
The channel definition.
"""
_fields = ('RefChId', 'Parameters')
_required = _fields
_collections_tags = {'Parameters': {'array': False, 'child_tag': 'Parameters'}}
# descriptors
RefChId = StringDescriptor(
'RefChId', _required, strict=DEFAULT_STRICT,
docstring='Channel ID for the Reference Channel in the '
'product.') # type: str
Parameters = SerializableListDescriptor(
'Parameters', ChannelParametersType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Parameter Set that describes a CRSD data '
'channel.') # type: List[ChannelParametersType]
def __init__(self, RefChId=None, Parameters=None, **kwargs):
"""
Parameters
----------
RefChId : str
Parameters : List[ChannelParametersType]
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.RefChId = RefChId
self.Parameters = Parameters
super(ChannelType, self).__init__(**kwargs)
| 14,820 | 38.734584 | 111 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/__init__.py | """
This sub-package is a work in progress to encapsulate the pythonic object-oriented CRSD 1.0.0 structure.
"""
__classification__ = "UNCLASSIFIED"
| 147 | 23.666667 | 101 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/ReferenceGeometry.py | """
The reference geometry parameters definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
import numpy
from sarpy.io.xml.base import Serializable, parse_serializable
from sarpy.io.xml.descriptors import FloatDescriptor, StringEnumDescriptor, \
SerializableDescriptor
from sarpy.io.complex.sicd_elements.blocks import XYZType, LatLonHAEType
from sarpy.geometry.geocoords import geodetic_to_ecf, ecf_to_geodetic
from .base import DEFAULT_STRICT, FLOAT_FORMAT
class CRPType(Serializable):
"""
The CRP position for the reference vector of the reference channel.
"""
_fields = ('ECF', 'LLH')
_required = _fields
_ECF = None
_LLH = None
def __init__(self, ECF=None, LLH=None, **kwargs):
"""
To avoid the potential of inconsistent state, ECF and LLH are not simultaneously
used. If ECF is provided, it is used to populate LLH. Otherwise, if LLH is provided,
then it is used the populate ECF.
Parameters
----------
ECF : XYZType|numpy.ndarray|list|tuple
LLH : LatLonHAEType|numpy.ndarray|list|tuple
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
if ECF is not None:
self.ECF = ECF
elif LLH is not None:
self.LLH = LLH
super(CRPType, self).__init__(**kwargs)
@property
def ECF(self): # type: () -> XYZType
"""
XYZType: The CRP Position ECF coordinates.
"""
return self._ECF
@ECF.setter
def ECF(self, value):
if value is not None:
self._ECF = parse_serializable(value, 'ECF', self, XYZType)
self._LLH = LatLonHAEType.from_array(ecf_to_geodetic(self._ECF.get_array()))
@property
def LLH(self): # type: () -> LatLonHAEType
"""
LatLonHAEType: The CRP Position in WGS-84 coordinates.
"""
return self._LLH
@LLH.setter
def LLH(self, value):
if value is not None:
self._LLH = parse_serializable(value, 'LLH', self, LatLonHAEType)
self._ECF = XYZType.from_array(geodetic_to_ecf(self._LLH.get_array(order='LAT')))
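# Illustrative usage sketch (not part of the module): setting ECF also
# populates the derived LLH representation, keeping the two consistent.
# The coordinates below are assumptions chosen for demonstration.
#
#     >>> crp = CRPType(ECF=[6378137.0, 0.0, 0.0])  # a point on the WGS-84 equator
#     >>> crp.LLH.Lat, crp.LLH.Lon  # derived geodetic coordinates, approximately (0.0, 0.0)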
class RcvParametersType(Serializable):
"""
The receive parameters geometry implementation.
"""
_fields = (
'RcvTime', 'RcvPos', 'RcvVel', 'SideOfTrack', 'SlantRange', 'GroundRange',
'DopplerConeAngle', 'GrazeAngle', 'IncidenceAngle', 'AzimuthAngle')
_required = _fields
_numeric_format = {
'SlantRange': FLOAT_FORMAT, 'GroundRange': FLOAT_FORMAT, 'DopplerConeAngle': FLOAT_FORMAT,
'GrazeAngle': FLOAT_FORMAT, 'IncidenceAngle': FLOAT_FORMAT, 'AzimuthAngle': FLOAT_FORMAT}
# descriptors
RcvTime = FloatDescriptor(
'RcvTime', _required, strict=DEFAULT_STRICT,
docstring='Receive time for the first sample for the reference vector.') # type: float
RcvPos = SerializableDescriptor(
'RcvPos', XYZType, _required, strict=DEFAULT_STRICT,
docstring='APC position in ECF coordinates.') # type: XYZType
RcvVel = SerializableDescriptor(
'RcvVel', XYZType, _required, strict=DEFAULT_STRICT,
docstring='APC velocity in ECF coordinates.') # type: XYZType
SideOfTrack = StringEnumDescriptor(
'SideOfTrack', ('L', 'R'), _required, strict=DEFAULT_STRICT,
docstring='Side of Track parameter for the collection.') # type: str
SlantRange = FloatDescriptor(
'SlantRange', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Slant range from the APC to the CRP.') # type: float
GroundRange = FloatDescriptor(
'GroundRange', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Ground range from the APC nadir to the CRP.') # type: float
DopplerConeAngle = FloatDescriptor(
'DopplerConeAngle', _required, strict=DEFAULT_STRICT, bounds=(0, 180),
docstring='Doppler Cone Angle, in degrees, between APC velocity and the CRP Line of '
'Sight (LOS).') # type: float
GrazeAngle = FloatDescriptor(
'GrazeAngle', _required, strict=DEFAULT_STRICT, bounds=(0, 90),
docstring='Grazing angle for the APC to CRP LOS and the Earth Tangent '
'Plane (ETP) at the CRP.') # type: float
IncidenceAngle = FloatDescriptor(
'IncidenceAngle', _required, strict=DEFAULT_STRICT, bounds=(0, 90),
docstring='Incidence angle for the APC to CRP LOS and the Earth Tangent '
'Plane (ETP) at the CRP.') # type: float
AzimuthAngle = FloatDescriptor(
'AzimuthAngle', _required, strict=DEFAULT_STRICT, bounds=(0, 360),
docstring='Angle from north to the line from the CRP to the APC ETP '
'Nadir (i.e. North to +GPX). Measured clockwise from North '
'toward East.') # type: float
def __init__(self, RcvTime=None, RcvPos=None, RcvVel=None,
SideOfTrack=None, SlantRange=None, GroundRange=None,
DopplerConeAngle=None, GrazeAngle=None, IncidenceAngle=None,
AzimuthAngle=None, **kwargs):
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.RcvTime = RcvTime
self.RcvPos = RcvPos
self.RcvVel = RcvVel
self.SideOfTrack = SideOfTrack
self.SlantRange = SlantRange
self.GroundRange = GroundRange
self.DopplerConeAngle = DopplerConeAngle
self.GrazeAngle = GrazeAngle
self.IncidenceAngle = IncidenceAngle
self.AzimuthAngle = AzimuthAngle
super(RcvParametersType, self).__init__(**kwargs)
@property
def look(self):
"""
int: An integer version of `SideOfTrack`:
* None if `SideOfTrack` is not defined
* -1 if SideOfTrack == 'R'
* 1 if SideOfTrack == 'L'
"""
if self.SideOfTrack is None:
return None
return -1 if self.SideOfTrack == 'R' else 1
class ReferenceGeometryType(Serializable):
"""
Parameters that describe the collection geometry for the reference vector
of the reference channel.
"""
_fields = ('CRP', 'RcvParameters')
_required = _fields
# descriptors
CRP = SerializableDescriptor(
'CRP', CRPType, _required, strict=DEFAULT_STRICT,
docstring='The Collection Reference Point (CRP) used for computing the'
' geometry parameters.') # type: CRPType
RcvParameters = SerializableDescriptor(
'RcvParameters', RcvParametersType, _required, strict=DEFAULT_STRICT,
docstring='Parameters computed for the receive APC position.') # type: RcvParametersType
def __init__(self, CRP=None, RcvParameters=None, **kwargs):
"""
Parameters
----------
CRP : CRPType
RcvParameters : RcvParametersType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.CRP = CRP
self.RcvParameters = RcvParameters
super(ReferenceGeometryType, self).__init__(**kwargs)
| 7,442 | 35.846535 | 98 | py |
sarpy | sarpy-master/sarpy/io/received/crsd1_elements/Global.py | """
The Global type definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Michael Stewart, Valkyrie")
from typing import Union
import numpy
from sarpy.io.xml.base import Serializable, Arrayable
from sarpy.io.xml.descriptors import FloatDescriptor, DateTimeDescriptor, \
SerializableDescriptor
from sarpy.io.phase_history.cphd1_elements.Global import FxBandType, TropoParametersType, \
IonoParametersType
from .base import DEFAULT_STRICT, FLOAT_FORMAT
class TimelineType(Serializable):
"""
Parameters that describe the collection times for the data contained in the product.
"""
_fields = ('CollectionRefTime', 'RcvTime1', 'RcvTime2')
_required = ('CollectionRefTime', 'RcvTime1', 'RcvTime2')
_numeric_format = {'RcvTime1': FLOAT_FORMAT, 'RcvTime2': FLOAT_FORMAT}
# descriptors
CollectionRefTime = DateTimeDescriptor(
'CollectionRefTime', _required, strict=DEFAULT_STRICT, numpy_datetime_units='us',
docstring='Collection Reference Time (t_CRT). Time reference used for all receive times'
' and all transmit times. All times are specified in seconds relative to t_CRT'
' (i.e., t_CRT is slow time t = 0).') # type: numpy.datetime64
RcvTime1 = FloatDescriptor(
'RcvTime1', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Earliest RcvTime value for any signal vector in the product.'
' Time relative to Collection Reference Time.') # type: float
RcvTime2 = FloatDescriptor(
'RcvTime2', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Latest RcvTime value for any signal vector in the product.'
' Time relative to Collection Reference Time.') # type: float
def __init__(self, CollectionRefTime=None, RcvTime1=None, RcvTime2=None, **kwargs):
"""
Parameters
----------
CollectionRefTime : numpy.datetime64|datetime|date|str
RcvTime1 : float
RcvTime2 : float
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.CollectionRefTime = CollectionRefTime
self.RcvTime1 = RcvTime1
self.RcvTime2 = RcvTime2
super(TimelineType, self).__init__(**kwargs)
class FrcvBandType(Serializable, Arrayable):
"""
Parameters that describe the received frequency limits for the signal array(s) contained in the product.
"""
_fields = ('FrcvMin', 'FrcvMax')
_required = _fields
_numeric_format = {fld: FLOAT_FORMAT for fld in _fields}
# descriptors
FrcvMin = FloatDescriptor(
'FrcvMin', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Minimum frcv_1 PVP value for any signal vector in the product.') # type: float
FrcvMax = FloatDescriptor(
'FrcvMax', _required, strict=DEFAULT_STRICT, bounds=(0, None),
docstring='Maximum frcv_2 PVP value for any signal vector in the product.') # type: float
def __init__(self, FrcvMin=None, FrcvMax=None, **kwargs):
"""
Parameters
----------
FrcvMin : float
FrcvMax : float
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.FrcvMin = FrcvMin
self.FrcvMax = FrcvMax
super(FrcvBandType, self).__init__(**kwargs)
def get_array(self, dtype=numpy.float64):
return numpy.array([self.FrcvMin, self.FrcvMax], dtype=dtype)
@classmethod
def from_array(cls, array):
# type: (Union[numpy.ndarray, list, tuple]) -> FrcvBandType
if array is None:
return None
if isinstance(array, (numpy.ndarray, list, tuple)):
if len(array) < 2:
raise ValueError('Expected array to be of length at least 2, and received {}'.format(array))
return cls(FrcvMin=array[0], FrcvMax=array[1])
raise ValueError('Expected array to be numpy.ndarray, list, or tuple, got {}'.format(type(array)))
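# Illustrative usage sketch: round trip through the array representation,
# with assumed X-band frequency limits.
#
#     >>> band = FrcvBandType(FrcvMin=9.5e9, FrcvMax=10.5e9)
#     >>> band.get_array()  # -> array([9.5e+09, 1.05e+10])
#     >>> FrcvBandType.from_array([9.5e9, 10.5e9]).FrcvMax  # -> 10500000000.0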
class GlobalType(Serializable):
"""
The Global type definition.
"""
_fields = ('Timeline', 'FrcvBand', 'FxBand', 'TropoParameters', 'IonoParameters')
_required = ('Timeline', 'FrcvBand')
# descriptors
Timeline = SerializableDescriptor(
'Timeline', TimelineType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe the collection times for the data contained '
'in the product') # type: TimelineType
FrcvBand = SerializableDescriptor(
'FrcvBand', FrcvBandType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe the Frcv frequency limits for the signal array(s) '
'contained in the product.') # type: FrcvBandType
FxBand = SerializableDescriptor(
'FxBand', FxBandType, _required, strict=DEFAULT_STRICT,
docstring='Parameters that describe the FX frequency limits for the signal array(s) '
'contained in the product.') # type: FxBandType
TropoParameters = SerializableDescriptor(
'TropoParameters', TropoParametersType, _required, strict=DEFAULT_STRICT,
docstring='Parameters used to compute the propagation delay due to the '
'troposphere.') # type: Union[None, TropoParametersType]
IonoParameters = SerializableDescriptor(
'IonoParameters', IonoParametersType, _required, strict=DEFAULT_STRICT,
docstring='Parameters used to compute propagation effects due to the '
'ionosphere.') # type: Union[None, IonoParametersType]
def __init__(self, Timeline=None, FrcvBand=None, FxBand=None,
TropoParameters=None, IonoParameters=None, **kwargs):
"""
Parameters
----------
Timeline : TimelineType
FrcvBand : FrcvBandType|numpy.ndarray|list|tuple
FxBand : None|FxBandType|numpy.ndarray|list|tuple
TropoParameters : None|TropoParametersType
IonoParameters : None|IonoParametersType
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Timeline = Timeline
self.FrcvBand = FrcvBand
self.FxBand = FxBand
self.TropoParameters = TropoParameters
self.IonoParameters = IonoParameters
super(GlobalType, self).__init__(**kwargs)
| 6,689 | 39.05988 | 108 | py |
sarpy | sarpy-master/sarpy/io/received/crsd_schema/__init__.py | """
This package contains the CRSD schema
"""
__classification__ = 'UNCLASSIFIED'
__author__ = "Thomas McCullough"
import os
import re
from typing import List, Dict, Tuple, Union
_CRSD_DEFAULT_TUPLE = (1, 0, 0)
_the_directory = os.path.split(__file__)[0]
urn_mapping = {
'urn:CRSD:1.0.0': {
'version': '1.0.0',
'release': '1.0.0',
'date': '2021-06-12T00:00:00Z',
'schema': os.path.join(_the_directory, 'CRSD_schema_V1.0.0_2021_06_12.xsd')},
}
WRITABLE_VERSIONS = ('1.0.0', )
# validate the defined paths
for key, entry in urn_mapping.items():
schema_path = entry.get('schema', None)
if schema_path is not None and not os.path.exists(schema_path):
raise ValueError('`{}` has nonexistent schema path {}'.format(key, schema_path))
def get_default_tuple() -> Tuple[int, int, int]:
"""
Get the default CRSD version tuple.
Returns
-------
Tuple[int, int, int]
"""
return _CRSD_DEFAULT_TUPLE
def get_default_version_string() -> str:
"""
Get the default CRSD version string.
Returns
-------
str
"""
return '{}.{}.{}'.format(*_CRSD_DEFAULT_TUPLE)
def get_namespace(version: Union[str, Tuple[int, int, int]]) -> str:
if isinstance(version, (list, tuple)):
version = '{}.{}.{}'.format(version[0], version[1], version[2])
return 'http://api.nsgreg.nga.mil/schema/crsd/{}'.format(version)
def check_urn(urn_string: str) -> str:
"""
Checks that the urn string follows the correct pattern.
Parameters
----------
urn_string : str
Raises
------
ValueError
This raises an exception for a poorly formed CRSD urn.
"""
if not isinstance(urn_string, str):
raise TypeError(
'Expected a urn input of string type, got type {}'.format(type(urn_string)))
the_match = re.match(r'^\d\.\d\.\d$', urn_string)
if the_match is not None:
urn_string = 'urn:CRSD:{}'.format(urn_string)
the_match = re.match(r'^urn:CRSD:\d\.\d\.\d$', urn_string)
if the_match is None:
raise ValueError(
'Input provided as `{}`,\nbut should be of the form '
'`urn:CRSD:<major>.<minor>.<release>`'.format(urn_string))
return urn_string
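# Illustrative usage sketch: a bare version string is promoted to the full urn,
# while a well formed urn passes through unchanged.
#
#     >>> check_urn('1.0.0')
#     'urn:CRSD:1.0.0'
#     >>> check_urn('urn:CRSD:1.0.0')
#     'urn:CRSD:1.0.0'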
def get_urn_details(urn_string: str) -> Dict[str, str]:
"""
Gets the associated details for the given CRSD urn, or raise an exception for
poorly formatted or unrecognized urn.
Parameters
----------
urn_string : str
Returns
-------
Dict[str, str]
"""
urn_string = check_urn(urn_string)
out = urn_mapping.get(urn_string, None)
if out is None:
raise KeyError(
'Got correctly formatted, but unmapped CRSD urn {}.'.format(urn_string))
return out
def get_schema_path(the_urn: str) -> str:
"""
Gets the path to the proper schema file for the given urn.
Parameters
----------
the_urn : str
Returns
-------
str
"""
result = get_urn_details(the_urn)
return result.get('schema', None)
def get_versions() -> List[str]:
"""
Gets a list of recognized CRSD urns.
Returns
-------
List[str]
"""
return list(sorted(urn_mapping.keys()))
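# Illustrative usage sketch: enumerate the known urns and resolve their schemas.
#
#     >>> for urn in get_versions():
#     ...     print(urn, get_schema_path(urn))  # e.g. urn:CRSD:1.0.0 -> .../CRSD_schema_V1.0.0_2021_06_12.xsd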
| 3,244 | 21.692308 | 88 | py |
sarpy | sarpy-master/sarpy/io/general/base.py | """
The basic definitions for file-like reading and writing. This is generally
centered on image-like file efforts, and array-like interaction with image data.
This module completely revamped in version 1.3.0 for data segment usage.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import os
import logging
from typing import Union, List, Tuple, Sequence, Optional, Callable
from importlib import import_module
import pkgutil
import numpy
from sarpy.compliance import SarpyError
from sarpy.io.general.format_function import FormatFunction
from sarpy.io.general.data_segment import DataSegment, extract_string_from_subscript, \
NumpyArraySegment
logger = logging.getLogger(__name__)
READER_TYPES = ('SICD', 'SIDD', 'CPHD', 'CRSD', 'OTHER')
"""
The reader_type enum
"""
class SarpyIOError(SarpyError):
"""A custom exception class for discovered input/output errors."""
############
# module walking to register openers
def check_for_openers(start_package: str, register_method: Callable) -> None:
"""
Walks the package, and registers the discovered openers. That is, the modules
with an :meth:`is_a` method.
Parameters
----------
start_package : str
register_method : Callable
"""
module = import_module(start_package)
for details in pkgutil.walk_packages(module.__path__, start_package+'.'):
_, module_name, is_pkg = details
if is_pkg:
# don't bother checking for packages
continue
sub_module = import_module(module_name)
if hasattr(sub_module, 'is_a'):
register_method(sub_module.is_a)
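# Illustrative usage sketch (hypothetical registry): any module in the walked
# package that exposes an `is_a` callable gets registered.
#
#     >>> registry = []
#     >>> check_for_openers('sarpy.io.general', registry.append)
#     >>> # registry now holds the `is_a` callables discovered in the package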
#############
# reader implementation for array like data
class BaseReader(object):
"""
The basic reader definition, using array-like data fetching.
**Changed in version 1.3.0**
"""
__slots__ = (
'_data_segment', '_reader_type', '_closed', '_close_segments',
'_delete_temp_files')
def __init__(
self,
data_segment: Union[None, DataSegment, Sequence[DataSegment]],
reader_type: str = 'OTHER',
close_segments: bool = True,
delete_files: Optional[Union[str, Sequence[str]]] = None):
"""
Parameters
----------
data_segment : None|DataSegment|Sequence[DataSegment]
None is a feasible value for extension tricks, ultimately the data_segments
must be defined on initialization (by some extension).
reader_type : str
close_segments : bool
Call segment.close() for each data segment on reader.close()?
delete_files : None|str|Sequence[str]
Any temp files which should be cleaned up on reader.close()?
This will occur after closing segments.
"""
# NB: it's entirely possible under multiple inheritance (class extends
# two classes each of which extends BaseReader), that this initializer
# has already been called. Don't override appropriate values, in that case.
# override regardless here
reader_type = reader_type.upper()
if reader_type not in READER_TYPES:
logger.info(
'reader_type has value {}, while it is generally expected to be '
'one of {}'.format(reader_type, READER_TYPES))
self._reader_type = reader_type
try:
_ = self._closed
# we didn't get an attribute error, so something has already defined it
except AttributeError:
self._closed = False
try:
_ = self._close_segments
except AttributeError:
self._close_segments = close_segments
try:
_ = self._delete_temp_files
except AttributeError:
self._delete_temp_files = [] # type: List[str]
if delete_files is None:
pass
elif isinstance(delete_files, str):
if delete_files not in self._delete_temp_files:
self._delete_temp_files.append(delete_files)
else:
for entry in delete_files:
if entry not in self._delete_temp_files:
self._delete_temp_files.append(entry)
try:
_ = self._data_segment
# we didn't get an attribute error, so something has already defined it
self._set_data_segment(data_segment)
# NB: this will raise a ValueError upon repeated definition attempts.
except AttributeError:
self._data_segment = None
self._set_data_segment(data_segment)
@property
def file_name(self) -> Optional[str]:
"""
None|str: Defined as a convenience property.
"""
return None
@property
def reader_type(self) -> str:
"""
str: A descriptive string for the type of reader
"""
return self._reader_type
@property
def data_segment(self) -> Union[DataSegment, Tuple[DataSegment, ...]]:
"""
DataSegment|Tuple[DataSegment, ...]: The data segment collection.
"""
return self._data_segment
def _set_data_segment(
self,
data_segment: Union[DataSegment, Sequence[DataSegment]]) -> None:
"""
Sets the data segment collection. This can only be performed once.
Parameters
----------
data_segment : DataSegment|Sequence[DataSegment]
Returns
-------
None
"""
if data_segment is None:
return # do nothing
if self._data_segment is not None:
raise ValueError('data_segment is read only, once set.')
if isinstance(data_segment, DataSegment):
data_segment = [data_segment, ]
if not isinstance(data_segment, Sequence):
raise TypeError('data_segment must be an instance of DataSegment or a sequence of such instances')
for entry in data_segment:
if not isinstance(entry, DataSegment):
raise TypeError(
'Requires all data segment entries to be an instance of DataSegment.\n\t'
'Got type {}'.format(type(entry)))
if not entry.mode == 'r':
raise ValueError('Each data segment must have mode="r"')
if len(data_segment) == 1:
self._data_segment = data_segment[0]
else:
self._data_segment = tuple(data_segment)
@property
def image_count(self) -> int:
"""
int: The number of images/data segments from which to read.
"""
if isinstance(self.data_segment, DataSegment):
return 1
else:
return len(self.data_segment)
def get_data_segment_as_tuple(self) -> Tuple[DataSegment, ...]:
"""
Get the data segment collection as a tuple, to avoid the need for redundant
checking issues.
Returns
-------
Tuple[DataSegment, ...]
"""
return (self.data_segment, ) if self.image_count == 1 else self._data_segment
@property
def data_size(self) -> Union[Tuple[int, ...], Tuple[Tuple[int, ...]]]:
"""
Tuple[int, ...]|Tuple[Tuple[int, ...], ...]: the output/formatted data
size(s) of the data segment(s). If there is a single data segment, then
this will be `Tuple[int, ...]`, otherwise it will be
`Tuple[Tuple[int, ...], ...]`.
"""
return self.data_segment.formatted_shape if self.image_count == 1 else \
tuple(entry.formatted_shape for entry in self.data_segment)
def get_data_size_as_tuple(self) -> Tuple[Tuple[int, ...], ...]:
"""
Get the data size collection as a tuple of tuples, to avoid the need
for redundant checking issues.
Returns
-------
Tuple[Tuple[int, ...], ...]
"""
return (self.data_size, ) if self.image_count == 1 else self.data_size
@property
def raw_data_size(self) -> Union[Tuple[int, ...], Tuple[Tuple[int, ...]]]:
"""
Tuple[int, ...]|Tuple[Tuple[int, ...], ...]: the raw data size(s) of the
data segment(s). If there is a single data segment, then this will be
`Tuple[int, ...]`, otherwise it will be `Tuple[Tuple[int, ...], ...]`.
"""
return self.data_segment.raw_shape if self.image_count == 1 else \
tuple(entry.raw_shape for entry in self.data_segment)
def get_raw_data_size_as_tuple(self) -> Tuple[Tuple[int, ...], ...]:
"""
Get the raw data size collection as a tuple of tuples, to avoid the need
for redundant checking issues.
Returns
-------
Tuple[Tuple[int, ...], ...]
"""
return (self.raw_data_size, ) if self.image_count == 1 else self.raw_data_size
@property
def files_to_delete_on_close(self) -> List[str]:
"""
List[str]: A collection of files to delete on the close operation.
"""
return self._delete_temp_files
@property
def closed(self) -> bool:
"""
bool: Is the reader closed? Reading from a closed reader will raise a ValueError.
"""
return self._closed
def _validate_closed(self):
if not hasattr(self, '_closed') or self._closed:
raise ValueError('I/O operation of closed reader')
def read_chip(
self,
*ranges: Sequence[Union[None, int, Tuple[int, ...], slice]],
index: int = 0,
squeeze: bool = True) -> numpy.ndarray:
"""
This is identical to :meth:`read`, and presented for backwards compatibility.
Parameters
----------
ranges : Sequence[Union[None, int, Tuple[int, ...], slice]]
index : int
squeeze : bool
Returns
-------
numpy.ndarray
See Also
--------
:meth:`read`.
"""
return self.__call__(*ranges, index=index, raw=False, squeeze=squeeze)
def read(
self,
*ranges: Union[None, int, Tuple[int, ...], slice],
index: int = 0,
squeeze: bool = True) -> numpy.ndarray:
"""
Read formatted data from the given data segment. Note this is an alias to the
:meth:`__call__` called as
:code:`reader(*ranges, index=index, raw=False, squeeze=squeeze)`.
Parameters
----------
ranges : Sequence[Union[None, int, Tuple[int, ...], slice]]
The slice definition appropriate for `data_segment[index].read()` usage.
index : int
The data_segment index. This is ignored if `image_count == 1`.
squeeze : bool
Squeeze length 1 dimensions out of the shape of the return array?
Returns
-------
numpy.ndarray
See Also
--------
See :meth:`sarpy.io.general.data_segment.DataSegment.read`.
"""
return self.__call__(*ranges, index=index, raw=False, squeeze=squeeze)
def read_raw(
self,
*ranges: Union[None, int, Tuple[int, ...], slice],
index: int = 0,
squeeze: bool = True) -> numpy.ndarray:
"""
Read raw data from the given data segment. Note this is an alias to the
:meth:`__call__` called as
:code:`reader(*ranges, index=index, raw=True, squeeze=squeeze)`.
Parameters
----------
ranges : Sequence[Union[None, int, Tuple[int, ...], slice]]
The slice definition appropriate for `data_segment[index].read()` usage.
index : int
The data_segment index. This is ignored if `image_count == 1`.
squeeze : bool
Squeeze length 1 dimensions out of the shape of the return array?
Returns
-------
numpy.ndarray
See Also
--------
See :meth:`sarpy.io.general.data_segment.DataSegment.read_raw`.
"""
return self.__call__(*ranges, index=index, raw=True, squeeze=squeeze)
def __call__(
self,
*ranges: Union[None, int, Tuple[int, ...], slice],
index: int = 0,
raw: bool = False,
squeeze: bool = True) -> numpy.ndarray:
self._validate_closed()
if len(ranges) == 0:
subscript = None
else:
subscript = []
for rng in ranges:
if rng is None:
subscript.append(slice(None, None, 1))
elif isinstance(rng, int):
subscript.append(slice(rng))
elif isinstance(rng, tuple):
subscript.append(slice(*rng))
elif isinstance(rng, slice) or rng is Ellipsis:
subscript.append(rng)
else:
raise TypeError('Got unexpected type `{}` value for range/slice'.format(type(rng)))
if isinstance(self._data_segment, tuple):
ds = self.data_segment[index]
else:
ds = self.data_segment
if raw:
return ds.read_raw(subscript, squeeze=squeeze)
else:
return ds.read(subscript, squeeze=squeeze)
def __getitem__(self, subscript) -> numpy.ndarray:
# TODO: document the str usage and index determination
subscript, string_entries = extract_string_from_subscript(subscript)
if not isinstance(subscript, (tuple, list)):
subscript = (subscript, )
raw = ('raw' in string_entries)
squeeze = ('nosqueeze' not in string_entries)
if isinstance(subscript[-1], int):
the_index = subscript[-1]
if -self.image_count < the_index < self.image_count:
return self.__call__(*subscript[:-1], index=subscript[-1], raw=raw, squeeze=squeeze)
return self.__call__(*subscript, index=0, raw=raw, squeeze=squeeze)
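# Illustrative sketch of the subscript conventions handled above, for a
# hypothetical `reader` instance: a trailing integer selects the image index,
# and the string entries 'raw'/'nosqueeze' toggle raw reading and squeezing.
#
#     >>> data = reader[0:10, 0:10]               # formatted data from image 0
#     >>> data = reader[0:10, 0:10, 1]            # formatted data from image 1
#     >>> data = reader[0:10, 0:10, 'raw']        # raw (unformatted) data
#     >>> data = reader[0:10, 0:10, 'nosqueeze']  # keep length 1 dimensions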
def close(self) -> None:
"""
This should perform any necessary clean-up operations, like closing
open file handles, deleting any temp files, etc.
"""
if not hasattr(self, '_closed') or self._closed:
return
# close all the segments
if self._close_segments and self._data_segment is not None:
if isinstance(self._data_segment, DataSegment):
self._data_segment.close()
else:
for entry in self._data_segment:
entry.close()
self._data_segment = None
# delete temp files
while len(self._delete_temp_files) > 0:
filename = self._delete_temp_files.pop()
try:
os.remove(filename)
except FileNotFoundError:
pass
except Exception as e:
logger.error(
'Reader attempted to delete temp file {},\n\t'
'but got error {}'.format(filename, e))
self._closed = True
def __del__(self):
# NB: this is called when the object is marked for garbage collection
# (i.e. reference count goes to 0), and the order in which this happens
# may be unreliable
self.close()
class FlatReader(BaseReader):
"""
Class for passing a numpy array straight through as a reader.
Changed in version 1.3.0
"""
def __init__(
self,
underlying_array: numpy.ndarray,
reader_type: str = 'OTHER',
formatted_dtype: Optional[Union[str, numpy.dtype]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
close_segments: bool = True):
"""
Parameters
----------
underlying_array : numpy.ndarray
reader_type : str
formatted_dtype : None|str|numpy.dtype
formatted_shape : None|Tuple[int, ...]
reverse_axes : None|Sequence[int]
transpose_axes : None|Tuple[int, ...]
format_function : None|FormatFunction
close_segments : bool
"""
data_segment = NumpyArraySegment(
underlying_array, formatted_dtype=formatted_dtype, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r')
BaseReader.__init__(
self, data_segment, reader_type=reader_type, close_segments=close_segments)
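# Illustrative usage sketch: wrap an in-memory complex array as a reader.
#
#     >>> import numpy
#     >>> arr = numpy.zeros((5, 4), dtype='complex64')
#     >>> reader = FlatReader(arr)
#     >>> reader.data_size  # -> (5, 4)
#     >>> chip = reader[2:4, 1:3]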
class AggregateReader(BaseReader):
"""
Aggregate multiple readers into a single reader instance. This default
aggregate implementation will not preserve any other metadata structures.
"""
__slots__ = ('_readers', '_index_mapping', '_close_readers')
def __init__(
self,
readers: Sequence[BaseReader],
reader_type: str = "OTHER",
close_readers: bool = False):
"""
Parameters
----------
readers : Sequence[BaseReader]
The readers.
reader_type : str
The reader type string.
close_readers : bool
Close all readers on this reader close?
"""
self._close_readers = close_readers
self._index_mapping = None
self._readers = self._validate_readers(readers)
data_segments = self._define_index_mapping()
# NB: close_segments is and should be handled by the constituent readers
BaseReader.__init__(
self, data_segment=data_segments, reader_type=reader_type, close_segments=False)
@staticmethod
def _validate_readers(readers: Sequence[BaseReader]) -> Tuple[BaseReader]:
"""
Validate the input reader/file collection.
Parameters
----------
readers : Sequence[BaseReader]
Returns
-------
Tuple[BaseReader]
"""
if not isinstance(readers, Sequence):
raise TypeError('input argument must be a sequence of readers. Got type {}'.format(type(readers)))
# validate each entry
the_readers = []
for i, entry in enumerate(readers):
if not isinstance(entry, BaseReader):
raise TypeError(
'All elements of the input argument must be reader instances. '
'Entry {} is of type {}'.format(i, type(entry)))
the_readers.append(entry)
return tuple(the_readers)
def _define_index_mapping(self) -> List[DataSegment]:
"""
Define the index mapping.
Returns
-------
List[DataSegment]
"""
# prepare the index mapping workspace
index_mapping = []
segments = []
for i, reader in enumerate(self._readers):
for j, segment in enumerate(reader.get_data_segment_as_tuple()):
segments.append(segment)
index_mapping.append((i, j))
self._index_mapping = tuple(index_mapping)
return segments
@property
def index_mapping(self) -> Tuple[Tuple[int, int]]:
"""
Tuple[Tuple[int, int]]: The index mapping of the form (reader index, segment index in reader).
"""
return self._index_mapping
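# Illustrative sketch (hypothetical readers r0 and r1): if r0 exposes two data
# segments and r1 exposes one, then the aggregate mapping reads
#
#     >>> agg = AggregateReader([r0, r1])
#     >>> agg.index_mapping  # -> ((0, 0), (0, 1), (1, 0))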
def close(self) -> None:
"""
This should perform any necessary clean-up operations, like closing
open file handles, deleting any temp files, etc.
"""
if not hasattr(self, '_closed') or self._closed:
return
BaseReader.close(self)
if self._close_readers and self._readers is not None:
for entry in self._readers:
entry.close()
self._readers = None
#############
# writer implementation for array like data
class BaseWriter(object):
"""
Writer definition, using array-like data writing.
Introduced in version 1.3.0
"""
__slots__ = ('_data_segment', '_closed')
def __init__(
self,
data_segment: Union[DataSegment, Sequence[DataSegment]]):
self._closed = False
if isinstance(data_segment, DataSegment):
data_segment = [data_segment, ]
if not isinstance(data_segment, Sequence):
raise TypeError('data_segment must be an instance of DataSegment or a sequence of such instances')
for entry in data_segment:
if not isinstance(entry, DataSegment):
raise TypeError(
'Requires all data segment entries to be an instance of DataSegment.\n\t'
'Got type {}'.format(type(entry)))
if not entry.mode == 'w':
raise ValueError('Each data segment must have mode="w" for writing')
self._data_segment = tuple(data_segment)
@property
def file_name(self) -> Optional[str]:
"""
None|str: Defined as a convenience property.
"""
return None
@property
def data_segment(self) -> Tuple[DataSegment, ...]:
"""
Tuple[DataSegment, ...]: The data segment collection.
"""
return self._data_segment
@property
def image_count(self) -> int:
"""
int: The number of overall images/data segments.
"""
return len(self.data_segment)
@property
def data_size(self) -> Tuple[Tuple[int, ...]]:
"""
Tuple[Tuple[int, ...], ...]: the formatted data sizes of the data
segments.
"""
return tuple(entry.formatted_shape for entry in self.data_segment)
@property
def raw_data_size(self) -> Union[Tuple[int, ...], Tuple[Tuple[int, ...]]]:
"""
Tuple[Tuple[int, ...], ...]: the raw data sizes of the data segments.
"""
return tuple(entry.raw_shape for entry in self.data_segment)
@property
def closed(self) -> bool:
"""
bool: Is the writer closed? Reading the file after writing can
result in a ValueError if the writer was not closed.
"""
return self._closed
def _validate_closed(self):
if not hasattr(self, '_closed') or self._closed:
raise ValueError('I/O operation of closed writer')
def write_chip(
self,
data: numpy.ndarray,
start_indices: Optional[Union[int, Tuple[int, ...]]] = None,
subscript: Optional[Tuple[slice, ...]] = None,
index: int = 0) -> None:
"""
This is identical to :meth:`write`, and presented for backwards compatibility.
Parameters
----------
data : numpy.ndarray
start_indices : None|int|Tuple[int, ...]
subscript : None|Tuple[slice, ...]
index : int
See Also
--------
See :meth:`sarpy.io.general.data_segment.DataSegment.write`.
"""
self.__call__(data, start_indices=start_indices, subscript=subscript, index=index, raw=False)
def write(
self,
data: numpy.ndarray,
start_indices: Optional[Union[int, Tuple[int, ...]]] = None,
subscript: Optional[Tuple[slice, ...]] = None,
index: int = 0) -> None:
"""
Write the data to the appropriate data segment. This is an alias to
:code:`writer(data, start_indices=start_indices, subscript=subscript, index=index, raw=False)`.
**Only one of `start_indices` and `subscript` should be specified.**
Parameters
----------
data : numpy.ndarray
The data to write.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Tuple[slice, ...]
In contrast to providing `start_indices`, the slicing definition in
formatted coordinates pertinent to the specified data segment.
index : int
The index of the data segment to which to write.
See Also
--------
See :meth:`sarpy.io.general.data_segment.DataSegment.write`.
"""
self.__call__(data, start_indices=start_indices, subscript=subscript, index=index, raw=False)
def write_raw(
self,
data: numpy.ndarray,
start_indices: Optional[Union[int, Tuple[int, ...]]] = None,
subscript: Optional[Tuple[slice, ...]] = None,
index: int = 0) -> None:
"""
Write the raw data to the file(s). This is an alias to
:code:`writer(data, start_indices=start_indices, subscript=subscript, index=index, raw=True)`.
**Only one of `start_indices` and `subscript` should be specified.**
Parameters
----------
data : numpy.ndarray
The data to write.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Tuple[slice, ...]
In contrast to providing `start_indices`, the slicing definition in
raw coordinates pertinent to the specified data segment.
index : int
See Also
--------
See :meth:`sarpy.io.general.data_segment.DataSegment.write_raw`.
"""
self.__call__(data, start_indices=start_indices, subscript=subscript, index=index, raw=True)
def __call__(
self,
data: numpy.ndarray,
start_indices: Optional[Union[int, Tuple[int, ...]]] = None,
subscript: Optional[Tuple[slice, ...]] = None,
index: int = 0,
raw: bool = False) -> None:
"""
Write the data to the given data segment.
Parameters
----------
data : numpy.ndarray
The data to write.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Tuple[slice, ...]
In contrast to providing `start_indices`, the slicing definition in
coordinates pertinent to the specified data segment and `raw` value.
index : int
raw : bool
"""
self._validate_closed()
ds = self.data_segment[index]
if raw:
return ds.write_raw(data, start_indices=start_indices, subscript=subscript)
else:
if not ds.can_write_regular:
raise ValueError(
'The data segment at index {} can not convert from formatted data to raw data.\n\t'
'It is only permitted to use the write_raw() function on this data set,\n\t'
'and to write the data in raw (i.e. unformatted) form.'.format(index))
return ds.write(data, start_indices=start_indices, subscript=subscript)
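# Illustrative sketch (hypothetical `writer` with a single 2-d data segment):
# the two indexing conventions below address the same block of the output.
#
#     >>> import numpy
#     >>> block = numpy.zeros((10, 20), dtype='complex64')
#     >>> writer.write(block, start_indices=(100, 200))
#     >>> writer.write(block, subscript=(slice(100, 110), slice(200, 220)))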
def flush(self, force: bool = False) -> None:
"""
Try to perform any necessary steps to flush written data to the disk/buffer.
Parameters
----------
force : bool
Try force flushing, even for incompletely written data.
Returns
-------
None
"""
self._validate_closed()
if self._data_segment is not None:
for data_segment in self.data_segment:
data_segment.flush()
def close(self) -> None:
"""
This should perform any necessary final steps, like closing
open file handles, deleting any temp files, etc.
Trying to read a newly created file without closing may raise a ValueError.
"""
if not hasattr(self, '_closed') or self._closed:
return
try:
# flush the data
self.flush(force=True)
# close all the segments
if self._data_segment is not None:
for entry in self._data_segment:
entry.close()
self._data_segment = None
self._closed = True
except AttributeError:
self._closed = True
return
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
if exception_type is not None:
logger.error(
'The {} file writer generated an exception during processing'.format(
self.__class__.__name__))
# The exception will be reraised.
# It's unclear how any exception could really be caught.
| 28,939 | 31.923777 | 110 | py |
sarpy | sarpy-master/sarpy/io/general/tiff.py | """
Module providing api consistent with other file types for reading tiff files.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("Thomas McCullough", "Daniel Haverporth")
# It was the original intent to use gdal for the bulk of tiff reading
# Unfortunately, the necessary sarpy functionality can only be obtained by
# gdal_dataset.GetVirtualMemArray(). As of July 2020, this is supported only
# on Linux platforms - unclear what more constraints. So, using gdal to provide
# the reading capability is not feasible at present.
import logging
import os
import numpy
import re
from typing import Union, Tuple, Dict, BinaryIO, Sequence
from sarpy.io.general.base import BaseReader, SarpyIOError
from sarpy.io.general.format_function import ComplexFormatFunction
from sarpy.io.general.data_segment import NumpyMemmapSegment
logger = logging.getLogger(__name__)
_BASELINE_TAGS = {
254: 'NewSubfileType',
255: 'SubfileType',
256: 'ImageWidth',
257: 'ImageLength',
258: 'BitsPerSample',
259: 'Compression',
262: 'PhotometricInterpretation',
263: 'Thresholding',
264: 'CellWidth',
265: 'CellLength',
266: 'FillOrder',
270: 'ImageDescription',
271: 'Make',
272: 'Model',
273: 'StripOffsets',
274: 'Orientation',
277: 'SamplesPerPixel',
278: 'RowsPerStrip',
279: 'StripByteCounts',
280: 'MinSampleValue',
281: 'MaxSampleValue',
282: 'XResolution',
283: 'YResolution',
284: 'PlanarConfiguration',
288: 'FreeOffsets',
289: 'FreeByteCounts',
290: 'GrayResponseUnit',
291: 'GrayResponseCurve',
296: 'ResolutionUnit',
305: 'Software',
306: 'DateTime',
315: 'Artist',
316: 'HostComputer',
320: 'ColorMap',
338: 'ExtraSamples',
33432: 'Copyright',
}
_EXTENSION_TAGS = {
269: 'DocumentName',
285: 'PageName',
286: 'XPosition',
287: 'YPosition',
292: 'T4Options',
293: 'T6Options',
297: 'PageNumber',
301: 'TransferFunction',
317: 'Predictor',
318: 'WhitePoint',
319: 'PrimaryChromaticities',
321: 'HalftoneHints',
322: 'TileWidth',
323: 'TileLength',
324: 'TileOffsets',
325: 'TileByteCounts',
326: 'BadFaxLines',
327: 'CleanFaxData',
328: 'ConsecutiveBadFaxLines',
330: 'SubIFDs',
332: 'InkSet',
333: 'InkNames',
334: 'NumberOfInks',
336: 'DotRange',
337: 'TargetPrinter',
339: 'SampleFormat',
340: 'SMinSampleValue',
341: 'SMaxSampleValue',
342: 'TransferRange',
343: 'ClipPath',
344: 'XClipPathUnits',
345: 'YClipPathUnits',
346: 'Indexed',
347: 'JPEGTables',
351: 'OPIProxy',
400: 'GlobalParametersIFD',
401: 'ProfileType',
402: 'FaxProfile',
403: 'CodingMethods',
404: 'VersionYear',
405: 'ModeNumber',
433: 'Decode',
434: 'DefaultImageColor',
512: 'JPEGProc',
513: 'JPEGInterchangeFormat',
514: 'JPEGInterchangeFormatLength',
515: 'JPEGRestartInterval',
517: 'JPEGLosslessPredictors',
518: 'JPEGPointTransforms',
519: 'JPEGQTables',
520: 'JPEGDCTables',
521: 'JPEGACTables',
529: 'YCbCrCoefficients',
530: 'YCbCrSubSampling',
531: 'YCbCrPositioning',
532: 'ReferenceBlackWhite',
559: 'StripRowCounts',
700: 'XMP',
32781: 'ImageID',
34732: 'ImageLayer',
}
_GEOTIFF_TAGS = {
33550: 'ModelPixelScaleTag',
33922: 'ModelTiePointTag',
34264: 'ModelTransformationTag',
34735: 'GeoKeyDirectoryTag',
34736: 'GeoDoubleParamsTag',
34737: 'GeoAsciiParamsTag',
}
##########
class TiffDetails(object):
"""
For checking tiff metadata, and parsing in the event we are not using GDAL
"""
__slots__ = ('_file_name', '_endian', '_magic_number', '_tags')
_DTYPES = {i+1: entry for i, entry in enumerate(
['u1', 'a', 'u2', 'u4', 'u4',
'i1', 'u1', 'i2', 'i4', 'i4',
'f4', 'f8', 'u4', None, None,
'u8', 'i8', 'u8'])}
_SIZES = numpy.array(
[1, 1, 2, 4, 8,
1, 1, 2, 4, 8,
4, 8, 4, 0, 0,
8, 8, 8], dtype=numpy.int64)
# no definition for entries for 14 & 15
def __init__(self, file_name: str):
"""
Parameters
----------
file_name : str
"""
if not (isinstance(file_name, str) and os.path.isfile(file_name)):
raise SarpyIOError('Not a TIFF file.')
with open(file_name, 'rb') as fi:
# Try to read the basic tiff header
try:
fi_endian = fi.read(2).decode('utf-8')
except Exception as e:
raise SarpyIOError('Failed decoding the 2 character tiff header with error\n\t{}'.format(e))
if fi_endian == 'II':
self._endian = '<'
elif fi_endian == 'MM':
self._endian = '>'
else:
raise SarpyIOError('Invalid tiff endian string {}'.format(fi_endian))
# check the magic number
self._magic_number = numpy.fromfile(fi, dtype='{}i2'.format(self._endian), count=1)[0]
if self._magic_number not in [42, 43]:
raise SarpyIOError('Not a valid tiff file, got magic number {}'.format(self._magic_number))
if self._magic_number == 43:
rhead = numpy.fromfile(fi, dtype='{}i2'.format(self._endian), count=2)
if rhead[0] != 8:
raise SarpyIOError('Not a valid bigtiff. The offset size is given as {}'.format(rhead[0]))
if rhead[1] != 0:
raise SarpyIOError('Not a valid bigtiff. The reserved entry of '
'the header is given as {} != 0'.format(rhead[1]))
self._file_name = file_name
self._tags = None
@property
def file_name(self) -> str:
"""
str: READ ONLY. The file name.
"""
return self._file_name
@property
def endian(self) -> str:
"""
str: READ ONLY. The numpy dtype style ``('>' = big, '<' = little)`` endian string for the tiff file.
"""
return self._endian
@property
def tags(self) -> Dict[str, Union[str, numpy.ndarray]]:
"""
Dict: READ ONLY. The tiff tags dictionary, which will be parsed via
:meth:`parse_tags` on first access. This dictionary is
of the form `{<tag name> : str|numpy.ndarray}`, even for those tags
containing only a single entry (i.e. `count=1`).
"""
if self._tags is None:
self.parse_tags()
return self._tags
def parse_tags(self) -> None:
"""
Parse the tags from the file, if desired. This sets the `tags` attribute.
Returns
-------
None
"""
if self._magic_number == 42:
type_dtype = numpy.dtype('{}u2'.format(self._endian))
count_dtype = numpy.dtype('{}u2'.format(self._endian))
offset_dtype = numpy.dtype('{}u4'.format(self._endian))
offset_size = 4
elif self._magic_number == 43:
type_dtype = numpy.dtype('{}u2'.format(self._endian))
count_dtype = numpy.dtype('{}i8'.format(self._endian))
offset_dtype = numpy.dtype('{}i8'.format(self._endian))
offset_size = 8
else:
raise ValueError('Unrecognized magic number {}'.format(self._magic_number))
with open(self._file_name, 'rb') as fi:
# skip the basic header
fi.seek(offset_size, os.SEEK_SET)
# extract the tags information
tags = {}
self._parse_ifd(fi, tags, type_dtype, count_dtype, offset_dtype, offset_size)
self._tags = tags
def _read_tag(self,
fi: BinaryIO,
tiff_type: int,
num_tag: int,
count: int) -> Dict:
"""
Parse the specific tag information.
Parameters
----------
fi
The file type object
tiff_type : int
The tag data type identifier value
num_tag : int
The numeric tag identifier
count : int
The number of such tags
Returns
-------
dict
"""
# find which tags we belong to
if num_tag in _BASELINE_TAGS:
ext = 'BaselineTag'
name = _BASELINE_TAGS[num_tag]
elif num_tag in _EXTENSION_TAGS:
ext = 'ExtensionTag'
name = _EXTENSION_TAGS[num_tag]
elif num_tag in _GEOTIFF_TAGS:
ext = 'GeoTiffTag'
name = _GEOTIFF_TAGS[num_tag]
else:
ext, name = None, None
# Now extract from file based on type number
dtype = self._DTYPES.get(int(tiff_type), None)
if dtype is None:
logger.warning(
'Failed to extract tiff data type {},\n\t'
'for {} - {}'.format(tiff_type, ext, name))
return {'Value': None, 'Name': name, 'Extension': ext}
if tiff_type == 2: # ascii field - read directly and decode?
val = fi.read(count) # read the raw bytes, and decode to str below
if not isinstance(val, str):
val = val.decode('utf-8')
# eliminate the null characters
val = re.sub('\x00', '', val)
elif tiff_type in [5, 10]: # unsigned or signed rational
val = numpy.fromfile(fi, dtype='{}{}'.format(self._endian, dtype), count=numpy.int64(2*count)).reshape((-1, 2))
else:
val = numpy.fromfile(fi, dtype='{}{}'.format(self._endian, dtype), count=count)
if count == 1:
val = val[0]
return {'Value': val, 'Name': name, 'Extension': ext}
def _parse_ifd(self,
fi: BinaryIO,
tags: dict,
type_dtype: Union[str, numpy.dtype],
count_dtype: Union[str, numpy.dtype],
offset_dtype: Union[str, numpy.dtype],
offset_size: int) -> None:
"""
Recursively parses the tag data and populates a provided dictionary
Parameters
----------
fi
The file type object
tags : dict
The tag data dictionary being populated
type_dtype : str|numpy.dtype
The data type for the data element - note that endian-ness is included
count_dtype : str|numpy.dtype
The data type for the number of directories - note that endian-ness is included
offset_dtype : str|numpy.dtype
The data type for the offset - note that endian-ness is included
offset_size : int
The size of the offset
Returns
-------
None
"""
nifd = numpy.fromfile(fi, dtype=offset_dtype, count=1)[0]
if nifd == 0:
return # termination criterion
fi.seek(nifd)
num_entries = numpy.fromfile(fi, dtype=count_dtype, count=1)[0]
for entry in range(int(num_entries)):
num_tag, tiff_type = numpy.fromfile(fi, dtype=type_dtype, count=2)
count = numpy.fromfile(fi, dtype=offset_dtype, count=1)[0]
total_size = self._SIZES[tiff_type-1]*count
if total_size <= offset_size:
save_ptr = fi.tell() + offset_size # we should advance past the entire block
value = self._read_tag(fi, tiff_type, num_tag, count)
fi.seek(save_ptr)
else:
offset = numpy.fromfile(fi, dtype=offset_dtype, count=1)[0]
save_ptr = fi.tell() # save our current spot
fi.seek(offset) # get to the offset location
value = self._read_tag(fi, tiff_type, num_tag, count) # read the tag value
fi.seek(save_ptr) # return to our location
tags[value['Name']] = value['Value']
self._parse_ifd(fi, tags, type_dtype, count_dtype, offset_dtype, offset_size) # recurse
def check_compression(self):
"""
Check the Compression tag, and verify uncompressed.
Returns
-------
None
"""
if self.tags['Compression'] != 1:
raise ValueError(
'The file {} indicates some kind of tiff compression, and the sarpy API requirements '
'do not presently support reading of compressed tiff files. Consider using gdal to '
'translate this tiff to an uncompressed file via the command\n\t'
'"gdal_translate -co TILED=no <input_file> <output_file>"')
def check_tiled(self):
"""
Check if the tiff file is tiled.
Returns
-------
None
"""
if 'TileLength' in self.tags or 'TileWidth' in self.tags:
raise ValueError(
'The file {} indicates that this is a tiled file, and the sarpy API requirements '
'do not presently support reading of tiled tiff files. Consider using gdal to '
'translate this tiff to a flat file via the command\n\t'
'"gdal_translate -co TILED=no <input_file> <output_file>"')
class NativeTiffDataSegment(NumpyMemmapSegment):
"""
Direct reading of data from tiff file, failing if compression is present.
This is a very complex SAR specific implementation, and not general.
"""
__slots__ = ('_tiff_details', )
_SAMPLE_FORMATS = {
1: 'u', 2: 'i', 3: 'f', 5: 'i', 6: 'f'} # 5 and 6 are complex int/float
def __init__(self,
tiff_details: Union[str, TiffDetails],
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Union[None, Tuple[int, ...]] = None):
"""
If format function and format_dtype are not provided, then SAR specific
(not necessarily general) choices will be made.
Parameters
----------
tiff_details : TiffDetails
reverse_axes : None|Tuple[int, ...]
transpose_axes : None|Tuple[int, ...]
"""
if isinstance(tiff_details, str):
tiff_details = TiffDetails(tiff_details)
if not isinstance(tiff_details, TiffDetails):
raise TypeError('NativeTiffDataSegment input argument must be a filename '
'or TiffDetails object.')
tiff_details.check_compression()
tiff_details.check_tiled()
self._tiff_details = tiff_details
if isinstance(tiff_details.tags['SampleFormat'], numpy.ndarray):
samp_form = tiff_details.tags['SampleFormat'][0]
else:
samp_form = tiff_details.tags['SampleFormat']
if samp_form not in self._SAMPLE_FORMATS:
raise ValueError('Invalid sample format {}'.format(samp_form))
if isinstance(tiff_details.tags['BitsPerSample'], numpy.ndarray):
bits_per_sample = tiff_details.tags['BitsPerSample'][0]
else:
bits_per_sample = tiff_details.tags['BitsPerSample']
raw_bands = int(tiff_details.tags['SamplesPerPixel'])
if samp_form in [5, 6]:
transform_data = 'COMPLEX'
output_bands = int(raw_bands)
raw_bands *= 2
bits_per_sample /= 2
output_dtype = 'complex64'
elif raw_bands == 2:
# NB: this is heavily skewed towards SAR and obviously not general
transform_data = 'COMPLEX'
output_dtype = 'complex64'
output_bands = 1
else:
transform_data = None
output_bands = raw_bands
output_dtype = None
raw_shape = (int(tiff_details.tags['ImageLength']), int(tiff_details.tags['ImageWidth']), raw_bands)
raw_dtype = numpy.dtype('{0:s}{1:s}{2:d}'.format(
self._tiff_details.endian, self._SAMPLE_FORMATS[samp_form], int(bits_per_sample/8)))
if output_dtype is None:
output_dtype = raw_dtype
strip_offsets = tiff_details.tags['StripOffsets']
data_offset = int(strip_offsets[0]) if isinstance(strip_offsets, numpy.ndarray) else int(strip_offsets)
format_function = None
if transform_data == 'COMPLEX':
format_function = ComplexFormatFunction(raw_dtype, order='IQ')
if reverse_axes is not None:
if isinstance(reverse_axes, int):
reverse_axes = (reverse_axes, )
for entry in reverse_axes:
if not entry < 2:
raise ValueError('reversing of axes is only permitted along the first two axes.')
if transpose_axes is not None:
if len(transpose_axes) < 2 or len(transpose_axes) > 3:
raise ValueError('transpose axes must have length 2 or 3')
elif len(transpose_axes) == 2:
transpose_axes = transpose_axes + (2, )
if transpose_axes[2] != 2:
raise ValueError(
'The transpose operation must preserve the location of the band data,\n\t'
'in the final dimension')
if transpose_axes is None or transpose_axes == (0, 1, 2):
output_shape = raw_shape[:2]
else:
output_shape = (raw_shape[1], raw_shape[0])
if output_bands > 1:
output_shape = output_shape + (output_bands, )
NumpyMemmapSegment.__init__(
self, tiff_details.file_name, data_offset, raw_dtype, raw_shape,
formatted_dtype=output_dtype, formatted_shape=output_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r', close_file=True)
@property
def tiff_details(self) -> TiffDetails:
return self._tiff_details
class TiffReader(BaseReader):
def __init__(self,
tiff_details: Union[str, TiffDetails],
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Union[None, Tuple[int, ...]] = None):
"""
Parameters
----------
tiff_details : TiffDetails
reverse_axes : None|int|Sequence[int]
transpose_axes : None|Tuple[int, ...]
"""
data_segment = NativeTiffDataSegment(tiff_details, reverse_axes=reverse_axes, transpose_axes=transpose_axes)
BaseReader.__init__(self, data_segment, reader_type='OTHER', close_segments=True)
@property
def data_segment(self) -> NativeTiffDataSegment:
"""
NativeTiffDataSegment: The tiff data segment.
"""
return self._data_segment
@property
def tiff_details(self) -> TiffDetails:
"""
TiffDetails: The tiff details object.
"""
return self.data_segment.tiff_details
@property
def file_name(self):
return self.tiff_details.file_name
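# Illustrative usage sketch (hypothetical file path): open an uncompressed,
# untiled tiff and read a chip of formatted data.
#
#     >>> reader = TiffReader('image.tiff')
#     >>> reader.data_size             # formatted shape of the single data segment
#     >>> chip = reader[0:512, 0:512]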
########
# base expected functionality for a module with an implemented Reader
def is_a(file_name: str) -> Union[None, TiffReader]:
"""
Tests whether a given file_name corresponds to a tiff file. Returns a
tiff reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
None|TiffReader
`TiffReader` instance if tiff file, `None` otherwise
"""
try:
tiff_details = TiffDetails(file_name)
logger.info('File {} is determined to be a tiff file.'.format(file_name))
return TiffReader(tiff_details)
except SarpyIOError:
# we don't want to catch parsing errors, for now
return None
| 19,566 | 33.08885 | 123 | py |
sarpy | sarpy-master/sarpy/io/general/slice_parsing.py | """
Utilities for parsing slice input.
"""
__classification__ = "UNCLASSIFIED"
__author__ = 'Thomas McCullough'
from typing import Union, Tuple, Sequence
import numpy
def validate_slice_int(the_int: int, bound: int, include: bool = True) -> int:
"""
Ensure that the given integer makes sense as a slice entry, and move to
a normalized form.
Parameters
----------
the_int : int
bound : int
include : bool
Returns
-------
int
"""
if not isinstance(bound, int) or bound <= 0:
raise TypeError('bound must be a positive integer.')
if include:
if not -bound <= the_int < bound:
raise ValueError('Slice argument {} does not fit with bound {}'.format(the_int, bound))
else:
if not -bound < the_int <= bound:
raise ValueError('Slice argument {} does not fit with bound {}'.format(the_int, bound))
if the_int < 0:
return the_int + bound
return the_int
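# Illustrative usage sketch: negative entries are shifted into [0, bound).
#
#     >>> validate_slice_int(3, 10)
#     3
#     >>> validate_slice_int(-1, 10)
#     9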
def verify_slice(item: Union[None, int, slice, Tuple[int, ...]], max_element: int) -> slice:
"""
Verify a given slice against a bound.
**New in version 1.3.0.**
Parameters
----------
item : None|int|slice|Tuple[int, ...]
max_element : int
Returns
-------
slice
This will certainly have `start` and `step` populated, and will have `stop`
populated unless `step < 0` and `stop` must be `None`.
"""
def check_bound(entry: Union[None, int]) -> Union[None, int]:
if entry is None:
return entry
elif -max_element <= entry < 0:
entry += max_element
return entry
elif 0 <= entry <= max_element:
return entry
else:
raise ValueError('Got out of bounds argument ({}) in slice limited by `{}`'.format(entry, max_element))
if not isinstance(max_element, int) or max_element < 1:
raise ValueError('slice verification requires a positive integer limit')
if isinstance(item, Sequence):
item = slice(*item)
if item is None:
return slice(0, max_element, 1)
elif isinstance(item, int):
item = check_bound(item)
return slice(item, item+1, 1)
elif isinstance(item, slice):
start = check_bound(item.start)
stop = check_bound(item.stop)
step = 1 if item.step is None else item.step
if step > 0:
if start is None:
start = 0
if stop is None:
stop = max_element
if step < 0:
if start is None:
start = max_element - 1
if start is not None and stop is not None:
if numpy.sign(stop - start) != numpy.sign(step):
raise ValueError('slice {} is not well formed'.format(item))
return slice(start, stop, step)
else:
raise ValueError('Got unexpected argument of type {} in slice'.format(type(item)))
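# Illustrative usage sketch of the normalization:
#
#     >>> verify_slice(None, 10)
#     slice(0, 10, 1)
#     >>> verify_slice(5, 10)
#     slice(5, 6, 1)
#     >>> verify_slice(slice(None, None, -2), 10)
#     slice(9, None, -2)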
def verify_subscript(
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
corresponding_shape: Tuple[int, ...]) -> Tuple[slice, ...]:
"""
Verify a subscript like item against a corresponding shape.
**New in version 1.3.0**
Parameters
----------
subscript : None|int|slice|Sequence[int|slice|Tuple[int, ...]]
corresponding_shape : Tuple[int, ...]
Returns
-------
Tuple[slice, ...]
"""
ndim = len(corresponding_shape)
if subscript is None or subscript is Ellipsis:
return tuple([slice(0, corresponding_shape[i], 1) for i in range(ndim)])
elif isinstance(subscript, int):
out = [verify_slice(slice(subscript, subscript + 1, 1), corresponding_shape[0]), ]
out.extend([slice(0, corresponding_shape[i], 1) for i in range(1, ndim)])
return tuple(out)
elif isinstance(subscript, slice):
out = [verify_slice(subscript, corresponding_shape[0]), ]
out.extend([slice(0, corresponding_shape[i], 1) for i in range(1, ndim)])
return tuple(out)
elif isinstance(subscript, Sequence):
# check for Ellipsis usage...
ellipsis_location = None
for index, entry in enumerate(subscript):
if entry is Ellipsis:
if ellipsis_location is None:
ellipsis_location = index
else:
raise KeyError('slice definition cannot contain more than one ellipsis')
if ellipsis_location is not None:
if len(subscript) > ndim:
raise ValueError('More subscript entries ({}) than shape dimensions ({}).'.format(len(subscript), ndim))
if ellipsis_location == len(subscript)-1:
subscript = subscript[:ellipsis_location]
elif ellipsis_location == 0:
init_pad = ndim - len(subscript) + 1
subscript = tuple([None, ]*init_pad) + subscript[1:]
else: # ellipsis in the middle
middle_pad = ndim - len(subscript) + 1
subscript = subscript[:ellipsis_location] + tuple([None, ]*middle_pad) + subscript[ellipsis_location+1:]
if len(subscript) > ndim:
raise ValueError('More subscript entries ({}) than shape dimensions ({}).'.format(len(subscript), ndim))
out = [verify_slice(item_i, corresponding_shape[i]) for i, item_i in enumerate(subscript)]
if len(out) < ndim:
out.extend([slice(0, corresponding_shape[i], 1) for i in range(len(out), ndim)])
return tuple(out)
else:
raise ValueError('Got unhandled subscript {}'.format(subscript))
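# Illustrative usage sketch: ellipsis expansion against a 3-d shape.
#
#     >>> verify_subscript((0, Ellipsis), (5, 6, 7))
#     (slice(0, 1, 1), slice(0, 6, 1), slice(0, 7, 1))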
def get_slice_result_size(slice_in: slice) -> int:
"""
Gets the size of the slice result. This assumes a normalized slice definition.
**New in version 1.3.0.**
Parameters
----------
slice_in : slice
Returns
-------
int
"""
# NB: this assumes a normalized slice definition
if slice_in.step > 0:
return int(numpy.floor((slice_in.stop - 1 - slice_in.start) / slice_in.step) + 1)
elif slice_in.stop is None:
return int(numpy.floor(slice_in.start / abs(slice_in.step)) + 1)
else:
return int(numpy.floor((slice_in.stop + 1 - slice_in.start) / slice_in.step) + 1)
def get_subscript_result_size(
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
corresponding_shape: Tuple[int, ...]) -> Tuple[Tuple[slice, ...], Tuple[int, ...]]:
"""
Validate the given subscript against the corresponding shape, and also determine
the shape of the resultant data reading result.
**New in version 1.3.0**
Parameters
----------
subscript : None|int|slice|Tuple[slice, ...]
corresponding_shape : Tuple[int, ...]
Returns
-------
valid_subscript : Tuple[slice, ...]
output_shape : Tuple[int, ...]
"""
subscript = verify_subscript(subscript, corresponding_shape)
the_shape = tuple([get_slice_result_size(sl) for sl in subscript])
return subscript, the_shape
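# Illustrative usage sketch (hypothetical shape values):
#   >>> get_subscript_result_size((slice(0, 7, 2), Ellipsis), (10, 20))
#   ((slice(0, 7, 2), slice(0, 20, 1)), (4, 20))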
| 7,058 | 32.140845 | 120 | py |
sarpy | sarpy-master/sarpy/io/general/utils.py | """
Common functionality for converting metadata
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from typing import Union, Tuple, BinaryIO, Any, Optional
import hashlib
import os
import warnings
import struct
import mmap
import numpy
try:
import h5py
except ImportError:
h5py = None
###########
# general file type checks
def is_file_like(the_input: Any) -> bool:
"""
    Verify whether the provided input appears to provide a "file-like object". This
term is used ubiquitously, but not all usages are identical. In this case, we
mean that there exist callable attributes `read`, `write`, `seek`, and `tell`.
Note that this does not check the mode (binary/string or read/write/append),
as it is not clear that there is any generally accessible way to do so.
Parameters
----------
the_input
Returns
-------
bool
"""
out = True
for attribute in ['read', 'write', 'seek', 'tell']:
value = getattr(the_input, attribute, None)
out &= callable(value)
return out
def is_real_file(the_input: BinaryIO) -> bool:
"""
Determine if the file-like object is associated with an actual file.
This is mainly to consider suitability for establishment of a numpy.memmap.
Parameters
----------
the_input : BinaryIO
Returns
-------
bool
"""
if not hasattr(the_input, 'fileno'):
return False
# noinspection PyBroadException
try:
fileno = the_input.fileno()
return isinstance(fileno, int) and (fileno >= 0)
except Exception:
return False
def _fetch_initial_bytes(file_name: Union[str, BinaryIO], size: int) -> Optional[bytes]:
header = b''
if is_file_like(file_name):
current_location = file_name.tell()
file_name.seek(0, os.SEEK_SET)
header = file_name.read(size)
file_name.seek(current_location, os.SEEK_SET)
elif isinstance(file_name, str):
if not os.path.isfile(file_name):
return None
with open(file_name, 'rb') as fi:
header = fi.read(size)
if len(header) != size:
return None
return header
def is_nitf(
file_name: Union[str, BinaryIO],
return_version=False) -> Union[bool, Tuple[bool, Optional[str]]]:
"""
Test whether the given input is a NITF 2.0 or 2.1 file.
Parameters
----------
file_name : str|BinaryIO
return_version : bool
Returns
-------
is_nitf_file: bool
Is the file a NITF file, based solely on checking initial bytes.
nitf_version: None|str
        Only returned if `return_version=True`. Will be `None` in the event that
`is_nitf_file=False`.
"""
header = _fetch_initial_bytes(file_name, 9)
if header is None:
if return_version:
return False, None
else:
return False
ihead = header[:4]
vers = header[4:]
if ihead == b'NITF':
try:
vers = vers.decode('utf-8')
return (True, vers) if return_version else True
except ValueError:
pass
return (False, None) if return_version else False
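# Illustrative usage sketch (hypothetical in-memory file contents):
#   >>> from io import BytesIO
#   >>> is_nitf(BytesIO(b'NITF02.10' + b'\x00'*100), return_version=True)
#   (True, '02.10')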
def is_tiff(
file_name: Union[str, BinaryIO],
return_details=False) -> Union[bool, Tuple[bool, Optional[str], Optional[int]]]:
"""
Test whether the given input is a tiff or big_tiff file.
Parameters
----------
file_name : str|BinaryIO
return_details : bool
Return the tiff details of endianness and magic number?
Returns
-------
is_tiff_file : bool
    magic_number : None|int
        Only returned if `return_details` is `True`. One of `[42, 43]`.
    endianness : None|str
        Only returned if `return_details` is `True`. One of `['>', '<']`.
"""
header = _fetch_initial_bytes(file_name, 4)
if header is None:
return (False, None, None) if return_details else False
try:
endian_part = header[:2].decode('utf-8')
except ValueError:
return (False, None, None) if return_details else False
if endian_part not in ['II', 'MM']:
return (False, None, None) if return_details else False
if endian_part == 'II':
endianness = '<'
else:
endianness = '>'
magic_number = struct.unpack('{}h'.format(endianness), header[2:])[0]
if magic_number in [42, 43]:
# NB: 42 is regular tiff, while 43 is big tiff
return (True, magic_number, endianness) if return_details else True
return (False, None, None) if return_details else False
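# Illustrative usage sketch (hypothetical little-endian header bytes); note that the
# detailed return order is magic number, then endianness:
#   >>> from io import BytesIO
#   >>> is_tiff(BytesIO(b'II' + struct.pack('<h', 42)), return_details=True)
#   (True, 42, '<')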
def is_hdf5(file_name: Union[str, BinaryIO]) -> bool:
"""
Test whether the given input is a hdf5 file.
Parameters
----------
file_name : str|BinaryIO
Returns
-------
bool
"""
header = _fetch_initial_bytes(file_name, 4)
if header is None:
return False
out = (header == b'\x89HDF')
if out and h5py is None:
warnings.warn('The h5py library was not successfully imported, and no hdf5 files can be read')
return out
###########
def parse_timestring(str_in: str, precision: str = 'us') -> numpy.datetime64:
"""
Parse (naively) a timestring to numpy.datetime64 of the given precision.
Parameters
----------
str_in : str
precision : str
See numpy.datetime64 for precision options.
Returns
-------
numpy.datetime64
"""
if str_in.strip()[-1] == 'Z':
return numpy.datetime64(str_in[:-1], precision)
return numpy.datetime64(str_in, precision)
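# Illustrative usage:
#   >>> parse_timestring('2021-01-01T12:00:00.5Z')
#   numpy.datetime64('2021-01-01T12:00:00.500000')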
def get_seconds(
dt1: numpy.datetime64,
dt2: numpy.datetime64,
precision: str = 'us') -> float:
"""
The number of seconds between two numpy.datetime64 elements.
Parameters
----------
dt1 : numpy.datetime64
dt2 : numpy.datetime64
precision : str
one of 's', 'ms', 'us', or 'ns'.
Returns
-------
float
the number of seconds between dt2 and dt1 (i.e. dt1 - dt2).
"""
if precision == 's':
scale = 1
elif precision == 'ms':
scale = 1e-3
elif precision == 'us':
scale = 1e-6
elif precision == 'ns':
scale = 1e-9
else:
raise ValueError('unrecognized precision {}'.format(precision))
dtype = 'datetime64[{}]'.format(precision)
tdt1 = dt1.astype(dtype)
tdt2 = dt2.astype(dtype)
return float((tdt1.astype('int64') - tdt2.astype('int64'))*scale)
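# Illustrative usage (hypothetical times):
#   >>> t0 = numpy.datetime64('2021-01-01T00:00:00', 'us')
#   >>> t1 = numpy.datetime64('2021-01-01T00:01:30', 'us')
#   >>> get_seconds(t1, t0)
#   90.0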
def calculate_md5(the_path: str, chunk_size: int = 1024*1024) -> str:
"""
Calculate the md5 checksum of a given file defined by a path.
Parameters
----------
the_path : str
The path to the file
chunk_size : int
The chunk size for processing
Returns
-------
str
The 32 character MD5 hex digest of the given file
"""
md5_hash = hashlib.md5()
with open(the_path, 'rb') as fi:
for chunk in iter(lambda: fi.read(chunk_size), b''):
md5_hash.update(chunk)
return md5_hash.hexdigest()
#######
# Flexible memmap object for extracting compressed image data from mid-file
class MemMap(object):
"""
Spoofing necessary memory map functionality to permit READ ONLY opening of a
file containing compressed image data somewhere mid-file for use in the PIL
interface. This is just a thin wrapper around the built-in Python memmap
class which accommodates arbitrary offset (versus limited to allocation
granularity).
**The bare minimum of functionality is implemented to permit the intended use.**
"""
__slots__ = ('_mem_map', '_file_obj', '_offset_shift')
def __init__(self, file_obj, length, offset):
"""
Parameters
----------
file_obj : str|BinaryIO
length : int
offset : int
"""
# length and offset validation
length = int(length)
offset = int(offset)
if length < 0 or offset < 0:
raise ValueError(
'length ({}) and offset ({}) must be non-negative integers'.format(length, offset))
# determine offset and length accommodating allocation block size limitation
self._offset_shift = (offset % mmap.ALLOCATIONGRANULARITY)
offset = offset - self._offset_shift
length = length + self._offset_shift
# establish the mem map
if isinstance(file_obj, str):
self._file_obj = open(file_obj, 'rb')
else:
self._file_obj = file_obj
self._mem_map = mmap.mmap(self._file_obj.fileno(), length, access=mmap.ACCESS_READ, offset=offset)
def read(self, n):
return self._mem_map.read(n)
def tell(self):
return self._mem_map.tell() - self._offset_shift
def seek(self, pos, whence=0):
whence = int(whence)
pos = int(pos)
if whence == 0:
self._mem_map.seek(pos+self._offset_shift, 0)
else:
self._mem_map.seek(pos, whence)
@property
def closed(self):
return self._file_obj.closed
def close(self):
self._file_obj.close()
| 9,121 | 25.062857 | 106 | py |
sarpy | sarpy-master/sarpy/io/general/nitf.py | """
Module laying out basic functionality for reading and writing NITF files.
Updated extensively in version 1.3.0.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import os
from typing import Union, List, Tuple, BinaryIO, Sequence, Optional
from tempfile import mkstemp
from collections import OrderedDict
import struct
from io import BytesIO
import numpy
from sarpy.io.general.base import SarpyIOError, BaseReader, BaseWriter
from sarpy.io.general.format_function import FormatFunction, ComplexFormatFunction, \
SingleLUTFormatFunction
from sarpy.io.general.data_segment import DataSegment, BandAggregateSegment, \
BlockAggregateSegment, SubsetSegment, NumpyArraySegment, NumpyMemmapSegment, \
FileReadDataSegment
# noinspection PyProtectedMember
from sarpy.io.general.nitf_elements.nitf_head import NITFHeader, NITFHeader0, \
ImageSegmentsType, GraphicsSegmentsType, TextSegmentsType, \
DataExtensionsType, ReservedExtensionsType, _ItemArrayHeaders
from sarpy.io.general.nitf_elements.text import TextSegmentHeader, TextSegmentHeader0
from sarpy.io.general.nitf_elements.graphics import GraphicsSegmentHeader
from sarpy.io.general.nitf_elements.symbol import SymbolSegmentHeader
from sarpy.io.general.nitf_elements.label import LabelSegmentHeader
from sarpy.io.general.nitf_elements.res import ReservedExtensionHeader, ReservedExtensionHeader0
from sarpy.io.general.nitf_elements.image import ImageSegmentHeader, ImageSegmentHeader0, MaskSubheader
from sarpy.io.general.nitf_elements.des import DataExtensionHeader, DataExtensionHeader0
from sarpy.io.general.utils import is_file_like, is_nitf, is_real_file
from sarpy.io.complex.sicd_elements.blocks import LatLonType
from sarpy.geometry.geocoords import ecf_to_geodetic, geodetic_to_ecf
from sarpy.geometry.latlon import num as lat_lon_parser
try:
# noinspection PyPackageRequirements
import pyproj
except ImportError:
pyproj = None
try:
# noinspection PyPackageRequirements
from PIL import Image as PIL_Image
PIL_Image.MAX_IMAGE_PIXELS = None # get rid of decompression bomb checking
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
except ImportError:
PIL_Image = None
ImageFile = None
import platform
system_os = platform.system()
if system_os == 'Linux':
try:
import resource
except ImportError:
resource = None
logger = logging.getLogger(__name__)
_unhandled_version_text = 'Unhandled NITF version `{}`'
#####
# helper functions
def extract_image_corners(
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0]) -> Union[None, numpy.ndarray]:
"""
Extract the image corner point array for the image segment header.
Parameters
----------
img_header : ImageSegmentHeader
Returns
-------
    None|numpy.ndarray
"""
corner_string = img_header.IGEOLO
# NB: there are 4 corner point string, each of length 15
corner_strings = [corner_string[start:stop] for start, stop in zip(range(0, 59, 15), range(15, 74, 15))]
icps = []
# TODO: handle ICORDS == 'U', which is MGRS
if img_header.ICORDS in ['N', 'S']:
if pyproj is None:
            logger.error(
                'ICORDS is {}, which requires pyproj,\n\t'
                'which was not successfully imported.'.format(img_header.ICORDS))
return None
for entry in corner_strings:
the_proj = pyproj.Proj(proj='utm', zone=int(entry[:2]), south=(img_header.ICORDS == 'S'), ellps='WGS84')
lon, lat = the_proj(float(entry[2:8]), float(entry[8:]), inverse=True)
icps.append([lon, lat])
elif img_header.ICORDS == 'D':
icps = [[float(corner[:7]), float(corner[7:])] for corner in corner_strings]
elif img_header.ICORDS == 'G':
icps = [[lat_lon_parser(corner[:7]), lat_lon_parser(corner[7:])] for corner in corner_strings]
else:
logger.error('Got unhandled ICORDS {}'.format(img_header.ICORDS))
return None
return numpy.array(icps, dtype='float64')
def find_jpeg_delimiters(the_bytes: bytes) -> List[Tuple[int, int]]:
"""
Finds regular jpeg delimiters from the image segment bytes.
Parameters
----------
the_bytes : bytes
Returns
-------
List[Tuple[int, int]]
Raises
------
ValueError
        If a block does not start with the jpeg start delimiter, or is missing
        the jpeg end delimiter.
"""
start_pattern = b'\xff\xd8'
end_pattern = b'\xff\xd9' # these should never be used for anything else
out = []
next_location = 0
while next_location < len(the_bytes):
if the_bytes[next_location:next_location+2] != start_pattern:
raise ValueError('The jpeg block {} does not start with the jpeg start delimiter'.format(len(out)))
end_block = the_bytes.find(end_pattern, next_location)
if end_block == -1:
raise ValueError('The new jpeg block {} does not contain the jpeg end delimiter'.format(len(out)))
        block_start = next_location
        next_location = end_block + 2
        out.append((block_start, next_location))
return out
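# Illustrative usage (two minimal blocks with hypothetical payloads):
#   >>> find_jpeg_delimiters(b'\xff\xd8AA\xff\xd9' + b'\xff\xd8BB\xff\xd9')
#   [(0, 6), (6, 12)]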
def _get_shape(rows: int, cols: int, bands: int, band_dimension=2) -> Tuple[int, ...]:
"""
Helper function for turning rows/cols/bands into a shape tuple.
Parameters
----------
rows: int
cols: int
bands: int
band_dimension : int
One of `{0, 1, 2}`.
Returns
-------
shape_tuple : Tuple[int, ...]
The shape tuple with band omitted if `bands=1`
"""
if bands == 1:
return rows, cols
elif band_dimension == 0:
return bands, rows, cols
elif band_dimension == 1:
return rows, bands, cols
else:
return rows, cols, bands
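# Illustrative usage:
#   >>> _get_shape(100, 200, 3, band_dimension=0)
#   (3, 100, 200)
#   >>> _get_shape(100, 200, 1)
#   (100, 200)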
def _get_subscript_def(
row_start: int,
row_end: int,
col_start: int,
col_end: int,
raw_bands: int,
raw_band_dimension: int) -> Tuple[slice, ...]:
if raw_bands == 1:
return slice(row_start, row_end, 1), slice(col_start, col_end, 1)
elif raw_band_dimension == 0:
return slice(0, raw_bands, 1), slice(row_start, row_end, 1), slice(col_start, col_end, 1)
elif raw_band_dimension == 1:
return slice(row_start, row_end, 1), slice(0, raw_bands, 1), slice(col_start, col_end, 1)
elif raw_band_dimension == 2:
return slice(row_start, row_end, 1), slice(col_start, col_end, 1), slice(0, raw_bands, 1)
else:
raise ValueError('Unhandled raw_band_dimension {}'.format(raw_band_dimension))
def _construct_block_bounds(
image_header: Union[ImageSegmentHeader, ImageSegmentHeader0]) -> List[Tuple[int, int, int, int]]:
"""
Construct the bounds for the blocking definition in row/column space for
the image segment.
Note that this includes potential pad pixels, since NITF requires that
each block is the same size.
Parameters
----------
    image_header : ImageSegmentHeader|ImageSegmentHeader0
Returns
-------
List[Tuple[int, int, int, int]]
This is a list of the form `(row start, row end, column start, column end)`.
"""
if image_header.NPPBH == 0:
column_block_size = image_header.NCOLS
else:
column_block_size = image_header.NPPBH
# validate that this makes sense...
hblocks = column_block_size*image_header.NBPR
if not (image_header.NCOLS <= hblocks < image_header.NCOLS + column_block_size):
raise ValueError(
'Got NCOLS {}, NPPBH {}, and NBPR {}'.format(
image_header.NCOLS, image_header.NPPBH, image_header.NBPR))
if image_header.NPPBV == 0:
row_block_size = image_header.NROWS
else:
row_block_size = image_header.NPPBV
# validate that this makes sense
vblocks = row_block_size*image_header.NBPC
if not (image_header.NROWS <= vblocks < image_header.NROWS + row_block_size):
raise ValueError(
'Got NROWS {}, NPPBV {}, and NBPC {}'.format(
image_header.NROWS, image_header.NPPBV, image_header.NBPC))
bounds = []
block_row_start = 0
for row_block in range(image_header.NBPC):
block_row_end = block_row_start + row_block_size
block_col_start = 0
for column_block in range(image_header.NBPR):
block_col_end = block_col_start + column_block_size
bounds.append((block_row_start, block_row_end, block_col_start, block_col_end))
block_col_start = block_col_end
block_row_start = block_row_end
return bounds
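# Worked example (sketch, hypothetical header values): for a segment with
# NROWS=100, NCOLS=256, NPPBV=64, NBPC=2, NPPBH=256, NBPR=1, the returned bounds
# (including the pad rows of the final block) would be
# [(0, 64, 0, 256), (64, 128, 0, 256)].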
def _get_dtype(
image_header: Union[ImageSegmentHeader, ImageSegmentHeader0]
) -> Tuple[numpy.dtype, numpy.dtype, int, Optional[str], Optional[numpy.ndarray]]:
"""
Gets the information necessary for constructing the format function applicable
to the given image segment.
Parameters
----------
    image_header : ImageSegmentHeader|ImageSegmentHeader0
Returns
-------
    raw_dtype : numpy.dtype
The native data type
formatted_dtype : numpy.dtype
The formatted data type. Will be `complex64` if `complex_order` is
populated, the data type of `lut` if it is populated, or same as
`raw_dtype`.
formatted_bands : int
How many bands in the formatted output. Similarly depends on the
value of `complex_order` and `lut`.
complex_order : None|str
If populated, one of `('IQ', 'QI', 'MP', 'PM')` indicating the
order of complex bands. This will only be populated if consistent.
lut : None|numpy.ndarray
If populated, the lookup table presented in the data.
"""
def get_raw_dtype() -> numpy.dtype:
if pvtype == 'INT':
return numpy.dtype('>u{}'.format(bpp))
elif pvtype == 'SI':
return numpy.dtype('>i{}'.format(bpp))
elif pvtype == 'R':
return numpy.dtype('>f{}'.format(bpp))
elif pvtype == 'C':
if bpp not in [4, 8, 16]:
raise ValueError(
'Got PVTYPE = C and NBPP = {} (not 32, 64 or 128), which is unsupported.'.format(nbpp))
return numpy.dtype('>c{}'.format(bpp))
def get_complex_order() -> Optional[str]:
bands = image_header.Bands
if (len(bands) % 2) != 0:
return None
order = bands[0].ISUBCAT + bands[1].ISUBCAT
if order not in ['IQ', 'QI', 'MP', 'PM']:
return None
for i in range(2, len(bands), 2):
if order != bands[i].ISUBCAT + bands[i+1].ISUBCAT:
return None
if order in ['IQ', 'QI']:
if pvtype not in ['SI', 'R']:
raise ValueError(
'Image segment appears to be complex of order `{}`, \n\t'
'but PVTYPE is `{}`'.format(order, pvtype))
if order in ['MP', 'PM']:
if pvtype not in ['INT', 'R']:
raise ValueError(
'Image segment appears to be complex of order `{}`, \n\t'
'but PVTYPE is `{}`'.format(order, pvtype))
return order
def get_lut_info() -> Optional[numpy.ndarray]:
bands = image_header.Bands
if len(bands) > 1:
for band in bands:
if band.LUTD is not None:
raise ValueError('There are multiple bands with LUT.')
# TODO: this isn't really right - handle most significant/least significant nonsense
lut = bands[0].LUTD
if lut is None:
return None
if lut.ndim == 1:
return lut
elif lut.ndim == 2:
return numpy.transpose(lut)
else:
raise ValueError('Got lut of shape `{}`'.format(lut.shape))
nbpp = image_header.NBPP # previously verified to be one of 8, 16, 32, 64
bpp = int(nbpp/8) # bytes per pixel per band
pvtype = image_header.PVTYPE
raw_dtype = get_raw_dtype()
formatted_dtype = raw_dtype
band_count = len(image_header.Bands)
formatted_bands = band_count
# is it one of the assembled complex types?
complex_order = get_complex_order()
if complex_order:
formatted_dtype = numpy.dtype('complex64')
formatted_bands = int(band_count/2)
# is there an LUT?
lut = get_lut_info()
if lut is not None:
formatted_dtype = lut.dtype
formatted_bands = 1 if lut.ndim == 1 else lut.shape[1]
return raw_dtype, formatted_dtype, formatted_bands, complex_order, lut
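# Worked example (sketch): for PVTYPE='SI', NBPP=16 and two bands with ISUBCAT
# values 'I' then 'Q', this yields raw_dtype '>i2', formatted_dtype complex64,
# formatted_bands=1, complex_order='IQ' and lut=None.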
def _get_format_function(
raw_dtype: numpy.dtype,
complex_order: Optional[str],
lut: Optional[numpy.ndarray],
band_dimension: int) -> Optional[FormatFunction]:
"""
Gets the format function for use in a data segment.
Parameters
----------
raw_dtype : numpy.dtype
complex_order : None|str
lut : None|numpy.ndarray
band_dimension : int
Returns
-------
None|FormatFunction
"""
if complex_order is not None:
return ComplexFormatFunction(raw_dtype, complex_order, band_dimension=band_dimension)
elif lut is not None:
return SingleLUTFormatFunction(lut)
else:
return None
def _verify_image_segment_compatibility(
img0: Union[ImageSegmentHeader, ImageSegmentHeader0],
img1: Union[ImageSegmentHeader, ImageSegmentHeader0]) -> bool:
"""
Verify that the image segments are compatible from the data formatting
perspective.
Parameters
----------
img0 : ImageSegmentHeader
img1 : ImageSegmentHeader
Returns
-------
bool
"""
if len(img0.Bands) != len(img1.Bands):
return False
if img0.PVTYPE != img1.PVTYPE:
return False
if img0.IREP != img1.IREP:
return False
if img0.ICAT != img1.ICAT:
return False
if img0.NBPP != img1.NBPP:
return False
raw_dtype0, _, form_band0, comp_order0, lut0 = _get_dtype(img0)
raw_dtype1, _, form_band1, comp_order1, lut1 = _get_dtype(img1)
if raw_dtype0 != raw_dtype1:
return False
if form_band0 != form_band1:
return False
if (comp_order0 is None and comp_order1 is not None) or \
(comp_order0 is not None and comp_order1 is None):
return False
elif comp_order0 is not None and comp_order1 is not None and \
(comp_order0 != comp_order1):
return False
if (lut0 is None and lut1 is not None) or (lut0 is not None and lut1 is None):
return False
elif lut0 is not None and lut1 is not None and numpy.any(lut0 != lut1):
return False
return True
def _correctly_order_image_segment_collection(
image_headers: Sequence[Union[ImageSegmentHeader, ImageSegmentHeader0]]) -> Tuple[int, ...]:
"""
Determines the proper order, based on IALVL, for a collection of entries
which will be assembled into a composite image.
Parameters
----------
image_headers : Sequence[ImageSegmentHeader]
Returns
-------
Tuple[int, ...]
Raises
------
ValueError
        If the collection has incompatible IALVL values
"""
collection = [(entry.IALVL, orig_index) for orig_index, entry in enumerate(image_headers)]
collection = sorted(collection, key=lambda x: x[0]) # (stable) order by IALVL
if all(entry[0] == 0 for entry in collection):
# all IALVL is 0, and order doesn't matter
return tuple(range(len(image_headers)))
if all(entry0[0]+1 == entry1[0] for entry0, entry1 in zip(collection[:-1], collection[1:])):
# ordered, uninterrupted sequence of IALVL values
return tuple(entry[1] for entry in collection)
raise ValueError(
'Collection of (IALVL, image segment index) has\n\t'
        'neither all IALVL == 0, nor an uninterrupted sequence of IALVL values.\n\t'
'See {}'.format(collection))
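# Worked example (sketch): headers with IALVL values [2, 1, 3] sort into an
# uninterrupted sequence, so the returned order is (1, 0, 2); IALVL values
# [0, 2] would raise a ValueError.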
def _get_collection_element_coordinate_limits(
image_headers: Sequence[Union[ImageSegmentHeader, ImageSegmentHeader0]],
return_clevel: bool = False) -> Union[numpy.ndarray, Tuple[numpy.ndarray, int]]:
"""
For the given collection of image segments, get the relative coordinate
scheme of the form `[[start_row, end_row, start_column, end_column]]`.
This relies on inspection of `IALVL` and `ILOC` values for this
collection of image segments.
Parameters
----------
image_headers : Sequence[ImageSegmentHeader]
return_clevel : bool
Also calculate and return the clevel for this?
Returns
-------
block_definition: numpy.ndarray
of the form `[[start_row, end_row, start_column, end_column]]`.
clevel: int
The CLEVEL for this common coordinate system, only returned if
`return_clevel=True`
"""
the_indices = _correctly_order_image_segment_collection(image_headers)
block_definition = numpy.empty((len(the_indices), 4), dtype='int64')
for i, image_ind in enumerate(the_indices):
img_header = image_headers[image_ind]
rows = img_header.NROWS
cols = img_header.NCOLS
iloc = img_header.ILOC
if img_header.IALVL == 0 or i == 0:
previous_indices = numpy.zeros((4, ), dtype='int64')
else:
previous_indices = block_definition[i-1, :]
rel_row_start, rel_col_start = int(iloc[:5]), int(iloc[5:])
abs_row_start = rel_row_start + previous_indices[0]
abs_col_start = rel_col_start + previous_indices[2]
block_definition[i, :] = (abs_row_start, abs_row_start + rows, abs_col_start, abs_col_start + cols)
# now, re-normalize the coordinate system to be sensible
min_row = numpy.min(block_definition[:, 0])
min_col = numpy.min(block_definition[:, 2])
block_definition[:, 0:2:1] -= min_row
block_definition[:, 2:4:1] -= min_col
if return_clevel:
dim_size = numpy.max(block_definition)
if dim_size <= 2048:
clevel = 3
elif dim_size <= 8192:
clevel = 5
elif dim_size <= 65536:
clevel = 6
else:
clevel = 7
return block_definition, clevel
return block_definition
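# Worked example (sketch): two 100x200 segments, where the second has IALVL=1 and
# ILOC='0010000000' (row offset 100 relative to the first), produce the block
# definition [[0, 100, 0, 200], [100, 200, 0, 200]].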
class NITFDetails(object):
"""
This class allows for somewhat general parsing of the header information in
a NITF 2.0 or 2.1 file.
"""
__slots__ = (
'_file_name', '_file_object', '_close_after',
'_nitf_version', '_nitf_header', '_img_headers',
'img_subheader_offsets', 'img_subheader_sizes',
'img_segment_offsets', 'img_segment_sizes',
'graphics_subheader_offsets', 'graphics_subheader_sizes', # only 2.1
'graphics_segment_offsets', 'graphics_segment_sizes',
'symbol_subheader_offsets', 'symbol_subheader_sizes', # only 2.0
'symbol_segment_offsets', 'symbol_segment_sizes',
'label_subheader_offsets', 'label_subheader_sizes', # only 2.0
'label_segment_offsets', 'label_segment_sizes',
'text_subheader_offsets', 'text_subheader_sizes',
'text_segment_offsets', 'text_segment_sizes',
'des_subheader_offsets', 'des_subheader_sizes',
'des_segment_offsets', 'des_segment_sizes',
'res_subheader_offsets', 'res_subheader_sizes', # only 2.1
'res_segment_offsets', 'res_segment_sizes')
def __init__(self, file_object: Union[str, BinaryIO]):
"""
Parameters
----------
file_object : str|BinaryIO
file name for a NITF file, or file like object opened in binary mode.
"""
self._img_headers = None
self._file_name = None
self._file_object = None
self._close_after = False
if isinstance(file_object, str):
if not os.path.isfile(file_object):
raise SarpyIOError('Path {} is not a file'.format(file_object))
self._file_name = file_object
self._file_object = open(file_object, 'rb')
self._close_after = True
elif is_file_like(file_object):
self._file_object = file_object
if hasattr(file_object, 'name') and isinstance(file_object.name, str):
self._file_name = file_object.name
else:
self._file_name = '<file like object>'
else:
raise TypeError('file_object is required to be a file like object, or string path to a file.')
is_nitf_file, vers_string = is_nitf(self._file_object, return_version=True)
if not is_nitf_file:
raise SarpyIOError('Not a NITF file')
self._nitf_version = vers_string
if self._nitf_version not in ['02.10', '02.00']:
raise SarpyIOError('Unsupported NITF version {} for file {}'.format(self._nitf_version, self._file_name))
if self._nitf_version == '02.10':
self._file_object.seek(354, os.SEEK_SET) # offset to header length field
header_length = int(self._file_object.read(6))
# go back to the beginning of the file, and parse the whole header
self._file_object.seek(0, os.SEEK_SET)
header_string = self._file_object.read(header_length)
self._nitf_header = NITFHeader.from_bytes(header_string, 0)
elif self._nitf_version == '02.00':
self._file_object.seek(280, os.SEEK_SET) # offset to check if DEVT is defined
# advance past security tags
DWSG = self._file_object.read(6)
if DWSG == b'999998':
self._file_object.seek(40, os.SEEK_CUR)
# seek to header length field
self._file_object.seek(68, os.SEEK_CUR)
header_length = int(self._file_object.read(6))
self._file_object.seek(0, os.SEEK_SET)
header_string = self._file_object.read(header_length)
self._nitf_header = NITFHeader0.from_bytes(header_string, 0)
else:
raise ValueError(_unhandled_version_text.format(self._nitf_version))
if self._nitf_header.get_bytes_length() != header_length:
logger.critical(
'Stated header length of file {} is {},\n\t'
'while the interpreted header length is {}.\n\t'
'This will likely be accompanied by serious parsing failures,\n\t'
'and should be reported to the sarpy team for investigation.'.format(
self._file_name, header_length, self._nitf_header.get_bytes_length()))
cur_loc = header_length
# populate image segment offset information
cur_loc, self.img_subheader_offsets, self.img_subheader_sizes, \
self.img_segment_offsets, self.img_segment_sizes = self._element_offsets(
cur_loc, self._nitf_header.ImageSegments)
# populate graphics segment offset information - only version 2.1
cur_loc, self.graphics_subheader_offsets, self.graphics_subheader_sizes, \
self.graphics_segment_offsets, self.graphics_segment_sizes = self._element_offsets(
cur_loc, getattr(self._nitf_header, 'GraphicsSegments', None))
# populate symbol segment offset information - only version 2.0
cur_loc, self.symbol_subheader_offsets, self.symbol_subheader_sizes, \
self.symbol_segment_offsets, self.symbol_segment_sizes = self._element_offsets(
cur_loc, getattr(self._nitf_header, 'SymbolsSegments', None))
# populate label segment offset information - only version 2.0
cur_loc, self.label_subheader_offsets, self.label_subheader_sizes, \
self.label_segment_offsets, self.label_segment_sizes = self._element_offsets(
cur_loc, getattr(self._nitf_header, 'LabelsSegments', None))
# populate text segment offset information
cur_loc, self.text_subheader_offsets, self.text_subheader_sizes, \
self.text_segment_offsets, self.text_segment_sizes = self._element_offsets(
cur_loc, self._nitf_header.TextSegments)
# populate data extension offset information
cur_loc, self.des_subheader_offsets, self.des_subheader_sizes, \
self.des_segment_offsets, self.des_segment_sizes = self._element_offsets(
cur_loc, self._nitf_header.DataExtensions)
        # populate reserved extension offset information - only version 2.1
cur_loc, self.res_subheader_offsets, self.res_subheader_sizes, \
self.res_segment_offsets, self.res_segment_sizes = self._element_offsets(
cur_loc, getattr(self._nitf_header, 'ReservedExtensions', None))
@staticmethod
def _element_offsets(
cur_loc: int,
item_array_details: Union[_ItemArrayHeaders, None]
) -> Tuple[int, Optional[numpy.ndarray], Optional[numpy.ndarray], Optional[numpy.ndarray], Optional[numpy.ndarray]]:
if item_array_details is None:
return cur_loc, None, None, None, None
subhead_sizes = item_array_details.subhead_sizes
item_sizes = item_array_details.item_sizes
if subhead_sizes.size == 0:
return cur_loc, None, None, None, None
subhead_offsets = numpy.full(subhead_sizes.shape, cur_loc, dtype=numpy.int64)
subhead_offsets[1:] += numpy.cumsum(subhead_sizes[:-1]) + numpy.cumsum(item_sizes[:-1])
item_offsets = subhead_offsets + subhead_sizes
cur_loc = item_offsets[-1] + item_sizes[-1]
return cur_loc, subhead_offsets, subhead_sizes, item_offsets, item_sizes
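    # Worked example (sketch): with cur_loc=400, subhead_sizes=[100, 100] and
    # item_sizes=[1000, 2000], the subheader offsets are [400, 1500], the item
    # offsets are [500, 1600], and the returned cur_loc is 3600.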
@property
def file_name(self) -> Optional[str]:
"""
None|str: the file name, which may not be useful if the input was based
on a file like object
"""
return self._file_name
@property
def file_object(self) -> BinaryIO:
"""
BinaryIO: The binary file object
"""
return self._file_object
@property
def nitf_header(self) -> Union[NITFHeader, NITFHeader0]:
"""
NITFHeader: the nitf header object
"""
return self._nitf_header
@property
def img_headers(self) -> Union[None, List[ImageSegmentHeader], List[ImageSegmentHeader0]]:
"""
The image segment headers.
Returns
-------
None|List[ImageSegmentHeader]|List[ImageSegmentHeader0]
Only `None` in the unlikely event that there are no image segments.
"""
if self._img_headers is not None:
return self._img_headers
self._parse_img_headers()
# noinspection PyTypeChecker
return self._img_headers
@property
def nitf_version(self) -> str:
"""
str: The NITF version number.
"""
return self._nitf_version
def _parse_img_headers(self) -> None:
if self.img_segment_offsets is None or \
self._img_headers is not None:
return
self._img_headers = [self.parse_image_subheader(i) for i in range(self.img_subheader_offsets.size)]
def _fetch_item(
self,
name: str,
index: int,
offsets: numpy.ndarray,
sizes: numpy.ndarray) -> bytes:
if index >= offsets.size:
raise IndexError(
'There are only {0:d} {1:s}, invalid {1:s} position {2:d}'.format(
offsets.size, name, index))
the_offset = offsets[index]
the_size = sizes[index]
self._file_object.seek(int(the_offset), os.SEEK_SET)
the_item = self._file_object.read(int(the_size))
return the_item
def get_image_subheader_bytes(self, index: int) -> bytes:
"""
Fetches the image segment subheader at the given index.
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'image subheader',
index,
self.img_subheader_offsets,
self._nitf_header.ImageSegments.subhead_sizes)
def parse_image_subheader(self, index: int) -> Union[ImageSegmentHeader, ImageSegmentHeader0]:
"""
Parse the image segment subheader at the given index.
Parameters
----------
index : int
Returns
-------
ImageSegmentHeader|ImageSegmentHeader0
"""
ih = self.get_image_subheader_bytes(index)
if self.nitf_version == '02.10':
out = ImageSegmentHeader.from_bytes(ih, 0)
elif self.nitf_version == '02.00':
out = ImageSegmentHeader0.from_bytes(ih, 0)
else:
raise ValueError(_unhandled_version_text.format(self.nitf_version))
if out.is_masked:
# read the mask subheader bytes
the_offset = int(self.img_segment_offsets[index])
self._file_object.seek(the_offset, os.SEEK_SET)
the_size = struct.unpack('>I', self._file_object.read(4))[0]
self._file_object.seek(the_offset, os.SEEK_SET)
the_bytes = self._file_object.read(the_size)
# interpret the mask subheader
band_depth = len(out.Bands) if out.IMODE == 'S' else 1
blocks = out.NBPR*out.NBPC
out.mask_subheader = MaskSubheader.from_bytes(
the_bytes, 0, band_depth=band_depth, blocks=blocks)
return out
def get_image_bytes(self, index: int) -> bytes:
"""
Fetches the image bytes at the given index.
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'image data',
index,
self.img_segment_offsets,
self._nitf_header.ImageSegments.item_sizes)
def get_text_subheader_bytes(self, index: int) -> bytes:
"""
Fetches the text segment subheader at the given index.
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'text subheader',
index,
self.text_subheader_offsets,
self._nitf_header.TextSegments.subhead_sizes)
def get_text_bytes(self, index: int) -> bytes:
"""
Fetches the text extension segment bytes at the given index.
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'text segment',
index,
self.text_segment_offsets,
self._nitf_header.TextSegments.item_sizes)
def parse_text_subheader(self, index: int) -> Union[TextSegmentHeader, TextSegmentHeader0]:
"""
Parse the text segment subheader at the given index.
Parameters
----------
index : int
Returns
-------
TextSegmentHeader|TextSegmentHeader0
"""
th = self.get_text_subheader_bytes(index)
if self._nitf_version == '02.10':
return TextSegmentHeader.from_bytes(th, 0)
elif self._nitf_version == '02.00':
return TextSegmentHeader0.from_bytes(th, 0)
else:
raise ValueError(_unhandled_version_text.format(self.nitf_version))
def get_graphics_subheader_bytes(self, index: int) -> bytes:
"""
Fetches the graphics segment subheader at the given index (only version 2.1).
Parameters
----------
index : int
Returns
-------
bytes
"""
if self._nitf_version == '02.10':
return self._fetch_item(
'graphics subheader',
index,
self.graphics_subheader_offsets,
self._nitf_header.GraphicsSegments.subhead_sizes)
else:
raise ValueError('Only NITF version 02.10 has graphics segments')
def get_graphics_bytes(self, index: int) -> bytes:
"""
Fetches the graphics extension segment bytes at the given index (only version 2.1).
Parameters
----------
index : int
Returns
-------
bytes
"""
if self._nitf_version == '02.10':
return self._fetch_item(
'graphics segment',
index,
self.graphics_segment_offsets,
self._nitf_header.GraphicsSegments.item_sizes)
else:
raise ValueError('Only NITF version 02.10 has graphics segments')
def parse_graphics_subheader(self, index: int) -> GraphicsSegmentHeader:
"""
Parse the graphics segment subheader at the given index (only version 2.1).
Parameters
----------
index : int
Returns
-------
GraphicsSegmentHeader
"""
if self._nitf_version == '02.10':
gh = self.get_graphics_subheader_bytes(index)
return GraphicsSegmentHeader.from_bytes(gh, 0)
else:
raise ValueError('Only NITF version 02.10 has graphics segments')
def get_symbol_subheader_bytes(self, index: int) -> bytes:
"""
Fetches the symbol segment subheader at the given index (only version 2.0).
Parameters
----------
index : int
Returns
-------
bytes
"""
if self.nitf_version == '02.00':
return self._fetch_item(
'symbol subheader',
index,
self.symbol_subheader_offsets,
                self._nitf_header.SymbolsSegments.subhead_sizes)
else:
raise ValueError('Only NITF 02.00 has symbol elements.')
def get_symbol_bytes(self, index: int) -> bytes:
"""
Fetches the symbol extension segment bytes at the given index (only version 2.0).
Parameters
----------
index : int
Returns
-------
bytes
"""
if self.nitf_version == '02.00':
return self._fetch_item(
'symbol segment',
index,
self.symbol_segment_offsets,
                self._nitf_header.SymbolsSegments.item_sizes)
else:
raise ValueError('Only NITF 02.00 has symbol elements.')
def parse_symbol_subheader(self, index: int) -> SymbolSegmentHeader:
"""
Parse the symbol segment subheader at the given index (only version 2.0).
Parameters
----------
index : int
Returns
-------
SymbolSegmentHeader
"""
if self.nitf_version == '02.00':
gh = self.get_symbol_subheader_bytes(index)
return SymbolSegmentHeader.from_bytes(gh, 0)
else:
raise ValueError('Only NITF 02.00 has symbol elements.')
def get_label_subheader_bytes(self, index: int) -> bytes:
"""
Fetches the label segment subheader at the given index (only version 2.0).
Parameters
----------
index : int
Returns
-------
bytes
"""
if self.nitf_version == '02.00':
return self._fetch_item(
'label subheader',
index,
self.label_subheader_offsets,
                self._nitf_header.LabelsSegments.subhead_sizes)
else:
raise ValueError('Only NITF 02.00 has label elements.')
def get_label_bytes(self, index: int) -> bytes:
"""
Fetches the label extension segment bytes at the given index (only version 2.0).
Parameters
----------
index : int
Returns
-------
bytes
"""
if self.nitf_version == '02.00':
return self._fetch_item(
'label segment',
index,
self.label_segment_offsets,
                self._nitf_header.LabelsSegments.item_sizes)
else:
            raise ValueError('Only NITF 02.00 has label elements.')
def parse_label_subheader(self, index: int) -> LabelSegmentHeader:
"""
Parse the label segment subheader at the given index (only version 2.0).
Parameters
----------
index : int
Returns
-------
LabelSegmentHeader
"""
if self.nitf_version == '02.00':
gh = self.get_label_subheader_bytes(index)
return LabelSegmentHeader.from_bytes(gh, 0)
else:
raise ValueError('Only NITF 02.00 has label elements.')
def get_des_subheader_bytes(self, index: int) -> bytes:
"""
Fetches the data extension segment subheader bytes at the given index.
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'des subheader',
index,
self.des_subheader_offsets,
self._nitf_header.DataExtensions.subhead_sizes)
def get_des_bytes(self, index: int) -> bytes:
"""
Fetches the data extension segment bytes at the given index.
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'des',
index,
self.des_segment_offsets,
self._nitf_header.DataExtensions.item_sizes)
def parse_des_subheader(self, index: int) -> Union[DataExtensionHeader, DataExtensionHeader0]:
"""
Parse the data extension segment subheader at the given index.
Parameters
----------
index : int
Returns
-------
DataExtensionHeader|DataExtensionHeader0
"""
dh = self.get_des_subheader_bytes(index)
if self.nitf_version == '02.10':
return DataExtensionHeader.from_bytes(dh, 0)
elif self.nitf_version == '02.00':
return DataExtensionHeader0.from_bytes(dh, 0)
else:
raise ValueError(_unhandled_version_text.format(self.nitf_version))
def get_res_subheader_bytes(self, index: int) -> bytes:
"""
Fetches the reserved extension segment subheader bytes at the given index (only version 2.1).
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'res subheader',
index,
self.res_subheader_offsets,
self._nitf_header.ReservedExtensions.subhead_sizes)
def get_res_bytes(self, index: int) -> bytes:
"""
Fetches the reserved extension segment bytes at the given index (only version 2.1).
Parameters
----------
index : int
Returns
-------
bytes
"""
return self._fetch_item(
'res',
index,
self.res_segment_offsets,
self._nitf_header.ReservedExtensions.item_sizes)
def parse_res_subheader(self, index: int) -> Union[ReservedExtensionHeader, ReservedExtensionHeader0]:
"""
Parse the reserved extension subheader at the given index (only version 2.1).
Parameters
----------
index : int
Returns
-------
ReservedExtensionHeader|ReservedExtensionHeader0
"""
rh = self.get_res_subheader_bytes(index)
if self.nitf_version == '02.10':
return ReservedExtensionHeader.from_bytes(rh, 0)
elif self.nitf_version == '02.00':
return ReservedExtensionHeader0.from_bytes(rh, 0)
else:
raise ValueError('Unhandled version {}.'.format(self.nitf_version))
def get_headers_json(self) -> dict:
"""
Get a json (i.e. dict) representation of the NITF header elements.
Returns
-------
dict
"""
out = OrderedDict([('header', self._nitf_header.to_json()), ])
if self.img_subheader_offsets is not None:
out['Image_Subheaders'] = [
self.parse_image_subheader(i).to_json() for i in range(self.img_subheader_offsets.size)]
if self.graphics_subheader_offsets is not None:
out['Graphics_Subheaders'] = [
self.parse_graphics_subheader(i).to_json() for i in range(self.graphics_subheader_offsets.size)]
if self.symbol_subheader_offsets is not None:
out['Symbol_Subheaders'] = [
self.parse_symbol_subheader(i).to_json() for i in range(self.symbol_subheader_offsets.size)]
if self.label_subheader_offsets is not None:
out['Label_Subheaders'] = [
self.parse_label_subheader(i).to_json() for i in range(self.label_subheader_offsets.size)]
if self.text_subheader_offsets is not None:
out['Text_Subheaders'] = [
self.parse_text_subheader(i).to_json() for i in range(self.text_subheader_offsets.size)]
if self.des_subheader_offsets is not None:
out['DES_Subheaders'] = [
self.parse_des_subheader(i).to_json() for i in range(self.des_subheader_offsets.size)]
if self.res_subheader_offsets is not None:
out['RES_Subheaders'] = [
self.parse_res_subheader(i).to_json() for i in range(self.res_subheader_offsets.size)]
return out
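    # Illustrative usage sketch (hypothetical path):
    #   >>> details = NITFDetails('example.ntf')
    #   >>> details.nitf_version  # e.g. '02.10'
    #   >>> headers = details.img_headers  # parsed image segment subheaders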
def __del__(self):
if self._close_after:
self._close_after = False
# noinspection PyBroadException
try:
self._file_object.close()
except Exception:
pass
class NITFReader(BaseReader):
"""
A reader implementation based around array-type image data fetching for
NITF 2.0 or 2.1 files.
**Significantly revised in version 1.3.0** to accommodate the new data segment
paradigm. General NITF support is improved from previous version, but there
remain unsupported edge cases.
"""
_maximum_number_of_images = None
unsupported_compressions = ('I1', 'C1', 'C4', 'C6', 'C7', 'M1', 'M4', 'M6', 'M7')
__slots__ = (
'_nitf_details', '_unsupported_segments', '_image_segment_collections',
'_reverse_axes', '_transpose_axes', '_image_segment_data_segments')
def __init__(
self,
nitf_details: Union[str, BinaryIO, NITFDetails],
reader_type="OTHER",
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Union[None, Tuple[int, ...]] = None):
"""
Parameters
----------
nitf_details : str|BinaryIO|NITFDetails
The NITFDetails object or path to a nitf file.
reader_type : str
What type of reader is this? e.g. "SICD", "SIDD", "OTHER"
reverse_axes : None|Sequence[int]
Any entries should be restricted to `{0, 1}`. The presence of
`0` means to reverse the rows (in the raw sense), and the presence
of `1` means to reverse the columns (in the raw sense).
transpose_axes : None|Tuple[int, ...]
            If present, this should only be `(1, 0)`.
"""
self._image_segment_data_segments = {}
try:
_ = self._delete_temp_files
# something has already defined this, so it's already ready
except AttributeError:
self._delete_temp_files = []
try:
_ = self._nitf_details
# something has already defined this, so it's already ready
except AttributeError:
if isinstance(nitf_details, str) or is_file_like(nitf_details):
nitf_details = NITFDetails(nitf_details)
if not isinstance(nitf_details, NITFDetails):
raise TypeError('The input argument for NITFReader must be a NITFDetails object.')
self._nitf_details = nitf_details
if self._nitf_details.img_headers is None:
raise SarpyIOError(
'The input NITF has no image segments,\n\t'
'so there is no image data to be read.')
if reverse_axes is not None:
if isinstance(reverse_axes, int):
reverse_axes = (reverse_axes, )
for entry in reverse_axes:
if not 0 <= entry < 2:
raise ValueError('reverse_axes values must be restricted to `{0, 1}`.')
self._reverse_axes = reverse_axes
if transpose_axes is not None:
if transpose_axes != (1, 0):
raise ValueError('transpose_axes, if not None, must be (1, 0)')
self._transpose_axes = transpose_axes
# find image segments which we can not support, for whatever reason
self._unsupported_segments = self.check_for_compliance()
if len(self._unsupported_segments) == len(self.nitf_details.img_headers):
raise SarpyIOError('There are no supported image segments in NITF file {}'.format(self.file_name))
# our supported images are assembled into collections for joint presentation
self._image_segment_collections = self.find_image_segment_collections()
if self._maximum_number_of_images is not None and \
len(self._image_segment_collections) > self._maximum_number_of_images:
raise SarpyIOError(
'Images in this NITF are grouped together in {} collections,\n\t'
'which exceeds the maximum number of collections permitted ({})\n\t'
'by class {} implementation'.format(
len(self._image_segment_collections), self._maximum_number_of_images, self.__class__))
self.verify_collection_compliance()
data_segments = self.get_data_segments()
BaseReader.__init__(self, data_segments, reader_type=reader_type, close_segments=True)
@property
def nitf_details(self) -> NITFDetails:
"""
NITFDetails: The NITF details object.
"""
return self._nitf_details
def get_image_header(self, index: int) -> Union[ImageSegmentHeader, ImageSegmentHeader0]:
"""
Gets the image subheader at the specified index.
Parameters
----------
index : int
Returns
-------
ImageSegmentHeader|ImageSegmentHeader0
"""
return self.nitf_details.img_headers[index]
@property
def file_name(self) -> Optional[str]:
return self._nitf_details.file_name
@property
def file_object(self) -> BinaryIO:
"""
BinaryIO: the binary file like object from which we are reading
"""
return self._nitf_details.file_object
@property
def unsupported_segments(self) -> Tuple[int, ...]:
"""
Tuple[int, ...]: The image segments deemed not supported.
"""
return self._unsupported_segments
@property
def image_segment_collections(self) -> Tuple[Tuple[int, ...]]:
"""
The definition for how image segments are grouped together to form the
output image collection.
Each entry corresponds to a single output image, and the entry defines
the image segment indices which are combined to make up the output image.
Returns
-------
Tuple[Tuple[int, ...]]
"""
return self._image_segment_collections
def can_use_memmap(self) -> bool:
"""
Can a memmap be used? This is only supported and/or sensible in the case
that the file-like object represents a local file.
Returns
-------
bool
"""
return is_real_file(self.nitf_details.file_object)
def _read_file_data(self, start_bytes: int, byte_length: int) -> bytes:
initial_loc = self.file_object.tell()
self.file_object.seek(start_bytes, os.SEEK_SET)
the_bytes = self.file_object.read(byte_length)
self.file_object.seek(initial_loc)
return the_bytes
def _check_image_segment_for_compliance(
self,
index: int,
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0]) -> bool:
"""
Checks whether the image segment can be (or should be) opened.
Parameters
----------
index : int
The image segment index (for logging)
img_header : ImageSegmentHeader|ImageSegmentHeader0
The image segment header
Returns
-------
bool
"""
out = True
if img_header.NBPP not in (8, 16, 32, 64):
# TODO: is this really true? What about the compression situation?
# numpy basically only supports traditional typing
logger.error(
'Image segment at index {} has bits per pixel per band {},\n\t'
'only 8, 16, 32, 64 are supported.'.format(index, img_header.NBPP))
out = False
if img_header.is_compressed:
if PIL_Image is None:
logger.error(
'Image segment at index {} has IC value {},\n\t'
'and PIL cannot be imported.\n\t'
'Currently, compressed image segments require PIL.'.format(
index, img_header.IC))
out = False
if img_header.IC in self.unsupported_compressions:
logger.error(
'Image segment at index {} has IC value `{}`,\n\t'
'which is not supported.'.format(index, img_header.IC))
out = False
return out
def check_for_compliance(self) -> Tuple[int, ...]:
"""
Gets indices of image segments that cannot (or should not) be opened.
Returns
-------
Tuple[int, ...]
"""
out = []
for index, img_header in enumerate(self.nitf_details.img_headers):
if not self._check_image_segment_for_compliance(index, img_header):
out.append(index)
return tuple(out)
def _construct_block_bounds(self, image_segment_index: int) -> List[Tuple[int, int, int, int]]:
image_header = self.get_image_header(image_segment_index)
# noinspection PyTypeChecker
return _construct_block_bounds(image_header)
def _get_mask_details(
self,
image_segment_index: int) -> Tuple[Optional[numpy.ndarray], int, int]:
"""
Gets the mask offset details.
Parameters
----------
image_segment_index : int
Returns
-------
mask_offsets : Optional[numpy.ndarray]
The mask byte offset from the end of the mask subheader definition.
If `IMODE = S`, then this is two-dimensional, otherwise it is one
dimensional
exclude_value : int
The offset value for excluded block, should always be `0xFFFFFFFF`.
additional_offset : int
The additional offset from the beginning of the image segment data,
necessary to account for the presence of mask subheader.
"""
image_header = self.get_image_header(image_segment_index)
exclude_value = 0xFFFFFFFF
if image_header.is_masked:
offset_shift = image_header.mask_subheader.IMDATOFF
if image_header.mask_subheader.BMR is not None:
mask_offsets = image_header.mask_subheader.BMR
elif image_header.mask_subheader.TMR is not None:
mask_offsets = image_header.mask_subheader.TMR
else:
raise ValueError(
                    'Image segment at index {} is marked as masked,\n\t'
'but neither BMR nor TMR is defined'.format(image_segment_index))
if mask_offsets.ndim != 2:
raise ValueError('Expected two dimensional raw mask offsets array')
if mask_offsets.shape[0] == 1:
mask_offsets = numpy.reshape(mask_offsets, (-1, ))
return mask_offsets, exclude_value, offset_shift
else:
return None, exclude_value, 0
def _get_dtypes(
self,
image_segment_index: int) -> Tuple[numpy.dtype, numpy.dtype, int, Optional[str], Optional[numpy.ndarray]]:
image_header = self.get_image_header(image_segment_index)
return _get_dtype(image_header)
def _get_transpose(self, formatted_bands: int) -> Optional[Tuple[int, ...]]:
if self._transpose_axes is None:
return None
elif formatted_bands > 1:
return self._transpose_axes + (2,)
else:
return self._transpose_axes
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def get_format_function(
self,
raw_dtype: numpy.dtype,
complex_order: Optional[str],
lut: Optional[numpy.ndarray],
band_dimension: int,
image_segment_index: Optional[int] = None,
**kwargs) -> Optional[FormatFunction]:
return _get_format_function(raw_dtype, complex_order, lut, band_dimension)
def _verify_image_segment_compatibility(self, index0: int, index1: int) -> bool:
img0 = self.get_image_header(index0)
img1 = self.get_image_header(index1)
return _verify_image_segment_compatibility(img0, img1)
def find_image_segment_collections(self) -> Tuple[Tuple[int, ...]]:
"""
Determines the image segments, other than those specifically excluded in
`unsupported_segments` property value. It is implicitly assumed that the
elements of a given entry are ordered so that IALVL values are sensible.
Note that in the default implementation, every image segment is simply
considered separately.
Returns
-------
        Tuple[Tuple[int, ...]]
"""
out = []
for index in range(len(self.nitf_details.img_headers)):
if index not in self.unsupported_segments:
out.append((index, ))
return tuple(out)
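    # Worked example (sketch): with five image segments where index 2 is
    # unsupported, this default implementation yields ((0,), (1,), (3,), (4,)).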
def verify_collection_compliance(self) -> None:
"""
Verify that image segments collections are compatible.
Raises
-------
ValueError
"""
all_compatible = True
for collection_index, the_indices in enumerate(self.image_segment_collections):
if len(the_indices) == 1:
continue
compatible = True
for the_index in the_indices[1:]:
t_compat = self._verify_image_segment_compatibility(the_indices[0], the_index)
if not t_compat:
logger.error(
                        'Collection index {} has incompatible image segments at indices {} and {}'.format(
collection_index, the_indices[0], the_index))
compatible &= t_compat
all_compatible &= compatible
if not all_compatible:
raise ValueError('Image segment collection incompatibilities')
def _get_collection_element_coordinate_limits(self, collection_index: int) -> numpy.ndarray:
"""
For the given image segment collection, as defined in the
`image_segment_collections` property value, get the relative coordinate
scheme of the form `[[start_row, end_row, start_column, end_column]]`.
This relies on inspection of `IALVL` and `ILOC` values for this
collection of image segments.
Parameters
----------
collection_index : int
The index into the `image_segment_collection` list.
Returns
-------
block_definition: numpy.ndarray
of the form `[[start_row, end_row, start_column, end_column]]`.
"""
image_headers = [self.nitf_details.img_headers[image_ind]
for image_ind in self.image_segment_collections[collection_index]]
# noinspection PyTypeChecker
return _get_collection_element_coordinate_limits(image_headers, return_clevel=False)
def _handle_jpeg2k_no_mask(self, image_segment_index: int, apply_format: bool) -> DataSegment:
# NOTE: it appears that the PIL to numpy array conversion will rearrange
# bands to be in the final dimension, regardless of storage particulars?
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'B' or image_header.IC != 'C8':
raise ValueError(
'Requires IMODE = `B` and IC = `C8`, got `{}` and `{}` at image segment index {}'.format(
image_header.IMODE, image_header.IC, image_segment_index))
if PIL_Image is None:
raise ValueError('Image segment {} is compressed, which requires PIL'.format(image_segment_index))
# get bytes offset to this image segment (relative to start of file)
offset = self.nitf_details.img_segment_offsets[image_segment_index]
image_segment_size = self.nitf_details.img_segment_sizes[image_segment_index]
raw_bands = len(image_header.Bands)
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(image_segment_index)
raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2)
# the block details will be handled by the jpeg2000 compression scheme,
# just read everything and decompress
the_bytes = self._read_file_data(offset, image_segment_size)
# create a memmap, and extract all of our jpeg data into it as appropriate
fi, path_name = mkstemp(suffix='.sarpy_cache', text=False)
self._delete_temp_files.append(path_name)
mem_map = numpy.memmap(
path_name, dtype=raw_dtype, mode='w+', offset=0,
shape=_get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2))
# noinspection PyUnresolvedReferences
img = PIL_Image.open(BytesIO(the_bytes))
data = numpy.asarray(img)
mem_map[:] = data[:image_header.NROWS, :image_header.NCOLS]
mem_map.flush() # write all the data to the file
del mem_map # clean up the memmap
os.close(fi)
if apply_format:
format_function = self.get_format_function(
raw_dtype, complex_order, lut, 2,
image_segment_index=image_segment_index)
reverse_axes = self._reverse_axes
if self._transpose_axes is None:
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, formatted_bands, band_dimension=2)
else:
formatted_shape = _get_shape(image_header.NCOLS, image_header.NROWS, formatted_bands, band_dimension=2)
transpose_axes = self._get_transpose(formatted_bands)
else:
format_function = None
reverse_axes = None
transpose_axes = None
formatted_dtype = raw_dtype
formatted_shape = raw_shape
return NumpyMemmapSegment(
path_name, 0, raw_dtype, raw_shape, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r', close_file=True)
def _handle_jpeg2k_with_mask(self, image_segment_index: int, apply_format: bool) -> DataSegment:
# NOTE: it appears that the PIL to numpy array conversion will rearrange
# bands to be in the final dimension, regardless of storage particulars?
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'B' or image_header.IC != 'M8':
raise ValueError(
'Requires IMODE = `B` and IC = `M8`, got `{}` and `{}` at image segment index {}'.format(
image_header.IMODE, image_header.IC, image_segment_index))
if PIL_Image is None:
raise ValueError('Image segment {} is compressed, which requires PIL'.format(image_segment_index))
# get mask definition details
mask_offsets, exclude_value, additional_offset = self._get_mask_details(image_segment_index)
# get bytes offset to this image segment (relative to start of file)
offset = self.nitf_details.img_segment_offsets[image_segment_index]
image_segment_size = self.nitf_details.img_segment_sizes[image_segment_index]
raw_bands = len(image_header.Bands)
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(image_segment_index)
# Establish block pixel bounds
block_bounds = self._construct_block_bounds(image_segment_index)
assert isinstance(block_bounds, list)
if not (isinstance(mask_offsets, numpy.ndarray) and mask_offsets.ndim == 1):
raise ValueError('Got unexpected mask offsets `{}`'.format(mask_offsets))
if len(block_bounds) != len(mask_offsets):
raise ValueError('Got mismatch between block definition and mask offsets definition')
# jpeg2000 compression, read everything excluding the mask
the_bytes = self._read_file_data(offset+additional_offset, image_segment_size-additional_offset)
raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2)
# create a memmap, and extract all of our jpeg data into it as appropriate
fi, path_name = mkstemp(suffix='.sarpy_cache', text=False)
self._delete_temp_files.append(path_name)
mem_map = numpy.memmap(
path_name, dtype=raw_dtype, mode='w+', offset=0,
shape=_get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2))
next_jpeg_block = 0
for mask_index, (mask_offset, block_bound) in enumerate(zip(mask_offsets, block_bounds)):
if mask_offset == exclude_value:
continue # just skip it, because it is masked out
start_bytes = mask_offset # TODO: verify that we don't need to account for mask definition length
end_bytes = len(the_bytes) if mask_index == len(mask_offsets)-1 else mask_offsets[mask_index + 1]
# noinspection PyUnresolvedReferences
img = PIL_Image.open(BytesIO(the_bytes[start_bytes:end_bytes]))
# handle block padding situation
row_start, row_end = block_bound[0], min(block_bound[1], image_header.NROWS)
col_start, col_end = block_bound[2], min(block_bound[3], image_header.NCOLS)
mem_map[row_start: row_end, col_start:col_end] = \
numpy.asarray(img)[0:row_end - row_start, 0:col_end - col_start]
next_jpeg_block += 1
mem_map.flush() # write all the data to the file
del mem_map # clean up the memmap
os.close(fi)
if apply_format:
format_function = self.get_format_function(
raw_dtype, complex_order, lut, 2,
image_segment_index=image_segment_index)
reverse_axes = self._reverse_axes
if self._transpose_axes is None:
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, formatted_bands, band_dimension=2)
else:
formatted_shape = _get_shape(image_header.NCOLS, image_header.NROWS, formatted_bands, band_dimension=2)
transpose_axes = self._get_transpose(formatted_bands)
else:
format_function = None
reverse_axes = None
transpose_axes = None
formatted_dtype = raw_dtype
formatted_shape = raw_shape
return NumpyMemmapSegment(
path_name, 0, raw_dtype, raw_shape,
formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r', close_file=True)
def _handle_jpeg(self, image_segment_index: int, apply_format: bool) -> DataSegment:
# NOTE: it appears that the PIL to numpy array conversion will rearrange
# bands to be in the final dimension, regardless of storage particulars?
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE not in ['B', 'P'] or image_header.IC not in ['C3', 'C5', 'M3', 'M5']:
raise ValueError(
'Requires IMODE in `(B, P)` and IC in `(C3, C5, M3, M5)`,\n\t'
'got `{}` and `{}` at image segment index {}'.format(
image_header.IMODE, image_header.IC, image_segment_index))
if PIL_Image is None:
raise ValueError('Image segment {} is compressed, which requires PIL'.format(image_segment_index))
# get bytes offset to this image segment (relative to start of file)
offset = self.nitf_details.img_segment_offsets[image_segment_index]
image_segment_size = self.nitf_details.img_segment_sizes[image_segment_index]
raw_bands = len(image_header.Bands)
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(image_segment_index)
# Establish block pixel bounds
block_bounds = self._construct_block_bounds(image_segment_index)
assert isinstance(block_bounds, list)
raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2)
# get mask definition details
mask_offsets, exclude_value, additional_offset = self._get_mask_details(image_segment_index)
# jpeg compression, read everything (skipping mask) and find the jpeg delimiters
the_bytes = self._read_file_data(offset+additional_offset, image_segment_size-additional_offset)
        jpeg_delimiters = find_jpeg_delimiters(the_bytes)
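        # NB: each delimiter entry is assumed (from the usage below) to be a
        # (start, end) byte-offset pair, relative to the_bytes, bracketing a
        # single SOI/EOI delimited jpeg blob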
# validate our discovered delimiters and the mask offsets
if mask_offsets is not None:
if not (isinstance(mask_offsets, numpy.ndarray) and mask_offsets.ndim == 1):
raise ValueError('Got unexpected mask offsets `{}`'.format(mask_offsets))
if len(block_bounds) != len(mask_offsets):
raise ValueError('Got mismatch between block definition and mask offsets definition')
# TODO: verify that we don't need to account for mask definition length
anticipated_jpeg_indices = [index for index, entry in enumerate(mask_offsets)
if entry != exclude_value]
if len(jpeg_delimiters) != len(anticipated_jpeg_indices):
raise ValueError(
'Found different number of jpeg delimiters ({})\n\t'
'than populated blocks ({}) in masked image segment {}'.format(
len(jpeg_delimiters), len(anticipated_jpeg_indices), image_segment_index))
for jpeg_delim, mask_index in zip(jpeg_delimiters, anticipated_jpeg_indices):
                if mask_offsets[mask_index] != jpeg_delim[0]:
raise ValueError(
'Populated mask offsets ({})\n\t'
'do not agree with discovered jpeg offsets ({})\n\t'
'with mask subheader length {}'.format(jpeg_delim, mask_offsets, additional_offset))
else:
if len(jpeg_delimiters) != len(block_bounds):
raise ValueError(
'Found different number of jpeg delimiters ({}) than blocks ({}) in image segment {}'.format(
len(jpeg_delimiters), len(block_bounds), image_segment_index))
mask_offsets = [entry[0] for entry in jpeg_delimiters]
# create a memmap, and extract all of our jpeg data into it as appropriate
fi, path_name = mkstemp(suffix='.sarpy_cache', text=False)
self._delete_temp_files.append(path_name)
mem_map = numpy.memmap(
path_name, dtype=raw_dtype, mode='w+', offset=0,
shape=_get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2))
if image_header.is_masked:
mem_map.fill(0) # TODO: missing value?
next_jpeg_block = 0
for mask_offset, block_bound in zip(mask_offsets, block_bounds):
if mask_offset == exclude_value:
continue # just skip it, it's masked out
jpeg_delim = jpeg_delimiters[next_jpeg_block]
# noinspection PyUnresolvedReferences
the_image_bytes = the_bytes[jpeg_delim[0]:jpeg_delim[1]]
img = PIL_Image.open(BytesIO(the_image_bytes))
# handle block padding situation
row_start, row_end = block_bound[0], min(block_bound[1], image_header.NROWS)
col_start, col_end = block_bound[2], min(block_bound[3], image_header.NCOLS)
mem_map[row_start:row_end, col_start:col_end] = \
numpy.asarray(img)[0:row_end - row_start, 0:col_end - col_start]
next_jpeg_block += 1
mem_map.flush() # write all the data to the file
del mem_map # clean up the memmap
os.close(fi)
if apply_format:
format_function = self.get_format_function(
raw_dtype, complex_order, lut, 2,
image_segment_index=image_segment_index)
reverse_axes = self._reverse_axes
if self._transpose_axes is None:
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, formatted_bands, band_dimension=2)
else:
formatted_shape = _get_shape(image_header.NCOLS, image_header.NROWS, formatted_bands, band_dimension=2)
transpose_axes = self._get_transpose(formatted_bands)
else:
format_function = None
reverse_axes = None
transpose_axes = None
formatted_dtype = raw_dtype
formatted_shape = raw_shape
return NumpyMemmapSegment(
path_name, 0, raw_dtype, raw_shape,
formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r', close_file=False)
def _handle_no_compression(self, image_segment_index: int, apply_format: bool) -> DataSegment:
# NB: Natural order inside the block is (bands, rows, columns)
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE not in ['B', 'R', 'P'] or image_header.IC not in ['NC', 'NM']:
raise ValueError(
'Requires IMODE in `(B, R, P)` and IC in `(NC, NM)`,\n\t'
'got `{}` and `{}` at image segment index {}'.format(
image_header.IMODE, image_header.IC, image_segment_index))
raw_bands = len(image_header.Bands)
# get bytes offset to this image segment (relative to start of file)
offset = self.nitf_details.img_segment_offsets[image_segment_index]
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(image_segment_index)
can_use_memmap = self.can_use_memmap()
block_bounds = self._construct_block_bounds(image_segment_index)
assert isinstance(block_bounds, list)
block_size = image_header.get_uncompressed_block_size()
if image_header.IMODE == 'B':
# order inside the block is (bands, rows, columns)
raw_band_dimension = 0
elif image_header.IMODE == 'R':
# order inside the block is (rows, bands, columns)
raw_band_dimension = 1
elif image_header.IMODE == 'P':
# order inside the block is (rows, columns, bands)
raw_band_dimension = 2
else:
raise ValueError('Unhandled IMODE `{}`'.format(image_header.IMODE))
raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=raw_band_dimension)
# get mask definition details
mask_offsets, exclude_value, additional_offset = self._get_mask_details(image_segment_index)
block_offsets = mask_offsets if mask_offsets is not None else \
numpy.arange(len(block_bounds), dtype='int64')*block_size
# noinspection PyUnresolvedReferences
if not (isinstance(block_offsets, numpy.ndarray) and block_offsets.ndim == 1):
raise ValueError('Got unexpected block offsets `{}`'.format(block_offsets))
if len(block_bounds) != len(block_offsets):
raise ValueError('Got mismatch between block definition and block offsets definition')
final_block_ending = numpy.max(block_offsets[block_offsets != exclude_value]) + block_size + additional_offset
populated_ending = self.nitf_details.img_segment_sizes[image_segment_index]
if final_block_ending != populated_ending:
raise ValueError(
'Got mismatch between anticipated size {} and populated size {}\n\t'
'for image segment {}'.format(
final_block_ending, populated_ending, image_segment_index))
# determine output particulars
if apply_format:
format_function = self.get_format_function(
raw_dtype, complex_order, lut, raw_band_dimension,
image_segment_index=image_segment_index)
use_transpose = self._transpose_axes
use_reverse = self._reverse_axes
if self._transpose_axes is None:
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, formatted_bands, band_dimension=2)
else:
formatted_shape = _get_shape(image_header.NCOLS, image_header.NROWS, formatted_bands, band_dimension=2)
else:
format_function = None
use_transpose = None
use_reverse = None
formatted_dtype = raw_dtype
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2)
# account for rearrangement of bands to final dimension
if raw_bands == 1:
transpose_axes = use_transpose
reverse_axes = use_reverse
elif image_header.IMODE == 'B':
# order inside the block is (bands, rows, columns)
transpose_axes = (1, 2, 0) if use_transpose is None else (2, 1, 0)
reverse_axes = None if use_reverse is None else tuple(entry + 1 for entry in use_reverse)
elif image_header.IMODE == 'R':
# order inside the block is (rows, bands, columns)
transpose_axes = (0, 2, 1) if use_transpose is None else (2, 0, 1)
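            # map the user-facing (row, col) reverse axes onto the raw
            # (rows, bands, columns) layout: row -> axis 0, col -> axis 2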
reverse_mapping = {0: 0, 1: 2}
reverse_axes = None if use_reverse is None else \
tuple(reverse_mapping[entry] for entry in use_reverse)
elif image_header.IMODE == 'P':
transpose_axes = None if use_transpose is None else use_transpose + (2, )
reverse_axes = use_reverse
else:
raise ValueError('Unhandled IMODE `{}`'.format(image_header.IMODE))
if len(block_bounds) == 1:
# there is just a single block, no need to obfuscate behind a
# block aggregate
if can_use_memmap:
return NumpyMemmapSegment(
self.file_object, offset, raw_dtype, raw_shape,
formatted_dtype, formatted_shape, reverse_axes=reverse_axes,
transpose_axes=transpose_axes, format_function=format_function,
mode='r', close_file=False)
else:
return FileReadDataSegment(
self.file_object, offset, raw_dtype, raw_shape,
formatted_dtype, formatted_shape, reverse_axes=reverse_axes,
transpose_axes=transpose_axes, format_function=format_function,
close_file=False)
data_segments = []
child_arrangement = []
# Linux runs out of file handles for some NITF files,
# so pre-detect that catastrophic failure
if system_os == 'Linux':
# Get the max number of file handles allowed by the OS
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
for block_index, (block_definition, block_offset) in enumerate(zip(block_bounds, block_offsets)):
if block_offset == exclude_value:
continue # just skip this, since it's masked out
if system_os == 'Linux' and block_index > hard/6 - 4:
continue # stop loading segments for fear of too many file handles
b_rows = block_definition[1] - block_definition[0]
b_cols = block_definition[3] - block_definition[2]
b_raw_shape = _get_shape(b_rows, b_cols, raw_bands, band_dimension=raw_band_dimension)
total_offset = offset + additional_offset + block_offset
if can_use_memmap:
child_segment = NumpyMemmapSegment(
self.file_object, total_offset, raw_dtype, b_raw_shape,
raw_dtype, b_raw_shape, mode='r', close_file=False)
else:
child_segment = FileReadDataSegment(
self.file_object, total_offset, raw_dtype, b_raw_shape,
raw_dtype, b_raw_shape, close_file=False)
# handle block padding situation
row_start, row_end = block_definition[0], min(block_definition[1], image_header.NROWS)
col_start, col_end = block_definition[2], min(block_definition[3], image_header.NCOLS)
if row_end == block_definition[1] and col_end == block_definition[3]:
data_segments.append(child_segment)
else:
subset_def = _get_subscript_def(
0, row_end - row_start, 0, col_end - col_start, raw_bands, raw_band_dimension)
data_segments.append(
SubsetSegment(child_segment, subset_def, 'raw', close_parent=True, squeeze=False))
# determine arrangement of these children
child_def = _get_subscript_def(
row_start, row_end, col_start, col_end, raw_bands, raw_band_dimension)
child_arrangement.append(child_def)
return BlockAggregateSegment(
data_segments, child_arrangement, 'raw', 0, raw_shape,
formatted_dtype, formatted_shape, reverse_axes=reverse_axes,
transpose_axes=transpose_axes, format_function=format_function,
close_children=True)
def _handle_imode_s_jpeg(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'S' or image_header.IC not in ['C3', 'C5', 'M3', 'M5']:
raise ValueError(
'Requires IMODE = `S` and IC in `(C3, C5, M3, M5)`,\n\t'
'got `{}` and `{}` at image segment index {}'.format(
image_header.IMODE, image_header.IC, image_segment_index))
if PIL_Image is None:
raise ValueError('Image segment {} is compressed, which requires PIL'.format(image_segment_index))
# get bytes offset to this image segment (relative to start of file)
offset = self.nitf_details.img_segment_offsets[image_segment_index]
image_segment_size = self.nitf_details.img_segment_sizes[image_segment_index]
raw_bands = len(image_header.Bands)
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(image_segment_index)
# Establish block pixel bounds
block_bounds = self._construct_block_bounds(image_segment_index)
assert isinstance(block_bounds, list)
# get mask definition details
mask_offsets, exclude_value, additional_offset = self._get_mask_details(image_segment_index)
# NB: if defined, mask_offsets is a 2-d array here
# jpeg compression, read everything (skipping mask) and find the jpeg delimiters
the_bytes = self._read_file_data(offset+additional_offset, image_segment_size-additional_offset)
jpeg_delimiters = find_jpeg_delimiters(the_bytes)
# validate our discovered delimiters and the mask offsets
if mask_offsets is not None:
if not (isinstance(mask_offsets, numpy.ndarray) and mask_offsets.ndim == 2):
raise ValueError('Got unexpected mask offsets `{}`'.format(mask_offsets))
if len(block_bounds) != mask_offsets.shape[1]:
raise ValueError('Got mismatch between block definition and mask offsets definition')
# TODO: verify that we don't need to account for mask definition length
anticipated_jpeg_indices = [index for index, entry in enumerate(mask_offsets.ravel())
if entry != exclude_value]
if len(jpeg_delimiters) != len(anticipated_jpeg_indices):
raise ValueError(
'Found different number of jpeg delimiters ({})\n\t'
'than populated blocks ({}) in masked image segment {}'.format(
len(jpeg_delimiters), len(anticipated_jpeg_indices), image_segment_index))
for jpeg_delim, mask_index in zip(jpeg_delimiters, anticipated_jpeg_indices):
                if mask_offsets.ravel()[mask_index] != jpeg_delim[0]:
raise ValueError(
'Populated mask offsets ({})\n\t'
'do not agree with discovered jpeg offsets ({})\n\t'
'with mask subheader length {}'.format(jpeg_delim, mask_offsets, additional_offset))
else:
if len(jpeg_delimiters) != len(block_bounds)*raw_bands:
raise ValueError(
'Found different number of jpeg delimiters ({}) than blocks,\n\t'
'bands ({}, {}) in image segment {}'.format(
len(jpeg_delimiters), len(block_bounds), raw_bands, image_segment_index))
mask_offsets = numpy.reshape(
numpy.array([entry[0] for entry in jpeg_delimiters], dtype='int64'),
(raw_bands, len(block_bounds)))
# create a memmap, and extract all of our jpeg data into it as appropriate
raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2)
fi, path_name = mkstemp(suffix='.sarpy_cache', text=False)
self._delete_temp_files.append(path_name)
mem_map = numpy.memmap(
path_name, dtype=raw_dtype, mode='w+', offset=0,
shape=raw_shape)
if image_header.is_masked:
mem_map.fill(0) # TODO: missing value?
next_jpeg_block = 0
for band_number in range(raw_bands):
            for mask_offset, block_bound in zip(mask_offsets[band_number, :], block_bounds):
if mask_offset == exclude_value:
continue # just skip it, it's masked out
jpeg_delim = jpeg_delimiters[next_jpeg_block]
# noinspection PyUnresolvedReferences
img = PIL_Image.open(BytesIO(the_bytes[jpeg_delim[0]:jpeg_delim[1]]))
# handle block padding situation
row_start, row_end = block_bound[0], min(block_bound[1], image_header.NROWS)
col_start, col_end = block_bound[2], min(block_bound[3], image_header.NCOLS)
mem_map[row_start: row_end, col_start:col_end, band_number] = \
numpy.asarray(img)[0:row_end - row_start, 0:col_end - col_start]
next_jpeg_block += 1
mem_map.flush() # write all the data to the file
del mem_map # clean up the memmap
os.close(fi)
if apply_format:
format_function = self.get_format_function(
raw_dtype, complex_order, lut, 2,
image_segment_index=image_segment_index)
reverse_axes = self._reverse_axes
if self._transpose_axes is None:
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, formatted_bands, band_dimension=2)
else:
formatted_shape = _get_shape(image_header.NCOLS, image_header.NROWS, formatted_bands, band_dimension=2)
transpose_axes = self._get_transpose(formatted_bands)
else:
format_function = None
reverse_axes = None
transpose_axes = None
formatted_dtype = raw_dtype
formatted_shape = raw_shape
return NumpyMemmapSegment(
path_name, 0, raw_dtype, raw_shape,
formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r', close_file=False)
def _handle_imode_s_no_compression(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'S' or image_header.IC not in ['NC', 'NM']:
raise ValueError(
'Requires IMODE = `S` and IC in `(NC, NM)`, got `{}` and `{}` at image segment index {}'.format(
image_header.IMODE, image_header.IC, image_segment_index))
# get bytes offset to this image segment (relative to start of file)
offset = self.nitf_details.img_segment_offsets[image_segment_index]
raw_bands = len(image_header.Bands)
raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2)
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(image_segment_index)
can_use_memmap = self.can_use_memmap()
block_bounds = self._construct_block_bounds(image_segment_index)
assert isinstance(block_bounds, list)
# get mask definition details
mask_offsets, exclude_value, additional_offset = self._get_mask_details(image_segment_index)
block_size = image_header.get_uncompressed_block_size()
if mask_offsets is not None:
block_offsets = mask_offsets
else:
block_offsets = numpy.zeros((raw_bands, len(block_bounds)), dtype='int64')
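            # band-sequential layout: all blocks of band 0 first, then all
            # blocks of band 1, and so on, each occupying block_size bytes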
for i in range(raw_bands):
block_offsets[i, :] = i*(block_size*len(block_bounds)) + \
numpy.arange(len(block_bounds), dtype='int64')*block_size
if not (isinstance(block_offsets, numpy.ndarray) and block_offsets.ndim == 2):
raise ValueError('Got unexpected block offsets `{}`'.format(block_offsets))
if len(block_bounds) != block_offsets.shape[1]:
raise ValueError('Got mismatch between block definition and block offsets definition')
block_offsets_flat = block_offsets.ravel()
final_block_ending = numpy.max(block_offsets_flat[block_offsets_flat != exclude_value]) + \
block_size + additional_offset
populated_ending = self.nitf_details.img_segment_sizes[image_segment_index]
if final_block_ending != populated_ending:
raise ValueError(
'Got mismatch between anticipated size {} and populated size {}\n\t'
'for image segment {}'.format(
final_block_ending, populated_ending, image_segment_index))
band_segments = []
for band_number in range(raw_bands):
band_raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, 1, band_dimension=2)
data_segments = []
child_arrangement = []
for block_index, (block_definition, block_offset) in enumerate(
zip(block_bounds, block_offsets[band_number, :])):
if block_offset == exclude_value:
continue # just skip this, since it's masked out
b_rows = block_definition[1] - block_definition[0]
b_cols = block_definition[3] - block_definition[2]
b_raw_shape = _get_shape(b_rows, b_cols, 1, band_dimension=2)
total_offset = offset + additional_offset + block_offset
if can_use_memmap:
child_segment = NumpyMemmapSegment(
self.file_object, total_offset, raw_dtype, b_raw_shape,
raw_dtype, b_raw_shape, mode='r', close_file=False)
else:
child_segment = FileReadDataSegment(
self.file_object, total_offset, raw_dtype, b_raw_shape,
raw_dtype, b_raw_shape, close_file=False)
# handle block padding situation
row_start, row_end = block_definition[0], min(block_definition[1], image_header.NROWS)
col_start, col_end = block_definition[2], min(block_definition[3], image_header.NCOLS)
child_def = _get_subscript_def(
row_start, row_end, col_start, col_end, 1, 2)
child_arrangement.append(child_def)
if row_end == block_definition[1] and col_end == block_definition[3]:
data_segments.append(child_segment)
else:
subset_def = _get_subscript_def(
0, row_end - row_start, 0, col_end - col_start, 1, 2)
data_segments.append(
SubsetSegment(child_segment, subset_def, 'raw', close_parent=True, squeeze=False))
band_segments.append(BlockAggregateSegment(
data_segments, child_arrangement, 'raw', 0, band_raw_shape,
raw_dtype, band_raw_shape, close_children=True))
if apply_format:
format_function = self.get_format_function(
raw_dtype, complex_order, lut, 2,
image_segment_index=image_segment_index)
reverse_axes = self._reverse_axes
if self._transpose_axes is None:
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, formatted_bands, band_dimension=2)
else:
formatted_shape = _get_shape(image_header.NCOLS, image_header.NROWS, formatted_bands, band_dimension=2)
transpose_axes = self._get_transpose(formatted_bands)
else:
format_function = None
reverse_axes = None
transpose_axes = None
formatted_dtype = raw_dtype
formatted_shape = raw_shape
return BandAggregateSegment(
band_segments, 2, formatted_dtype=formatted_dtype, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, close_children=True)
def _create_data_segment_from_imode_b(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'B':
raise ValueError(
'Requires IMODE = `B`, got `{}` at image segment index {}'.format(
image_header.IMODE, image_segment_index))
# this supports any viable compression scheme
if image_header.IC in self.unsupported_compressions:
raise ValueError(
'Unsupported IC `{}` at image segment index {}'.format(
image_header.IC, image_segment_index))
if image_header.IC in ['NC', 'NM']:
return self._handle_no_compression(image_segment_index, apply_format)
elif image_header.IC in ['C3', 'C5', 'M3', 'M5']:
return self._handle_jpeg(image_segment_index, apply_format)
elif image_header.IC == 'C8':
return self._handle_jpeg2k_no_mask(image_segment_index, apply_format)
        elif image_header.IC == 'M8':
return self._handle_jpeg2k_with_mask(image_segment_index, apply_format)
else:
raise ValueError('Got unhandled IC `{}`'.format(image_header.IC))
def _create_data_segment_from_imode_p(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'P':
raise ValueError(
'Requires IMODE = `P`, got `{}` at image segment index {}'.format(
image_header.IMODE, image_segment_index))
if image_header.IC not in ['NC', 'NM', 'C3', 'M3', 'C5', 'M5']:
raise ValueError(
'IMODE is `P` and the IC is `{}` at image segment index {}'.format(
image_header.IC, image_segment_index))
if image_header.IC in ['NC', 'NM']:
return self._handle_no_compression(image_segment_index, apply_format)
elif image_header.IC in ['C3', 'C5', 'M3', 'M5']:
return self._handle_jpeg(image_segment_index, apply_format)
else:
raise ValueError('Got unhandled IC `{}`'.format(image_header.IC))
def _create_data_segment_from_imode_r(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'R':
raise ValueError(
'Requires IMODE = `R`, got `{}` at image segment index {}'.format(
image_header.IMODE, image_segment_index))
        if image_header.IC not in ['NC', 'NM']:
raise ValueError(
'IMODE is `R` and the image is compressed at image segment index {}'.format(
image_segment_index))
if image_header.IC in ['NC', 'NM']:
return self._handle_no_compression(image_segment_index, apply_format)
else:
raise ValueError('Got unhandled IC `{}`'.format(image_header.IC))
def _create_data_segment_from_imode_s(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'S':
raise ValueError(
'Requires IMODE = `S`, got `{}` at image segment index {}'.format(
image_header.IMODE, image_segment_index))
if image_header.IC not in ['NC', 'NM', 'C3', 'M3', 'C5', 'M5']:
raise ValueError(
'IMODE is `S` and the IC is `{}` at image segment index {}'.format(
image_header.IC, image_segment_index))
if len(image_header.Bands) < 2:
raise ValueError('IMODE S is only valid with multiple bands.')
if image_header.NBPC == 1 and image_header.NBPR == 1:
raise ValueError('IMODE S is only valid with multiple blocks.')
if image_header.IC in ['NC', 'NM']:
return self._handle_imode_s_no_compression(image_segment_index, apply_format)
elif image_header.IC in ['C3', 'C5', 'M3', 'M5']:
return self._handle_imode_s_jpeg(image_segment_index, apply_format)
else:
raise ValueError('Got unhandled IC `{}`'.format(image_header.IC))
def create_data_segment_for_image_segment(
self,
image_segment_index: int,
apply_format: bool) -> DataSegment:
"""
Creates the data segment for the given image segment.
For consistency of simple usage, any bands will be presented in the
final formatted/output dimension, regardless of the value of `apply_format`
or `IMODE`.
For compressed image segments, the `IMODE` has been
abstracted away, and the data segment will be consistent with the raw
shape having bands in the final dimension (analogous to `IMODE=P`).
Note that this also stores a reference to the produced data segment in
the `_image_segment_data_segments` dictionary.
Parameters
----------
image_segment_index : int
apply_format : bool
Leave data raw (False), or apply format function and global
`reverse_axes` and `transpose_axes` values?
Returns
-------
DataSegment
"""
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE == 'B':
out = self._create_data_segment_from_imode_b(image_segment_index, apply_format)
elif image_header.IMODE == 'P':
out = self._create_data_segment_from_imode_p(image_segment_index, apply_format)
elif image_header.IMODE == 'S':
out = self._create_data_segment_from_imode_s(image_segment_index, apply_format)
elif image_header.IMODE == 'R':
out = self._create_data_segment_from_imode_r(image_segment_index, apply_format)
else:
raise ValueError(
'Got unsupported IMODE `{}` at image segment index `{}`'.format(
image_header.IMODE, image_segment_index))
if image_segment_index in self._image_segment_data_segments:
logger.warning(
'Data segment for image segment index {} has already '
'been created.'.format(image_segment_index))
self._image_segment_data_segments[image_segment_index] = out
return out
def create_data_segment_for_collection_element(self, collection_index: int) -> DataSegment:
"""
Creates the data segment overarching the given segment collection.
Parameters
----------
collection_index : int
Returns
-------
DataSegment
"""
block = self.image_segment_collections[collection_index]
if len(block) == 1:
return self.create_data_segment_for_image_segment(block[0], True)
block_definition = self._get_collection_element_coordinate_limits(collection_index)
total_rows = int(numpy.max(block_definition[:, 1]))
total_columns = int(numpy.max(block_definition[:, 3]))
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(block[0])
format_function = self.get_format_function(raw_dtype, complex_order, lut, 2)
child_segments = []
child_arrangement = []
raw_bands = None
for img_index, block_def in zip(block, block_definition):
child_segment = self.create_data_segment_for_image_segment(img_index, False)
# NB: the bands in the formatted data will be in the final dimension
if raw_bands is None:
raw_bands = 1 if child_segment.formatted_ndim == 2 else \
child_segment.formatted_shape[2]
child_segments.append(child_segment)
child_arrangement.append(
_get_subscript_def(
int(block_def[0]), int(block_def[1]), int(block_def[2]), int(block_def[3]), raw_bands, 2))
transpose = self._get_transpose(formatted_bands)
raw_shape = (total_rows, total_columns) if raw_bands == 1 else (total_rows, total_columns, raw_bands)
formatted_shape = raw_shape[:2] if transpose is None else (raw_shape[1], raw_shape[0])
if formatted_bands > 1:
formatted_shape = formatted_shape + (formatted_bands, )
return BlockAggregateSegment(
child_segments, child_arrangement, 'raw', 0, raw_shape, formatted_dtype, formatted_shape,
reverse_axes=self._reverse_axes, transpose_axes=transpose, format_function=format_function,
close_children=True)
def get_data_segments(self) -> List[DataSegment]:
"""
Gets a data segment for each of these image segment collection.
Returns
-------
List[DataSegment]
"""
out = []
for index in range(len(self.image_segment_collections)):
out.append(self.create_data_segment_for_collection_element(index))
return out
def close(self) -> None:
self._image_segment_data_segments = None
BaseReader.close(self)
########
# base expected functionality for a module with an implemented Reader
def is_a(file_name: Union[str, BinaryIO]) -> Optional[NITFReader]:
"""
Tests whether a given file_name corresponds to a nitf file. Returns a
nitf reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
None|NITFReader
`NITFReader` instance if nitf file, `None` otherwise
"""
try:
nitf_details = NITFDetails(file_name)
logger.info('File {} is determined to be a nitf file.'.format(file_name))
return NITFReader(nitf_details)
except SarpyIOError:
# we don't want to catch parsing errors, for now
return None
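# A minimal usage sketch (the file name here is hypothetical):
#   reader = is_a('example.ntf')
#   if reader is not None:
#       data_segments = reader.get_data_segments()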
#####
# NITF writing elements
def interpolate_corner_points_string(
entry: numpy.ndarray,
rows: int,
cols: int,
icp: numpy.ndarray):
"""
Interpolate the corner points for the given subsection from
the given corner points. This supplies entries for the NITF headers.
Parameters
----------
entry : numpy.ndarray
        The subsection pixel bounds of the form `(row_start, row_stop, col_start, col_stop)`
rows : int
The number of rows in the parent image.
cols : int
The number of cols in the parent image.
icp : numpy.ndarray
The parent image corner points in geodetic coordinates.
Returns
-------
str
        A string suitable for the `IGEOLO` entry.
"""
if icp is None:
return ''
if icp.shape[1] == 2:
icp_new = numpy.zeros((icp.shape[0], 3), dtype=numpy.float64)
icp_new[:, :2] = icp
icp = icp_new
icp_ecf = geodetic_to_ecf(icp)
const = 1. / (rows * cols)
pattern = entry[numpy.array([(0, 2), (1, 2), (1, 3), (0, 3)], dtype=numpy.int64)]
out = []
for row, col in pattern:
pt_array = const * numpy.sum(icp_ecf *
(numpy.array([rows - row, row, row, rows - row]) *
numpy.array([cols - col, cols - col, col, col]))[:, numpy.newaxis], axis=0)
pt = LatLonType.from_array(ecf_to_geodetic(pt_array)[:2])
dms = pt.dms_format(frac_secs=False)
out.append('{0:02d}{1:02d}{2:02d}{3:s}'.format(*dms[0]) + '{0:03d}{1:02d}{2:02d}{3:s}'.format(*dms[1]))
return ''.join(out)
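# Illustrative note: each of the four interpolated corners renders as a
# 15-character DMS lat/lon pair (ddmmssX + dddmmssX), so a populated return
# value is the 60-character string expected for the IGEOLO field.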
def default_image_segmentation(rows: int, cols: int, row_limit: int) -> Tuple[Tuple[int, ...], ...]:
"""
Determine the appropriate segmentation for the image. This is driven
by the SICD/SIDD standard, and not the only generally feasible segmentation
scheme for other NITF file types.
Parameters
----------
rows : int
cols : int
row_limit : int
        It is assumed that this row limit follows the NITF guidelines
Returns
-------
Tuple[Tuple[int, ...], ...]
        Of the form `((row_start, row_end, col_start, col_end), ...)`
"""
im_segments = []
row_offset = 0
while row_offset < rows:
next_rows = min(rows, row_offset + row_limit)
im_segments.append((row_offset, next_rows, 0, cols))
row_offset = next_rows
return tuple(im_segments)
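# A hedged example of the resulting segmentation (values purely illustrative):
#   default_image_segmentation(rows=25000, cols=4000, row_limit=10000)
#   -> ((0, 10000, 0, 4000), (10000, 20000, 0, 4000), (20000, 25000, 0, 4000))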
def _flatten_bytes(value: Union[bytes, Sequence]) -> bytes:
if value is None:
return b''
elif isinstance(value, bytes):
return value
elif isinstance(value, Sequence):
return b''.join(_flatten_bytes(entry) for entry in value)
else:
raise TypeError('input must be a bytes object, or a sequence with bytes objects as leaves')
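# For example (illustrative only):
#   _flatten_bytes([b'ab', [b'cd', b'ef']]) == b'abcdef'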
class SubheaderManager(object):
"""
    Simple manager object for a NITF subheader and its associated information
in the NITF writing process.
Introduced in version 1.3.0.
"""
__slots__ = (
'_subheader', '_subheader_offset', '_subheader_size',
'_item_bytes', '_item_offset', '_item_size',
'_subheader_written', '_item_written')
item_bytes_required = True
"""
Are you required to provide the item bytes?
"""
subheader_type = None
"""
What is the type for the subheader?
"""
def __init__(self, subheader, item_bytes: Optional[bytes] = None):
if not isinstance(subheader, self.subheader_type):
raise TypeError(
'subheader must be of type {} for class {}'.format(
self.subheader_type, self.__class__))
self._subheader = subheader
self._subheader_size = self._subheader.get_bytes_length()
self._subheader_offset = None
self._item_offset = None
self._subheader_written = False
self._item_written = False
self._item_size = None
self._item_bytes = None
if item_bytes is None:
if self.item_bytes_required:
raise ValueError(
'item_bytes is required by class {}.'.format(
self.__class__))
else:
self.item_bytes = item_bytes
@property
def subheader(self):
"""
The subheader.
"""
return self._subheader
@property
def subheader_offset(self) -> Optional[int]:
"""
int: The subheader offset.
"""
return self._subheader_offset
@subheader_offset.setter
def subheader_offset(self, value) -> None:
if self._subheader_offset is not None:
raise ValueError("subheader_offset is read only after being initially defined.")
self._subheader_offset = int(value)
self._item_offset = self._subheader_offset + self.subheader_size
@property
def subheader_size(self) -> int:
"""
int: The subheader size
"""
return self._subheader_size
@property
def item_offset(self) -> Optional[int]:
"""
int: The item offset.
"""
return self._item_offset
@property
def item_size(self) -> Optional[int]:
"""
int: The item size
"""
return self._item_size
@item_size.setter
def item_size(self, value) -> None:
if self._item_size is not None:
raise ValueError("item_size is read only after being initially defined.")
self._item_size = int(value)
@property
def end_of_item(self) -> Optional[int]:
"""
int: The position of the end of respective item. This will be the
offset for the next element.
"""
if self._item_offset is None:
return None
elif self._item_size is None:
return None
return self.item_offset + self.item_size
@property
def subheader_written(self) -> bool:
"""
bool: Has this subheader been written?
"""
return self._subheader_written
@subheader_written.setter
def subheader_written(self, value) -> None:
value = bool(value)
if self._subheader_written and not value:
raise ValueError(
'subheader_written has already been set to True,\n\t'
'it cannot be reverted to False')
self._subheader_written = value
@property
def item_bytes(self) -> Optional[bytes]:
"""
None|bytes: The item bytes.
"""
return self._item_bytes
@item_bytes.setter
def item_bytes(self, value: Union[bytes, Sequence]) -> None:
if self._item_bytes is not None:
raise ValueError("item_bytes is read only after being initially defined.")
if value is None:
self._item_bytes = None
return
# TODO: verify the mask information, in the event that value is a sequence?
value = _flatten_bytes(value)
if self._item_size is not None and len(value) != self._item_size:
raise ValueError(
'item_bytes input has size {},\n\t'
'but item_size has been defined as {}.'.format(len(value), self._item_size))
self._item_bytes = value
self.item_size = len(value)
@property
def item_written(self) -> bool:
"""
bool: Has the item been written?
"""
return self._item_written
@item_written.setter
def item_written(self, value):
value = bool(value)
if self._item_written and not value:
raise ValueError(
'item_written has already been set to True,\n\t'
'it cannot be reverted to False')
self._item_written = value
def write_subheader(self, file_object: BinaryIO) -> None:
"""
Write the subheader, at its specified offset, to the file. If writing
occurs, the file location will be advanced to the end of the subheader
location.
Parameters
----------
file_object : BinaryIO
"""
if self.subheader_written:
return
if self.subheader_offset is None:
return # nothing to be done
the_bytes = self.subheader.to_bytes()
if len(the_bytes) != self._subheader_size:
raise ValueError(
'mismatch between the size of the subheader {}\n\t'
'and the anticipated size of the subheader {}'.format(len(the_bytes), self._subheader_size))
file_object.seek(self.subheader_offset, os.SEEK_SET)
file_object.write(the_bytes)
self.subheader_written = True
def write_item(self, file_object: BinaryIO) -> None:
"""
Write the item bytes (if populated), at its specified offset, to the
    file. This requires that the subheader has previously been written. If
writing occurs, the file location will be advanced to the end of the item
location.
Parameters
----------
file_object : BinaryIO
Returns
-------
None
"""
if self.item_written:
return
if self.item_offset is None:
return # nothing to be done
if self.item_bytes is None:
return # nothing to be done
if not self.subheader_written:
return # nothing to be done
file_object.seek(self.item_offset, os.SEEK_SET)
file_object.write(self.item_bytes)
self.item_written = True
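# A typical (illustrative) write sequence for a single manager, assuming the
# offset and payload have been determined elsewhere:
#   manager.subheader_offset = current_offset  # also fixes item_offset
#   manager.item_bytes = payload               # also sets item_size
#   manager.write_subheader(file_object)
#   manager.write_item(file_object)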
class ImageSubheaderManager(SubheaderManager):
item_bytes_required = False
subheader_type = ImageSegmentHeader
@property
def subheader(self) -> ImageSegmentHeader:
"""
ImageSegmentHeader: The image subheader. Any image mask subheader should
be populated in the `mask_subheader` property. The size of this will be
handled independently of the image bytes.
"""
return self._subheader
@property
def item_size(self) -> Optional[int]:
"""
int: The item size.
"""
return self._item_size
@item_size.setter
def item_size(self, value):
if self._item_size is not None:
logger.warning("item_size is read only after being initially defined.")
return
if self.subheader.mask_subheader is None:
self._item_size = int(value)
else:
self._item_size = int(value) + self.subheader.mask_subheader.get_bytes_length()
def write_subheader(self, file_object: BinaryIO) -> None:
if self.subheader_written:
return
SubheaderManager.write_subheader(self, file_object)
if self.subheader.mask_subheader is not None:
file_object.write(self.subheader.mask_subheader.to_bytes())
def write_item(self, file_object: BinaryIO) -> None:
if self.item_written:
return
if self.item_offset is None:
return
if self.item_bytes is None:
return
if not self.subheader_written:
return
if self.subheader.mask_subheader is None:
file_object.seek(self.item_offset, os.SEEK_SET)
else:
file_object.seek(
self.item_offset+self.subheader.mask_subheader.get_bytes_length(), os.SEEK_SET)
file_object.write(self.item_bytes)
self.item_written = True
class GraphicsSubheaderManager(SubheaderManager):
item_bytes_required = True
subheader_type = GraphicsSegmentHeader
@property
def subheader(self) -> GraphicsSegmentHeader:
return self._subheader
class TextSubheaderManager(SubheaderManager):
item_bytes_required = True
subheader_type = TextSegmentHeader
@property
def subheader(self) -> TextSegmentHeader:
return self._subheader
class DESSubheaderManager(SubheaderManager):
item_bytes_required = True
subheader_type = DataExtensionHeader
@property
def subheader(self) -> DataExtensionHeader:
return self._subheader
class RESSubheaderManager(SubheaderManager):
item_bytes_required = True
    subheader_type = ReservedExtensionHeader
@property
def subheader(self) -> ReservedExtensionHeader:
return self._subheader
class NITFWritingDetails(object):
"""
Manager for all the NITF subheader information.
    Note that doing anything which modifies the size of the headers after
    initialization (e.g. adding TREs) will not be reflected in the stored
    sizes and offsets.
Introduced in version 1.3.0.
"""
__slots__ = (
'_header', '_header_size', '_header_written', '_image_managers',
'_graphics_managers', '_text_managers', '_des_managers', '_res_managers',
'_image_segment_collections', '_image_segment_coordinates', '_collections_clevel')
def __init__(
self,
header: NITFHeader,
image_managers: Optional[Tuple[ImageSubheaderManager, ...]] = None,
image_segment_collections: Optional[Tuple[Tuple[int, ...], ...]] = None,
image_segment_coordinates: Optional[Tuple[Tuple[Tuple[int, ...], ...], ...]] = None,
graphics_managers: Optional[Tuple[GraphicsSubheaderManager, ...]] = None,
text_managers: Optional[Tuple[TextSubheaderManager, ...]] = None,
des_managers: Optional[Tuple[DESSubheaderManager, ...]] = None,
res_managers: Optional[Tuple[RESSubheaderManager, ...]] = None):
"""
Parameters
----------
header : NITFHeader
image_managers : Optional[Tuple[ImageSubheaderManager, ...]]
Should be provided, unless the desire is to write NITF without images
image_segment_collections: Optional[Tuple[Tuple[int, ...], ...]]
Presence contingent on presence of image_managers
image_segment_coordinates: Optional[Tuple[Tuple[Tuple[int, ...], ...], ...]]
Contingent on image_managers. This will be inferred if not provided,
and validated if provided.
graphics_managers: Optional[Tuple[GraphicsSubheaderManager, ...]]
text_managers: Optional[Tuple[TextSubheaderManager, ...]]
des_managers: Optional[Tuple[DESSubheaderManager, ...]]
res_managers: Optional[Tuple[RESSubheaderManager, ...]]
"""
self._collections_clevel = None
self._header = None
self._header_written = False
self._image_managers = None
self._image_segment_collections = None
self._image_segment_coordinates = None
self._graphics_managers = None
self._text_managers = None
self._des_managers = None
self._res_managers = None
self.header = header
self.image_managers = image_managers
self.image_segment_collections = image_segment_collections
self.image_segment_coordinates = image_segment_coordinates
self.graphics_managers = graphics_managers
self.text_managers = text_managers
self.des_managers = des_managers
self.res_managers = res_managers
# set nominal size arrays (for header size purposes), to be corrected later
self.set_all_sizes(require=False)
self._header_size = header.get_bytes_length() # type: int
@property
def header(self) -> NITFHeader:
"""
NITFHeader: The main NITF header. Note that doing anything that changes
        the size of that header (e.g. adding TREs) after initialization will
result in a broken state.
"""
return self._header
@header.setter
def header(self, value):
if self._header is not None:
raise ValueError('header is read-only')
if not isinstance(value, NITFHeader):
raise TypeError('header must be of type {}'.format(NITFHeader))
self._header = value
@property
def image_managers(self) -> Optional[Tuple[ImageSubheaderManager, ...]]:
return self._image_managers
@image_managers.setter
def image_managers(self, value):
if self._image_managers is not None:
raise ValueError('image_managers is read-only')
if value is None:
self._image_managers = None
return
if not isinstance(value, tuple):
raise TypeError('image_managers must be a tuple')
for entry in value:
if not isinstance(entry, ImageSubheaderManager):
raise TypeError('image_managers entries must be of type {}'.format(ImageSubheaderManager))
self._image_managers = value
@property
def image_segment_collections(self) -> Tuple[Tuple[int, ...]]:
"""
The definition for how image segments are grouped together to form the
aggregate images.
Each entry corresponds to a single aggregate image, and the entry defines
the image segment indices which are combined to make up the aggregate image.
This must be an ordered partitioning of the set `(0, ..., len(image_managers)-1)`.
Returns
-------
Tuple[Tuple[int, ...]]
"""
return self._image_segment_collections
@image_segment_collections.setter
def image_segment_collections(self, value):
if self._image_segment_collections is not None:
raise ValueError('image_segment_collections is read only')
if self.image_managers is None:
self._image_segment_collections = None
return
if not isinstance(value, tuple):
raise TypeError('image segment collection must be a tuple')
last_index = -1
for entry in value:
if not isinstance(entry, tuple):
raise TypeError('image segment collection must be a tuple of tuples')
if last_index == -1:
if entry[0] != 0:
raise ValueError('The first entry of image segment collection must start at 0.')
for item in entry:
if not isinstance(item, int) or item < 0:
raise TypeError('image segment collection must be a tuple of tuples of non-negative ints')
if item != last_index + 1:
raise ValueError('image segment collection entries must be arranged in ascending order')
last_index = item
if last_index != len(self.image_managers) - 1:
raise ValueError('Mismatch between the number of image segments and the collection entries')
self._image_segment_collections = value
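    # For example (illustrative): with four image segments forming two
    # aggregate images, a valid value is ((0, 1, 2), (3,)).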
@property
def image_segment_coordinates(self) -> Tuple[Tuple[Tuple[int, ...], ...], ...]:
"""
The image bounds for the segment collection. This is associated with the
`image_segment_collection` property.
Entry `image_segment_coordinates[i]` is associated with the ith aggregate
image. We have `image_segment_coordinates[i]` is a tuple of tuples of the
form
`((row_start, row_end, col_start, col_end)_j,
(row_start, row_end, col_start, col_end)_{j+1}, ...)`.
This indicates that the first image segment associated with
ith aggregate image is at index `j` covering the portion of the aggregate
image determined by bounds `(row_start, row_end, col_start, col_end)_j`,
the second image segment is at index `j+1` covering the portion of the
aggregate determined by bounds `(row_start, row_end, col_start, col_end)_{j+1}`,
and so on.
Returns
-------
Tuple[Tuple[Tuple[int, ...], ...], ...]:
"""
return self._image_segment_coordinates
@image_segment_coordinates.setter
def image_segment_coordinates(self, value):
if self._image_segment_coordinates is not None:
raise ValueError('image_segment_coordinates is read only')
if self.image_managers is None:
self._image_segment_coordinates = None
return
# create the anticipated version
anticipated = []
collections_clevel = []
for coll in self.image_segment_collections:
image_headers = [self.image_managers[image_ind].subheader for image_ind in coll]
coordinate_scheme, clevel = _get_collection_element_coordinate_limits(image_headers, return_clevel=True)
collections_clevel.append(clevel)
# noinspection PyTypeChecker
coordinate_scheme = tuple(tuple(entry) for entry in coordinate_scheme.tolist())
anticipated.append(coordinate_scheme)
self._collections_clevel = tuple(collections_clevel)
anticipated = tuple(anticipated)
if value is None:
self._image_segment_coordinates = anticipated
return
if not isinstance(value, tuple):
raise TypeError('image_segment_coordinates must be a tuple')
if len(value) != len(self.image_segment_collections):
raise ValueError(
'Lengths of image_segment_collections and image_segment_coordinates '
'must match')
for coords, antic in zip(value, anticipated):
if not isinstance(coords, tuple):
raise ValueError('image_segment_coordinates entries must be a tuple')
if len(coords) != len(antic):
raise ValueError(
'image_segment_collections entries and image_segment_coordinates '
'entries must have matching lengths')
if coords != antic:
raise ValueError(
'image_segment_coordinates does not match the anticipated '
'value\n\t{}\n\t{}'.format(value, anticipated))
self._image_segment_coordinates = value
@property
def graphics_managers(self) -> Optional[Tuple[GraphicsSubheaderManager, ...]]:
return self._graphics_managers
@graphics_managers.setter
def graphics_managers(self, value):
if self._graphics_managers is not None:
raise ValueError('graphics_managers is read-only')
if value is None:
self._graphics_managers = None
return
if not isinstance(value, tuple):
raise TypeError('graphics_managers must be a tuple')
for entry in value:
if not isinstance(entry, GraphicsSubheaderManager):
raise TypeError('graphics_managers entries must be of type {}'.format(GraphicsSubheaderManager))
self._graphics_managers = value
@property
def text_managers(self) -> Optional[Tuple[TextSubheaderManager, ...]]:
return self._text_managers
@text_managers.setter
def text_managers(self, value):
if self._text_managers is not None:
raise ValueError('text_managers is read-only')
if value is None:
self._text_managers = None
return
if not isinstance(value, tuple):
raise TypeError('text_managers must be a tuple')
for entry in value:
if not isinstance(entry, TextSubheaderManager):
raise TypeError('text_managers entries must be of type {}'.format(TextSubheaderManager))
self._text_managers = value
@property
def des_managers(self) -> Optional[Tuple[DESSubheaderManager, ...]]:
return self._des_managers
@des_managers.setter
def des_managers(self, value):
if self._des_managers is not None:
raise ValueError('des_managers is read-only')
if value is None:
self._des_managers = None
return
if not isinstance(value, tuple):
raise TypeError('des_managers must be a tuple')
for entry in value:
if not isinstance(entry, DESSubheaderManager):
raise TypeError('des_managers entries must be of type {}'.format(DESSubheaderManager))
self._des_managers = value
@property
def res_managers(self) -> Optional[Tuple[RESSubheaderManager, ...]]:
return self._res_managers
@res_managers.setter
def res_managers(self, value):
if self._res_managers is not None:
raise ValueError('res_managers is read-only')
if value is None:
self._res_managers = None
return
if not isinstance(value, tuple):
raise TypeError('res_managers must be a tuple')
for entry in value:
if not isinstance(entry, RESSubheaderManager):
raise TypeError('res_managers entries must be of type {}'.format(RESSubheaderManager))
self._res_managers = value
def _get_sizes(
self,
managers: Optional[Sequence[SubheaderManager]],
name: str,
require: bool = False) -> Tuple[Optional[numpy.ndarray], Optional[numpy.ndarray]]:
if managers is None:
return None, None
subhead_sizes = numpy.zeros((len(managers), ), dtype='int64')
item_sizes = numpy.zeros((len(managers), ), dtype='int64')
for i, entry in enumerate(managers):
subhead_sizes[i] = entry.subheader_size
item_size = entry.item_size
if item_size is None:
if require:
                    raise ValueError('item_size for {} at index {} is unset'.format(name, i))
else:
item_size = 0
item_sizes[i] = item_size
return subhead_sizes, item_sizes
def _write_items(self, managers: Optional[Sequence[SubheaderManager]], file_object: BinaryIO) -> None:
if managers is None:
return
for index, entry in enumerate(managers):
entry.write_subheader(file_object)
entry.write_item(file_object)
def _verify_item_written(self, managers: Optional[Sequence[SubheaderManager]], name: str) -> None:
if managers is None:
return
for index, entry in enumerate(managers):
if not entry.subheader_written:
logger.error('{} subheader at index {} not written'.format(name, index))
if not entry.item_written:
logger.error('{} data at index {} not written'.format(name, index))
def _get_image_sizes(self, require: bool = False) -> ImageSegmentsType:
"""
Gets the image sizes details for the NITF header.
Returns
-------
ImageSegmentsType
"""
subhead_sizes, item_sizes = self._get_sizes(self.image_managers, 'Image', require=require)
return ImageSegmentsType(subhead_sizes=subhead_sizes, item_sizes=item_sizes)
def _get_graphics_sizes(self, require: bool = False) -> GraphicsSegmentsType:
"""
Gets the graphics sizes details for the NITF header.
Parameters
----------
require : bool
Require all sizes to be set?
Returns
-------
        GraphicsSegmentsType
"""
subhead_sizes, item_sizes = self._get_sizes(self.graphics_managers, 'Graphics', require=require)
return GraphicsSegmentsType(subhead_sizes=subhead_sizes, item_sizes=item_sizes)
def _get_text_sizes(self, require: bool = False) -> TextSegmentsType:
"""
Gets the text sizes details for the NITF header.
Returns
-------
TextSegmentsType
"""
subhead_sizes, item_sizes = self._get_sizes(self.text_managers, 'Text', require=require)
return TextSegmentsType(subhead_sizes=subhead_sizes, item_sizes=item_sizes)
def _get_des_sizes(self, require: bool = False) -> DataExtensionsType:
"""
        Gets the DES sizes details for the NITF header.
        Returns
        -------
        DataExtensionsType
"""
subhead_sizes, item_sizes = self._get_sizes(self.des_managers, 'DES', require=require)
return DataExtensionsType(subhead_sizes=subhead_sizes, item_sizes=item_sizes)
def _get_res_sizes(self, require: bool = False) -> ReservedExtensionsType:
"""
        Gets the RES sizes details for the NITF header.
        Returns
        -------
        ReservedExtensionsType
"""
subhead_sizes, item_sizes = self._get_sizes(self.res_managers, 'RES', require=require)
return ReservedExtensionsType(subhead_sizes=subhead_sizes, item_sizes=item_sizes)
def set_first_image_offset(self) -> None:
"""
Sets the first image offset from the header length.
Returns
-------
None
"""
if self.image_managers is None:
return
self.image_managers[0].subheader_offset = self._header_size
def verify_images_have_no_compression(self) -> bool:
"""
Verify that there is no compression set for every image manager. That is,
we are going to directly write a NITF file.
Returns
-------
bool
"""
if self.image_managers is None:
return True
out = True
for entry in self.image_managers:
out &= (entry.subheader.IC in ['NC', 'NM'])
return out
def set_all_sizes(self, require: bool = False) -> None:
"""
This sets the nominal size information in the nitf header, and optionally
verifies that all the item_size values are set.
Parameters
----------
require : bool
            Require all sizes to be set? If `False`, then `0` will be used as
            a placeholder when populating the header size information.
Returns
-------
None
"""
self.header.ImageSegments = self._get_image_sizes(require=require)
self.header.GraphicsSegments = self._get_graphics_sizes(require=require)
self.header.TextSegments = self._get_text_sizes(require=require)
self.header.DataExtensions = self._get_des_sizes(require=require)
self.header.ReservedExtensions = self._get_res_sizes(require=require)
def verify_all_offsets(self, require: bool = False) -> bool:
"""
This sets and/or verifies all offsets.
Parameters
----------
require : bool
Require all offsets to be set?
Returns
-------
bool
"""
last_offset = self._header_size
if self.image_managers is not None:
for index, entry in enumerate(self.image_managers):
if entry.subheader_offset is None:
entry.subheader_offset = last_offset
elif entry.subheader_offset != last_offset:
raise ValueError(
'image manager at index {} has subheader offset which does not agree\n\t'
'with the end of the previous element'.format(index))
if entry.item_size is None or entry.item_size == 0:
if require:
raise ValueError(
'image manager at index {} has item_size unpopulated or populated as 0'.format(index))
else:
return False
last_offset = entry.end_of_item
if self.graphics_managers is not None:
for index, entry in enumerate(self.graphics_managers):
if entry.subheader_offset is None:
entry.subheader_offset = last_offset
elif entry.subheader_offset != last_offset:
raise ValueError(
'graphics manager at index {} has subheader offset which does not agree\n\t'
'with the end of the previous element'.format(index))
if entry.item_size is None or entry.item_size == 0:
if require:
raise ValueError(
'graphics manager at index {} has item_size unpopulated or populated as 0'.format(index))
else:
return False
last_offset = entry.end_of_item
if self.text_managers is not None:
for index, entry in enumerate(self.text_managers):
if entry.subheader_offset is None:
entry.subheader_offset = last_offset
elif entry.subheader_offset != last_offset:
raise ValueError(
'text manager at index {} has subheader offset which does not agree\n\t'
'with the end of the previous element'.format(index))
if entry.item_size is None or entry.item_size == 0:
if require:
raise ValueError(
'text manager at index {} has item_size unpopulated or populated as 0'.format(index))
else:
return False
last_offset = entry.end_of_item
if self.des_managers is not None:
for index, entry in enumerate(self.des_managers):
if entry.subheader_offset is None:
entry.subheader_offset = last_offset
elif entry.subheader_offset != last_offset:
raise ValueError(
'des manager at index {} has subheader offset which does not agree\n\t'
'with the end of the previous element'.format(index))
if entry.item_size is None or entry.item_size == 0:
if require:
raise ValueError(
'des manager at index {} has item_size unpopulated or populated as 0'.format(index))
else:
return False
last_offset = entry.end_of_item
if self.res_managers is not None:
for index, entry in enumerate(self.res_managers):
if entry.subheader_offset is None:
entry.subheader_offset = last_offset
elif entry.subheader_offset != last_offset:
raise ValueError(
'res manager at index {} has subheader offset which does not agree\n\t'
'with the end of the previous element'.format(index))
if entry.item_size is None or entry.item_size == 0:
if require:
raise ValueError(
'res manager at index {} has item_size unpopulated or populated as 0'.format(index))
else:
return False
last_offset = entry.end_of_item
self.header.FL = last_offset
return True
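# NB: offsets chain contiguously across the segment types in the order image,
# graphics, text, DES, RES. For consecutive managers A and B this enforces
# B.subheader_offset == A.end_of_item, and header.FL ends up equal to the
# final end_of_item value.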
def set_header_clevel(self) -> None:
"""
Sets the appropriate CLEVEL. This requires that header.FL (file size) has
been previously populated correctly (using :meth:`verify_all_offsets`).
Returns
-------
None
"""
file_size = self.header.FL
if file_size < 50 * (1024 ** 2):
mem_clevel = 3
elif file_size < (1024 ** 3):
mem_clevel = 5
elif file_size < 2 * (1024 ** 3):
mem_clevel = 6
elif file_size < 10 * (1024 ** 3):
mem_clevel = 7
else:
mem_clevel = 9
self.header.CLEVEL = mem_clevel if self._collections_clevel is None else \
max(mem_clevel, max(self._collections_clevel))
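# Illustrative note: under the size tiers above, a 600 MB file yields
# mem_clevel 5; if any collection requires a higher CLEVEL (tracked in
# self._collections_clevel), the maximum of the two is used.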
def write_header(self, file_object: BinaryIO, overwrite: bool = False) -> None:
"""
Write the main NITF header.
Parameters
----------
file_object : BinaryIO
overwrite : bool
Overwrite, if previously written?
Returns
-------
None
"""
if self._header_written and not overwrite:
return
self.set_header_clevel()  # set CLEVEL before serialization so it is reflected in the written bytes
the_bytes = self.header.to_bytes()
if len(the_bytes) != self._header_size:
raise ValueError(
'The anticipated header length {}\n\t'
'does not match the actual header length {}'.format(self._header_size, len(the_bytes)))
file_object.seek(0, os.SEEK_SET)
file_object.write(the_bytes)
self._header_written = True
def write_all_populated_items(self, file_object: BinaryIO) -> None:
"""
Write everything populated. This assumes that the header will start at the
beginning (position 0) of the file-like object.
Parameters
----------
file_object : BinaryIO
Returns
-------
None
"""
self.write_header(file_object, overwrite=False)
self._write_items(self.image_managers, file_object)
self._write_items(self.graphics_managers, file_object)
self._write_items(self.text_managers, file_object)
self._write_items(self.des_managers, file_object)
self._write_items(self.res_managers, file_object)
def verify_all_written(self) -> None:
if not self._header_written:
logger.error('NITF header not written')
self._verify_item_written(self.image_managers, 'image')
self._verify_item_written(self.graphics_managers, 'graphics')
self._verify_item_written(self.text_managers, 'text')
self._verify_item_written(self.des_managers, 'DES')
self._verify_item_written(self.res_managers, 'RES')
#############
# An array-based NITF 2.1 writer (for uncompressed images only)
class NITFWriter(BaseWriter):
__slots__ = (
'_file_object', '_file_name', '_in_memory',
'_nitf_writing_details', '_image_segment_data_segments')
def __init__(
self,
file_object: Union[str, BinaryIO],
writing_details: NITFWritingDetails,
check_existence: bool = True):
"""
Parameters
----------
file_object : str|BinaryIO
writing_details : NITFWritingDetails
check_existence : bool
Should we check if the given file already exists?
Raises
------
SarpyIOError
If the given `file_name` already exists
"""
self._nitf_writing_details = None
self._image_segment_data_segments = [] # type: List[DataSegment]
if isinstance(file_object, str):
if check_existence and os.path.exists(file_object):
raise SarpyIOError(
'Given file {} already exists, and a new NITF file cannot be created here.'.format(file_object))
file_object = open(file_object, 'wb')
if not is_file_like(file_object):
raise ValueError('file_object requires a file path or BinaryIO object')
self._file_object = file_object
if is_real_file(file_object):
self._file_name = file_object.name
self._in_memory = False
else:
self._file_name = None
self._in_memory = True
self.nitf_writing_details = writing_details
if not self.nitf_writing_details.verify_images_have_no_compression():
raise ValueError(
'Some image segments indicate compression in the image managers of the nitf_writing_details')
# set the image offset
self.nitf_writing_details.set_first_image_offset()
self._verify_image_segments()
self.verify_collection_compliance()
data_segments = self.get_data_segments()
self.nitf_writing_details.set_all_sizes(require=True) # NB: while no compression supported...
if not self._in_memory:
self.nitf_writing_details.write_all_populated_items(self._file_object)
BaseWriter.__init__(self, data_segments)
@property
def nitf_writing_details(self) -> NITFWritingDetails:
"""
NITFWritingDetails: The NITF subheader details.
"""
return self._nitf_writing_details
@nitf_writing_details.setter
def nitf_writing_details(self, value):
if self._nitf_writing_details is not None:
raise ValueError('nitf_writing_details is read-only')
if not isinstance(value, NITFWritingDetails):
raise TypeError('nitf_writing_details must be of type {}'.format(NITFWritingDetails))
self._nitf_writing_details = value
@property
def image_managers(self) -> Tuple[ImageSubheaderManager, ...]:
return self.nitf_writing_details.image_managers
def _set_image_size(self, image_segment_index: int, item_size: int) -> None:
"""
Sets the image size information. This should be without consideration
for the presence of an image mask, which is handled by with the image
subheader (if present).
Parameters
----------
image_segment_index : int
item_size : int
"""
self.image_managers[image_segment_index].item_size = item_size
@property
def image_segment_collections(self) -> Tuple[Tuple[int, ...]]:
"""
The definition for how image segments are grouped together to form the
aggregate image.
Each entry corresponds to a single output image, and the entry defines
the image segment indices which are combined to make up the output image.
Returns
-------
Tuple[Tuple[int, ...]]
"""
return self.nitf_writing_details.image_segment_collections
def get_image_header(self, index: int) -> ImageSegmentHeader:
"""
Gets the image subheader at the specified index.
Parameters
----------
index : int
Returns
-------
ImageSegmentHeader
"""
return self.image_managers[index].subheader
# noinspection PyMethodMayBeStatic
def _check_image_segment_for_compliance(
self,
index: int,
img_header: ImageSegmentHeader) -> None:
"""
Checks whether the image segment can be (or should be) opened.
Parameters
----------
index : int
The image segment index (for logging)
img_header : ImageSegmentHeader
The image segment header
"""
if img_header.NBPP not in (8, 16, 32, 64):
# numpy only supports these standard integer/float bit depths
raise ValueError(
'Image segment at index {} has bits per pixel per band {},\n\t'
'only 8, 16, 32, 64 are supported.'.format(index, img_header.NBPP))
if img_header.is_compressed:
if PIL_Image is None:
raise ValueError(
'Image segment at index {} has unsupported IC value {}.'.format(
index, img_header.IC))
if img_header.IMODE not in ['B', 'P', 'R']:
raise ValueError('Got unsupported IMODE `{}`'.format(img_header.IMODE))
if img_header.mask_subheader is None:
if img_header.IC != 'NC':
raise ValueError('Mask subheader not defined, but IC is not `NC`')
else:
if img_header.IC != 'NM':
raise ValueError('Mask subheader is defined, but IC is not `NM`')
def _verify_image_segments(self) -> None:
for index, entry in enumerate(self.image_managers):
if entry.item_bytes is not None:
raise ValueError(
'The item_bytes is populated for image segment {}.\n\t'
'This is incompatible with array-type image writing'.format(index))
subhead = entry.subheader
self._check_image_segment_for_compliance(index, subhead)
def _construct_block_bounds(self, image_segment_index: int) -> List[Tuple[int, int, int, int]]:
image_header = self.get_image_header(image_segment_index)
# noinspection PyTypeChecker
return _construct_block_bounds(image_header)
def _get_mask_details(
self,
image_segment_index: int) -> Tuple[Optional[numpy.ndarray], int, int]:
"""
Gets the mask offset details.
Parameters
----------
image_segment_index : int
Returns
-------
mask_offsets : Optional[numpy.ndarray]
The mask byte offsets from the end of the mask subheader definition.
If `IMODE = S`, then this is two-dimensional; otherwise it is
one-dimensional.
exclude_value : int
The offset value for an excluded block; this should always be `0xFFFFFFFF`.
additional_offset : int
The additional offset from the beginning of the image segment data,
necessary to account for the presence of the mask subheader.
"""
image_header = self.get_image_header(image_segment_index)
exclude_value = 0xFFFFFFFF
if image_header.is_masked:
offset_shift = image_header.mask_subheader.IMDATOFF
if image_header.mask_subheader.BMR is not None:
mask_offsets = image_header.mask_subheader.BMR
elif image_header.mask_subheader.TMR is not None:
mask_offsets = image_header.mask_subheader.TMR
else:
raise ValueError(
'Image segment at index {} is marked as masked,\n\t'
'but neither BMR nor TMR is defined'.format(image_segment_index))
if mask_offsets.ndim != 2:
raise ValueError('Expected two dimensional raw mask offsets array')
if mask_offsets.shape[0] == 1:
mask_offsets = numpy.reshape(mask_offsets, (-1, ))
return mask_offsets, exclude_value, offset_shift
else:
return None, exclude_value, 0
def _get_dtypes(
self,
image_segment_index: int) -> Tuple[numpy.dtype, numpy.dtype, int, Optional[str], Optional[numpy.ndarray]]:
image_header = self.get_image_header(image_segment_index)
return _get_dtype(image_header)
# noinspection PyMethodMayBeStatic, PyUnusedLocal
def get_format_function(
self,
raw_dtype: numpy.dtype,
complex_order: Optional[str],
lut: Optional[numpy.ndarray],
band_dimension: int,
image_segment_index: Optional[int] = None,
**kwargs) -> Optional[FormatFunction]:
return _get_format_function(raw_dtype, complex_order, lut, band_dimension)
def _verify_image_segment_compatibility(self, index0: int, index1: int) -> bool:
img0 = self.get_image_header(index0)
img1 = self.get_image_header(index1)
return _verify_image_segment_compatibility(img0, img1)
def verify_collection_compliance(self) -> None:
"""
Verify that image segments collections are compatible.
Raises
-------
ValueError
"""
all_compatible = True
for collection_index, the_indices in enumerate(self.image_segment_collections):
if len(the_indices) == 1:
continue
compatible = True
for the_index in the_indices[1:]:
t_compat = self._verify_image_segment_compatibility(the_indices[0], the_index)
if not t_compat:
logger.error(
'Collection index {} has image segments at indices {} and {} incompatible'.format(
collection_index, the_indices[0], the_index))
compatible &= t_compat
all_compatible &= compatible
if not all_compatible:
raise ValueError('Image segment collection incompatibilities')
def _get_collection_element_coordinate_limits(self, collection_index: int) -> Tuple[Tuple[int, ...], ...]:
"""
For the given image segment collection, as defined in the
`image_segment_collections` property value, get the relative coordinate
scheme of the form `[[start_row, end_row, start_column, end_column]]`.
This relies on inspection of `IALVL` and `ILOC` values for this
collection of image segments.
Parameters
----------
collection_index : int
The index into the `image_segment_collection` list.
Returns
-------
block_definition: Tuple[Tuple[int, ...], ...]
of the form `((start_row, end_row, start_column, end_column), ...)`.
"""
return self.nitf_writing_details.image_segment_coordinates[collection_index]
def _handle_no_compression(self, image_segment_index: int, apply_format: bool) -> DataSegment:
# NB: this should definitely set the image size in the manager.
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE not in ['B', 'R', 'P'] or image_header.IC not in ['NC', 'NM']:
raise ValueError(
'Requires IMODE in `(B, R, P)` and IC in `(NC, NM)`,\n\t'
'got `{}` and `{}` at image segment index {}'.format(
image_header.IMODE, image_header.IC, image_segment_index))
raw_bands = len(image_header.Bands)
# get bytes offset to this image segment (relative to start of file)
# this is only necessary if not in_memory processing
offset = 0 if self._in_memory else self.image_managers[image_segment_index].item_offset
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(image_segment_index)
block_bounds = self._construct_block_bounds(image_segment_index)
assert isinstance(block_bounds, list)
block_size = image_header.get_uncompressed_block_size()
if image_header.IMODE == 'B':
# order inside the block is (bands, rows, columns)
raw_band_dimension = 0
elif image_header.IMODE == 'R':
# order inside the block is (rows, bands, columns)
raw_band_dimension = 1
elif image_header.IMODE == 'P':
# order inside the block is (rows, columns, bands)
raw_band_dimension = 2
else:
raise ValueError('Unhandled IMODE `{}`'.format(image_header.IMODE))
raw_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=raw_band_dimension)
# get mask definition details
mask_offsets, exclude_value, additional_offset = self._get_mask_details(image_segment_index)
block_offsets = mask_offsets if mask_offsets is not None else \
numpy.arange(len(block_bounds), dtype='int64')*block_size
# noinspection PyUnresolvedReferences
if not (isinstance(block_offsets, numpy.ndarray) and block_offsets.ndim == 1):
raise ValueError('Got unexpected mask offsets `{}`'.format(block_offsets))
if len(block_bounds) != len(block_offsets):
raise ValueError('Got mismatch between block definition and block offsets definition')
final_block_ending = numpy.max(block_offsets[block_offsets != exclude_value]) + block_size + additional_offset
# set the details in the image manager...
self.image_managers[image_segment_index].item_size = final_block_ending - additional_offset
if not self._in_memory:
self.image_managers[image_segment_index].item_written = True
# NB: it's written in principle by the data segment
# determine output particulars
if apply_format:
format_function = self.get_format_function(
raw_dtype, complex_order, lut, raw_band_dimension,
image_segment_index=image_segment_index)
use_transpose = None
use_reverse = None
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, formatted_bands, band_dimension=2)
else:
format_function = None
use_transpose = None
use_reverse = None
formatted_dtype = raw_dtype
formatted_shape = _get_shape(image_header.NROWS, image_header.NCOLS, raw_bands, band_dimension=2)
# account for rearrangement of bands to final dimension
if raw_bands == 1:
transpose_axes = use_transpose
reverse_axes = use_reverse
elif image_header.IMODE == 'B':
# order inside the block is (bands, rows, columns)
transpose_axes = (1, 2, 0)
reverse_axes = None
elif image_header.IMODE == 'R':
# order inside the block is (rows, bands, columns)
transpose_axes = (0, 2, 1)
reverse_axes = None
elif image_header.IMODE == 'P':
transpose_axes = None
reverse_axes = use_reverse
else:
raise ValueError('Unhandled IMODE `{}`'.format(image_header.IMODE))
if len(block_bounds) == 1:
# there is just a single block, no need to obfuscate behind a
# block aggregate
if self._in_memory:
underlying_array = numpy.full(raw_shape, 0, dtype=raw_dtype)
return NumpyArraySegment(
underlying_array, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='w')
else:
return NumpyMemmapSegment(
self._file_name, offset, raw_dtype, raw_shape,
formatted_dtype, formatted_shape, reverse_axes=reverse_axes,
transpose_axes=transpose_axes, format_function=format_function,
mode='w', close_file=False)
data_segments = []
child_arrangement = []
for block_index, (block_definition, block_offset) in enumerate(zip(block_bounds, block_offsets)):
if block_offset == exclude_value:
continue # just skip this, since it's masked out
b_rows = block_definition[1] - block_definition[0]
b_cols = block_definition[3] - block_definition[2]
b_raw_shape = _get_shape(b_rows, b_cols, raw_bands, band_dimension=raw_band_dimension)
total_offset = offset + additional_offset + block_offset
if self._in_memory:
underlying_array = numpy.full(b_raw_shape, 0, dtype=raw_dtype)
child_segment = NumpyArraySegment(
underlying_array, raw_dtype, b_raw_shape, mode='w')
else:
child_segment = NumpyMemmapSegment(
self._file_name, total_offset, raw_dtype, b_raw_shape,
raw_dtype, b_raw_shape, mode='w', close_file=False)
# handle block padding situation
row_start, row_end = block_definition[0], min(block_definition[1], image_header.NROWS)
col_start, col_end = block_definition[2], min(block_definition[3], image_header.NCOLS)
# NB: we need not establish a subset segment for writing
if row_end == block_definition[1] and col_end == block_definition[3]:
data_segments.append(child_segment)
else:
subset_def = _get_subscript_def(
0, row_end - row_start, 0, col_end - col_start, raw_bands, raw_band_dimension)
data_segments.append(
SubsetSegment(child_segment, subset_def, 'raw', close_parent=True, squeeze=False))
# determine arrangement of these children
child_def = _get_subscript_def(
row_start, row_end, col_start, col_end, raw_bands, raw_band_dimension)
child_arrangement.append(child_def)
return BlockAggregateSegment(
data_segments, child_arrangement, 'raw', 0, raw_shape,
formatted_dtype, formatted_shape, reverse_axes=reverse_axes,
transpose_axes=transpose_axes, format_function=format_function,
close_children=True)
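# Illustrative note (hypothetical numbers): a 1024 x 1024 single-band image
# blocked as four 512 x 512 blocks yields four child segments arranged into a
# single BlockAggregateSegment; any masked block, whose offset equals
# exclude_value (0xFFFFFFFF), is simply omitted from the aggregate.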
def _create_data_segment_from_imode_b(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'B':
raise ValueError(
'Requires IMODE = `B`, got `{}` at image segment index {}'.format(
image_header.IMODE, image_segment_index))
if image_header.IC in ['NC', 'NM']:
return self._handle_no_compression(image_segment_index, apply_format)
else:
raise ValueError('Got unhandled IC `{}`'.format(image_header.IC))
def _create_data_segment_from_imode_p(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'P':
raise ValueError(
'Requires IMODE = `P`, got `{}` at image segment index {}'.format(
image_header.IMODE, image_segment_index))
if image_header.IC in ['NC', 'NM']:
return self._handle_no_compression(image_segment_index, apply_format)
else:
raise ValueError('Got unhandled IC `{}`'.format(image_header.IC))
def _create_data_segment_from_imode_r(self, image_segment_index: int, apply_format: bool) -> DataSegment:
image_header = self.get_image_header(image_segment_index)
if image_header.IMODE != 'R':
raise ValueError(
'Requires IMODE = `R`, got `{}` at image segment index {}'.format(
image_header.IMODE, image_segment_index))
if image_header.IC in ['NC', 'NM']:
return self._handle_no_compression(image_segment_index, apply_format)
else:
raise ValueError('Got unhandled IC `{}`'.format(image_header.IC))
def create_data_segment_for_image_segment(
self,
image_segment_index: int,
apply_format: bool) -> DataSegment:
"""
Creates the data segment for the given image segment.
For consistency of simple usage, any bands will be presented in the
final formatted/output dimension, regardless of the value of `apply_format`
or `IMODE`.
For compressed image segments, the `IMODE` has been
abstracted away, and the data segment will be consistent with the raw
shape having bands in the final dimension (analogous to `IMODE=P`).
Note that this also stores a reference to the produced data segment in
the `_image_segment_data_segments` list.
This will raise an exception if not performed in the order presented in
the writing manager.
Parameters
----------
image_segment_index : int
apply_format : bool
Leave data raw (False), or apply format function and global
`reverse_axes` and `transpose_axes` values?
Returns
-------
DataSegment
"""
image_manager = self.image_managers[image_segment_index]
assert isinstance(image_manager, ImageSubheaderManager)
if not self._in_memory:
if image_manager.item_offset is None:
raise ValueError(
'Performing file processing and item_offset unpopulated for '
'image segment at index {}'.format(image_segment_index))
if len(self._image_segment_data_segments) != image_segment_index:
raise ValueError('data segments must be constructed in order.')
image_header = image_manager.subheader
if image_header.IMODE == 'B':
out = self._create_data_segment_from_imode_b(image_segment_index, apply_format)
elif image_header.IMODE == 'P':
out = self._create_data_segment_from_imode_p(image_segment_index, apply_format)
elif image_header.IMODE == 'R':
out = self._create_data_segment_from_imode_r(image_segment_index, apply_format)
else:
raise ValueError(
'Got unsupported IMODE `{}` at image segment index `{}`'.format(
image_header.IMODE, image_segment_index))
self._image_segment_data_segments.append(out)
if image_manager.end_of_item is not None:
if image_segment_index < len(self.image_managers) - 1:
self.image_managers[image_segment_index + 1].subheader_offset = image_manager.end_of_item
elif not self._in_memory:
raise ValueError(
'Performing file processing and item_size unpopulated for image segment at index {}'.format(image_segment_index))
return out
def create_data_segment_for_collection_element(self, collection_index: int) -> DataSegment:
"""
Creates the data segment overarching the given segment collection.
Parameters
----------
collection_index : int
Returns
-------
DataSegment
"""
block = self.image_segment_collections[collection_index]
if len(block) == 1:
return self.create_data_segment_for_image_segment(block[0], True)
block_definition = numpy.array(self._get_collection_element_coordinate_limits(collection_index), dtype='int64')
total_rows = int(numpy.max(block_definition[:, 1]))
total_columns = int(numpy.max(block_definition[:, 3]))
raw_dtype, formatted_dtype, formatted_bands, complex_order, lut = self._get_dtypes(block[0])
format_function = self.get_format_function(raw_dtype, complex_order, lut, 2)
child_segments = []
child_arrangement = []
raw_bands = None
for img_index, block_def in zip(block, block_definition):
child_segment = self.create_data_segment_for_image_segment(img_index, False)
# NB: the bands in the formatted data will be in the final dimension
if raw_bands is None:
raw_bands = 1 if child_segment.formatted_ndim == 2 else \
child_segment.formatted_shape[2]
child_segments.append(child_segment)
child_arrangement.append(
_get_subscript_def(
int(block_def[0]), int(block_def[1]), int(block_def[2]), int(block_def[3]), raw_bands, 2))
raw_shape = (total_rows, total_columns) if raw_bands == 1 else (total_rows, total_columns, raw_bands)
formatted_shape = raw_shape[:2]
if formatted_bands > 1:
formatted_shape = formatted_shape + (formatted_bands, )
return BlockAggregateSegment(
child_segments, child_arrangement, 'raw', 0, raw_shape, formatted_dtype, formatted_shape,
format_function=format_function, close_children=True)
def get_data_segments(self) -> List[DataSegment]:
"""
Gets a data segment for each of these image segment collection.
Returns
-------
List[DataSegment]
"""
out = []
for index in range(len(self.image_segment_collections)):
out.append(self.create_data_segment_for_collection_element(index))
return out
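# Minimal usage sketch (hypothetical; assumes a fully populated
# NITFWritingDetails instance and the write interface inherited from
# BaseWriter):
#
#     writer = NITFWriter('output.ntf', writing_details)
#     writer.write(image_array, index=0)  # writes the first image collection
#     writer.close()
#
# The data segments constructed here expect `image_array` in formatted
# (output) coordinates; raw interleaving and block layout are handled
# internally.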
def flush(self, force: bool = False) -> None:
self._validate_closed()
BaseWriter.flush(self, force=force)
try:
if self._in_memory:
if self._image_segment_data_segments is not None:
for index, entry in enumerate(self._image_segment_data_segments):
manager = self.nitf_writing_details.image_managers[index]
if manager.item_written:
continue
if manager.item_bytes is not None:
continue
if force or entry.check_fully_written(warn=force):
manager.item_bytes = entry.get_raw_bytes(warn=False)
check = self.nitf_writing_details.verify_all_offsets(require=False)
if check:
self.nitf_writing_details.write_header(self._file_object, overwrite=True)
self.nitf_writing_details.write_all_populated_items(self._file_object)
except AttributeError:
return
def close(self) -> None:
BaseWriter.close(self) # NB: flush called here
try:
if self.nitf_writing_details is not None:
self.nitf_writing_details.verify_all_written()
except AttributeError:
pass
self._nitf_writing_details = None
self._image_segment_data_segments = None
self._file_object = None
| 165,241 | 38.541039 | 120 | py |
sarpy | sarpy-master/sarpy/io/general/format_function.py | """
Stateful functions for use in format operations for data segments.
This module was introduced in version 1.3.0.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from typing import Union, Tuple, Optional
import numpy
from sarpy.io.general.slice_parsing import get_subscript_result_size
logger = logging.getLogger(__name__)
#######
# slice helper functions
def reformat_slice(
sl_in: slice,
limit_in: int,
mirror: bool) -> slice:
"""
Reformat the slice, with optional reverse operation.
Note that the mirror operation doesn't run the slice backwards across the
same elements, but rather creates a mirror image of the slice. This is
to properly accommodate the data segment reverse symmetry transform.
Parameters
----------
sl_in : slice
From prior processing, it is expected that `sl_in.step` is populated,
`sl_in.start` is non-negative, and `sl_in.stop` is non-negative or
`None` (the latter only in the event that `sl_in.step < 0`).
limit_in : int
The upper limit for the axis to which this slice pertains.
mirror : bool
Create the mirror image slice?
Returns
-------
slice
"""
if sl_in.step is None:
raise ValueError('input slice has unpopulated step value')
if sl_in.start is not None and sl_in.start < 0:
raise ValueError('input slice has negative start value')
if sl_in.stop is not None and sl_in.stop < 0:
raise ValueError('input slice has negative stop value')
if mirror:
# make the mirror image of the slice, the step maintains the same sign,
# and will be reversed by the format function
if sl_in.step > 0:
start_in = 0 if sl_in.start is None else sl_in.start
stop_in = limit_in if sl_in.stop is None else sl_in.stop
if sl_in.step > (stop_in - start_in):
step_in = stop_in - start_in
else:
step_in = sl_in.step
# what is the last included location?
count = int((stop_in - start_in)/float(step_in))
final_location = start_in + count*step_in
return slice(limit_in - final_location, limit_in - start_in, step_in)
else:
start_in = limit_in - 1 if sl_in.start is None else sl_in.start
stop_in = -1 if sl_in.stop is None else sl_in.stop
if sl_in.step < (stop_in - start_in):
step_in = stop_in - start_in
else:
step_in = sl_in.step
count = int((stop_in - start_in) / float(step_in))
final_location = start_in + count*step_in
return slice(limit_in - final_location, limit_in - start_in, step_in)
else:
return sl_in
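# Illustrative sketch (not part of the library): for a length-10 axis, index i
# maps to 9 - i under the reverse transform, so mirroring the first three
# elements selects the last three, still expressed as an ascending slice:
#
#     >>> reformat_slice(slice(0, 3, 1), 10, False)
#     slice(0, 3, 1)
#     >>> reformat_slice(slice(0, 3, 1), 10, True)
#     slice(7, 10, 1)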
#########
# format function implementations
class FormatFunction(object):
"""
Stateful function for data orientation and formatting operations associated
with reading data. *This is specifically intended for use in conjunction
with `DataSegment`.*
This allows mapping from raw data to formatted data, for reading data from
a file and converting it to the form of intended use.
If the reverse process is implemented, it enables converting formatted data
back to raw data, so that data in its common form of use can be converted
to the raw form for writing to a file.
Introduced in version 1.3.0.
"""
has_inverse = False
"""
Indicates whether this format function has the inverse call implemented.
"""
__slots__ = ('_raw_shape', '_formatted_shape', '_reverse_axes', '_transpose_axes', '_reverse_transpose_axes')
def __init__(
self,
raw_shape: Optional[Tuple[int, ...]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Tuple[int, ...]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
raw_shape : None|Tuple[int, ...]
formatted_shape : None|Tuple[int, ...]
reverse_axes : None|Tuple[int, ...]
transpose_axes : None|Tuple[int, ...]
"""
self._raw_shape = None
self._formatted_shape = None
self._reverse_axes = None
self._transpose_axes = None
self._reverse_transpose_axes = None
self.set_raw_shape(raw_shape)
self.set_formatted_shape(formatted_shape)
self.set_reverse_axes(reverse_axes)
self.set_transpose_axes(transpose_axes)
@property
def raw_shape(self) -> Optional[Tuple[int, ...]]:
"""
None|Tuple[int, ...]: The expected full possible raw shape.
"""
return self._raw_shape
def set_raw_shape(self, value: Optional[Tuple[int, ...]]) -> None:
if self._raw_shape is not None:
if value is None or value != self._raw_shape:
raise ValueError('raw_shape is read only once set')
return # nothing to be done
self._raw_shape = value
@property
def raw_ndim(self) -> int:
if self.raw_shape is None:
raise ValueError('raw_shape must be set')
return len(self._raw_shape)
@property
def formatted_shape(self) -> Optional[Tuple[int, ...]]:
"""
None|Tuple[int, ...]: The expected output shape basis.
"""
return self._formatted_shape
def set_formatted_shape(self, value: Optional[Tuple[int, ...]]) -> None:
if self._formatted_shape is not None:
if value is None or value != self._formatted_shape:
raise ValueError('formatted_shape is read only once set')
return # nothing to be done
self._formatted_shape = value
@property
def formatted_ndim(self) -> int:
if self.formatted_shape is None:
raise ValueError('formatted_shape must be set')
return len(self._formatted_shape)
@property
def reverse_axes(self) -> Optional[Tuple[int, ...]]:
"""
None|Tuple[int, ...]: The collection of axes (with respect to raw order)
along which we will reverse as part of transformation to output data order.
If not `None`, then this will be a tuple in strictly increasing order.
"""
return self._reverse_axes
def set_reverse_axes(self, value: Optional[Tuple[int, ...]]) -> None:
if self._reverse_axes is not None:
if value is None or value != self._reverse_axes:
raise ValueError('reverse_axes is read only once set')
return # nothing to be done
self._reverse_axes = value
@property
def transpose_axes(self) -> Optional[Tuple[int, ...]]:
"""
None|Tuple[int, ...]: The transpose order for switching from raw order to
output order, prior to applying any format function.
"""
return self._transpose_axes
def set_transpose_axes(self, value: Optional[Tuple[int, ...]]) -> None:
if self._transpose_axes is not None:
if value is None or value != self._transpose_axes:
raise ValueError('transpose_axes is read only once set')
return # nothing to be done
if value is None:
return # nothing to be done
self._transpose_axes = value
# inverts the transpose axes mapping
self._reverse_transpose_axes = tuple([value.index(i) for i in range(len(value))])
def _get_populated_transpose_axes(self) -> Tuple[int, ...]:
trans_axes = tuple(range(len(self.raw_shape))) if self.transpose_axes is None else \
self.transpose_axes
return trans_axes
def _verify_shapes_set(self) -> None:
if self.raw_shape is None or self.formatted_shape is None:
raise ValueError('raw_shape and formatted_shape must both be set.')
def _reverse_and_transpose(
self,
array: numpy.ndarray,
inverse=False) -> numpy.ndarray:
"""
Performs the reverse and transpose operations. This applies to data in raw
format.
Parameters
----------
array : numpy.ndarray
inverse : bool
If `True`, then this should be the opposite operation.
Returns
-------
numpy.ndarray
"""
if array.ndim != self.raw_ndim:
raise ValueError('Got unexpected raw data shape')
if inverse:
if self.transpose_axes is not None:
# NB: this requires a copy, if not trivial
array = numpy.transpose(array, axes=self._reverse_transpose_axes)
if self.reverse_axes is not None:
# NB: these are simply view operations
for index in self.reverse_axes:
array = numpy.flip(array, axis=index)
else:
if self.reverse_axes is not None:
# NB: these are simply view operations
for index in self.reverse_axes:
array = numpy.flip(array, axis=index)
if self.transpose_axes is not None:
# NB: this requires a copy, if not trivial
array = numpy.transpose(array, axes=self.transpose_axes)
return array
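# For example, with reverse_axes=(0,) and transpose_axes=(1, 0), the forward
# direction computes numpy.transpose(numpy.flip(array, 0), (1, 0)), while the
# inverse applies the inverted transpose first and then the same flip,
# exactly undoing the forward mapping.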
def __call__(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...],
squeeze=True) -> numpy.ndarray:
"""
Performs the reformatting operation. When `squeeze=True`, dimensions of
size 1 in the output data will be squeezed by this operation; squeezing
should not generally be done beforehand.
Parameters
----------
array : numpy.ndarray
The input raw array.
subscript : Tuple[slice, ...]
The slice definition which yielded the input raw array.
squeeze : bool
Apply numpy.squeeze operation, which eliminates dimensions of size 1?
Returns
-------
numpy.ndarray
The output formatted array.
"""
array = self._reverse_and_transpose(array, inverse=False)
array = self._forward_functional_step(array, subscript)
if squeeze:
return numpy.squeeze(array)
else:
return array
def inverse(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
"""
Inverse operation which takes in formatted data, and returns
corresponding raw data.
Parameters
----------
array : numpy.ndarray
The input formatted data.
subscript : Tuple[slice, ...]
The slice definition which yielded the formatted data.
Returns
-------
numpy.ndarray
Raises
------
ValueError
Raised if `has_inverse` is `False`.
"""
if not self.has_inverse:
raise ValueError('has_inverse is False')
array = self._reverse_functional_step(array, subscript)
array = self._reverse_and_transpose(array, inverse=True)
return array
def validate_shapes(self) -> None:
"""
Validates that the provided `raw_shape` and `formatted_shape` are sensible.
This should be called only after setting the appropriate values for the
`raw_shape`, `formatted_shape`, `reverse_axes` and `transpose_axes` properties.
Raises
------
ValueError
Raises a ValueError if the shapes are not compatible according to this
function and the transpose axes argument.
"""
raise NotImplementedError
def transform_formatted_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
"""
Transform from the subscript definition in formatted coordinates to
subscript definition with respect to raw coordinates.
Parameters
----------
subscript : Tuple[slice, ...]
Returns
-------
Tuple[slice, ...]
Raises
------
ValueError
Raised if the desired requirement cannot be met.
"""
raise NotImplementedError
def transform_raw_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
"""
Transform from the subscript definition in raw coordinates to
subscript definition with respect to formatted coordinates.
Parameters
----------
subscript : Tuple[slice, ...]
Returns
-------
Tuple[slice, ...]
Raises
------
ValueError
Raised if the desired requirement cannot be met.
"""
raise NotImplementedError
def _forward_functional_step(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
"""
Performs the functional operation. This should operate on raw data following
the reorientation operations provided by :func:`_reverse_and_transpose`.
Parameters
----------
array : numpy.ndarray
The raw data to be transformed.
subscript : Tuple[slice, ...]
The subscript in raw coordinates which would yield the raw data.
Returns
-------
numpy.ndarray
"""
raise NotImplementedError
# noinspection PyTypeChecker
def _reverse_functional_step(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
"""
Performs the reverse functional operation. This should operate on formatted data,
followed by the reorientation operations provided by :func:`_reverse_and_transpose`.
Parameters
----------
array : numpy.ndarray
The formatted data to be inverted.
subscript : Tuple[slice, ...]
The subscript in formatted coordinates which would yield the formatted data.
Returns
-------
numpy.ndarray
"""
if not self.has_inverse:
raise ValueError('has_inverse is False')
raise NotImplementedError
class IdentityFunction(FormatFunction):
"""
A format function allowing only reversing and transposing operations, the
actual functional step is simply the identity function.
Introduced in version 1.3.0.
"""
has_inverse = True
def validate_shapes(self) -> None:
self._verify_shapes_set()
if self.raw_ndim != self.formatted_ndim:
raise ValueError('raw_shape and formatted_shape must have the same length ')
trans_axes = self._get_populated_transpose_axes()
if self.raw_ndim != len(trans_axes):
raise ValueError('raw_shape and transpose_axes must have the same length ')
# we should have formatted_shape[i] == raw_shape[trans_axes[i]]
expected_formatted_shape = tuple([self.raw_shape[index] for index in trans_axes])
if expected_formatted_shape != self.formatted_shape:
raise ValueError(
'Input_shape `{}` and transpose_axes `{}` yields expected output shape `{}`\n\t'
'got formatted_shape `{}`'.format(
self.raw_shape, self.transpose_axes, expected_formatted_shape, self.formatted_shape))
def transform_formatted_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
if len(subscript) != self.formatted_ndim:
raise ValueError('The length of subscript and formatted_shape must match')
reverse_axes = () if self.reverse_axes is None else self.reverse_axes
rev_transpose_axes = tuple(range(len(self.raw_shape))) if self.transpose_axes is None else \
self._reverse_transpose_axes
# we will reorder from formatted order into raw order, using the opposite
# of the transpose axes definition, reversing any axes required according
# to reverse_axes definition (in raw order)
out = []
for i, index in enumerate(rev_transpose_axes):
# formatted order @ index corresponds to raw order @ i
rev = (i in reverse_axes)
shape_limit = self.raw_shape[i] # also self.formatted_shape[index]
out.append(reformat_slice(subscript[index], shape_limit, rev))
return tuple(out)
def transform_raw_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
if len(subscript) != self.raw_ndim:
raise ValueError('The length of subscript and raw_shape must match')
reverse_axes = () if self.reverse_axes is None else self.reverse_axes
transpose_axes = tuple(range(len(self.formatted_shape))) if self.transpose_axes is None else \
self.transpose_axes
# we will reorder from raw order into formatted order, using the transpose
# axes definition, reversing any axes required according to reverse_axes
# definition (in raw order)
out = []
for i, index in enumerate(transpose_axes):
# raw order @ index corresponds to formatted order @ i
rev = (index in reverse_axes)
shape_limit = self.formatted_shape[i] # also self.raw_shape[index]
out.append(reformat_slice(subscript[index], shape_limit, rev))
return tuple(out)
def _forward_functional_step(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
# the only operations are reordering/reversing, performed by _reverse_and_transpose
return array
def _reverse_functional_step(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
return array
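# Illustrative sketch (not part of the library): IdentityFunction only
# reorients data, so with a transpose a (2, 3) raw array comes out as its
# (3, 2) transpose:
#
#     func = IdentityFunction(
#         raw_shape=(2, 3), formatted_shape=(3, 2), transpose_axes=(1, 0))
#     func.validate_shapes()
#     data = numpy.arange(6).reshape((2, 3))
#     out = func(data, (slice(0, 2, 1), slice(0, 3, 1)), squeeze=False)
#     # out is equivalent to data.T, with shape (3, 2)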
class ComplexFormatFunction(FormatFunction):
"""
Reformats data from real/imaginary dimension pairs to complex64 output,
assuming that the raw data has fixed dimensionality and the real/imaginary
pairs fall along a given band dimension.
Introduced in version 1.3.0.
"""
has_inverse = True
_allowed_ordering = ('IQ', 'QI', 'MP', 'PM')
__slots__ = (
'_band_dimension', '_order', '_raw_dtype')
def __init__(
self,
raw_dtype: Union[str, numpy.dtype],
order: str,
raw_shape: Optional[Tuple[int, ...]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Tuple[int, ...]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
band_dimension: int = -1):
"""
Parameters
----------
raw_dtype : str|numpy.dtype
The raw datatype. Valid options dependent on the value of order.
order : str
One of `('IQ', 'QI', 'MP', 'PM')`. The options `('IQ', 'QI')` allow
raw_dtype `('int8', 'int16', 'int32', 'float16', 'float32', 'float64')`. The
options `('MP', 'PM')` allow raw_dtype
`('uint8', 'uint16', 'uint32', 'float16', 'float32', 'float64')`.
raw_shape : None|Tuple[int, ...]
formatted_shape : None|Tuple[int, ...]
reverse_axes : None|Tuple[int, ...]
transpose_axes : None|Tuple[int, ...]
band_dimension : int
Which dimension holds the interleaved complex band pairs, **after** the transpose operation.
"""
self._raw_dtype = numpy.dtype(raw_dtype) # type: numpy.dtype
self._band_dimension = None
self._order = None
self._set_order(order)
FormatFunction.__init__(
self, raw_shape=raw_shape, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes)
self._set_band_dimension(band_dimension)
def set_raw_shape(self, value: Optional[Tuple[int, ...]]) -> None:
FormatFunction.set_raw_shape(self, value)
if self._band_dimension is not None:
self._set_band_dimension(self._band_dimension)
@property
def band_dimension(self) -> int:
"""
int: The band dimension, in raw data after the transpose operation.
"""
return self._band_dimension
def _set_band_dimension(self, value: int) -> None:
if not isinstance(value, int):
raise TypeError('band_dimension must be an integer')
if self._raw_shape is None:
self._band_dimension = value
return
if not (-self.raw_ndim <= value < self.raw_ndim):
raise ValueError('band_dimension out of bounds.')
if value < 0:
value = value + self.raw_ndim
if self._band_dimension is not None:
if ((value - self._band_dimension) % self.raw_ndim) != 0:
raise ValueError('band_dimension is read only once set')
self._band_dimension = value
@property
def order(self) -> str:
"""
str: The order string, one of `('IQ', 'QI', 'MP', 'PM')`.
"""
return self._order
def _set_order(self, value: str) -> None:
if not isinstance(value, str):
raise TypeError('order must be a string')
value = value.strip().upper()
if value not in self._allowed_ordering:
raise ValueError(
'Order is required to be one of {},\n\t'
'got `{}`'.format(self._allowed_ordering, value))
if self._order is not None:
if value != self._order:
raise ValueError('order is read only once set')
self._order = value
if self._order in ['IQ', 'QI']:
if self._raw_dtype.name not in [
'int8', 'int16', 'int32', 'float16', 'float32', 'float64']:
raise ValueError(
'order is {}, and raw_dtype ({}, {}) must be one of '
'int8, int16, int32, float16, float32, or float64'.format(
self._order, self._raw_dtype, self._raw_dtype.name))
elif self._order in ['MP', 'PM']:
if self._raw_dtype.name not in [
'uint8', 'uint16', 'uint32', 'float16', 'float32', 'float64']:
raise ValueError(
'order is {}, and raw_dtype must be one of '
'uint8, uint16, uint32, float16, float32, or float64'.format(
self._order))
else:
raise ValueError('Got unhandled ordering value `{}`'.format(
self._order))
def validate_shapes(self) -> None:
self._verify_shapes_set()
self._set_band_dimension(self._band_dimension)
trans_axes = self._get_populated_transpose_axes()
if self.raw_ndim != len(trans_axes):
raise ValueError('raw_shape and transpose_axes must have the same length ')
arranged_shape = tuple([self.raw_shape[index] for index in trans_axes])
if (arranged_shape[self.band_dimension] % 2) != 0:
raise ValueError(
'Input_shape `{}`, transpose_axes `{}` yields rearranged shape `{}`\n\t'
'entry in band_dimension `{}` should be even'.format(
self.raw_shape, self.transpose_axes, arranged_shape, self.band_dimension))
after_mapping_shape = [entry for entry in arranged_shape]
after_mapping_shape[self.band_dimension] = int(after_mapping_shape[self.band_dimension]/2)
after_mapping_shape = tuple(after_mapping_shape)
if self.raw_ndim == self.formatted_ndim:
if after_mapping_shape != self.formatted_shape:
raise ValueError(
'Input_shape `{}`, transpose_axes `{}`, band dimension `{}` '
'yields expected output shape `{}`\n\t'
'got formatted_shape `{}`'.format(
self.raw_shape, self.transpose_axes, self.band_dimension,
after_mapping_shape, self.formatted_shape))
elif self.raw_ndim == self.formatted_ndim + 1:
reduced_shape = [entry for entry in after_mapping_shape]
reduced_shape.pop(self.band_dimension)
reduced_shape = tuple(reduced_shape)
if reduced_shape != self.formatted_shape:
raise ValueError(
'Input_shape `{}`, transpose_axes `{}`, band dimension `{}` '
'yields expected output shape `{}`\n\t'
'got formatted_shape `{}`'.format(
self.raw_shape, self.transpose_axes, self.band_dimension,
reduced_shape, self.formatted_shape))
else:
raise ValueError(
'Input_shape `{}`, transpose_axes `{}`, band dimension `{}` '
'yields expected output shape `{}`\n\t'
'got formatted_shape `{}`'.format(
self.raw_shape, self.transpose_axes, self.band_dimension,
arranged_shape, self.formatted_shape))
def transform_formatted_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
if len(subscript) != len(self.formatted_shape):
raise ValueError('The length of subscript and formatted_shape must match')
reverse_axes = () if self.reverse_axes is None else self.reverse_axes
rev_transpose_axes = tuple(range(len(self.raw_shape))) if self.transpose_axes is None else \
self._reverse_transpose_axes
if self.raw_ndim == self.formatted_ndim:
# there has been no collapse in dimension
use_subscript = subscript
else:
# pad at band dimension (in the order after transpose operation)
use_subscript = [entry for entry in subscript]
use_subscript.insert(self.band_dimension, slice(0, 2, 1))
# we will reorder from formatted order into raw order, using the opposite
# of the transpose axes definition, reversing any axes required according
# to reverse_axes definition (in raw order)
out = []
for i, index in enumerate(rev_transpose_axes):
# formatted order @ index corresponds to raw order @ i (possibly padded for missing band dimension)
rev = (i in reverse_axes)
shape_limit = self.raw_shape[i]
if self.raw_ndim == self.formatted_ndim:
# the band dimension is not flattened, we have to transform
temp_sl = reformat_slice(use_subscript[index], shape_limit, rev)
if index == self.band_dimension and temp_sl.step not in [-1, 1]:
raise ValueError(
'Slicing along the complex dimension and applying this format function\n\t'
'is only permitted using step +/-1')
if temp_sl.step > 0:
start = 2*temp_sl.start if index == self.band_dimension else temp_sl.start
# noinspection PyTypeChecker
stop = 2*temp_sl.stop if index == self.band_dimension else temp_sl.stop
out.append(slice(start, stop, 1))
elif temp_sl.step < 0:
start = 2*temp_sl.start if index == self.band_dimension else temp_sl.start
if temp_sl.stop is None:
stop = None
elif index == self.band_dimension:
stop = 2*temp_sl.stop
else:
stop = temp_sl.stop
out.append(slice(start, stop, -1))
else:
out.append(reformat_slice(use_subscript[index], shape_limit, rev))
return tuple(out)
def transform_raw_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
if len(subscript) != self.raw_ndim:
raise ValueError('The length of subscript and raw_shape must match')
reverse_axes = () if self.reverse_axes is None else self.reverse_axes
transpose_axes = tuple(range(len(self.formatted_shape))) if self.transpose_axes is None else \
self.transpose_axes
# we will reorder from raw order into formatted order, using the transpose
# axes definition, reversing any axes required according to reverse_axes
# definition (in raw order)
out = []
for i, index in enumerate(transpose_axes):
# raw order @ index corresponds to formatted order @ i
rev = (index in reverse_axes)
shape_limit = self.raw_shape[index] # also self.formatted_shape[i]
if index == self.band_dimension and self.formatted_ndim < self.raw_ndim:
# the band dimension has collapsed, so omit anything here
continue
else:
out.append(reformat_slice(subscript[index], shape_limit, rev))
return tuple(out)
def _forward_magnitude_theta(
self,
data: numpy.ndarray,
out: numpy.ndarray,
magnitude: numpy.ndarray,
theta: numpy.ndarray,
subscript: Tuple[slice, ...]) -> None:
if data.dtype.name in ['uint8', 'uint16', 'uint32']:
bit_depth = data.dtype.itemsize * 8
theta = theta*2*numpy.pi/(1 << bit_depth)
out.real = magnitude*numpy.cos(theta)
out.imag = magnitude*numpy.sin(theta)
def _forward_functional_step(
self,
data: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
if data.ndim != self.raw_ndim:
raise ValueError('Expected raw data of dimension {}'.format(self.raw_ndim))
if (data.shape[self.band_dimension] % 2) != 0:
raise ValueError(
'Requires {} dimensional raw data with even size along dimension {}'.format(
self.raw_ndim, self.band_dimension))
band_dim_size = data.shape[self.band_dimension]
if self.formatted_ndim < self.raw_ndim:
out_shape = data.shape[:self.band_dimension] + data.shape[self.band_dimension + 1:]
else:
out_shape = data.shape[:self.band_dimension] + \
(int(band_dim_size/2), ) + \
data.shape[self.band_dimension + 1:]
out = numpy.empty(out_shape, dtype='complex64')
if self.order == 'IQ':
out.real = numpy.reshape(
data.take(indices=range(0, band_dim_size, 2), axis=self.band_dimension), out.shape)
out.imag = numpy.reshape(
data.take(indices=range(1, band_dim_size, 2), axis=self.band_dimension), out.shape)
elif self.order == 'QI':
out.imag = numpy.reshape(
data.take(indices=range(0, band_dim_size, 2), axis=self.band_dimension), out.shape)
out.real = numpy.reshape(
data.take(indices=range(1, band_dim_size, 2), axis=self.band_dimension), out.shape)
elif self.order in ['MP', 'PM']:
if self.order == 'MP':
mag = numpy.reshape(
data.take(indices=range(0, band_dim_size, 2), axis=self.band_dimension), out.shape)
theta = numpy.reshape(
data.take(indices=range(1, band_dim_size, 2), axis=self.band_dimension), out.shape)
else:
mag = numpy.reshape(
data.take(indices=range(1, band_dim_size, 2), axis=self.band_dimension), out.shape)
theta = numpy.reshape(
data.take(indices=range(0, band_dim_size, 2), axis=self.band_dimension), out.shape)
self._forward_magnitude_theta(data, out, mag, theta, subscript)
else:
raise ValueError('Unhandled order value {}'.format(self.order))
return out
def _reverse_magnitude_theta(
self,
data: numpy.ndarray,
out: numpy.ndarray,
magnitude: numpy.ndarray,
theta: numpy.ndarray,
slice0: Tuple[slice, ...],
slice1: Tuple[slice, ...]) -> None:
if self._raw_dtype.name in ['uint8', 'uint16', 'uint32']:
bit_depth = self._raw_dtype.itemsize * 8
theta *= (1 << bit_depth) / (2 * numpy.pi)
theta = numpy.round(theta)
magnitude = numpy.round(magnitude)
if self.order == 'MP':
out[slice0] = magnitude
out[slice1] = theta
else:
out[slice1] = magnitude
out[slice0] = theta
def _reverse_functional_step(
self,
data: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
if data.ndim != self.formatted_ndim:
raise ValueError('Expected formatted data of dimension {}'.format(self.formatted_ndim))
if self.formatted_ndim < self.raw_ndim:
out_shape = data.shape[:self.band_dimension] + (2,) + data.shape[self.band_dimension:]
use_shape = data.shape[:self.band_dimension] + (1,) + data.shape[self.band_dimension:]
else:
band_dim_size = data.shape[self.band_dimension]
out_shape = data.shape[:self.band_dimension] + \
(2*band_dim_size, ) + \
data.shape[self.band_dimension + 1:]
use_shape = data.shape[:self.band_dimension] + \
(band_dim_size, ) + \
data.shape[self.band_dimension + 1:]
slice0 = []
slice1 = []
for index, siz in enumerate(out_shape):
if index == self.band_dimension:
slice0.append(slice(0, siz, 2))
slice1.append(slice(1, siz, 2))
else:
slice0.append(slice(0, siz, 1))
slice1.append(slice(0, siz, 1))
slice0 = tuple(slice0)
slice1 = tuple(slice1)
out = numpy.empty(out_shape, dtype=self._raw_dtype)
if self.order == 'IQ':
out[slice0] = numpy.reshape(data.real, use_shape)
out[slice1] = numpy.reshape(data.imag, use_shape)
elif self.order == 'QI':
out[slice1] = numpy.reshape(data.real, use_shape)
out[slice0] = numpy.reshape(data.imag, use_shape)
elif self.order in ['MP', 'PM']:
magnitude = numpy.reshape(numpy.abs(data), use_shape)
theta = numpy.reshape(numpy.arctan2(data.imag, data.real), use_shape)
theta[theta < 0] += 2*numpy.pi
self._reverse_magnitude_theta(data, out, magnitude, theta, slice0, slice1)
else:
raise ValueError('Unhandled order value {}'.format(self.order))
return out
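# Illustrative sketch (not part of the library): interleaved int16 I/Q pairs
# along the final raw dimension map to complex64 output, and the implemented
# inverse recovers the interleaved raw data:
#
#     func = ComplexFormatFunction(
#         'int16', 'IQ', raw_shape=(2, 3, 4), formatted_shape=(2, 3, 2),
#         band_dimension=2)
#     func.validate_shapes()
#     raw = numpy.arange(24, dtype='int16').reshape((2, 3, 4))
#     subscript = (slice(0, 2, 1), slice(0, 3, 1), slice(0, 4, 1))
#     cplx = func(raw, subscript, squeeze=False)
#     # cplx[i, j, k] == raw[i, j, 2*k] + 1j*raw[i, j, 2*k + 1]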
class SingleLUTFormatFunction(FormatFunction):
"""
Reformat the raw data according to the use of a single 8-bit lookup table.
In the case of a 2-d LUT, an effort to slice on the final dimension
(which comes from the LUT) is not supported.
Introduced in version 1.3.0.
"""
has_inverse = False
__slots__ = ('_lookup_table', )
def __init__(
self,
lookup_table: numpy.ndarray,
raw_shape: Optional[Tuple[int, ...]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Tuple[int, ...]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None):
"""
Parameters
----------
lookup_table : numpy.ndarray
The 8-bit lookup table.
raw_shape : None|Tuple[int, ...]
formatted_shape : None|Tuple[int, ...]
reverse_axes : None|Tuple[int, ...]
transpose_axes : None|Tuple[int, ...]
"""
self._lookup_table = None
if not isinstance(lookup_table, numpy.ndarray):
raise ValueError('requires a numpy.ndarray, got {}'.format(type(lookup_table)))
if lookup_table.dtype.name != 'uint8':
raise ValueError('requires a numpy.ndarray of uint8 dtype, got {}'.format(lookup_table.dtype))
if lookup_table.ndim == 2 and lookup_table.shape[1] == 1:
lookup_table = numpy.reshape(lookup_table, (-1, ))
self._lookup_table = lookup_table
FormatFunction.__init__(
self, raw_shape=raw_shape, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes)
@property
def lookup_table(self) -> numpy.ndarray:
return self._lookup_table
def validate_shapes(self) -> None:
self._verify_shapes_set()
trans_axes = self._get_populated_transpose_axes()
if self.raw_ndim != len(trans_axes):
raise ValueError('raw_shape and transpose_axes must have the same length')
arranged_shape = [self.raw_shape[index] for index in trans_axes]
if self.lookup_table.ndim == 2:
arranged_shape.append(self.lookup_table.shape[1])
arranged_shape = tuple(arranged_shape)
if arranged_shape != self.formatted_shape:
raise ValueError(
'Input_shape `{}`, transpose_axes `{}` and lookup table\n\t'
'yields expected output shape `{}` got formatted_shape `{}`'.format(
self.raw_shape, self.transpose_axes, arranged_shape, self.formatted_shape))
def transform_formatted_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
if len(subscript) != self.formatted_ndim:
raise ValueError('The length of subscript and formatted_shape must match')
reverse_axes = () if self.reverse_axes is None else self.reverse_axes
rev_transpose_axes = tuple(range(len(self.raw_shape))) if self.transpose_axes is None else \
self._reverse_transpose_axes
# we will reorder from formatted order into raw order, using the opposite
# of the transpose axes definition, reversing any axes required according
# to reverse_axes definition (in raw order)
out = []
# NB: for 2-d LUT, the final slice will be ignored here (as it should)
for i, index in enumerate(rev_transpose_axes):
# formatted order @ index corresponds to raw order @ i
rev = (i in reverse_axes)
shape_limit = self.raw_shape[i] # also self.formatted_shape[index]
out.append(reformat_slice(subscript[index], shape_limit, rev))
return tuple(out)
def transform_raw_slice(
self,
subscript: Tuple[slice, ...]) -> Tuple[slice, ...]:
if len(subscript) != self.raw_ndim:
raise ValueError('The length of subscript and raw_shape must match')
reverse_axes = () if self.reverse_axes is None else self.reverse_axes
transpose_axes = tuple(range(len(self.raw_shape))) if self.transpose_axes is None else \
self.transpose_axes  # NB: the default spans the raw dimensions; formatted may have an extra LUT band
# we will reorder from raw order into formatted order, using the transpose
# axes definition, reversing any axes required according to reverse_axes
# definition (in raw order)
out = []
for i, index in enumerate(transpose_axes):
# raw order @ index corresponds to formatted order @ i
rev = (index in reverse_axes)
shape_limit = self.raw_shape[index]  # also self.formatted_shape[i] for these axes
out.append(reformat_slice(subscript[index], shape_limit, rev))
if self.raw_ndim < self.formatted_ndim:
# 2-d lookup table
lim = self.formatted_shape[-1]
out.append(slice(0, lim, 1))
return tuple(out)
def _forward_functional_step(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...]) -> numpy.ndarray:
if not isinstance(array, numpy.ndarray):
raise ValueError('requires a numpy.ndarray, got {}'.format(type(array)))
if array.dtype.name not in ['uint8', 'uint16']:
raise ValueError('requires a numpy.ndarray of uint8 or uint16 dtype, '
'got {}'.format(array.dtype.name))
if array.ndim != 2:
raise ValueError('Requires a two-dimensional numpy.ndarray, got shape {}'.format(array.shape))
temp = numpy.reshape(array, (-1, ))
out = self.lookup_table[temp]
if self.lookup_table.ndim == 2:
out_shape = array.shape + (self.lookup_table.shape[1], )
else:
out_shape = array.shape
return numpy.reshape(out, out_shape)
def __call__(
self,
array: numpy.ndarray,
subscript: Tuple[slice, ...],
squeeze=True) -> numpy.ndarray:
array = self._reverse_and_transpose(array, inverse=False)
array = self._forward_functional_step(array, subscript)
if self.raw_ndim < self.formatted_ndim:
# apply slice in the band (final dimension)
array = array.take(
indices=numpy.arange(self.formatted_shape[-1])[subscript[-1]], axis=-1)
# ensure shape is as expected - any squeeze handled consistently
out_shape = get_subscript_result_size(subscript, self.formatted_shape)
array = numpy.reshape(array, out_shape)
if squeeze:
return numpy.squeeze(array)
else:
return array
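# A minimal sketch (with assumed inputs) of the core lookup operation performed
# by _forward_functional_step above: indexing the 8-bit table with raw integer
# data, then restoring the original shape.
#
#     >>> import numpy
#     >>> lut = numpy.arange(256, dtype='uint8')[::-1]  # an inverting table
#     >>> raw = numpy.array([[0, 1], [254, 255]], dtype='uint8')
#     >>> lut[numpy.reshape(raw, (-1, ))].reshape(raw.shape)
#     array([[255, 254],
#            [  1,   0]], dtype=uint8)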
| 41,501 | 38.45057 | 113 | py |
sarpy | sarpy-master/sarpy/io/general/data_segment.py | """
The object definitions for reading and writing data in single conceptual units
using an interface based on slicing definitions and numpy arrays with formatting
operations.
This module was introduced in version 1.3.0.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import os
from typing import Union, Tuple, Sequence, BinaryIO, Optional
import numpy
from sarpy.io.general.format_function import FormatFunction, IdentityFunction
from sarpy.io.general.slice_parsing import verify_subscript, get_slice_result_size, \
get_subscript_result_size
from sarpy.io.general.utils import h5py, is_file_like
if h5py is not None:
from h5py import File as h5pyFile, Dataset as h5pyDataset
else:
h5pyFile = None
h5pyDataset = None
logger = logging.getLogger(__name__)
####
# helper functions
def _reverse_slice(slice_in: slice) -> slice:
"""
Given a slice with negative step, this returns a slice which will define the
same elements traversed in the opposite direction. Note that this is not
the same as the mirror operation.
Parameters
----------
slice_in : slice
Returns
-------
slice
Raises
------
ValueError
"""
if slice_in.step > 0:
raise ValueError('This is only applicable to slices with negative step value')
stop = 0 if slice_in.stop is None else slice_in.stop + 1
mult = int(numpy.floor((stop - slice_in.start)/slice_in.step))
final_entry = slice_in.start + mult*slice_in.step
return slice(final_entry, slice_in.start-slice_in.step, -slice_in.step)
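# An illustrative sketch (values assumed for demonstration):
#
#     >>> _reverse_slice(slice(5, None, -2))
#     slice(1, 7, 2)
#
# Both slices traverse the elements 5, 3, 1 of a sufficiently long axis, just
# in opposite directions.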
def _find_slice_overlap(
slice_in: slice,
ref_slice: slice) -> Tuple[Optional[slice], Optional[slice]]:
"""
Finds the overlap of the slice with a contiguous interval slice.
Parameters
----------
slice_in : slice
ref_slice : slice
Returns
-------
    child_slice : None|slice
        The overlap expressed as a slice in coordinates local to the
        reference interval `ref_slice`. None if there is no overlap.
    parent_slice : None|slice
        The overlap expressed as indices into the elements selected by
        `slice_in`. None if there is no overlap.
"""
if ref_slice.step not in [1, -1]:
raise ValueError('Reference slice must have step +/-1')
if ref_slice.step > 0:
start_ind = ref_slice.start
stop_ind = ref_slice.stop
else:
start_ind = 0 if ref_slice.stop is None else ref_slice.stop + 1
stop_ind = ref_slice.start + 1
if slice_in.step > 0:
if slice_in.stop <= start_ind or slice_in.start >= stop_ind:
# there is no overlap
return None, None
# find minimum multiplier so that slice_in.start + mult*slice_in.step >= start_ind
# mult >= (start_ind - slice_in.start)/slice_in.step
child_start = 0 if start_ind <= slice_in.start else \
int(numpy.ceil((start_ind - slice_in.start)/slice_in.step))
parent_start = slice_in.start - start_ind + child_start*slice_in.step
# find end - maximum multiplier so that slice_in.start + mult*slice_in.step <= stop_ind
# mult <= (stop_ind - slice_in.start)/slice_in.step
max_ind = min(slice_in.stop, stop_ind)
child_stop = int(numpy.floor((max_ind - slice_in.start)/slice_in.step))
parent_stop = slice_in.start - start_ind + child_stop*slice_in.step
else:
if slice_in.start < start_ind or (slice_in.stop is not None and slice_in.stop >= stop_ind):
# there is no overlap
# noinspection PyTypeChecker
return None, None
# find minimum multiplier so that slice_in.start + mult*slice_in.step <= stop_ind-1
# mult >= (stop_ind - 1 - slice_in.start)/slice_in.step
child_start = 0 if slice_in.start < stop_ind else int(numpy.ceil((stop_ind - 1 - slice_in.start)/slice_in.step))
parent_start = slice_in.start - start_ind + child_start*slice_in.step
# find end - first multiplier so that slice_in.start + mult*slice_in.step < start_ind - 1
# mult > (start_ind - slice_in.start)/slice_in.step
if slice_in.stop is None:
min_ind = max(start_ind-1, -1)
else:
min_ind = max(start_ind-1, slice_in.stop)
child_stop = int(numpy.ceil((min_ind - slice_in.start)/slice_in.step))
parent_stop = slice_in.start - start_ind + child_stop*slice_in.step
if parent_stop < 0:
parent_stop = None
if ref_slice.step < 0:
# noinspection PyTypeChecker
return _reverse_slice(slice(parent_start, parent_stop, slice_in.step)), \
_reverse_slice(slice(child_start, child_stop, 1))
else:
# noinspection PyTypeChecker
return slice(parent_start, parent_stop, slice_in.step), \
slice(child_start, child_stop, 1)
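# An illustrative sketch (values assumed for demonstration): every other
# element of a length-10 axis, overlapped with the contiguous interval [4, 8):
#
#     >>> _find_slice_overlap(slice(2, 10, 2), slice(4, 8, 1))
#     (slice(0, 4, 2), slice(1, 3, 1))
#
# The selection picks elements 4 and 6 inside the interval; these sit at
# positions 0 and 2 local to the interval (first return), and are selected
# elements number 1 and 2 of the overall selection (second return).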
def _infer_subscript_for_write(
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]],
subscript: Union[None, Sequence[slice]],
full_shape: Tuple[int, ...]) -> Tuple[slice, ...]:
"""
Helper function, for writing operation, which infers the subscript definition
between the given start_indices or (possibly partially defined) subscript.
Parameters
----------
data : numpy.ndarray
start_indices : None|int|Tuple[int, ...]
subscript : None|Sequence[slice]
full_shape : Tuple[int, ...]
Returns
-------
Tuple[slice, ...]
"""
if start_indices is None and subscript is None:
if data.shape == full_shape:
return verify_subscript(None, full_shape)
else:
raise ValueError('One of start_indices or subscript must be provided.')
if start_indices is not None:
if isinstance(start_indices, int):
start_indices = (start_indices, )
if len(start_indices) < len(full_shape):
start_indices = start_indices + tuple(0 for _ in range(len(start_indices), len(full_shape)))
subscript = tuple([slice(entry1, entry1+entry2, 1) for entry1, entry2 in zip(start_indices, data.shape)])
subscript, result_shape = get_subscript_result_size(subscript, full_shape)
if result_shape != data.shape:
raise ValueError(
'Inferred subscript `{}` with shape `{}`\n\t'
'does not match data.shape `{}`'.format(subscript, result_shape, data.shape))
return subscript
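# An illustrative sketch (shapes assumed for demonstration): a (2, 3) chunk
# written into a (10, 10) array starting at row 4, with the missing tail index
# filled in with 0:
#
#     >>> import numpy
#     >>> data = numpy.zeros((2, 3), dtype='float32')
#     >>> _infer_subscript_for_write(data, 4, None, (10, 10))
#     (slice(4, 6, 1), slice(0, 3, 1))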
def extract_string_from_subscript(
subscript: Union[None, int, slice, Tuple]) -> Tuple[Union[None, int, slice, Sequence], Tuple[str, ...]]:
"""
Extracts any string elements (stripped and made all lowercase) from subscript entries.
Parameters
----------
subscript : None|str|int|slice|Sequence
Returns
-------
subscript: None|int|slice|Sequence
With string entries removed
strings : Tuple[str, ...]
The string entries, stripped and made all lower case.
"""
string_entries = []
if isinstance(subscript, str):
string_entries.append(subscript.strip().lower())
subscript = None
elif isinstance(subscript, Sequence):
new_subscript = []
for entry in subscript:
if isinstance(entry, str):
string_entries.append(entry.strip().lower())
else:
new_subscript.append(entry)
if len(string_entries) > 0:
subscript = tuple(new_subscript)
return subscript, tuple(string_entries)
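# An illustrative sketch (values assumed for demonstration):
#
#     >>> extract_string_from_subscript((slice(0, 5), ' RAW '))
#     ((slice(0, 5, None),), ('raw',))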
#####
# Abstract data segment definition and derived element implementations
class DataSegment(object):
"""
Partially abstract base class representing one conceptual fragment of data
read or written as an array. This is generally designed for images, but is
general enough to support other usage.
Introduced in version 1.3.0.
.. warning::
The format function instance will be modified in place. Do not use the
same format function instance across multiple data segments.
"""
_allowed_modes = ('r', 'w')
__slots__ = (
'_closed', '_mode',
'_raw_dtype', '_raw_shape', '_formatted_dtype', '_formatted_shape',
'_reverse_axes', '_transpose_axes', '_reverse_transpose_axes',
'_format_function')
def __init__(
self,
raw_dtype: Union[str, numpy.dtype],
raw_shape: Tuple[int, ...],
formatted_dtype: Union[str, numpy.dtype],
formatted_shape: Tuple[int, ...],
reverse_axes: Union[None, int, Sequence[int]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
mode: str = 'r'):
"""
Parameters
----------
raw_dtype : str|numpy.dtype
raw_shape : Tuple[int, ...]
formatted_dtype : str|numpy.dtype
formatted_shape : Tuple[int, ...]
reverse_axes : None|int|Sequence[int]
The collection of axes (in raw order) to reverse, prior to applying
transpose operation
transpose_axes : None|Tuple[int, ...]
The transpose operation to perform to the raw data, after applying
any axis reversal, and before applying any format function
format_function : None|FormatFunction
            Note that the format function instance will be modified in place.
mode : str
"""
self._closed = False
self._mode = None
self._set_mode(mode)
self._raw_shape = None
self._set_raw_shape(raw_shape)
self._raw_dtype = None
self._set_raw_dtype(raw_dtype)
self._formatted_shape = None
self._set_formatted_shape(formatted_shape)
self._formatted_dtype = None
self._set_formatted_dtype(formatted_dtype)
self._reverse_axes = None
self._set_reverse_axes(reverse_axes)
self._transpose_axes = None
self._reverse_transpose_axes = None
self._set_transpose_axes(transpose_axes)
self._format_function = None
self._set_format_function(format_function)
self._validate_shapes()
@property
def raw_shape(self) -> Tuple[int, ...]:
"""
Tuple[int, ...]: The raw shape.
"""
return self._raw_shape
def _set_raw_shape(self, value: Tuple[int, ...]) -> None:
if not isinstance(value, tuple):
raise TypeError(
'raw_shape must be specified by a tuple of ints, got type `{}`'.format(type(value)))
for entry in value:
if not isinstance(entry, int):
raise TypeError(
'raw_shape must be specified by a tuple of ints, got `{}`'.format(value))
if entry <= 0:
raise ValueError(
'raw_shape must be specified by a tuple of positive ints, got `{}`'.format(value))
self._raw_shape = value
@property
def raw_ndim(self) -> int:
"""
int: The number of raw dimensions.
"""
return len(self._raw_shape)
@property
def mode(self) -> str:
"""
str: The mode.
"""
return self._mode
def _set_mode(self, value: str) -> None:
if not isinstance(value, str):
raise TypeError('Mode must be a string value')
value = value.strip().lower()
if value not in self._allowed_modes:
raise ValueError('mode must be one of {}'.format(self._allowed_modes))
self._mode = value
@property
def raw_dtype(self) -> numpy.dtype:
"""
numpy.dtype: The data type of the data returned by the :func:`read_raw` function.
"""
return self._raw_dtype
def _set_raw_dtype(self, value) -> None:
if not isinstance(value, numpy.dtype):
try:
value = numpy.dtype(value)
except Exception as e:
raise ValueError(
'Tried interpreting raw_dtype value as a numpy.dtype, '
'and failed with error\n\t{}'.format(e))
self._raw_dtype = value
@property
def formatted_shape(self) -> Tuple[int, ...]:
"""
Tuple[int, ...]: The formatted data shape.
"""
return self._formatted_shape
def _set_formatted_shape(self, value: Tuple[int, ...]) -> None:
if not isinstance(value, tuple):
raise TypeError(
'formatted_shape must be specified by a tuple of ints, got type `{}`'.format(type(value)))
for entry in value:
if not isinstance(entry, int):
raise TypeError(
'formatted_shape must be specified by a tuple of ints, got `{}`'.format(value))
if entry <= 0:
raise ValueError(
'formatted_shape must be specified by a tuple of positive ints, got `{}`'.format(value))
self._formatted_shape = value
@property
def formatted_dtype(self) -> numpy.dtype:
"""
numpy.dtype: The data type of the formatted data, which will be returned
by the :func:`read` function.
"""
return self._formatted_dtype
def _set_formatted_dtype(self, value) -> None:
if not isinstance(value, numpy.dtype):
try:
value = numpy.dtype(value)
except Exception as e:
raise ValueError(
'Tried interpreting formatted_dtype value as a numpy.dtype, '
'and failed with error\n\t{}'.format(e))
self._formatted_dtype = value
@property
def formatted_ndim(self) -> int:
"""
int: The number of formatted dimensions.
"""
return len(self._formatted_shape)
@property
def reverse_axes(self) -> Optional[Tuple[int, ...]]:
"""
None|Tuple[int, ...]: The collection of axes (with respect to raw order)
along which we will reverse as part of transformation to formatted data order.
If not `None`, then this will be a tuple in strictly increasing order.
"""
return self._reverse_axes
def _set_reverse_axes(self, value: Union[None, int, Tuple[int, ...]]) -> None:
if value is None:
self._reverse_axes = None
return
if isinstance(value, int):
value = (value, )
else:
value = tuple(sorted(list(set(int(entry) for entry in value))))
for entry in value:
if not (0 <= entry < self.raw_ndim):
raise ValueError('reverse_axes entries must be less than raw_ndim')
self._reverse_axes = value
@property
def transpose_axes(self) -> Tuple[int, ...]:
"""
None|Tuple[int, ...]: The transpose order for switching from raw order to
formatted order, prior to applying any format function.
If populated, this must be a permutation of `(0, 1, ..., raw_ndim-1)`.
"""
return self._transpose_axes
def _set_transpose_axes(self, value: Union[None, Tuple[int, ...]]) -> None:
if value is None:
self._transpose_axes = None
return
value = tuple([int(entry) for entry in value])
if set(value) != set(range(self.raw_ndim)):
raise ValueError('transpose_axes must be a permutation of range(raw_ndim), got\n\t{}'.format(value))
self._transpose_axes = value
@property
def format_function(self) -> FormatFunction:
"""
The format function which will be applied to the raw data.
Returns
-------
FormatFunction
"""
return self._format_function
def _set_format_function(self, value: Optional[FormatFunction]) -> None:
if value is None:
value = IdentityFunction()
if not isinstance(value, FormatFunction):
raise ValueError('Got unexpected format_function value of type `{}`'.format(type(value)))
# set our important property values
value.set_raw_shape(self.raw_shape)
value.set_formatted_shape(self.formatted_shape)
value.set_reverse_axes(self.reverse_axes)
value.set_transpose_axes(self.transpose_axes)
self._format_function = value
@property
def can_write_regular(self) -> bool:
"""
bool: Can this data segment write regular data, which requires a function
inverse?
"""
return self.mode == 'w' and self.format_function.has_inverse
@property
def closed(self) -> bool:
"""
bool: Is the data segment closed? Reading or writing will result in a ValueError
"""
return self._closed
def _validate_closed(self):
if not hasattr(self, '_closed') or self._closed:
raise ValueError('I/O operation of closed data segment')
def _validate_shapes(self) -> None:
"""
Validate the raw_shape and formatted_shape values.
"""
self.format_function.validate_shapes()
# read related methods
def verify_raw_subscript(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]]) -> Tuple[slice, ...]:
"""
Verifies that the structure of the subscript is in keeping with the raw
shape, and fills in any missing dimensions.
Parameters
----------
subscript : None|int|slice|Sequence[int|slice|Tuple[int, ...]]
Returns
-------
Tuple[slice, ...]
Guaranteed to be a tuple of slices of length `raw_ndim`.
"""
return verify_subscript(subscript, self._raw_shape)
def verify_formatted_subscript(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]]) -> Tuple[slice, ...]:
"""
Verifies that the structure of the subscript is in keeping with the formatted
shape, and fills in any missing dimensions.
Parameters
----------
subscript : None|int|slice|Sequence[int|slice|Tuple[int, ...]]
Returns
-------
Tuple[slice, ...]
Guaranteed to be a tuple of slices of length `formatted_ndim`.
"""
return verify_subscript(subscript, self._formatted_shape)
def _interpret_subscript(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
raw: bool = False) -> Tuple[slice, ...]:
"""
Restructures the subscript to be a tuple of slices guaranteed to be the same
length as the dimension of the return.
Parameters
----------
subscript : None|int|slice|Tuple[slice, ...]
raw : bool
If `True` then this should apply to raw coordinates, if `False` it
should apply to original coordinates.
Returns
-------
Tuple[slice, ...]
"""
if raw:
return verify_subscript(subscript, self._raw_shape)
else:
return verify_subscript(subscript, self._formatted_shape)
def __getitem__(
self,
subscript: Union[None, int, slice, str, Sequence[Union[None, int, slice, str]]]) -> numpy.ndarray:
"""
        Fetch the data via slice definition. Any string entries included in
        the subscript are stripped out and interpreted as directives: 'raw'
        fetches the raw (unformatted) data, and 'nosqueeze' suppresses the
        squeeze operation on the result.
        Parameters
        ----------
        subscript : None|int|slice|str|tuple
        Returns
        -------
        numpy.ndarray
        """
self._validate_closed()
subscript, string_entries = extract_string_from_subscript(subscript)
use_raw = ('raw' in string_entries)
squeeze = ('nosqueeze' not in string_entries)
if use_raw:
return self.read_raw(subscript, squeeze=squeeze)
else:
return self.read(subscript, squeeze=squeeze)
def read(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
"""
In keeping with data segment mode, read the data slice specified relative
to the formatted data coordinates. This requires that `mode` is `'r'`.
Parameters
----------
subscript : None|int|slice|Sequence[int|slice|Tuple[int, ...]]
squeeze : bool
Apply the numpy.squeeze operation, which eliminates dimension of size 1?
Returns
-------
numpy.ndarray
"""
self._validate_closed()
if self.mode != 'r':
raise ValueError('Requires mode = "r"')
norm_subscript = self.verify_formatted_subscript(subscript)
raw_subscript = self.format_function.transform_formatted_slice(norm_subscript)
raw_data = self.read_raw(raw_subscript, squeeze=False)
return self.format_function(raw_data, raw_subscript, squeeze=squeeze)
# noinspection PyTypeChecker
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
"""
        In keeping with data segment mode, read raw data from the source,
        without reformatting or applying symmetry operations. This requires
        that `mode` is `'r'`.
Parameters
----------
subscript : None|int|slice|Sequence[int|slice|Tuple[int, ...]]
These arguments are relative to raw data shape and order, no symmetry
operations have been applied.
squeeze : bool
Apply numpy.squeeze, which eliminates any dimensions of size 1?
Returns
-------
numpy.ndarray
This will be of data type given by `raw_dtype`.
"""
if self.mode != 'r':
raise ValueError('Requires mode == "r"')
raise NotImplementedError
def _verify_write_raw_details(self, data: numpy.ndarray) -> None:
if self.mode != 'w':
raise ValueError('I/O Error, functionality requires mode == "w"')
if data.dtype.itemsize != self.raw_dtype.itemsize:
raise ValueError(
'Expected data dtype itemsize `{}`, got `{}`'.format(self.raw_dtype.itemsize, data.dtype.itemsize))
if data.dtype != self.raw_dtype:
logger.warning('Expected data dtype `{}`, got `{}`.'.format(self.raw_dtype, data.dtype))
def write(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs) -> None:
"""
In keeping with data segment mode, write the data provided in formatted
form, assuming the slice specified relative to the formatted data coordinates.
This requires that `mode` is `'w'`, and `format_function.has_inverse == True`,
because we have to apply the format function inverse to the provided data.
**Only one of `start_indices` and `subscript` should be specified.**
Parameters
----------
data : numpy.ndarray
The data in formatted form, to be transferred to raw form and written.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Sequence[slice]
The subscript definition in formatted coordinates.
kwargs
Returns
-------
None
"""
self._validate_closed()
if not self.can_write_regular:
raise ValueError(
'I/O error, functionality requires mode = "w"\n\t'
'and the ability to invert the format function')
if data.dtype.itemsize != self.formatted_dtype.itemsize:
raise ValueError(
'Expected data dtype itemsize `{}`, got `{}`'.format(
self.formatted_dtype.itemsize, data.dtype.itemsize))
if data.dtype != self.formatted_dtype:
logger.warning('Expected data dtype `{}`, got `{}`.'.format(
self.formatted_dtype, data.dtype))
subscript = _infer_subscript_for_write(
data, start_indices, subscript, self.formatted_shape)
raw_data = self.format_function.inverse(data, subscript)
raw_subscript = self.format_function.transform_formatted_slice(subscript)
self.write_raw(raw_data, subscript=raw_subscript, **kwargs)
def write_raw(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs) -> None:
"""
In keeping with data segment mode, write the data provided in raw form,
assuming the slice specified relative to raw data coordinates. This
requires that `mode` is `'w'`.
**Only one of `start_indices` and `subscript` should be specified.**
Parameters
----------
data : numpy.ndarray
The data in raw form.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Tuple[slice, ...]
The subscript definition in raw coordinates.
kwargs
Returns
-------
None
"""
raise NotImplementedError
def check_fully_written(self, warn: bool = False) -> bool:
"""
Checks that all expected pixel data is fully written.
Parameters
----------
warn : bool
Log warning with some details if not fully written.
Returns
-------
bool
"""
raise NotImplementedError
def get_raw_bytes(self, warn: bool = True) -> Union[bytes, Tuple]:
"""
This returns the bytes for the underlying raw data.
.. warning::
A data segment is *conceptually* represented in raw data as a single
numpy array of appropriate shape and data type. When the data segment
is formed from component pieces, then the return of this function may
deviate significantly from the raw byte representation of such an
array after consideration of data order and pad pixels.
Parameters
----------
warn : bool
If `True`, then a check will be performed to ensure that the data
has been fully written and warnings printed if the answer is no.
Returns
-------
bytes|Tuple
The result will be a `bytes` object, unless the data segment is
made up of a collection of child data segments, in which case the
result will be a Tuple consisting of their `get_raw_bytes` returns.
"""
raise NotImplementedError
def flush(self) -> None:
"""
Should perform, if possible, any necessary steps to flush any unwritten
data to the file.
Returns
-------
None
"""
return
def close(self):
"""
This should perform any necessary clean-up operations, like closing
open file handles, deleting any temp files, etc
"""
if not hasattr(self, '_closed') or self._closed:
return
self._closed = True
def __del__(self):
# NB: this is called when the object is marked for garbage collection
# (i.e. reference count goes to 0)
        # The order in which this happens may be unreliable
self.close()
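# A minimal usage sketch of the DataSegment interface, using the concrete
# NumpyArraySegment defined further below (values assumed for demonstration):
#
#     >>> import numpy
#     >>> raw = numpy.arange(12, dtype='uint8').reshape((3, 4))
#     >>> segment = NumpyArraySegment(raw, transpose_axes=(1, 0))
#     >>> segment.formatted_shape
#     (4, 3)
#     >>> segment[0, :]  # the first formatted row is the first raw column
#     array([0, 4, 8], dtype=uint8)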
class ReorientationSegment(DataSegment):
"""
Define a basic ordering of a given DataSegment. The raw data will be
presented as the parent data segment's formatted data.
Introduced in version 1.3.0.
"""
__slots__ = ('_parent', '_close_parent')
def __init__(
self,
parent: DataSegment,
formatted_dtype: Optional[Union[str, numpy.dtype]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
close_parent: bool = True):
"""
Parameters
----------
parent : DataSegment
formatted_dtype : str|numpy.dtype
formatted_shape : Tuple[int, ...]
reverse_axes : None|int|Sequence[int]
The collection of axes (in raw order) to reverse, prior to applying
transpose operation
transpose_axes : None|Tuple[int, ...]
The transpose operation to perform, after applying any axis reversal,
and before applying any format function
        format_function : None|FormatFunction
        close_parent : bool
            Call parent.close() when close is called?
"""
self._parent = None
self._close_parent = None
self.close_parent = close_parent
intermediate_shape = self._set_parent(parent, transpose_axes)
if format_function is None:
formatted_dtype = parent.formatted_dtype
formatted_shape = intermediate_shape
else:
if formatted_dtype is None or formatted_shape is None:
raise ValueError(
'If format_function is provided,\n\t'
'then formatted_dtype and formatted_shape must be provided.')
mode = parent.mode
if mode == 'w' and not parent.can_write_regular:
raise ValueError('Requires that the parent can write regular data')
DataSegment.__init__(
self, parent.formatted_dtype, parent.formatted_shape, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode=mode)
@property
def parent(self) -> DataSegment:
return self._parent
def _set_parent(self,
parent: DataSegment,
transpose_axes: Union[None, Tuple[int, ...]]) -> Tuple[int, ...]:
if transpose_axes is None:
trans_axes = tuple(range(parent.formatted_ndim))
else:
if len(transpose_axes) != parent.formatted_ndim:
raise ValueError('transpose_axes must have length {}'.format(parent.formatted_ndim))
trans_axes = transpose_axes
self._parent = parent
return tuple([parent.formatted_shape[index] for index in trans_axes])
@property
def close_parent(self) -> bool:
"""
bool: Call parent.close() when close is called?
"""
return self._close_parent
@close_parent.setter
def close_parent(self, value):
self._close_parent = bool(value)
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
if self.mode != 'r':
raise ValueError('Requires mode == "r"')
return self.parent.read(subscript, squeeze=squeeze)
def check_fully_written(self, warn: bool = False) -> bool:
return self.parent.check_fully_written(warn=warn)
def write_raw(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs):
"""
In keeping with data segment mode, write the data provided in raw form,
assuming the slice specified relative to raw data coordinates. This
requires that `mode` is `'w'`.
Note that raw form/order for the data segment is simply a reordered version
of the formatted data for parent. This **is not** related to raw data
with respect to the parent. To write raw data with respect to the parent,
use :func:`parent.write_raw` instead.
**Only one of `start_indices` and `subscript` should be specified.**
Parameters
----------
data : numpy.ndarray
The data in raw form.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Sequence[slice]
The subscript definition in raw coordinates.
kwargs
Returns
-------
None
"""
self._validate_closed()
self._verify_write_raw_details(data)
subscript = _infer_subscript_for_write(data, start_indices, subscript, self.raw_shape)
parent_form_subscript = self.format_function.transform_formatted_slice(subscript)
self.parent.write(data, subscript=parent_form_subscript, **kwargs)
def get_raw_bytes(self, warn: bool = True) -> Union[bytes, Tuple]:
self._validate_closed()
return self.parent.get_raw_bytes(warn=warn)
def flush(self) -> None:
self._validate_closed()
try:
self.parent.flush()
except AttributeError:
return
def close(self):
try:
if self._closed:
return
self.flush()
if self.close_parent:
self.parent.close()
DataSegment.close(self)
self._parent = None
except AttributeError:
return
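# A minimal sketch (values assumed for demonstration): ReorientationSegment
# presents the parent's formatted data as its own raw data, so a transpose
# here flips the parent's formatted axes.
#
#     >>> import numpy
#     >>> parent = NumpyArraySegment(numpy.zeros((3, 4), dtype='uint8'))
#     >>> flipped = ReorientationSegment(parent, transpose_axes=(1, 0))
#     >>> flipped.formatted_shape
#     (4, 3)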
class SubsetSegment(DataSegment):
"""
Define a subset of a given DataSegment, with formatting handled by the
parent data segment.
Introduced in version 1.3.0.
"""
_allowed_modes = ('r', 'w')
__slots__ = (
'_parent', '_formatted_subset_definition', '_raw_subset_definition',
'_original_formatted_indices', '_original_raw_indices', '_squeeze',
'_close_parent', '_pixels_written', '_expected_pixels_written')
def __init__(
self,
parent: DataSegment,
subset_definition: Tuple[slice, ...],
coordinate_basis: str,
squeeze: bool = True,
close_parent: bool = True):
"""
Parameters
----------
parent : DataSegment
subset_definition : Tuple[slice, ...]
coordinate_basis : str
The coordinate basis for the subset definition, it should be one of
`('raw', 'formatted')`.
squeeze: bool
Eliminate the dimensions that are size 1 in the subset?
close_parent : bool
Call parent.close() when close is called?
"""
self._close_parent = None
self.close_parent = close_parent
        self._original_formatted_indices = None  # the original indices matched to the new, with entry -1 where the dimension was squeezed out
self._original_raw_indices = None
self._formatted_subset_definition = None
self._raw_subset_definition = None
self._squeeze = squeeze
self._parent = parent
raw_shape, formatted_shape = self._validate_subset_definition(
subset_definition, coordinate_basis)
DataSegment.__init__(
self, parent.raw_dtype, raw_shape, parent.formatted_dtype, formatted_shape,
mode=parent.mode)
self._pixels_written = 0
if self.mode == 'w':
self._expected_pixels_written = int(numpy.prod(raw_shape))
else:
self._expected_pixels_written = 0
def _validate_shapes(self) -> None:
        # handled elsewhere
pass
@property
def parent(self) -> DataSegment:
return self._parent
@property
def formatted_subset_definition(self) -> Tuple[slice, ...]:
"""
Tuple[slice]: The subset definition, in formatted coordinates.
"""
return self._formatted_subset_definition
@property
def raw_subset_definition(self) -> Tuple[slice, ...]:
"""
Tuple[slice]: The subset definition, in raw coordinates.
"""
return self._raw_subset_definition
@property
def close_parent(self) -> bool:
"""
bool: Call parent.close() when close is called?
"""
return self._close_parent
@close_parent.setter
def close_parent(self, value):
self._close_parent = bool(value)
def _validate_subset_definition(
self,
subset_definition: Tuple[slice, ...],
coordinate_basis: str) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
"""
Validates the subset definition.
Parameters
----------
subset_definition : Tuple[slice, ...]
coordinate_basis : str
Returns
-------
raw_shape : Tuple[int, ...]
formatted_shape : Tuple[int, ...]
"""
raw_shape = []
formatted_shape = []
original_indices = []
raw_indices = []
coordinate_basis = coordinate_basis.strip().lower()
if coordinate_basis == 'raw':
raw_def = self.parent.verify_raw_subscript(subset_definition)
form_def = self.parent.format_function.transform_raw_slice(raw_def)
elif coordinate_basis == 'formatted':
form_def = self.parent.verify_formatted_subscript(subset_definition)
raw_def = self.parent.format_function.transform_formatted_slice(form_def)
else:
raise ValueError('Got unexpected coordinate basis `{}`'.format(coordinate_basis))
for index, entry in enumerate(form_def):
this_size = get_slice_result_size(entry)
if self._squeeze and this_size == 1:
logger.info('Entry at index {} of subset definition yields a single entry'.format(index))
original_indices.append(-1)
else:
formatted_shape.append(this_size)
original_indices.append(index)
self._formatted_subset_definition = form_def
self._raw_subset_definition = raw_def
self._original_formatted_indices = tuple(original_indices)
for index, entry in enumerate(raw_def):
this_size = get_slice_result_size(entry)
if self._squeeze and this_size == 1:
logger.info('Raw slice at index {} of subset definition yields a single entry'.format(index))
raw_indices.append(-1)
else:
raw_shape.append(this_size)
raw_indices.append(index)
self._original_raw_indices = tuple(raw_indices)
return tuple(raw_shape), tuple(formatted_shape)
def _get_parent_subscript(
self,
norm_subscript: Tuple[slice, ...],
this_shape: Tuple[int, ...],
full_shape: Tuple[int, ...],
use_indices: Tuple[int, ...],
subset_definition: Tuple[slice, ...]) -> Tuple[slice, ...]:
"""
Helper function for defining a parent subscript from the subset subscript definition.
Parameters
----------
norm_subscript : Tuple[slice, ...]
The normalized subset subscript.
this_shape : Tuple[int, ...]
The shape in the subset domain.
full_shape : Tuple[int, ...]
The full parent shape in the given domain.
use_indices : Tuple[int, ...]
Structure helping to identify the dimensions from the parent which
have been preserved, and which have collapsed.
subset_definition : Tuple[slice, ...]
The subset definition with respect to parent coordinates.
Returns
-------
Tuple[slice, ...]
"""
out = []
for full_size, out_index, slice_def in zip(full_shape, use_indices, subset_definition):
if out_index == -1:
out.append(slice_def)
else:
part_def = norm_subscript[out_index]
step = part_def.step*slice_def.step
                # now, extract start and stop; rather than deriving them
                # analytically, simply compose the two slices directly
the_array = numpy.arange(full_size)[slice_def][part_def]
if len(the_array) < 1:
raise KeyError('Got invalid slice definition {} for shape {}'.format(norm_subscript, this_shape))
start = the_array[0]
stop = the_array[-1] + step
if stop < 0:
stop = None
elif stop > full_size:
stop = full_size
out.append(slice(start, stop, step))
return tuple(out)
def get_parent_raw_subscript(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice]]]) -> Tuple[slice, ...]:
"""
Gets the raw parent subscript from the raw subset subscript definition.
Parameters
----------
subscript : None|int|slice|Sequence[int|slice]
Returns
-------
Tuple[slice, ...]
"""
return self._get_parent_subscript(
self.verify_raw_subscript(subscript), self.raw_shape, self.parent.raw_shape,
self._original_raw_indices, self._raw_subset_definition)
def get_parent_formatted_subscript(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice]]]) -> Tuple[slice, ...]:
"""
Gets the formatted parent subscript from the formatted subset subscript definition.
Parameters
----------
subscript : None|int|slice|Sequence[int|slice]
Returns
-------
Tuple[slice, ...]
"""
out = self._get_parent_subscript(
self.verify_formatted_subscript(subscript), self.formatted_shape, self.parent.formatted_shape,
self._original_formatted_indices, self._formatted_subset_definition)
return out
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
if self.mode != 'r':
raise ValueError('Requires mode == "r"')
norm_subscript = self.get_parent_raw_subscript(subscript)
        if squeeze:
            return self.parent.read_raw(norm_subscript, squeeze=True)
        data = self.parent.read_raw(norm_subscript, squeeze=False)
        use_shape = []
        for check, size in zip(self._original_raw_indices, data.shape):
            if not self._squeeze or check != -1:
                use_shape.append(size)
        return numpy.reshape(data, tuple(use_shape))
def read(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
if self.mode != 'r':
raise ValueError('Requires mode == "r"')
norm_subscript = self.get_parent_formatted_subscript(subscript)
        if squeeze:
            return self.parent.read(norm_subscript, squeeze=True)
        data = self.parent.read(norm_subscript, squeeze=False)
        use_shape = []
        for check, size in zip(self._original_formatted_indices, data.shape):
            if not self._squeeze or check != -1:
                use_shape.append(size)
        return numpy.reshape(data, tuple(use_shape))
def check_fully_written(self, warn: bool = False) -> bool:
if self.mode == 'r':
return True
if self._pixels_written < self._expected_pixels_written:
if warn:
logger.error(
'Segment expected {} pixels written, but only {} pixels were written'.format(
self._expected_pixels_written, self._pixels_written))
return False
elif self._pixels_written == self._expected_pixels_written:
return True
else:
if warn:
logger.error(
'Segment expected {} pixels written,\n\t'
'but {} pixels were written.\n\t'
'This redundancy may be an error'.format(
self._expected_pixels_written, self._pixels_written))
return False
def _update_pixels_written(self, written: int) -> None:
new_pixels_written = self._pixels_written + written
if self._pixels_written <= self._expected_pixels_written < new_pixels_written:
logger.error(
'Segment expected {} pixels written,\n\t'
'but now has {} pixels written.\n\t'
'This redundancy may be an error'.format(
self._expected_pixels_written, new_pixels_written))
self._pixels_written = new_pixels_written
def write_raw(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs):
self._validate_closed()
self._verify_write_raw_details(data)
subscript = _infer_subscript_for_write(data, start_indices, subscript, self.raw_shape)
parent_subscript = self.get_parent_raw_subscript(subscript)
self.parent.write_raw(data, subscript=parent_subscript, **kwargs)
self._update_pixels_written(data.size)
def get_raw_bytes(self, warn: bool = True) -> Union[bytes, Tuple]:
"""
This returns the bytes for the underlying raw data **of the parent segment.**
The only writing use case considered at present is for the blocks including
padding inside a NITF file.
Parameters
----------
warn : bool
If `True`, then a check will be performed to ensure that the data
has been fully written.
Returns
-------
bytes|Tuple
The result will be a `bytes` object, unless the data segment is
made up of a collection of child data segments, in which case the
result will be a Tuple consisting of their `get_raw_bytes` returns.
"""
self._validate_closed()
if warn and not self.check_fully_written(warn=True):
logger.error(
'There has been a call to `get_raw_bytes` from {},\n\t'
'but all pixels are not fully written'.format(self.__class__))
return self.parent.get_raw_bytes(warn=False)
def flush(self) -> None:
self._validate_closed()
try:
self.parent.flush()
except AttributeError:
return
def close(self):
try:
if self._closed:
return
self.flush()
if self.close_parent:
self.parent.close()
DataSegment.close(self)
self._parent = None
except (ValueError, AttributeError):
return
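# A minimal sketch of viewing a chip of an existing segment (values assumed
# for demonstration):
#
#     >>> import numpy
#     >>> parent = NumpyArraySegment(
#     ...     numpy.arange(100, dtype='uint8').reshape((10, 10)))
#     >>> chip = SubsetSegment(parent, (slice(2, 6, 1), slice(3, 5, 1)), 'formatted')
#     >>> chip.formatted_shape
#     (4, 2)
#     >>> chip[0, 0]  # element (2, 3) of the parent
#     array(23, dtype=uint8)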
class BandAggregateSegment(DataSegment):
"""
This stacks a collection of data segments, which must have compatible details,
together along a new (final) band dimension.
    Note that :func:`read` and :func:`read_raw` return identical results here,
    unless a nontrivial format function is supplied. To access raw data from
    the children, access the `children` property directly.
Introduced in version 1.3.0.
"""
__slots__ = ('_band_dimension', '_children', '_close_children')
def __init__(
self,
children: Sequence[DataSegment],
band_dimension: int,
formatted_dtype: Optional[Union[str, numpy.dtype]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
close_children: bool = True):
"""
Parameters
----------
children : Sequence[DataSegment]
band_dimension : int
The band dimension. This must remain unchanged in transpose_axes,
and is not permitted to be reversed by reverse_axes.
formatted_dtype : str|numpy.dtype
formatted_shape : Tuple[int, ...]
reverse_axes : None|int|Sequence[int]
The collection of axes (in raw order) to reverse, prior to applying
transpose operation
transpose_axes : None|Tuple[int, ...]
The transpose operation to perform, after applying any axis reversal,
and before applying any format function
format_function : None|FormatFunction
close_children : bool
"""
self._band_dimension = None
self._close_children = None
self.close_children = close_children
self._children = None
self._set_band_dimension(band_dimension, reverse_axes, transpose_axes)
raw_dtype, raw_shape, form_shape, the_mode = self._set_children(children, transpose_axes)
if format_function is None:
formatted_dtype = raw_dtype
formatted_shape = form_shape
else:
if formatted_dtype is None or formatted_shape is None:
raise ValueError(
'If format_function is provided,\n\t'
'then formatted_dtype and formatted_shape must be provided.')
DataSegment.__init__(
self, raw_dtype, raw_shape, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode=the_mode)
@property
def band_dimension(self) -> int:
"""
int: The band dimension, in raw data after the transpose operation.
"""
return self._band_dimension
def _set_band_dimension(
self,
value: int,
reverse_axes: Union[None, int, Sequence[int]],
transpose_axes: Union[None, Tuple[int, ...]]) -> None:
if not isinstance(value, int):
raise TypeError('band_dimension must be an integer')
if value < 0:
            raise ValueError('band_dimension must be non-negative')
if transpose_axes is not None:
if value != transpose_axes[value]:
raise ValueError('band_dimension is not permitted to be changed by transpose_axes.')
if reverse_axes is None:
pass
elif isinstance(reverse_axes, int):
if value == reverse_axes:
raise ValueError('Reversal along the band dimension is not permitted')
else:
if value in reverse_axes:
raise ValueError('Reversal along the band dimension is not permitted')
if self._band_dimension is not None:
if value != self._band_dimension:
raise ValueError('band_dimension is read only once set')
return # nothing to be done
self._band_dimension = value
@property
def close_children(self) -> bool:
"""
bool: Call child.close() when close is called?
"""
return self._close_children
@close_children.setter
def close_children(self, value):
self._close_children = bool(value)
@property
def children(self) -> Tuple[DataSegment, ...]:
"""
The collection of children that we are stacking.
Returns
-------
Tuple[DataSegment, ...]
"""
return self._children
def _set_children(
self,
children: Sequence[DataSegment],
transpose_axes: Optional[Tuple[int, ...]]) -> Tuple[numpy.dtype, Tuple[int, ...], Tuple[int, ...], str]:
if len(children) < 2:
raise ValueError('Cannot define a BandAggregateSegment based on fewer than 2 segments.')
child_shape = children[0].formatted_shape
the_dtype = children[0].formatted_dtype
the_mode = children[0].mode
if transpose_axes is None:
transpose_axes = tuple(range(0, len(child_shape) + 1))
raw_shape = [entry for entry in child_shape]
raw_shape.insert(self.band_dimension, len(children))
raw_shape = tuple(raw_shape)
form_shape = tuple(raw_shape[entry] for entry in transpose_axes)
use_children = []
for child in children:
if child.formatted_shape != child_shape:
raise ValueError('All children must have the same formatted shape')
if child.formatted_dtype != the_dtype:
raise ValueError('All children must have the same formatted dtype')
if child.mode != the_mode:
raise ValueError('All children must have the same mode')
if child.mode == 'w' and not child.can_write_regular:
raise ValueError('write mode requires that all children can write regular data')
use_children.append(child)
self._children = tuple(use_children)
return the_dtype, raw_shape, form_shape, the_mode
@property
def bands(self) -> int:
"""
int: The number of bands (child data segments)
"""
return len(self.children)
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
if self.mode != 'r':
raise ValueError('Requires mode == "r"')
norm_subscript, the_shape = get_subscript_result_size(subscript, self.raw_shape)
out = numpy.empty(the_shape, dtype=self.raw_dtype)
full_band_subscript = tuple(slice(0, entry, 1) for entry in the_shape)
for out_index, index in enumerate(numpy.arange(self.bands)[norm_subscript[self.band_dimension]]):
child_subscript = norm_subscript[:self.band_dimension] + norm_subscript[self.band_dimension+1:]
band_subscript = full_band_subscript[:self.band_dimension] + \
(out_index, ) + \
full_band_subscript[self.band_dimension+1:]
out[band_subscript] = self.children[index].read(child_subscript, squeeze=False)
if squeeze:
return numpy.squeeze(out)
else:
return out
def check_fully_written(self, warn: bool = False) -> bool:
if self.mode == 'r':
return True
out = True
for i, child in enumerate(self.children):
done = child.check_fully_written(warn=warn)
if warn and not done:
logger.error('Band {} of BandAggregateSegment indicates incomplete writing'.format(i))
out &= done
return out
def write_raw(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs):
"""
In keeping with data segment mode, write the data provided in raw form,
assuming the slice specified relative to raw data coordinates. This
requires that `mode` is `'w'`.
        Note that raw form/order for the data segment is simply the band-stacked
        formatted data of the children. This **is not** related to raw data with
        respect to any child. To write raw data with respect to a child, use
        :func:`child.write_raw` instead.
**Only one of `start_indices` and `subscript` should be specified.**
Parameters
----------
data : numpy.ndarray
The data in raw form.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Sequence[slice]
The subscript definition in raw coordinates.
kwargs
Returns
-------
None
"""
self._validate_closed()
self._verify_write_raw_details(data)
norm_subscript = _infer_subscript_for_write(
data, start_indices, subscript, self.raw_shape)
        # iterate over each band, and write the formatted data there...
        full_subscript = tuple(slice(0, entry, 1) for entry in data.shape)
        for out_index, index in enumerate(
                numpy.arange(self.bands)[norm_subscript[self.band_dimension]]):
            child_subscript = norm_subscript[:self.band_dimension] + \
                norm_subscript[self.band_dimension+1:]
            band_subscript = full_subscript[:self.band_dimension] + \
                (out_index, ) + \
                full_subscript[self.band_dimension+1:]
            self.children[index].write(
                data[band_subscript], subscript=child_subscript, **kwargs)
def get_raw_bytes(self, warn: bool = True) -> Union[bytes, Tuple]:
self._validate_closed()
return tuple(entry.get_raw_bytes(warn=warn) for entry in self.children)
def flush(self) -> None:
self._validate_closed()
try:
if self.children is not None:
for child in self.children:
child.flush()
except AttributeError:
return
def close(self):
try:
if self._closed:
return
self.flush()
if self._children is not None:
for entry in self._children:
entry.close()
DataSegment.close(self)
self._children = None
except AttributeError:
return
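# A minimal sketch of stacking two single-band segments along a final band
# dimension (values assumed for demonstration):
#
#     >>> import numpy
#     >>> red = NumpyArraySegment(numpy.zeros((5, 6), dtype='uint8'))
#     >>> green = NumpyArraySegment(numpy.ones((5, 6), dtype='uint8'))
#     >>> stacked = BandAggregateSegment([red, green], band_dimension=2)
#     >>> stacked.formatted_shape
#     (5, 6, 2)
#     >>> stacked[0, 0, :]
#     array([0, 1], dtype=uint8)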
class BlockAggregateSegment(DataSegment):
"""
Combines a collection of child data segments, according to a given
block definition. All children must have the same formatted_dtype. This
implementation is motivated by a two-dimensional block arrangement, but is
entirely general.
No effort is made to ensure that the block definition spans the whole space,
nor is any effort made to ensure that blocks do not overlap.
    If there are holes present in the block definition, then data read across
    any hole will be populated with `missing_data_value`. Writes attempted
    across any hole will simply be ignored.
Introduced in version 1.3.0.
"""
__slots__ = (
'_children', '_formatted_child_arrangement', '_raw_child_arrangement',
'_missing_data_value', '_close_children')
def __init__(
self,
children: Sequence[DataSegment],
child_arrangement: Sequence[Tuple[slice, ...]],
coordinate_basis: str,
missing_data_value,
raw_shape: Tuple[int, ...],
formatted_dtype: Union[str, numpy.dtype],
formatted_shape: Tuple[int, ...],
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
close_children: bool = True):
"""
Parameters
----------
children : Sequence[DataSegment]
The collection of children
child_arrangement : Sequence[Tuple[slice, ...]]
The collection of definitions for each block. Overlap in definition
is permitted.
missing_data_value
Missing data value, which must be compatible with
raw_dtype=child.formatted_dtype.
close_children : bool
"""
        self._close_children = None
        self.close_children = close_children
self._children = None
self._formatted_child_arrangement = None
self._raw_child_arrangement = None
self._missing_data_value = missing_data_value
raw_dtype = children[0].formatted_dtype
the_mode = children[0].mode
DataSegment.__init__(
self, raw_dtype, raw_shape, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode=the_mode)
self._set_children(children, child_arrangement, coordinate_basis)
@property
def close_children(self) -> bool:
"""
bool: Call child.close() when close is called?
"""
return self._close_children
@close_children.setter
def close_children(self, value):
self._close_children = bool(value)
@property
def children(self) -> Tuple[DataSegment, ...]:
"""
The collection of children that we are stacking together.
Returns
-------
Tuple[DataSegment, ...]
"""
return self._children
def _set_children(
self,
children: Sequence[DataSegment],
child_arrangement: Sequence[Tuple[slice, ...]],
coordinate_basis: str) -> None:
if len(children) != len(child_arrangement):
raise ValueError('We must have the same number of children as child_arrangement entries')
coordinate_basis = coordinate_basis.strip().lower()
if coordinate_basis == 'raw':
raw_arrangement = [self.verify_raw_subscript(entry) for entry in child_arrangement]
formatted_arrangement = [self.format_function.transform_raw_slice(entry) for entry in raw_arrangement]
elif coordinate_basis == 'formatted':
formatted_arrangement = [self.verify_formatted_subscript(entry) for entry in child_arrangement]
raw_arrangement = [self.format_function.transform_formatted_slice(entry) for entry in formatted_arrangement]
else:
raise ValueError('Got unexpected coordinate basis `{}`'.format(coordinate_basis))
for i, (child, raw_def, form_def) in enumerate(zip(children, raw_arrangement, formatted_arrangement)):
if child.formatted_dtype != self.raw_dtype:
raise ValueError(
'Each child.formatted_dtype must be identical to\n\t'
'self.raw_dtype = {}'.format(self.raw_dtype))
for entry in raw_def:
if entry.step not in [1, -1]:
raise ValueError('Each entry of child_arrangement must have step +/-1.')
for entry in form_def:
if entry.step not in [1, -1]:
raise ValueError('Each entry of child_arrangement must have step +/-1.')
# verify the shape is sensible
_, result_shape = get_subscript_result_size(raw_def, self.raw_shape)
if result_shape != child.formatted_shape:
raise ValueError(
'child_arrangement definition expects child {} to have formatted_shape {},\n\t'
'but it has formatted_shape {}'.format(i, result_shape, child.formatted_shape))
self._children = tuple(children)
self._raw_child_arrangement = tuple(raw_arrangement)
self._formatted_child_arrangement = tuple(formatted_arrangement)
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
if self.mode != 'r':
raise ValueError('Requires mode == "r"')
        subscript, result_shape = get_subscript_result_size(subscript, self.raw_shape)
        out = numpy.full(result_shape, fill_value=self._missing_data_value, dtype=self.raw_dtype)
for entry, child in zip(self._raw_child_arrangement, self._children):
use_block = True
parent_subscript = []
child_subscript = []
for data_slice, block_slice in zip(subscript, entry):
child_entry, par_entry = _find_slice_overlap(data_slice, block_slice)
if par_entry is None:
use_block = False
# there is no overlap
break
else:
parent_subscript.append(par_entry)
child_subscript.append(child_entry)
if use_block:
                out[tuple(parent_subscript)] = child.read(tuple(child_subscript), squeeze=False)
if squeeze:
return numpy.squeeze(out)
else:
return out
def check_fully_written(self, warn: bool = False) -> bool:
if self.mode == 'r':
return True
out = True
for i, child in enumerate(self.children):
done = child.check_fully_written(warn=warn)
if warn and not done:
logger.error('Block {} of BlockAggregateSegment indicates incomplete writing'.format(i))
out &= done
return out
def write_raw(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs):
"""
In keeping with data segment mode, write the data provided in raw form,
assuming the slice specified relative to raw data coordinates. This
requires that `mode` is `'w'`.
        Note that raw form/order for the data segment is built from blocks of
        the formatted data for each child. This **is not** related to raw data
        with respect to any child. To write raw data with respect to a child,
        use :func:`child.write_raw` instead.
**Only one of `start_indices` and `subscript` should be specified.**
Parameters
----------
data : numpy.ndarray
The data in raw form.
start_indices : None|int|Tuple[int, ...]
Assuming a contiguous chunk of data, this provides the starting
indices of the chunk. Any missing (tail) coordinates will be filled
in with 0's.
subscript : None|Sequence[slice]
The subscript definition in raw coordinates.
kwargs
Returns
-------
None
"""
self._validate_closed()
self._verify_write_raw_details(data)
norm_subscript = _infer_subscript_for_write(data, start_indices, subscript, self.raw_shape)
for entry, child in zip(self._raw_child_arrangement, self._children):
# determine if there is overlap of norm_subscript with this block,
# and write the appropriate data, if so.
use_block = True
data_subscript = []
child_subscript = []
for lim, data_slice, block_slice in zip(self.raw_shape, norm_subscript, entry):
                child_entry, par_entry = _find_slice_overlap(data_slice, block_slice)
                # child_entry expresses the overlap in coordinates local to the
                # block in question, while par_entry expresses it as indices
                # into the elements selected by data_slice (i.e. into data)
                if par_entry is None:
                    use_block = False
                    # there is no overlap
                    break
                # normalize par_entry into a clean increasing slice, clipped to
                # the valid range, for indexing into the data array
                _, data_entry = _find_slice_overlap(slice(0, lim, 1), par_entry)
                data_subscript.append(data_entry)
child_subscript.append(child_entry)
if use_block:
child.write(data[tuple(data_subscript)], subscript=tuple(child_subscript), **kwargs)
def get_raw_bytes(self, warn: bool = True) -> Union[bytes, Tuple]:
self._validate_closed()
return tuple(entry.get_raw_bytes(warn=warn) for entry in self.children)
def flush(self) -> None:
self._validate_closed()
try:
if self.children is not None:
for child in self.children:
child.flush()
except AttributeError:
return
def close(self):
try:
if self._closed:
return
self.flush()
if self._children is not None:
for entry in self._children:
entry.close()
DataSegment.close(self)
self._children = None
except AttributeError:
return
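# A minimal sketch of assembling two side-by-side blocks (values assumed for
# demonstration):
#
#     >>> import numpy
#     >>> left = NumpyArraySegment(numpy.zeros((4, 3), dtype='uint8'))
#     >>> right = NumpyArraySegment(numpy.ones((4, 3), dtype='uint8'))
#     >>> blocked = BlockAggregateSegment(
#     ...     [left, right],
#     ...     [(slice(0, 4, 1), slice(0, 3, 1)), (slice(0, 4, 1), slice(3, 6, 1))],
#     ...     'raw', 0, (4, 6), 'uint8', (4, 6))
#     >>> blocked[0, :]
#     array([0, 0, 0, 1, 1, 1], dtype=uint8)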
####
# Concrete implementations
class NumpyArraySegment(DataSegment):
"""
    DataSegment based on reading from, or writing to, a numpy.ndarray.
Introduced in version 1.3.0.
"""
__slots__ = ('_underlying_array', '_pixels_written', '_expected_pixels_written')
def __init__(
self,
underlying_array: numpy.ndarray,
formatted_dtype: Optional[Union[str, numpy.dtype]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
mode: str = 'r'):
"""
Parameters
----------
underlying_array : numpy.ndarray
formatted_dtype : str|numpy.dtype
formatted_shape : Tuple[int, ...]
reverse_axes : None|int|Sequence[int]
The collection of axes (in raw order) to reverse, prior to applying
transpose operation
transpose_axes : None|Tuple[int, ...]
The transpose operation to perform to the raw data, after applying
any axis reversal, and before applying any format function
format_function : None|FormatFunction
mode : str
"""
if not isinstance(underlying_array, numpy.ndarray):
raise TypeError(
'underlying array must be a numpy.ndarray, got type `{}`'.format(
type(underlying_array)))
self._underlying_array = underlying_array
self._pixels_written = 0
if formatted_dtype is None:
if format_function is None:
formatted_dtype = underlying_array.dtype
else:
raise ValueError(
'Format function is provided, so formatted_dtype must be provided.')
if formatted_shape is None:
if format_function is None:
if transpose_axes is None:
formatted_shape = underlying_array.shape
else:
formatted_shape = [underlying_array.shape[index] for index in transpose_axes]
else:
raise ValueError(
'Format function is provided, so formatted_shape must be provided.')
DataSegment.__init__(
self, underlying_array.dtype, underlying_array.shape, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes, format_function=format_function,
mode=mode)
if self.mode == 'w':
self._expected_pixels_written = self._underlying_array.size
else:
self._expected_pixels_written = 0
@property
def underlying_array(self) -> numpy.ndarray:
"""
The underlying data array.
Returns
-------
numpy.ndarray
"""
return self._underlying_array
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
if self.mode != 'r':
raise ValueError('Requires mode == "r"')
subscript, out_shape = get_subscript_result_size(subscript, self.raw_shape)
out = self._underlying_array[subscript] # singleton dimensions are preserved at this point
if squeeze:
return numpy.squeeze(out)
else:
return numpy.reshape(out, out_shape)
def check_fully_written(self, warn: bool = False) -> bool:
if self.mode == 'r':
return True
if self._pixels_written < self._expected_pixels_written:
if warn:
logger.error(
'Segment expected {} pixels written, but only {} pixels were written'.format(
self._expected_pixels_written, self._pixels_written))
return False
elif self._pixels_written == self._expected_pixels_written:
return True
else:
if warn:
logger.error(
'Segment expected {} pixels written,\n\t'
'but {} pixels were written.\n\t'
'This redundancy may be an error'.format(
self._expected_pixels_written, self._pixels_written))
return False
def _update_pixels_written(self, written: int) -> None:
new_pixels_written = self._pixels_written + written
if self._pixels_written <= self._expected_pixels_written < new_pixels_written:
logger.error(
'Segment expected {} pixels written,\n\t'
'but now has {} pixels written.\n\t'
'This redundancy may be an error'.format(
self._expected_pixels_written, new_pixels_written))
self._pixels_written = new_pixels_written
def write_raw(
self,
data: numpy.ndarray,
start_indices: Optional[Union[int, Tuple[int, ...]]] = None,
subscript: Optional[Sequence[slice]] = None,
**kwargs):
self._validate_closed()
self._verify_write_raw_details(data)
subscript = _infer_subscript_for_write(data, start_indices, subscript, self.raw_shape)
self._underlying_array[subscript] = data
self._update_pixels_written(data.size)
def get_raw_bytes(self, warn: bool = False) -> Union[bytes, Tuple]:
self._validate_closed()
if warn and not self.check_fully_written(warn=True):
logger.error(
'There has been a call to `get_raw_bytes` from {},\n\t'
'but all pixels are not fully written'.format(self.__class__))
return self.underlying_array.tobytes()
def flush(self) -> None:
self._validate_closed()
try:
if self.mode == 'w' and hasattr(self._underlying_array, 'flush'):
# noinspection PyUnresolvedReferences
self._underlying_array.flush()
except AttributeError:
return
def close(self) -> None:
try:
if self._closed:
return
self.flush()
self._underlying_array = None
DataSegment.close(self)
except AttributeError:
return
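# A hedged usage sketch (illustrative only, not part of the original API surface):
# with no format function supplied, raw and formatted data coincide, so sub-chips
# can be pulled directly in raw coordinates. The array contents are placeholders.
def _example_numpy_array_segment():
    raw = numpy.arange(100, dtype='float32').reshape((10, 10))
    segment = NumpyArraySegment(raw, mode='r')
    # read the upper-left 5x5 corner in raw coordinates, keeping singleton dims
    chip = segment.read_raw((slice(0, 5, 1), slice(0, 5, 1)), squeeze=False)
    segment.close()
    return chip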
class NumpyMemmapSegment(NumpyArraySegment):
"""
DataSegment based on establishing a numpy memmap, and using that as the
underlying array.
Introduced in version 1.3.0.
"""
__slots__ = (
'_file_object', '_memory_map', '_close_file')
def __init__(
self,
file_object: Union[str, BinaryIO],
data_offset: int,
raw_dtype: Union[str, numpy.dtype],
raw_shape: Tuple[int, ...],
formatted_dtype: Optional[Union[str, numpy.dtype]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
mode: str = 'r',
close_file: bool = False):
"""
Parameters
----------
file_object : str|BinaryIO
data_offset : int
raw_dtype : str|numpy.dtype
raw_shape : Tuple[int, ...]
formatted_dtype : str|numpy.dtype
formatted_shape : Tuple[int, ...]
reverse_axes : None|int|Sequence[int]
The collection of axes (in raw order) to reverse, prior to applying
transpose operation
transpose_axes : None|Tuple[int, ...]
The transpose operation to perform to the raw data, after applying
any axis reversal, and before applying any format function
format_function : None|FormatFunction
mode : str
close_file : bool
"""
self._close_file = None
if isinstance(file_object, str):
close_file = True
self.close_file = close_file
self._file_object = file_object
self._pixels_written = 0
self._expected_pixels_written = 0
mmap_mode = 'r' if mode == 'r' else 'r+'
self._memory_map = numpy.memmap(
file_object,
offset=data_offset,
dtype=raw_dtype,
shape=raw_shape,
mode=mmap_mode)
NumpyArraySegment.__init__(
self, self._memory_map, formatted_dtype=formatted_dtype, formatted_shape=formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode=mode)
@property
def close_file(self) -> bool:
"""
bool: Close the file object when complete?
"""
return self._close_file
@close_file.setter
def close_file(self, value):
self._close_file = bool(value)
def flush(self) -> None:
try:
if self.mode == 'w':
self._memory_map.flush()
except AttributeError:
pass
def close(self):
try:
if self._closed:
return
NumpyArraySegment.close(self) # NB: this calls flush
self._memory_map = None
if self._close_file and hasattr(self._file_object, 'close'):
self._file_object.close()
self._file_object = None
except AttributeError:
return
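# A hedged usage sketch (illustrative only): the file path, dtype, and shape are
# hypothetical placeholders. Supplying a path (rather than an open file object)
# forces close_file=True, so closing the segment also closes the underlying file.
def _example_numpy_memmap_segment():
    segment = NumpyMemmapSegment(
        '/path/to/raw_file.bin', 0, 'int16', (512, 512, 2), mode='r')
    # copy the first ten rows out of the memory map before closing
    data = numpy.copy(segment.read_raw((slice(0, 10, 1), slice(None), slice(None))))
    segment.close()
    return data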
class HDF5DatasetSegment(DataSegment):
"""
DataSegment based on reading from an hdf5 file, using the h5py library.
Introduced in version 1.3.0.
"""
_allowed_modes = ('r', )
__slots__ = (
'_file_object', '_data_set', '_close_file')
def __init__(
self,
file_object: Union[str, h5pyFile],
data_set: Union[str, h5pyDataset],
formatted_dtype: Optional[Union[str, numpy.dtype]] = None,
formatted_shape: Optional[Tuple[int, ...]] = None,
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
close_file: bool = False):
"""
Parameters
----------
file_object : str|h5py.File
data_set : str|h5py.Dataset
formatted_dtype : str|numpy.dtype
formatted_shape : Tuple[int, ...]
reverse_axes : None|int|Sequence[int]
The collection of axes (in raw order) to reverse, prior to applying
transpose operation
transpose_axes : None|Tuple[int, ...]
The transpose operation to perform to the raw data, after applying
any axis reversal, and before applying any format function
format_function : None|FormatFunction
close_file : bool
"""
self._close_file = None
self._file_object = None
self._data_set = None
if h5py is None:
raise ValueError(
'h5py was not successfully imported, and no hdf5 file can be read')
if isinstance(file_object, str):
close_file = True
self._set_file_object(file_object)
self._set_data_set(data_set)
if format_function is not None:
if formatted_dtype is None or formatted_shape is None:
raise ValueError(
'format_function is supplied, so formatted_dtype and formatted_shape must also be supplied')
else:
formatted_dtype = self.data_set.dtype
raw_shape = self.data_set.shape
if transpose_axes is None:
formatted_shape = raw_shape
else:
formatted_shape = tuple(raw_shape[entry] for entry in transpose_axes)
self.close_file = close_file
DataSegment.__init__(
self, self.data_set.dtype, self.data_set.shape, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r')
@property
def close_file(self) -> bool:
"""
bool: Close the file object when complete?
"""
return self._close_file
@close_file.setter
def close_file(self, value):
self._close_file = bool(value)
@property
def file_object(self) -> h5pyFile:
return self._file_object
def _set_file_object(self, value) -> None:
if isinstance(value, str):
value = h5py.File(value, mode='r')
if not isinstance(value, h5py.File):
raise ValueError('Requires a path to a hdf5 file or h5py.File object')
self._file_object = value
@property
def data_set(self) -> h5pyDataset:
return self._data_set
def _set_data_set(self, value) -> None:
if isinstance(value, str):
value = self.file_object[value]
if not isinstance(value, h5py.Dataset):
raise ValueError('Requires a dataset path or h5py.Dataset object')
self._data_set = value
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
subscript, out_shape = get_subscript_result_size(subscript, self.raw_shape)
# NB: h5py does not support slicing with a negative step (right now)
# we need to read the identical elements in positive order,
# then reverse. Note that this is not the same as using the mirror
# image slice, which is used in the reverse_axes operations.
reverse = []
use_subscript = []
for index, (the_size, entry) in enumerate(zip(self.raw_shape, subscript)):
if entry.step < 0:
use_subscript.append(_reverse_slice(entry))
reverse.append(index)
else:
use_subscript.append(entry)
use_subscript = tuple(use_subscript)
out = numpy.reshape(self.data_set[use_subscript], out_shape)
for index in reverse:
out = numpy.flip(out, axis=index)
if squeeze:
return numpy.squeeze(out)
else:
return out
def write_raw(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs):
raise NotImplementedError
def get_raw_bytes(self, warn: bool = True) -> Union[bytes, Tuple]:
raise NotImplementedError
def check_fully_written(self, warn: bool = False) -> bool:
return True
def close(self) -> None:
try:
if self._closed:
return
self._data_set = None
if self._close_file and hasattr(self.file_object, 'close'):
self.file_object.close()
self._file_object = None
DataSegment.close(self)
except AttributeError:
pass
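# A hedged usage sketch (illustrative only): both the file path and the dataset
# name '/data' are hypothetical placeholders, and h5py must be importable.
# Supplying a path (rather than an h5py.File) forces close_file=True.
def _example_hdf5_dataset_segment():
    segment = HDF5DatasetSegment('/path/to/file.h5', '/data')
    full = segment.read_raw(None, squeeze=False)  # the entire dataset
    segment.close()
    return full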
class FileReadDataSegment(DataSegment):
"""
Read a data array manually from a file - this is primarily intended for cloud
usage.
Introduced in version 1.3.0.
"""
_allowed_modes = ('r', )
__slots__ = (
'_file_object', '_data_offset', '_close_file')
def __init__(
self,
file_object: BinaryIO,
data_offset: int,
raw_dtype: Union[str, numpy.dtype],
raw_shape: Tuple[int, ...],
formatted_dtype: Union[str, numpy.dtype],
formatted_shape: Tuple[int, ...],
reverse_axes: Optional[Union[int, Sequence[int]]] = None,
transpose_axes: Optional[Tuple[int, ...]] = None,
format_function: Optional[FormatFunction] = None,
close_file: bool = False):
"""
Parameters
----------
file_object : BinaryIO
data_offset : int
raw_dtype : str|numpy.dtype
raw_shape : Tuple[int, ...]
formatted_dtype : str|numpy.dtype
formatted_shape : Tuple[int, ...]
reverse_axes : None|int|Sequence[int]
The collection of axes (in raw order) to reverse, prior to applying
transpose operation
transpose_axes : None|Tuple[int, ...]
The transpose operation to perform to the raw data, after applying
any axis reversal, and before applying any format function
format_function : None|FormatFunction
close_file : bool
"""
self._file_object = None
self._data_offset = None
self._close_file = None
self.close_file = close_file
self._set_data_offset(data_offset)
self._set_file_object(file_object)
DataSegment.__init__(
self, raw_dtype, raw_shape, formatted_dtype, formatted_shape,
reverse_axes=reverse_axes, transpose_axes=transpose_axes,
format_function=format_function, mode='r')
@property
def close_file(self) -> bool:
"""
bool: Close the file object when complete?
"""
return self._close_file
@close_file.setter
def close_file(self, value):
self._close_file = bool(value)
@property
def file_object(self) -> BinaryIO:
return self._file_object
def _set_file_object(self, value) -> None:
if not is_file_like(value):
raise ValueError('Requires a file-like object')
self._file_object = value
@property
def data_offset(self) -> int:
"""
int: The offset of the data in bytes from the start of the file-like
object.
"""
return self._data_offset
def _set_data_offset(self, value: int) -> None:
value = int(value)
if value < 0:
raise ValueError('data_offset must be non-negative.')
self._data_offset = value
def read_raw(
self,
subscript: Union[None, int, slice, Sequence[Union[int, slice, Tuple[int, ...]]]],
squeeze=True) -> numpy.ndarray:
self._validate_closed()
subscript, out_shape = get_subscript_result_size(subscript, self.raw_shape)
init_slice = subscript[0]
init_reverse = (init_slice.step < 0)
if init_reverse:
init_slice = _reverse_slice(init_slice)
pixel_per_row = 1 if len(self.raw_shape) == 1 else int(numpy.prod(self.raw_shape[1:]))
row_stride = self.raw_dtype.itemsize*pixel_per_row
start_row = init_slice.start
rows = init_slice.stop - init_slice.start
# read the whole contiguous chunk from start_row up to the final row
# seek to the proper start location
start_loc = self._data_offset + start_row*row_stride
self.file_object.seek(start_loc, os.SEEK_SET)
total_size = rows*row_stride
# read our data
data = self.file_object.read(total_size)
if len(data) != total_size:
raise ValueError(
'Tried to read {} bytes of data, but received {}.\n'
'The most likely reason for this is a malformed chipper, \n'
'which attempts to read more data than the file contains'.format(total_size, len(data)))
# define temp array from this data
data = numpy.frombuffer(data, self._raw_dtype, rows*pixel_per_row)
data = numpy.reshape(data, (rows, ) + self.raw_shape[1:])
# extract our data
out = data[(slice(None, None, init_slice.step), ) + subscript[1:]]
out = numpy.reshape(out, out_shape)
if init_reverse:
out = numpy.flip(out, axis=0)
if squeeze:
out = numpy.copy(numpy.squeeze(out))
else:
out = numpy.copy(out)
del data
return out
def write_raw(
self,
data: numpy.ndarray,
start_indices: Union[None, int, Tuple[int, ...]] = None,
subscript: Union[None, Sequence[slice]] = None,
**kwargs):
if self.mode != 'w':
raise ValueError('I/O Error, functionality requires mode == "w"')
raise NotImplementedError
def get_raw_bytes(self, warn: bool = True) -> Union[bytes, Tuple]:
raise NotImplementedError
def check_fully_written(self, warn: bool = False) -> bool:
return True
def close(self) -> None:
try:
if self._closed:
return
if self._close_file:
if hasattr(self.file_object, 'close'):
self.file_object.close()
self._file_object = None
DataSegment.close(self)
except AttributeError:
return
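# A hedged usage sketch (illustrative only): the offset, dtype, and shapes are
# hypothetical placeholders. Note that close_file defaults to False, so the
# supplied file object remains open and under the caller's control afterwards.
def _example_file_read_segment(file_object):
    segment = FileReadDataSegment(
        file_object, 0, 'uint8', (1024, 1024), 'uint8', (1024, 1024))
    rows = segment.read_raw((slice(0, 2, 1), slice(None)))  # first two rows
    segment.close()
    return rows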
| 89,754 | 34.773216 | 120 | py |
sarpy | sarpy-master/sarpy/io/general/converter.py | """
This module provides utilities for attempting to open other image files not
opened by the sicd, sidd, cphd, or crsd reader collections.
"""
import os
from typing import Callable
from sarpy.io.general.base import SarpyIOError, BaseReader, check_for_openers
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
###########
# Module variables
_openers = []
_parsed_openers = False
def register_opener(open_func: Callable) -> None:
"""
Provide a new opener.
Parameters
----------
open_func : Callable
This is required to be a function which takes a single argument (file name).
This function should return a sarpy.io.general.base.BaseReader instance
if the referenced file is viable for the underlying type, and None otherwise.
Returns
-------
None
"""
if not callable(open_func):
raise TypeError('open_func must be a callable')
if open_func not in _openers:
_openers.append(open_func)
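# A hedged sketch of the opener contract described above (illustrative only):
# this assumes, following the usual sarpy convention, that reader constructors
# raise SarpyIOError when handed a file of the wrong format.
def _example_nitf_opener(file_name):
    from sarpy.io.general.nitf import NITFReader
    try:
        return NITFReader(file_name)
    except SarpyIOError:
        return None
# register_opener(_example_nitf_opener)  # registration left commented out here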
def parse_openers() -> None:
"""
Automatically find the viable openers (i.e. :func:`is_a`) in the various modules.
Returns
-------
"""
global _parsed_openers
if _parsed_openers:
return
_parsed_openers = True
check_for_openers('sarpy.io.general', register_opener)
def open_general(file_name: str) -> BaseReader:
"""
Given a file, try to find and return the appropriate reader object.
Parameters
----------
file_name : str
Returns
-------
BaseReader
Raises
------
SarpyIOError
"""
if not os.path.exists(file_name):
raise SarpyIOError('File {} does not exist.'.format(file_name))
# parse openers, if not already done
parse_openers()
# see if we can find a reader through trial and error
for opener in _openers:
reader = opener(file_name)
if reader is not None:
return reader
# If for loop completes, no matching file format was found.
raise SarpyIOError('Unable to determine image format.')
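# A hedged usage sketch (illustrative only; the path is a hypothetical placeholder):
#
#   from sarpy.io.general.converter import open_general
#   reader = open_general('/path/to/some_image.ntf')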
| 2,051 | 22.318182 | 85 | py |
sarpy | sarpy-master/sarpy/io/general/__init__.py | """
This package mostly centered on base implementations for reader architecture.
"""
__classification__ = 'UNCLASSIFIED'
def open(*args, **kwargs):
from .converter import open_general
return open_general(*args, **kwargs)
| 233 | 20.272727 | 77 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/base.py | """
Base NITF Header functionality definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from weakref import WeakKeyDictionary
from typing import Union, List, Tuple
from collections import OrderedDict
import struct
import numpy
from sarpy.compliance import bytes_to_string
from .tres.registration import find_tre
logger = logging.getLogger(__name__)
# Base NITF type
class BaseNITFElement(object):
@classmethod
def minimum_length(cls):
"""
The minimum size in bytes that takes to write this header element.
Returns
-------
int
"""
raise NotImplementedError
def get_bytes_length(self):
"""
Get the length of the serialized bytes array
Returns
-------
int
"""
raise NotImplementedError
def to_bytes(self):
"""
Write the object to a properly packed bytes string.
Returns
-------
bytes
"""
raise NotImplementedError
@classmethod
def from_bytes(cls, value, start):
"""
Parameters
----------
value: bytes|str
the header string to scrape
start : int
the beginning location in the string
Returns
-------
"""
raise NotImplementedError
def to_json(self):
"""
Serialize element to a json representation. This is intended to allow
a simple presentation of the element.
Returns
-------
dict
"""
raise NotImplementedError
# Basic input and output interpreters
def _get_bytes(val, length):
if val is None:
return b''
elif isinstance(val, int):
frm_str = '{0:0' + str(length) + 'd}'
return frm_str.format(val).encode('utf-8')
elif isinstance(val, str):
frm_str = '{0:' + str(length) + 's}'
return frm_str.format(val).encode('utf-8')
elif isinstance(val, bytes):
if len(val) >= length:
return val[:length]
else:
return val + b'\x00' * (length - len(val))
else:
raise TypeError('Unhandled type {}'.format(type(val)))
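# Worked examples of the padding rules above (illustrative comments):
#   _get_bytes(13, 5)    -> b'00013'        (integers zero-padded on the left)
#   _get_bytes('AB', 4)  -> b'AB  '         (strings space-padded on the right)
#   _get_bytes(b'AB', 4) -> b'AB\x00\x00'   (bytes null-padded on the right)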
def _parse_int(val, length, default, name, instance):
"""
Parse and/or validate the integer input.
Parameters
----------
val : None|int|bytes
length : int
default : None|int
Returns
-------
int
"""
if val is None:
return default
else:
val = int(val)
if -int(10)**(length-1) < val < int(10)**length:
return val
raise ValueError(
'Integer {} cannot be rendered as a string of {} characters for '
'attribute {} of class {}'.format(val, length, name, instance.__class__.__name__))
def _parse_float(val, default):
"""
Sort of a special case.
Parameters
----------
val : None|float|bytes
default : None|float
Returns
-------
float
"""
if val is None:
return default
else:
return float(val)
def _parse_str(val, length, default, name, instance):
"""
Parse and/or validate the string input.
Parameters
----------
val : None|str|bytes
length : int
default : None|str
Returns
-------
str
"""
if val is None:
return default
if isinstance(val, bytes):
val = bytes_to_string(val)
elif not isinstance(val, str):
val = str(val)
val = val.rstrip()
if len(val) <= length:
return val
else:
logger.warning(
'Got string input value of length {} for attribute {} of class {}, '
'which is longer than the allowed length {}, so '
'truncating'.format(len(val), name, instance.__class__.__name__, length))
return val[:length]
def _parse_bytes(val, length, default, name, instance):
"""
Validate the raw/bytes input.
Parameters
----------
val : None|bytes
length : int
default : None|int
Returns
-------
int
"""
if val is None:
return default
elif isinstance(val, bytes):
if len(val) <= length:
return val
else:
logger.warning(
'Got string input value of length {} for attribute {} of class {}, '
'which is longer than the allowed length {}, so '
'truncating'.format(len(val), name, instance.__class__.__name__, length))
return val[:length]
else:
raise TypeError(
'Expected type int or bytes for attribute {} of class {}, '
'and got {}'.format(name, instance.__class__.__name__, type(val)))
def _parse_nitf_element(val, nitf_type, default_args, name, instance):
if not issubclass(nitf_type, BaseNITFElement):
raise TypeError(
'nitf_type for attribute {} of class {} must be a subclass of '
'BaseNITFElement'.format(name, nitf_type.__class__.__name__))
if val is None:
if default_args is None:
return None
return nitf_type(**default_args)
elif isinstance(val, bytes):
return nitf_type.from_bytes(val, 0)
elif isinstance(val, nitf_type):
return val
else:
raise ValueError(
'Attribute {} for class {} requires an input of type bytes or {}. '
'Got {}'.format(name, instance.__class__.__name__, nitf_type, type(val)))
# NITF Descriptors
class _BasicDescriptor(object):
"""A descriptor object for reusable properties. Note that is required that the calling instance is hashable."""
_typ_string = None
def __init__(self, name, required, length, docstring=''):
self.data = WeakKeyDictionary() # our instance reference dictionary
# WeakDictionary use is subtle here. A reference to a particular class instance in this dictionary
# should not be the thing keeping a particular class instance from being destroyed.
self.name = name
self.required = required
self.length = length
self.__doc__ = docstring
self._format_docstring()
def _format_docstring(self):
docstring = self.__doc__
if docstring is None:
docstring = ''
if (self._typ_string is not None) and (not docstring.startswith(self._typ_string)):
docstring = '{} {}'.format(self._typ_string, docstring)
suff = self._docstring_suffix()
if suff is not None:
docstring = '{} {}'.format(docstring, suff)
if not self.required:
docstring = '{} {}'.format(docstring, ' **Conditional.**')
self.__doc__ = docstring
def _docstring_suffix(self):
return None
def _get_default(self, instance):
return None
def __get__(self, instance, owner):
"""The getter.
Parameters
----------
instance : object
the calling class instance
owner : object
the type of the class - that is, the actual object to which this descriptor is assigned
Returns
-------
object
the return value
"""
if instance is None:
# this has been accessed on the class, so return the descriptor itself
return self
fetched = self.data.get(instance, None)
if fetched is not None or not self.required:
return fetched
else:
msg = 'Required field {} of class {} is not populated.'.format(self.name, instance.__class__.__name__)
raise AttributeError(msg)
def __set__(self, instance, value):
"""The setter method.
Parameters
----------
instance : object
the calling class instance
value
the value to use in setting - the type depends of the specific extension of this base class
Returns
-------
bool
this base class, and only this base class, handles the required compliance and None behavior and has
a return. This returns True if the setting value was None, and False otherwise.
"""
# NOTE: This is intended to handle this case for every extension of this class. Hence the boolean return,
# which extensions SHOULD NOT implement. This is merely to follow DRY principles.
if value is None:
default_value = self._get_default(instance)
if default_value is not None:
self.data[instance] = default_value
return True
elif self.required:
raise ValueError(
'Attribute {} of class {} cannot be assigned None.'.format(self.name, instance.__class__.__name__))
self.data[instance] = None
return True
# note that the remainder must be implemented in each extension
return False # this is probably a bad habit, but this returns something for convenience alone
class _StringDescriptor(_BasicDescriptor):
"""A descriptor for string type"""
_typ_string = 'str:'
def __init__(self, name, required, length, default_value='', docstring=None):
self._default_value = default_value
super(_StringDescriptor, self).__init__(
name, required, length, docstring=docstring)
def _get_default(self, instance):
return self._default_value
def _docstring_suffix(self):
if self._default_value is not None and len(self._default_value) > 0:
return ' Default value is :code:`{}`.'.format(self._default_value)
def __set__(self, instance, value):
if super(_StringDescriptor, self).__set__(instance, value): # the None handler...kinda hacky
return
self.data[instance] = _parse_str(value, self.length, self._default_value, self.name, instance)
class _StringEnumDescriptor(_BasicDescriptor):
"""A descriptor for enumerated (specified) string type.
**The valid entries are case-sensitive and should be stripped of white space on each end.**"""
_typ_string = 'str:'
def __init__(self, name, required, length, values, default_value=None, docstring=None):
self.values = values
self._default_value = default_value
super(_StringEnumDescriptor, self).__init__(
name, required, length, docstring=docstring)
if (self._default_value is not None) and (self._default_value not in self.values):
self._default_value = None
def _get_default(self, instance):
return self._default_value
def _docstring_suffix(self):
suff = ' Takes values in :code:`{}`.'.format(self.values)
if self._default_value is not None and len(self._default_value) > 0:
suff += ' Default value is :code:`{}`.'.format(self._default_value)
return suff
def __set__(self, instance, value):
if value is None:
if self._default_value is not None:
self.data[instance] = self._default_value
else:
super(_StringEnumDescriptor, self).__set__(instance, value)
return
val = _parse_str(value, self.length, self._default_value, self.name, instance)
if val in self.values:
self.data[instance] = val
elif self._default_value is not None:
msg = 'Attribute {} of class {} received {}, but values ARE REQUIRED to be ' \
'one of {}. It has been set to the default ' \
'value.'.format(self.name, instance.__class__.__name__, value, self.values)
logger.error(msg)
self.data[instance] = self._default_value
else:
msg = 'Attribute {} of class {} received {}, but values ARE REQUIRED to be ' \
'one of {}. This should be resolved, or it may cause unexpected ' \
'issues.'.format(self.name, instance.__class__.__name__, value, self.values)
logger.error(msg)
self.data[instance] = val
class _IntegerDescriptor(_BasicDescriptor):
"""A descriptor for integer type"""
_typ_string = 'int:'
def __init__(self, name, required, length, default_value=0, docstring=None):
self._default_value = default_value
super(_IntegerDescriptor, self).__init__(
name, required, length, docstring=docstring)
def _get_default(self, instance):
return self._default_value
def _docstring_suffix(self):
if self._default_value is not None:
return ' Default value is :code:`{}`.'.format(self._default_value)
def __set__(self, instance, value):
if super(_IntegerDescriptor, self).__set__(instance, value): # the None handler...kinda hacky
return
iv = _parse_int(value, self.length, self._default_value, self.name, instance)
self.data[instance] = iv
class _FloatDescriptor(_BasicDescriptor):
"""A special case descriptor for float type"""
_typ_string = 'float:'
def __init__(self, name, required, length, default_value=0, docstring=None):
self._default_value = default_value
super(_FloatDescriptor, self).__init__(
name, required, length, docstring=docstring)
def _get_default(self, instance):
return self._default_value
def _docstring_suffix(self):
if self._default_value is not None:
return ' Default value is :code:`{}`.'.format(self._default_value)
def __set__(self, instance, value):
if super(_FloatDescriptor, self).__set__(instance, value): # the None handler...kinda hacky
return
iv = _parse_float(value, self._default_value)
self.data[instance] = iv
class _RawDescriptor(_BasicDescriptor):
"""A descriptor for bytes type"""
_typ_string = 'bytes:'
def __init__(self, name, required, length, default_value=None, docstring=None):
self._default_value = default_value
super(_RawDescriptor, self).__init__(
name, required, length, docstring=docstring)
def _get_default(self, instance):
return self._default_value
def __set__(self, instance, value):
if super(_RawDescriptor, self).__set__(instance, value): # the None handler...kinda hacky
return
iv = _parse_bytes(value, self.length, self._default_value, self.name, instance)
self.data[instance] = iv
class _NITFElementDescriptor(_BasicDescriptor):
"""A descriptor for properties of a specified type assumed to be an extension of Serializable"""
def __init__(self, name, required, the_type, default_args=None, docstring=None):
self.the_type = the_type
self._typ_string = str(the_type).strip().split('.')[-1][:-2] + ':'
self._default_args = default_args
super(_NITFElementDescriptor, self).__init__(name, required, None, docstring=docstring)
def _get_default(self, instance):
if self._default_args is not None:
return self.the_type(**self._default_args)
return None
def __set__(self, instance, value):
if super(_NITFElementDescriptor, self).__set__(instance, value): # the None handler...kinda hacky
return
self.data[instance] = _parse_nitf_element(value, self.the_type, self._default_args, self.name, instance)
# Concrete NITF element types
class NITFElement(BaseNITFElement):
_ordering = ()
_lengths = {}
_binary_format = {}
def __init__(self, **kwargs):
for fld in self._ordering:
# noinspection PyBroadException
try:
setattr(self, fld, kwargs.get(fld, None))
except Exception:
logger.critical('Failed setting attribute {} for class {}'.format(fld, self.__class__))
raise
@classmethod
def minimum_length(cls):
"""
The minimum size in bytes that takes to write this header element.
Returns
-------
int
"""
return sum(cls._lengths.values())
def _get_attribute_length(self, fld):
if fld not in self._ordering:
return 0
if fld in self._lengths:
return self._lengths[fld]
else:
val = getattr(self, fld)
if val is None:
return 0
elif isinstance(val, BaseNITFElement):
return val.get_bytes_length()
else:
raise TypeError(
'Unhandled type {} for attribute {} of '
'class {}'.format(type(val), fld, self.__class__.__name__))
def _get_attribute_bytes(self, fld):
if fld not in self._ordering:
return b''
val = getattr(self, fld)
if isinstance(val, BaseNITFElement):
return val.to_bytes()
elif fld in self._binary_format:
return struct.pack(self._binary_format[fld], val)
elif fld in self._lengths:
return _get_bytes(val, self._lengths[fld])
else:
raise ValueError(
'Unhandled attribute {} for class {}'.format(fld, self.__class__.__name__))
def get_bytes_length(self):
"""
Get the length of the serialized bytes array
Returns
-------
int
"""
return sum(self._get_attribute_length(fld) for fld in self._ordering)
def to_bytes(self):
"""
Write the object to a properly packed bytes string.
Returns
-------
bytes
"""
return b''.join(self._get_attribute_bytes(fld) for fld in self._ordering)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
"""
Parameters
----------
fields : dict
The attribute:value dictionary.
attribute : str
The attribute name.
value : bytes
The bytes array to be parsed.
start : int
The present position in `value`.
Returns
-------
int
The position in `value` after parsing this attribute.
"""
if attribute not in cls._ordering:
raise ValueError('Unexpected attribute {}'.format(attribute))
if attribute in fields:
return start
if attribute in cls._binary_format:
if attribute not in cls._lengths:
raise ValueError(
'attribute {} has binary format specified, but no length specified '
'for class {}'.format(attribute, cls))
end = start + cls._lengths[attribute]
fields[attribute] = struct.unpack(cls._binary_format[attribute], value[start:end])[0]
return end
elif attribute in cls._lengths:
end = start + cls._lengths[attribute]
fields[attribute] = value[start:end]
return end
elif hasattr(cls, attribute):
the_typ = getattr(cls, attribute).the_type
assert issubclass(the_typ, BaseNITFElement)
the_value = the_typ.from_bytes(value, start)
fields[attribute] = the_value
return start + the_value.get_bytes_length()
else:
raise ValueError('Cannot parse attribute {} for class {}'.format(attribute, cls))
@classmethod
def from_bytes(cls, value, start):
"""
Parameters
----------
value: bytes|str
the header string to scrape
start : int
the beginning location in the string
Returns
-------
"""
fields = {}
loc = start
for fld in cls._ordering:
loc = cls._parse_attribute(fields, fld, value, loc)
return cls(**fields)
def to_json(self):
out = OrderedDict()
for fld in self._ordering:
if self._get_attribute_length(fld) == 0:
continue
value = getattr(self, fld)
if value is None:
out[fld] = ''
elif isinstance(value, (str, bytes, int)):
out[fld] = value
elif isinstance(value, BaseNITFElement):
out[fld] = value.to_json()
else:
logger.error(
'Got unhandled type `{}` for json serialization for '
'attribute `{}` of class {}'.format(type(value), fld, self.__class__))
return out
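# A hedged sketch (illustrative only): a minimal NITFElement subclass. The element
# name and fields below are hypothetical and not drawn from any NITF standard.
class _ExampleElement(NITFElement):
    _ordering = ('NAME', 'COUNT')
    _lengths = {'NAME': 10, 'COUNT': 3}
    NAME = _StringDescriptor('NAME', True, 10, default_value='', docstring='A name field.')
    COUNT = _IntegerDescriptor('COUNT', True, 3, default_value=0, docstring='A count field.')
# Round trip: _ExampleElement(NAME='test', COUNT=12).to_bytes() yields
# b'test      012', and _ExampleElement.from_bytes(b'test      012', 0)
# recovers NAME='test' and COUNT=12 (parsed back through the descriptors).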
class NITFLoop(NITFElement):
__slots__ = ('_values', )
_ordering = ('values', )
_child_class = None # must be a subclass of NITFElement
_count_size = 0 # type: int
def __init__(self, values=None, **kwargs):
if not issubclass(self._child_class, NITFElement):
raise TypeError('_child_class for {} must be a subclass of NITFElement'.format(self.__class__.__name__))
self._values = tuple()
super(NITFLoop, self).__init__(values=values, **kwargs)
@property
def values(self):
# type: () -> Tuple[_child_class, ...]
return self._values
@values.setter
def values(self, value):
if value is None:
self._values = ()
return
if not isinstance(value, tuple):
value = tuple(value)
for i, entry in enumerate(value):
if not isinstance(entry, self._child_class):
raise TypeError(
'values must be of type {}, got entry {} of type {}'.format(
self._child_class, i, type(entry)))
self._values = value
def __len__(self):
return len(self._values)
def __getitem__(self, item):
# type: (Union[int, slice]) -> Union[_child_class, List[_child_class]]
return self._values[item]
def get_bytes_length(self):
return self._count_size + sum(entry.get_bytes_length() for entry in self._values)
@classmethod
def minimum_length(cls):
return cls._count_size
@classmethod
def _parse_count(cls, value, start):
loc = start
count = int(value[loc:loc+cls._count_size])
loc += cls._count_size
return count, loc
@classmethod
def from_bytes(cls, value, start):
if not issubclass(cls._child_class, NITFElement):
raise TypeError('_child_class for {} must be a subclass of NITFElement'.format(cls.__name__))
count, loc = cls._parse_count(value, start)
if count == 0:
return cls(values=None)
values = []
for i in range(count):
val = cls._child_class.from_bytes(value, loc)
loc += val.get_bytes_length()
values.append(val)
return cls(values=values)
def _counts_bytes(self):
frm_str = '{0:0'+str(self._count_size) + 'd}'
return frm_str.format(len(self.values)).encode('utf-8')
def to_bytes(self):
return self._counts_bytes() + b''.join(entry.to_bytes() for entry in self._values)
def to_json(self):
return [entry.to_json() for entry in self._values]
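# A hedged sketch (illustrative only), mirroring the pattern of the concrete
# loop classes elsewhere in sarpy: a loop of the hypothetical element above,
# preceded by a one-digit count.
class _ExampleLoop(NITFLoop):
    _child_class = _ExampleElement
    _count_size = 1
# _ExampleLoop(values=[_ExampleElement(NAME='a', COUNT=1)]).to_bytes() begins
# with b'1' (the count), followed by the 13 serialized bytes of the child.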
class Unstructured(NITFElement):
"""
A possible NITF element pattern which is largely unparsed -
just a bytes array of a given length
"""
__slots__ = ('_data', )
_ordering = ('data', )
_size_len = 1
def __init__(self, data=None, **kwargs):
self._data = None
if not (isinstance(self._size_len, int) and self._size_len > 0):
raise TypeError(
'class variable _size_len for {} must be a positive '
'integer'.format(self.__class__.__name__))
super(Unstructured, self).__init__(data=data, **kwargs)
@property
def data(self): # type: () -> Union[None, bytes, NITFElement, TREList]
return self._data
@data.setter
def data(self, value):
if value is None:
self._data = None
return
if not isinstance(value, (bytes, NITFElement)):
raise TypeError(
'data requires bytes or NITFElement type. '
'Got type {}'.format(type(value)))
siz_lim = 10**self._size_len - 1
if isinstance(value, bytes):
len_cond = (len(value) > siz_lim)
else:
len_cond = value.get_bytes_length() > siz_lim
if len_cond:
raise ValueError('The provided data is longer than {}'.format(siz_lim))
self._data = value
self._populate_data()
def _populate_data(self):
"""
Populate the _data attribute from bytes to some other appropriate object.
Returns
-------
None
"""
pass
@classmethod
def minimum_length(cls):
return cls._size_len
def _get_attribute_bytes(self, attribute):
if attribute == 'data':
siz_frm = '{0:0' + str(self._size_len) + '}'
data = self.data
if data is None:
return b'0'*self._size_len
if isinstance(data, NITFElement):
data = data.to_bytes()
if isinstance(data, bytes):
return siz_frm.format(len(data)).encode('utf-8') + data
else:
raise TypeError(
'Got unexpected data type {} for attribute {} of class {}'.format(
type(data), attribute, self.__class__))
return super(Unstructured, self)._get_attribute_bytes(attribute)
def _get_attribute_length(self, attribute):
if attribute == 'data':
data = self.data
if data is None:
return self._size_len
elif isinstance(data, NITFElement):
return self._size_len + data.get_bytes_length()
else:
return self._size_len + len(data)
return super(Unstructured, self)._get_attribute_length(attribute)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'data':
length = int(value[start:start + cls._size_len])
start += cls._size_len
fields['data'] = value[start:start + length]
return start + length
return super(Unstructured, cls)._parse_attribute(fields, attribute, value, start)
class _ItemArrayHeaders(BaseNITFElement):
"""
Item array in the NITF header (i.e. Image Segment, Text Segment).
This is not really meant to be used directly.
"""
__slots__ = ('subhead_sizes', 'item_sizes')
_subhead_len = 0
_item_len = 0
def __init__(self, subhead_sizes=None, item_sizes=None, **kwargs):
"""
Parameters
----------
subhead_sizes : numpy.ndarray|None
item_sizes : numpy.ndarray|None
"""
if subhead_sizes is None or item_sizes is None:
subhead_sizes = numpy.zeros((0, ), dtype=numpy.int64)
item_sizes = numpy.zeros((0,), dtype=numpy.int64)
if subhead_sizes.shape != item_sizes.shape or len(item_sizes.shape) != 1:
raise ValueError(
'the subhead_sizes and item_sizes arrays must be one-dimensional and the same length')
self.subhead_sizes = subhead_sizes
"""
numpy.ndarray: the subheader sizes
"""
self.item_sizes = item_sizes
"""
numpy.ndarray: the item size
"""
super(_ItemArrayHeaders, self).__init__(**kwargs)
def get_bytes_length(self):
return 3 + (self._subhead_len + self._item_len)*self.subhead_sizes.size
@classmethod
def minimum_length(cls):
return 3
@classmethod
def from_bytes(cls, value, start):
"""
Parameters
----------
value : bytes|str
start : int
Returns
-------
_ItemArrayHeaders
"""
subhead_len, item_len = int(cls._subhead_len), int(cls._item_len)
if len(value) < start + 3:
raise ValueError('value must have length at least {}. Got {}'.format(start+3, len(value)))
loc = start
count = int(value[loc:loc+3])
length = 3 + count*(subhead_len + item_len)
if len(value) < start + length:
raise ValueError('value must have length at least {}. Got {}'.format(start+length, len(value)))
loc += 3
subhead_sizes = numpy.zeros((count, ), dtype=numpy.int64)
item_sizes = numpy.zeros((count, ), dtype=numpy.int64)
for i in range(count):
subhead_sizes[i] = int(value[loc: loc+subhead_len])
loc += subhead_len
item_sizes[i] = int(value[loc: loc+item_len])
loc += item_len
return cls(subhead_sizes, item_sizes)
def to_bytes(self):
out = '{0:03d}'.format(self.subhead_sizes.size)
subh_frm = '{0:0' + str(self._subhead_len) + 'd}'
item_frm = '{0:0' + str(self._item_len) + 'd}'
for sh_off, it_off in zip(self.subhead_sizes, self.item_sizes):
out += subh_frm.format(sh_off) + item_frm.format(it_off)
return out.encode('utf-8')
def to_json(self):
return OrderedDict([
('subheader_sizes', self.subhead_sizes.tolist()),
('item_sizes', self.item_sizes.tolist())])
######
# TRE Elements
class TRE(BaseNITFElement):
"""
An abstract TRE class - this should not be instantiated directly.
"""
@property
def TAG(self):
"""
str: The TRE tag.
"""
raise NotImplementedError
@property
def DATA(self):
"""
The TRE data.
"""
raise NotImplementedError
@property
def EL(self):
"""
int: The TRE element length.
"""
raise NotImplementedError
def get_bytes_length(self):
raise NotImplementedError
def to_bytes(self):
raise NotImplementedError
@classmethod
def minimum_length(cls):
return 11
@classmethod
def from_bytes(cls, value, start):
tag = value[start:start+6]
known_tre = find_tre(tag)
if known_tre is not None:
try:
return known_tre.from_bytes(value, start)
except Exception as e:
logger.error(
"Returning unparsed tre, because we failed parsing tre as "
"type {} with exception\n\t{}".format(known_tre.__name__, e))
return UnknownTRE.from_bytes(value, start)
def to_json(self):
out = OrderedDict([('tag', self.TAG), ('length', self.EL)])
if isinstance(self.DATA, bytes):
out['data'] = self.DATA
else:
out['data'] = self.DATA.to_json()
return out
class UnknownTRE(TRE):
__slots__ = ('_TAG', '_data')
def __init__(self, TAG, data):
"""
Parameters
----------
TAG : str
data : bytes
"""
self._data = None
if isinstance(TAG, bytes):
TAG = TAG.decode('utf-8')
if not isinstance(TAG, str):
raise TypeError('TAG must be a string. Got {}'.format(type(TAG)))
if len(TAG) > 6:
raise ValueError('TAG must be 6 or fewer characters')
self._TAG = TAG
self._data = data
@property
def TAG(self):
return self._TAG
@property
def DATA(self): # type: () -> bytes
return self._data
@DATA.setter
def DATA(self, value):
if not isinstance(value, bytes):
raise TypeError('data must be a bytes instance. Got {}'.format(type(value)))
self._data = value
@property
def EL(self):
return len(self._data)
def get_bytes_length(self):
return 11 + self.EL
def to_bytes(self):
return '{0:6s}{1:05d}'.format(self.TAG, self.EL).encode('utf-8') + self._data
@classmethod
def from_bytes(cls, value, start):
tag = value[start:start+6]
lng = int(value[start+6:start+11])
return cls(tag, value[start+11:start+11+lng])
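# A hedged sketch (illustrative only): round-tripping an unparsed TRE. The tag
# 'EXMPL0' and payload below are hypothetical placeholders.
def _example_unknown_tre():
    tre = UnknownTRE('EXMPL0', b'\x01\x02\x03')
    raw = tre.to_bytes()  # b'EXMPL000003' + the three payload bytes
    return TRE.from_bytes(raw, 0)  # unregistered tags parse back as UnknownTRE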
class TREList(NITFElement):
"""
A list of TREs. This is meant to be used indirectly through one of the header
type objects, which controls the parsing appropriately.
"""
__slots__ = ('_tres', )
_ordering = ('tres', )
def __init__(self, tres=None, **kwargs):
self._tres = []
super(TREList, self).__init__(tres=tres, **kwargs)
@property
def tres(self):
# type: () -> List[TRE]
return self._tres
@tres.setter
def tres(self, value):
if value is None:
self._tres = []
return
if not isinstance(value, (list, tuple)):
raise TypeError('tres must be a list or tuple')
for i, entry in enumerate(value):
if not isinstance(entry, TRE):
raise TypeError(
'Each entry of tres must be of type TRE. '
'Entry {} is type {}'.format(i, type(entry)))
self._tres = value
def _get_attribute_bytes(self, attribute):
if attribute == 'tres':
if len(self._tres) == 0:
return b''
return b''.join(entry.to_bytes() for entry in self._tres)
return super(TREList, self)._get_attribute_bytes(attribute)
def _get_attribute_length(self, attribute):
if attribute == 'tres':
if len(self._tres) == 0:
return 0
return sum(entry.get_bytes_length() for entry in self._tres)
return super(TREList, self)._get_attribute_length(attribute)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'tres':
if len(value) == start:
fields['tres'] = []
return start
tres = []
loc = start
while loc < len(value):
anticipated_length = int(value[loc+6:loc+11]) + 11
tre = TRE.from_bytes(value, loc)
parsed_length = tre.get_bytes_length()
if parsed_length != anticipated_length:
logger.error(
'The given length for TRE {} instance is {}, but the constructed length is {}. '
'This is the result of a malformed TRE object definition. '
'If possible, this should be reported to the sarpy team for review/repair.'.format(
tre.TAG, anticipated_length, parsed_length))
loc += anticipated_length
tres.append(tre)
fields['tres'] = tres
return len(value)
return super(TREList, cls)._parse_attribute(fields, attribute, value, start)
def __len__(self):
return len(self._tres)
def __getitem__(self, item):
# type: (Union[int, slice, str]) -> Union[None, TRE, List[TRE]]
if isinstance(item, (int, slice)):
return self._tres[item]
elif isinstance(item, str):
for entry in self.tres:
if entry.TAG == item:
return entry
return None
else:
raise TypeError('Got unhandled type {}'.format(type(item)))
def to_json(self):
return [entry.to_json() for entry in self._tres]
class TREHeader(Unstructured):
def _populate_data(self):
if isinstance(self._data, bytes):
data = TREList.from_bytes(self._data, 0)
self._data = data
class UserHeaderType(Unstructured):
__slots__ = ('_data', '_ofl')
_ordering = ('data', )
_size_len = 5
_ofl_len = 3
def __init__(self, OFL=None, data=None, **kwargs):
self._ofl = None
self._data = None
self.OFL = OFL
super(UserHeaderType, self).__init__(data=data, **kwargs)
@property
def OFL(self): # type: () -> int
return self._ofl
@OFL.setter
def OFL(self, value):
if value is None:
self._ofl = 0
return
value = int(value)
if not (0 <= value <= 999):
raise ValueError('ofl requires an integer value in the range 0-999.')
self._ofl = value
def _populate_data(self):
if isinstance(self._data, bytes):
data = TREList.from_bytes(self._data, 0)
self._data = data
@classmethod
def minimum_length(cls):
return cls._size_len
def _get_attribute_bytes(self, attribute):
if attribute == 'data':
siz_frm = '{0:0' + str(self._size_len) + 'd}'
ofl_frm = '{0:0' + str(self._ofl_len) + 'd}'
data = self.data
if data is None:
return b'0'*self._size_len
if isinstance(data, NITFElement):
data = data.to_bytes()
if isinstance(data, bytes):
return siz_frm.format(len(data) + self._ofl_len).encode('utf-8') + \
ofl_frm.format(self._ofl).encode('utf-8') + data
else:
raise TypeError('Got unexpected data type {}'.format(type(data)))
return super(Unstructured, self)._get_attribute_bytes(attribute)
def _get_attribute_length(self, attribute):
if attribute == 'data':
data = self.data
if data is None:
return self._size_len
elif isinstance(data, NITFElement):
return self._size_len + self._ofl_len + data.get_bytes_length()
else:
return self._size_len + self._ofl_len + len(data)
return super(UserHeaderType, self)._get_attribute_length(attribute)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'data':
length = int(value[start:start + cls._size_len])
start += cls._size_len
if length > 0:
ofl = int(value[start:start+cls._ofl_len])
fields['OFL'] = ofl
fields['data'] = value[start+cls._ofl_len:start + length]
else:
fields['OFL'] = 0
fields['data'] = None
return start + length
return super(UserHeaderType, cls)._parse_attribute(fields, attribute, value, start)
| 38,243 | 30.143322 | 119 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/image.py | """
The image subheader definitions.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import struct
from collections import OrderedDict
from typing import Union
import numpy
from .base import NITFElement, NITFLoop, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor, _parse_str
from .security import NITFSecurityTags, NITFSecurityTags0
logger = logging.getLogger(__name__)
######
# General components
class ImageBand(NITFElement):
"""
Single image band, part of the image bands collection
"""
_ordering = ('IREPBAND', 'ISUBCAT', 'IFC', 'IMFLT', 'LUTD')
_lengths = {'IREPBAND': 2, 'ISUBCAT': 6, 'IFC': 1, 'IMFLT': 3}
IREPBAND = _StringDescriptor(
'IREPBAND', True, 2, default_value='',
docstring='Representation. This field shall contain a valid indicator of the processing '
'required to display the nth band of the image with regard to the general image type '
'as recorded in the `IREP` field. The significance of each band in the image can be '
'derived from the combination of the `ICAT`, and `ISUBCAT` fields. Valid values of '
'the `IREPBAND` field depend on the value of '
'the `IREP` field.') # type: str
ISUBCAT = _StringDescriptor(
'ISUBCAT', True, 6, default_value='',
docstring='Subcategory. The purpose of this field is to provide the significance of the band '
'of the image with regard to the specific category (`ICAT` field) '
'of the overall image.') # type: str
IFC = _StringEnumDescriptor(
'IFC', True, 1, {'N', }, default_value='N',
docstring=' Image Filter Condition.') # type: str
IMFLT = _StringDescriptor(
'IMFLT', True, 3, default_value='',
docstring='Standard Image Filter Code. This field is reserved '
'for future use.') # type: str
def __init__(self, **kwargs):
self._LUTD = None
super(ImageBand, self).__init__(**kwargs)
@classmethod
def minimum_length(cls):
return 13
@property
def LUTD(self):
"""
The Look-up Table (LUT) data.
Returns
-------
None|numpy.ndarray
"""
return self._LUTD
@LUTD.setter
def LUTD(self, value):
if value is None:
self._LUTD = None
return
if not isinstance(value, numpy.ndarray):
raise TypeError('LUTD must be a numpy array')
if value.dtype.name != 'uint8':
raise ValueError('LUTD must be a numpy array of dtype uint8, got {}'.format(value.dtype.name))
if value.ndim != 2:
raise ValueError('LUTD must be a two-dimensional array')
if value.shape[0] > 4:
raise ValueError(
'The number of LUTD bands (axis 0) must be 4 or fewer. '
'Got LUTD shape {}'.format(value.shape))
if value.shape[1] > 65536:
raise ValueError(
'The number of LUTD elements (axis 1) must be 65536 or fewer. '
'Got LUTD shape {}'.format(value.shape))
self._LUTD = value
@property
def NLUTS(self):
"""
Number of LUTS for the Image Band. This field shall contain the number
of LUTs associated with the nth band of the image. LUTs are allowed
only if the value of the `PVTYPE` field is :code:`INT` or :code:`B`.
Returns
-------
int
"""
return 0 if self._LUTD is None else self._LUTD.shape[0]
@property
def NELUTS(self):
"""
Number of LUT Entries for the Image Band. This field shall contain
the number of entries in each of the LUTs for the nth image band.
Returns
-------
int
"""
return 0 if self._LUTD is None else self._LUTD.shape[1]
def _get_attribute_bytes(self, attribute):
if attribute == 'LUTD':
if self.NLUTS == 0:
out = b'0'
else:
out = '{0:d}{1:05d}'.format(self.NLUTS, self.NELUTS).encode() + \
struct.pack('{}B'.format(self.NLUTS * self.NELUTS), *self.LUTD.flatten())
return out
else:
return super(ImageBand, self)._get_attribute_bytes(attribute)
def _get_attribute_length(self, attribute):
if attribute == 'LUTD':
nluts = self.NLUTS
if nluts == 0:
return 1
else:
neluts = self.NELUTS
return 6 + nluts * neluts
else:
return super(ImageBand, self)._get_attribute_length(attribute)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'LUTD':
loc = start
nluts = int(value[loc:loc + 1])
loc += 1
if nluts == 0:
fields['LUTD'] = None
else:
neluts = int(value[loc:loc + 5])
loc += 5
siz = nluts * neluts
lutd = numpy.array(
struct.unpack('{}B'.format(siz), value[loc:loc + siz]), dtype=numpy.uint8).reshape(
(nluts, neluts))
fields['LUTD'] = lutd
loc += siz
return loc
return super(ImageBand, cls)._parse_attribute(fields, attribute, value, start)
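# A hedged sketch (illustrative only): a monochrome band carrying a single
# 256-entry display LUT of arbitrary placeholder values.
def _example_image_band():
    band = ImageBand(IREPBAND='M')
    band.LUTD = numpy.arange(256, dtype=numpy.uint8).reshape((1, 256))
    return band.NLUTS, band.NELUTS, band.to_bytes()  # NLUTS=1, NELUTS=256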
class ImageBands(NITFLoop):
_child_class = ImageBand
_count_size = 1
@classmethod
def _parse_count(cls, value, start):
loc = start
count = int(value[loc:loc + cls._count_size])
loc += cls._count_size
if count == 0:
# (only) if there are more than 9, a longer field is used
count = int(value[loc:loc + 5])
loc += 5
return count, loc
def _counts_bytes(self):
siz = len(self.values)
if siz <= 9:
return '{0:1d}'.format(siz).encode()
else:
return '0{0:05d}'.format(siz).encode()
class ImageComment(NITFElement):
_ordering = ('COMMENT', )
_lengths = {'COMMENT': 80}
COMMENT = _StringDescriptor('COMMENT', True, 80, default_value='', docstring='The image comment')
class ImageComments(NITFLoop):
_child_class = ImageComment
_count_size = 1
########
# Masked image header - this is a binary structure
class MaskSubheader(NITFElement):
_ordering = (
'IMDATOFF', 'BMRLNTH', 'TMRLNTH', 'TPXCDLNTH', 'TPXCD', 'BMR', 'TMR')
_lengths = {
'IMDATOFF': 4, 'BMRLNTH': 2, 'TMRLNTH': 2, 'TPXCDLNTH': 2}
_binary_format = {
'IMDATOFF': '>I', 'BMRLNTH': '>H', 'TMRLNTH': '>H', 'TPXCDLNTH': '>H'}
# descriptors
IMDATOFF = _IntegerDescriptor(
'IMDATOFF', True, 10,
docstring='Blocked image data offset. This is the size of the masked subheader '
'in bytes.') # type: int
BMRLNTH = _IntegerDescriptor(
'BMRLNTH', True, 5,
docstring='Block mask record length') # type: int
TMRLNTH = _IntegerDescriptor(
'TMRLNTH', True, 5,
docstring='Transparent Pixel Mask Record Length') # type: int
TPXCDLNTH = _IntegerDescriptor(
'TPXCDLNTH', True, 5,
docstring='Transparent Output Pixel Code Length in bits.') # type: int
def __init__(self, band_depth=1, blocks=1, **kwargs):
self._band_depth = band_depth
self._blocks = blocks
self._TPXCD = None
self._BMR = None
self._TMR = None
super(MaskSubheader, self).__init__(**kwargs)
@property
def band_depth(self):
"""
int: The number of band elements. Will only be > 1 if band-sequential format.
"""
return self._band_depth
@property
def blocks(self):
"""
int: The number of blocks.
"""
return self._blocks
@property
def TPXCD(self):
"""
bytes: The Transparent output pixel code.
"""
return self._TPXCD
@TPXCD.setter
def TPXCD(self, value):
if self.TPXCDLNTH == 0:
self._TPXCD = None
return
if not isinstance(value, bytes):
raise TypeError('TPXCD must be of type bytes.')
expected_length = self._get_attribute_length('TPXCD')
if len(value) != expected_length:
raise ValueError(
'Provided TPXCD data is required to be of length {}, '
'but got length {}'.format(expected_length, len(value)))
self._TPXCD = value
@property
def BMR(self):
# type: () -> Union[None, numpy.ndarray]
"""
None|numpy.ndarray: The block mask records array. This will be None if
and only if `BMRLNTH=0`. Each entry records the offset in bytes from
the beginning of the blocked image data to the first byte of the respective
block. If the block is not recorded/transmitted (i.e. not present), then the
offset value is defaulted to `0xFFFFFFFF`.
"""
return self._BMR
@BMR.setter
def BMR(self, value):
if value is None:
if self.BMRLNTH != 0:
raise ValueError('BMR array is None, but BMRLNTH={}'.format(self.BMRLNTH))
self._BMR = None
else:
if self.BMRLNTH != 4:
raise ValueError('BMR array is provided, but BMRLNTH={}'.format(self.BMRLNTH))
if not isinstance(value, numpy.ndarray):
value = numpy.array(value, dtype='uint32')
if value.shape != (self.band_depth, self.blocks):
raise ValueError(
'BMR array is of shape {}, and must be of '
'shape {}'.format(value.shape, (self.band_depth, self.blocks)))
if value.dtype.name != 'uint32':
raise ValueError(
'BMR array has dtype {}, and must be of '
'dtype uint32'.format(value.dtype.name))
self._BMR = value
@property
def TMR(self):
# type: () -> Union[None, numpy.ndarray]
"""
None|numpy.ndarray: The transparent mask records array. This will be None if
and only if `TMRLNTH=0`. Each entry records the offset in bytes from
the beginning of the blocked image data to the first byte of the respective
block (if this block contains pad pixels), or the default value `0xFFFFFFFF`
to indicate that this block does not contain pad pixels.
"""
return self._TMR
@TMR.setter
def TMR(self, value):
if value is None:
if self.TMRLNTH != 0:
raise ValueError('TMR array is None, but TMRLNTH={}'.format(self.TMRLNTH))
self._TMR = None
else:
if self.TMRLNTH != 4:
raise ValueError('TMR array is provided, but TMRLNTH={}'.format(self.TMRLNTH))
if not isinstance(value, numpy.ndarray):
value = numpy.array(value, dtype='uint32')
if value.shape != (self.band_depth, self.blocks):
raise ValueError(
'TMR array is of shape {}, and must be of '
'shape {}'.format(value.shape, (self.band_depth, self.blocks)))
if value.dtype.name != 'uint32':
raise ValueError(
'TMR array has dtype {}, and must be of '
'dtype uint32'.format(value.dtype.name))
self._TMR = value
@staticmethod
def define_tpxcd_length(tpxcdlnth):
"""
Gets the appropriate length for the TPXCD data.
Parameters
----------
tpxcdlnth : int
The TPXCDLNTH value.
Returns
-------
int
"""
missing = (tpxcdlnth % 8)
if missing == 0:
return int(tpxcdlnth/8)
else:
return int((tpxcdlnth + (8 - missing))/8)
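    # Worked example (illustrative comment): TPXCDLNTH=12 does not fall on a byte
    # boundary, so define_tpxcd_length(12) pads up to 2 bytes; a value already on
    # a boundary, e.g. TPXCDLNTH=16, yields define_tpxcd_length(16) == 2 directly.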
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'BMR':
if fields['BMRLNTH'] == 0:
fields['BMR'] = None
return start
else:
count = fields['band_depth']*fields['blocks']
end = start+4*count
array = numpy.array(struct.unpack('>{}I'.format(count), value[start:end]), dtype='uint32')
fields['BMR'] = numpy.resize(array, (fields['band_depth'], fields['blocks']))
return end
elif attribute == 'TMR':
if fields['TMRLNTH'] == 0:
fields['TMR'] = None
return start
else:
count = fields['band_depth']*fields['blocks']
end = start+4*count
array = numpy.array(struct.unpack('>{}I'.format(count), value[start:end]), dtype='uint32')
fields['TMR'] = numpy.resize(array, (fields['band_depth'], fields['blocks']))
return end
elif attribute == 'TPXCD':
length = cls.define_tpxcd_length(fields['TPXCDLNTH'])
if length == 0:
fields['TPXCD'] = None
return start
else:
end = start + length
fields['TPXCD'] = value[start:end]
return end
else:
return super(MaskSubheader, cls)._parse_attribute(fields, attribute, value, start)
def _get_attribute_length(self, fld):
if fld in ['BMR', 'TMR']:
value = getattr(self, fld)
if value is None:
return 0
else:
return value.size*4
elif fld == 'TPXCD':
return self.define_tpxcd_length(self.TPXCDLNTH)
else:
return super(MaskSubheader, self)._get_attribute_length(fld)
def _get_attribute_bytes(self, fld):
if fld in ['BMR', 'TMR']:
value = getattr(self, fld)
if value is None:
return b''
else:
return struct.pack('>{}I'.format(value.size), *numpy.reshape(value, (-1,)))
elif fld == 'TPXCD':
if self._TPXCD is None:
return b''
return self._TPXCD
else:
return super(MaskSubheader, self)._get_attribute_bytes(fld)
@classmethod
def from_bytes(cls, value, start, band_depth=1, blocks=1):
fields = {
'band_depth': band_depth, 'blocks': blocks}
loc = start
for fld in cls._ordering:
loc = cls._parse_attribute(fields, fld, value, loc)
out = cls(**fields)
input_length = len(value)-start
out_length = out.get_bytes_length()
if input_length != out_length:
logger.error(
                'The MaskSubheader object is being deserialized from a bytes buffer of length {},\n\t'
                'but would serialize to a bytes object of length {}.\n\t'
                'This is likely a result of faulty deserialization,\n\t'
                'and represents an error.'.format(input_length, out_length))
return out
def to_json(self):
out = OrderedDict([('band_depth', self.band_depth), ('blocks', self.blocks)])
for fld in self._ordering:
value = getattr(self, fld)
if value is None:
continue
if fld in ['BMR', 'TMR']:
out[fld] = value.tolist()
else:
out[fld] = value
return out
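    # A minimal sketch (not part of the original module) of the to_json output
    # for a hypothetical one band, two block masked image; fields which are
    # None are simply omitted from the mapping:
    #
    #   {'band_depth': 1, 'blocks': 2, ..., 'BMRLNTH': 4, 'BMR': [[0, 4096]]}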
#########
# NITF 2.1 version
class ImageSegmentHeader(NITFElement):
"""
The image segment header - see standards document Joint BIIF Profile (JBP) for more
information.
"""
_ordering = (
'IM', 'IID1', 'IDATIM', 'TGTID',
'IID2', 'Security', 'ENCRYP', 'ISORCE',
'NROWS', 'NCOLS', 'PVTYPE', 'IREP',
'ICAT', 'ABPP', 'PJUST', 'ICORDS',
'IGEOLO', 'Comments', 'IC', 'COMRAT', 'Bands',
'ISYNC', 'IMODE', 'NBPR', 'NBPC', 'NPPBH',
'NPPBV', 'NBPP', 'IDLVL', 'IALVL',
'ILOC', 'IMAG', 'UserHeader', 'ExtendedHeader')
_lengths = {
'IM': 2, 'IID1': 10, 'IDATIM': 14, 'TGTID': 17,
'IID2': 80, 'ENCRYP': 1, 'ISORCE': 42,
'NROWS': 8, 'NCOLS': 8, 'PVTYPE': 3, 'IREP': 8,
'ICAT': 8, 'ABPP': 2, 'PJUST': 1, 'ICORDS': 1,
'IGEOLO': 60, 'IC': 2, 'COMRAT': 4, 'ISYNC': 1, 'IMODE': 1,
'NBPR': 4, 'NBPC': 4, 'NPPBH': 4, 'NPPBV': 4,
'NBPP': 2, 'IDLVL': 3, 'IALVL': 3, 'ILOC': 10,
'IMAG': 4, 'UDIDL': 5, 'IXSHDL': 5}
# Descriptors
IM = _StringEnumDescriptor(
'IM', True, 2, {'IM', }, default_value='IM',
docstring='File part type.') # type: str
IID1 = _StringDescriptor(
'IID1', True, 10, default_value='',
docstring='Image Identifier 1. This field shall contain a valid alphanumeric identification code '
'associated with the image. The valid codes are determined by '
'the application.') # type: str
IDATIM = _StringDescriptor(
'IDATIM', True, 14, default_value='',
docstring='Image Date and Time. This field shall contain the time (UTC) of the image '
'acquisition in the format :code:`YYYYMMDDhhmmss`.') # type: str
TGTID = _StringDescriptor(
'TGTID', True, 17, default_value='',
docstring='Target Identifier. This field shall contain the identification of the primary target '
'in the format, :code:`BBBBBBBBBBOOOOOCC`, consisting of ten characters of Basic Encyclopedia '
'`(BE)` identifier, followed by five characters of facility OSUFFIX, followed by the two '
'character country code as specified in FIPS PUB 10-4.') # type: str
IID2 = _StringDescriptor(
'IID2', True, 80, default_value='',
docstring='Image Identifier 2. This field can contain the identification of additional '
'information about the image.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The image security tags.') # type: NITFSecurityTags
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
ISORCE = _StringDescriptor(
'ISORCE', True, 42, default_value='',
docstring='Image Source. This field shall contain a description of the source of the image. '
'If the source of the data is classified, then the description shall be preceded by '
'the classification, including codeword(s).') # type: str
NROWS = _IntegerDescriptor(
'NROWS', True, 8, default_value=0,
docstring='Number of Significant Rows in Image. This field shall contain the total number of rows '
'of significant pixels in the image. When the product of the values of the `NPPBV` field '
'and the `NBPC` field is greater than the value of the `NROWS` field '
r'(:math:`NPPBV \cdot NBPC > NROWS`), the rows indexed with the value of the `NROWS` field '
r'to (:math:`NPPBV\cdot NBPC - 1`) shall contain fill data. NOTE: Only the rows indexed '
'0 to the value of the `NROWS` field minus 1 of the image contain significant data. '
'The pixel fill values are determined by the application.') # type: int
NCOLS = _IntegerDescriptor(
'NCOLS', True, 8, default_value=0,
docstring='Number of Significant Columns in Image. This field shall contain the total number of '
'columns of significant pixels in the image. When the product of the values of the `NPPBH` '
'field and the `NBPR` field is greater than the `NCOLS` field '
r'(:math:`NPPBH\cdot NBPR > NCOLS`), the columns indexed with the value of the `NCOLS` field '
r'to (:math:`NPPBH\cdot NBPR - 1`) shall contain fill data. NOTE: Only the columns '
'indexed 0 to the value of the `NCOLS` field minus 1 of the image contain significant data. '
'The pixel fill values are determined by the application.') # type: int
PVTYPE = _StringEnumDescriptor(
'PVTYPE', True, 3, {'INT', 'B', 'SI', 'R', 'C'},
docstring='Pixel Value Type. This field shall contain an indicator of the type of computer representation '
'used for the value for each pixel for each band in the image. ') # type: str
IREP = _StringEnumDescriptor(
'IREP', True, 8,
{'MONO', 'RGB', 'RGB/LUT', 'MULTI', 'NODISPLY', 'NVECTOR', 'POLAR', 'VPH', 'YCbCr601'},
default_value='NODISPLY',
docstring='Image Representation. This field shall contain a valid indicator of the processing required '
'in order to display an image.') # type: str
ICAT = _StringDescriptor(
'ICAT', True, 8, default_value='SAR',
docstring='Image Category. This field shall contain a valid indicator of the specific category of image, '
'raster or grid data. The specific category of an IS reveals its intended use or the nature '
'of its collector.') # type: str
ABPP = _IntegerDescriptor(
'ABPP', True, 2,
docstring='Actual Bits-Per-Pixel Per Band. This field shall contain the number of "significant bits" for '
'the value in each band of each pixel without compression. Even when the image is compressed, '
'`ABPP` contains the number of significant bits per pixel that were present in the image '
'before compression. This field shall be less than or equal to Number of Bits Per Pixel '
'(field `NBPP`). The number of adjacent bits within each `NBPP` is '
'used to represent the value.') # type: int
PJUST = _StringEnumDescriptor(
'PJUST', True, 1, {'L', 'R'}, default_value='R',
docstring='Pixel Justification. When `ABPP` is not equal to `NBPP`, this field indicates whether the '
'significant bits are left justified (:code:`L`) or right '
'justified (:code:`R`).') # type: str
ICORDS = _StringEnumDescriptor(
'ICORDS', True, 1, {'', 'U', 'G', 'N', 'S', 'D'}, default_value='G',
docstring='Image Coordinate Representation. This field shall contain a valid code indicating the type '
'of coordinate representation used for providing an approximate location of the image in the '
'Image Geographic Location field (`IGEOLO`).') # type: str
Comments = _NITFElementDescriptor(
'Comments', True, ImageComments, default_args={},
docstring='The image comments.') # type: ImageComments
Bands = _NITFElementDescriptor(
'Bands', True, ImageBands, default_args={},
docstring='The image bands.') # type: ImageBands
ISYNC = _IntegerDescriptor(
'ISYNC', True, 1, default_value=0,
docstring='Image Sync code. This field is reserved for future use. ') # type: int
IMODE = _StringEnumDescriptor(
'IMODE', True, 1, {'B', 'P', 'R', 'S'}, default_value='P',
docstring='Image Mode. This field shall indicate how the Image Pixels are '
'stored in the NITF file.') # type: str
NBPR = _IntegerDescriptor(
'NBPR', True, 4, default_value=1,
docstring='Number of Blocks Per Row. This field shall contain the number of image blocks in a row of '
'blocks (paragraph 5.4.2.2) in the horizontal direction. If the image consists of only a '
'single block, this field shall contain the value one.') # type: int
NBPC = _IntegerDescriptor(
'NBPC', True, 4, default_value=1,
docstring='Number of Blocks Per Column. This field shall contain the number of image blocks in a column '
'of blocks (paragraph 5.4.2.2) in the vertical direction. If the image consists of only a '
'single block, this field shall contain the value one.') # type: int
NPPBH = _IntegerDescriptor(
'NPPBH', True, 4, default_value=0,
docstring='Number of Pixels Per Block Horizontal. This field shall contain the number of pixels horizontally '
'in each block of the image. It shall be the case that the product of the values of the `NBPR` '
'field and the `NPPBH` field is greater than or equal to the value of the `NCOLS` field '
r'(:math:`NBPR\cdot NPPBH \geq NCOLS`). When NBPR is :code:`1`, setting the `NPPBH` '
'value to :code:`0` designates that the number of pixels horizontally is specified by the '
'value in NCOLS.') # type: int
NPPBV = _IntegerDescriptor(
'NPPBV', True, 4, default_value=0,
docstring='Number of Pixels Per Block Vertical. This field shall contain the number of pixels vertically '
'in each block of the image. It shall be the case that the product of the values of the `NBPC` '
'field and the `NPPBV` field is greater than or equal to the value of the `NROWS` field '
r'(:math:`NBPC\cdot NPPBV \geq NROWS`). When `NBPC` is :code:`1`, setting the `NPPBV` value '
r'to :code:`0` designates that the number of pixels vertically is specified by '
r'the value in `NROWS`.') # type: int
NBPP = _IntegerDescriptor(
'NBPP', True, 2, default_value=0,
docstring='Number of Bits Per Pixel Per Band.') # type: int
IDLVL = _IntegerDescriptor(
'IDLVL', True, 3, default_value=0,
docstring='Image Display Level. This field shall contain a valid value that indicates the display level of '
'the image relative to other displayed file components in a composite display. The valid values '
'are :code:`1-999`. The display level of each displayable segment (image or graphic) within a file '
'shall be unique.') # type: int
IALVL = _IntegerDescriptor(
'IALVL', True, 3, default_value=0,
docstring='Attachment Level. This field shall contain a valid value that indicates the attachment '
'level of the image.') # type: int
ILOC = _StringDescriptor(
'ILOC', True, 10, default_value='',
docstring='Image Location. The image location is the location of the first pixel of the first line of the '
'image. This field shall contain the image location offset from the `ILOC` or `SLOC` value '
'of the segment to which the image is attached or from the origin of the CCS when the image '
'is unattached (`IALVL` contains :code:`0`). A row or column value of :code:`0` indicates no offset. '
'Positive row and column values indicate offsets down and to the right while negative row and '
'column values indicate offsets up and to the left.') # type: str
IMAG = _StringDescriptor(
'IMAG', True, 4, default_value='1.0',
docstring='Image Magnification. This field shall contain the magnification (or reduction) factor of the '
'image relative to the original source image. Decimal values are used to indicate magnification, '
'and decimal fraction values indicate reduction. For example, :code:`2.30` indicates the original '
'image has been magnified by a factor of :code:`2.30`, while :code:`0.5` indicates '
'the original image has been reduced by a factor of 2.') # type: str
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
ExtendedHeader = _NITFElementDescriptor(
'ExtendedHeader', True, UserHeaderType, default_args={},
docstring='Extended subheader - TRE list.') # type: UserHeaderType
def __init__(self, **kwargs):
self._IC = None
self._COMRAT = None
self._IGEOLO = None
self._mask_subheader = None
super(ImageSegmentHeader, self).__init__(**kwargs)
@property
def is_masked(self):
"""
bool: Does this image segment contain a mask?
"""
return self.IC in ['NM', 'M1', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8']
@property
def is_compressed(self):
"""
bool: Is this image segment compressed?
"""
return self.IC not in ['NC', 'NM']
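    # Illustrative behavior (not part of the original module), assuming the
    # descriptor defaults suffice for construction:
    #
    # >>> ImageSegmentHeader(IC='NM').is_masked
    # True
    # >>> ImageSegmentHeader(IC='NM').is_compressed
    # False
    # >>> ImageSegmentHeader(IC='C8').is_compressed
    # True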
@property
def IC(self):
"""
str: Image Compression. This field shall contain a valid code indicating
the form of compression used in representing the image data.
Valid values for this field are, :code:`C1` to represent bi-level, :code:`C3`
to represent JPEG, :code:`C4` to represent Vector Quantization, :code:`C5`
to represent lossless JPEG, :code:`I1` to represent down sampled JPEG,
and :code:`NC` to represent that the image is not compressed. Also valid are
:code:`M1, M3, M4`, and :code:`M5` for compressed images, and :code:`NM`
for uncompressed images indicating an image that contains a block
mask and/or a pad pixel mask. :code:`C6` and :code:`M6` are reserved values
that will represent a future correlated multicomponent compression
algorithm. :code:`C7` and :code:`M7` are reserved values that will represent
a future complex SAR compression. :code:`C8` and :code:`M8` are the values
for ISO standard compression JPEG 2000.
The format of a mask image is identical to the format of its corresponding non-masked image
except for the presence of an Image Data Mask at the beginning of
the image data area. The format of the Image Data Mask is described
in paragraph 5.4.3.2 and is shown in table A-3(A). The definitions
of the compression schemes associated with codes :code:`C1/M1, C3/M3, C4/M4, C5/M5`
are given, respectively, in ITU-T T.4, AMD2, MIL-STD-188-198A,
MIL-STD-188-199, and NGA N0106-97. :code:`C1` is found in ITU-T T.4 AMD2,
:code:`C3` is found in MIL-STD-188-198A, :code:`C4` is found in MIL-STD-188-199,
and :code:`C5` and :code:`I1` are found in NGA N0106-97. (NOTE: :code:`C2` (ARIDPCM) is not
valid in NITF 2.1.) The definition of the compression scheme associated
with codes :code:`C8/M8` is found in ISO/IEC 15444-1:2000 (with amendments 1 and 2).
"""
return self._IC
@IC.setter
def IC(self, value):
value = _parse_str(value, 2, 'NC', 'IC', self)
if value not in {
'NC', 'NM', 'C0', 'C1', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'I1',
'M1', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8'}:
raise ValueError('IC got invalid value {}'.format(value))
self._IC = value
if value in ('NC', 'NM'):
self._COMRAT = None
        elif self._COMRAT is None:
self._COMRAT = '\x20'*4
@property
def COMRAT(self):
"""
None|str: Compression Rate Code. If the IC field contains one of
:code:`C1, C3, C4, C5, C8, M1, M3, M4, M5, M8, I1`, this field shall contain
a code indicating the compression rate for the image.
If `IC` is :code:`NC` or :code:`NM`, then this will be set to :code:`None`.
"""
return self._COMRAT
@COMRAT.setter
def COMRAT(self, value):
value = _parse_str(value, 4, None, 'COMRAT', self)
if value is None and self.IC not in ('NC', 'NM'):
value = '\x20'*4
logger.error(
'COMRAT value is None, but IC is not in {"NC", "NM"}.\n\t'
'This must be resolved.')
if value is not None and self.IC in ('NC', 'NM'):
value = None
logger.error(
'COMRAT value is something other than None, but IC in {"NC", "NM"}.\n\t'
'This is invalid, and COMRAT is being set to None.')
self._COMRAT = value
@property
def IGEOLO(self):
"""
None|str: Image Geographic Location. This field, when present, shall contain
an approximate geographic location which is not intended for analytical purposes
(e.g., targeting, mensuration, distance calculation); it is intended to support
general user appreciation for the image location (e.g., cataloguing). The
representation of the image corner locations is specified in the `ICORDS` field.
The locations of the four corners of the (significant) image data shall be given
in image coordinate order: (0,0), (0, MaxCol), (MaxRow, MaxCol), (MaxRow, 0).
MaxCol and MaxRow shall be determined from the values contained, respectively,
in the `NCOLS` field and the `NROWS` field.
"""
return self._IGEOLO
@IGEOLO.setter
def IGEOLO(self, value):
value = _parse_str(value, 60, None, 'IGEOLO', self)
if value is None and self.ICORDS.strip() != '':
value = '\x20'*60
if value is not None and self.ICORDS.strip() == '':
value = None
self._IGEOLO = value
@property
def mask_subheader(self):
# type: () -> Union[None, MaskSubheader]
"""
None|MaskSubheader: The mask subheader, if it has been appended.
"""
return self._mask_subheader
@mask_subheader.setter
def mask_subheader(self, value):
if value is None:
self._mask_subheader = None
return
if not isinstance(value, MaskSubheader):
raise ValueError(
'mask_subheader is expected to be an instance of MaskSubheader. '
'Got type {}'.format(type(value)))
if self.IC not in ['NM', 'M1', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8']:
raise ValueError(
'IC={}, which does not indicate the presence of a mask '
'subheader'.format(self.IC))
self._mask_subheader = value
def _get_attribute_length(self, fld):
if fld in ['COMRAT', 'IGEOLO']:
if getattr(self, '_'+fld) is None:
return 0
else:
return self._lengths[fld]
else:
return super(ImageSegmentHeader, self)._get_attribute_length(fld)
@classmethod
def minimum_length(cls):
# COMRAT and IGEOLO may not be there
return super(ImageSegmentHeader, cls).minimum_length() - 64
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'IC':
val = value[start:start+2].decode('utf-8')
fields['IC'] = val
if val in ('NC', 'NM'):
fields['COMRAT'] = None
return start+2
elif attribute == 'ICORDS':
fields['ICORDS'] = value[start:start+1]
if fields['ICORDS'] == b' ':
fields['IGEOLO'] = None
return start+1
else:
return super(ImageSegmentHeader, cls)._parse_attribute(fields, attribute, value, start)
def get_uncompressed_block_size(self) -> int:
"""
Gets the size of an uncompressed block.
Note that if `IMODE == 'S'`, then each block consists of a single band.
Otherwise, a block consists of all bands.
Returns
-------
int
"""
nppbv = self.NROWS if self.NPPBV == 0 else self.NPPBV
nppbh = self.NCOLS if self.NPPBH == 0 else self.NPPBH
if self.IMODE == 'S':
return int(nppbh*nppbv*self.NBPP/8)
else:
return int(nppbh*nppbv*len(self.Bands)*self.NBPP/8)
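    # Worked example (not part of the original module): for a single band image
    # with NPPBH = NPPBV = 1024 and NBPP = 16, each block occupies
    # 1024 * 1024 * 1 * 16 / 8 = 2097152 bytes; with IMODE == 'S' the band
    # count drops out because each block then holds exactly one band.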
def get_full_uncompressed_image_size(self) -> int:
"""
Gets the full size in bytes of the uncompressed image including any padding
in the blocks.
Returns
-------
int
"""
total_blocks = self.NBPR*self.NBPC
if self.IMODE == 'S':
total_blocks *= len(self.Bands)
return total_blocks*self.get_uncompressed_block_size()
def get_clevel(self) -> int:
"""
Gets the CLEVEL value for this image segment.
Returns
-------
int
"""
dim_size = max(self.NROWS, self.NCOLS)
if dim_size <= 2048:
return 3
elif dim_size <= 8192:
return 5
elif dim_size <= 65536:
return 6
else:
return 7
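    # Illustrative doctest (not part of the original module), assuming the
    # descriptor defaults suffice for construction:
    #
    # >>> ImageSegmentHeader(NROWS=10000, NCOLS=512).get_clevel()
    # 6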
#########
# NITF 2.0 version
class ImageSegmentHeader0(NITFElement):
"""
The image segment header for NITF version 2.0 - see standards document
MIL-STD-2500A for more information.
"""
_ordering = (
'IM', 'IID', 'IDATIM', 'TGTID',
'ITITLE', 'Security', 'ENCRYP', 'ISORCE',
'NROWS', 'NCOLS', 'PVTYPE', 'IREP',
'ICAT', 'ABPP', 'PJUST', 'ICORDS',
'IGEOLO', 'Comments', 'IC', 'COMRAT', 'Bands',
'ISYNC', 'IMODE', 'NBPR', 'NBPC', 'NPPBH',
'NPPBV', 'NBPP', 'IDLVL', 'IALVL',
'ILOC', 'IMAG', 'UserHeader', 'ExtendedHeader')
_lengths = {
'IM': 2, 'IID': 10, 'IDATIM': 14, 'TGTID': 17,
'ITITLE': 80, 'ENCRYP': 1, 'ISORCE': 42,
'NROWS': 8, 'NCOLS': 8, 'PVTYPE': 3, 'IREP': 8,
'ICAT': 8, 'ABPP': 2, 'PJUST': 1, 'ICORDS': 1,
'IGEOLO': 60, 'IC': 2, 'COMRAT': 4, 'ISYNC': 1, 'IMODE': 1,
'NBPR': 4, 'NBPC': 4, 'NPPBH': 4, 'NPPBV': 4,
'NBPP': 2, 'IDLVL': 3, 'IALVL': 3, 'ILOC': 10,
'IMAG': 4, 'UDIDL': 5, 'IXSHDL': 5}
# Descriptors
IM = _StringEnumDescriptor(
'IM', True, 2, {'IM', }, default_value='IM',
docstring='File part type.') # type: str
IID = _StringDescriptor(
'IID', True, 10, default_value='',
docstring='Image Identifier 1. This field shall contain a valid alphanumeric identification code '
'associated with the image. The valid codes are determined by '
'the application.') # type: str
IDATIM = _StringDescriptor(
'IDATIM', True, 14, default_value='',
docstring='Image Date and Time. This field shall contain the time (UTC) of the image '
'acquisition in the format :code:`YYYYMMDDhhmmss`.') # type: str
TGTID = _StringDescriptor(
'TGTID', True, 17, default_value='',
docstring='Target Identifier. This field shall contain the identification of the primary target '
'in the format, :code:`BBBBBBBBBBOOOOOCC`, consisting of ten characters of Basic Encyclopedia '
'`(BE)` identifier, followed by five characters of facility OSUFFIX, followed by the two '
'character country code as specified in FIPS PUB 10-4.') # type: str
ITITLE = _StringDescriptor(
'ITITLE', True, 80, default_value='',
docstring='Image Identifier 2. This field can contain the identification of additional '
'information about the image.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags0, default_args={},
docstring='The image security tags.') # type: NITFSecurityTags0
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
ISORCE = _StringDescriptor(
'ISORCE', True, 42, default_value='',
docstring='Image Source. This field shall contain a description of the source of the image. '
'If the source of the data is classified, then the description shall be preceded by '
'the classification, including codeword(s).') # type: str
NROWS = _IntegerDescriptor(
'NROWS', True, 8, default_value=0,
docstring='Number of Significant Rows in Image. This field shall contain the total number of rows '
'of significant pixels in the image. When the product of the values of the `NPPBV` field '
'and the `NBPC` field is greater than the value of the `NROWS` field '
r'(:math:`NPPBV \cdot NBPC > NROWS`), the rows indexed with the value of the `NROWS` field '
r'to (:math:`NPPBV\cdot NBPC - 1`) shall contain fill data. NOTE: Only the rows indexed '
'0 to the value of the `NROWS` field minus 1 of the image contain significant data. '
'The pixel fill values are determined by the application.') # type: int
NCOLS = _IntegerDescriptor(
'NCOLS', True, 8, default_value=0,
docstring='Number of Significant Columns in Image. This field shall contain the total number of '
'columns of significant pixels in the image. When the product of the values of the `NPPBH` '
'field and the `NBPR` field is greater than the `NCOLS` field '
r'(:math:`NPPBH\cdot NBPR > NCOLS`), the columns indexed with the value of the `NCOLS` field '
r'to (:math:`NPPBH\cdot NBPR - 1`) shall contain fill data. NOTE: Only the columns '
'indexed 0 to the value of the `NCOLS` field minus 1 of the image contain significant data. '
'The pixel fill values are determined by the application.') # type: int
PVTYPE = _StringEnumDescriptor(
'PVTYPE', True, 3, {'INT', 'B', 'SI', 'R', 'C'},
docstring='Pixel Value Type. This field shall contain an indicator of the type of computer representation '
'used for the value for each pixel for each band in the image. ') # type: str
IREP = _StringEnumDescriptor(
'IREP', True, 8,
{'MONO', 'RGB', 'RGB/LUT', 'MULTI', 'NODISPLY', 'NVECTOR', 'POLAR', 'VPH', 'YCbCr601'},
default_value='NODISPLY',
docstring='Image Representation. This field shall contain a valid indicator of the processing required '
'in order to display an image.') # type: str
ICAT = _StringDescriptor(
'ICAT', True, 8, default_value='SAR',
docstring='Image Category. This field shall contain a valid indicator of the specific category of image, '
'raster or grid data. The specific category of an IS reveals its intended use or the nature '
'of its collector.') # type: str
ABPP = _IntegerDescriptor(
'ABPP', True, 2,
docstring='Actual Bits-Per-Pixel Per Band. This field shall contain the number of "significant bits" for '
'the value in each band of each pixel without compression. Even when the image is compressed, '
'`ABPP` contains the number of significant bits per pixel that were present in the image '
'before compression. This field shall be less than or equal to Number of Bits Per Pixel '
'(field `NBPP`). The number of adjacent bits within each `NBPP` is '
'used to represent the value.') # type: int
PJUST = _StringEnumDescriptor(
'PJUST', True, 1, {'L', 'R'}, default_value='R',
docstring='Pixel Justification. When `ABPP` is not equal to `NBPP`, this field indicates whether the '
'significant bits are left justified (:code:`L`) or right '
'justified (:code:`R`).') # type: str
ICORDS = _StringEnumDescriptor(
'ICORDS', True, 1, {'U', 'G', 'C', 'N'}, default_value='G',
docstring='Image Coordinate Representation. This field shall contain a valid code indicating the type '
'of coordinate representation used for providing an approximate location of the image in the '
'Image Geographic Location field (`IGEOLO`).') # type: str
Comments = _NITFElementDescriptor(
'Comments', True, ImageComments, default_args={},
docstring='The image comments.') # type: ImageComments
Bands = _NITFElementDescriptor(
'Bands', True, ImageBands, default_args={},
docstring='The image bands.') # type: ImageBands
ISYNC = _IntegerDescriptor(
'ISYNC', True, 1, default_value=0,
docstring='Image Sync code. This field is reserved for future use. ') # type: int
IMODE = _StringEnumDescriptor(
'IMODE', True, 1, {'B', 'P', 'R', 'S'}, default_value='P',
docstring='Image Mode. This field shall indicate how the Image Pixels are '
'stored in the NITF file.') # type: str
NBPR = _IntegerDescriptor(
'NBPR', True, 4, default_value=1,
docstring='Number of Blocks Per Row. This field shall contain the number of image blocks in a row of '
'blocks (paragraph 5.4.2.2) in the horizontal direction. If the image consists of only a '
'single block, this field shall contain the value one.') # type: int
NBPC = _IntegerDescriptor(
'NBPC', True, 4, default_value=1,
docstring='Number of Blocks Per Column. This field shall contain the number of image blocks in a column '
'of blocks (paragraph 5.4.2.2) in the vertical direction. If the image consists of only a '
'single block, this field shall contain the value one.') # type: int
NPPBH = _IntegerDescriptor(
'NPPBH', True, 4, default_value=0,
docstring='Number of Pixels Per Block Horizontal. This field shall contain the number of pixels horizontally '
'in each block of the image. It shall be the case that the product of the values of the `NBPR` '
'field and the `NPPBH` field is greater than or equal to the value of the `NCOLS` field '
r'(:math:`NBPR\cdot NPPBH \geq NCOLS`). When NBPR is :code:`1`, setting the `NPPBH` '
'value to :code:`0` designates that the number of pixels horizontally is specified by the '
'value in NCOLS.') # type: int
NPPBV = _IntegerDescriptor(
'NPPBV', True, 4, default_value=0,
docstring='Number of Pixels Per Block Vertical. This field shall contain the number of pixels vertically '
'in each block of the image. It shall be the case that the product of the values of the `NBPC` '
'field and the `NPPBV` field is greater than or equal to the value of the `NROWS` field '
r'(:math:`NBPC\cdot NPPBV \geq NROWS`). When `NBPC` is :code:`1`, setting the `NPPBV` value '
r'to :code:`0` designates that the number of pixels vertically is specified by '
r'the value in `NROWS`.') # type: int
NBPP = _IntegerDescriptor(
'NBPP', True, 2, default_value=0,
docstring='Number of Bits Per Pixel Per Band.') # type: int
IDLVL = _IntegerDescriptor(
'IDLVL', True, 3, default_value=0,
docstring='Image Display Level. This field shall contain a valid value that indicates the display level of '
'the image relative to other displayed file components in a composite display. The valid values '
'are :code:`1-999`. The display level of each displayable segment (image or graphic) within a file '
'shall be unique.') # type: int
IALVL = _IntegerDescriptor(
'IALVL', True, 3, default_value=0,
docstring='Attachment Level. This field shall contain a valid value that indicates the attachment '
'level of the image.') # type: int
ILOC = _StringDescriptor(
'ILOC', True, 10, default_value='',
docstring='Image Location. The image location is the location of the first pixel of the first line of the '
'image. This field shall contain the image location offset from the `ILOC` or `SLOC` value '
'of the segment to which the image is attached or from the origin of the CCS when the image '
'is unattached (`IALVL` contains :code:`0`). A row or column value of :code:`0` indicates no offset. '
'Positive row and column values indicate offsets down and to the right while negative row and '
'column values indicate offsets up and to the left.') # type: str
IMAG = _StringDescriptor(
'IMAG', True, 4, default_value='1.0',
docstring='Image Magnification. This field shall contain the magnification (or reduction) factor of the '
'image relative to the original source image. Decimal values are used to indicate magnification, '
'and decimal fraction values indicate reduction. For example, :code:`2.30` indicates the original '
'image has been magnified by a factor of :code:`2.30`, while :code:`0.5` indicates '
'the original image has been reduced by a factor of 2.') # type: str
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
ExtendedHeader = _NITFElementDescriptor(
'ExtendedHeader', True, UserHeaderType, default_args={},
docstring='Extended subheader - TRE list.') # type: UserHeaderType
def __init__(self, **kwargs):
self._IC = None
self._COMRAT = None
self._IGEOLO = None
self._mask_subheader = None
super(ImageSegmentHeader0, self).__init__(**kwargs)
@property
def is_masked(self):
"""
bool: Does this image segment contain a mask?
"""
return self.IC in ['NM', 'M1', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8']
@property
def is_compressed(self):
"""
bool: Is this image segment compressed?
"""
return self.IC not in ['NC', 'NM']
@property
def IC(self):
"""
str: Image Compression. This field shall contain a valid code indicating
the form of compression used in representing the image data.
Valid values for this field are, :code:`C1` to represent bi-level, :code:`C3`
to represent JPEG, :code:`C4` to represent Vector Quantization, :code:`C5`
to represent lossless JPEG, :code:`I1` to represent down sampled JPEG,
and :code:`NC` to represent that the image is not compressed. Also valid are
:code:`M1, M3, M4`, and :code:`M5` for compressed images, and :code:`NM`
for uncompressed images indicating an image that contains a block
mask and/or a pad pixel mask. :code:`C6` and :code:`M6` are reserved values
that will represent a future correlated multicomponent compression
algorithm. :code:`C7` and :code:`M7` are reserved values that will represent
a future complex SAR compression. :code:`C8` and :code:`M8` are the values
for ISO standard compression JPEG 2000.
The format of a mask image is identical to the format of its corresponding non-masked image
except for the presence of an Image Data Mask at the beginning of
the image data area. The format of the Image Data Mask is described
in paragraph 5.4.3.2 and is shown in table A-3(A). The definitions
of the compression schemes associated with codes :code:`C1/M1, C3/M3, C4/M4, C5/M5`
are given, respectively, in ITU-T T.4, AMD2, MIL-STD-188-198A,
MIL-STD-188-199, and NGA N0106-97. :code:`C1` is found in ITU-T T.4 AMD2,
:code:`C3` is found in MIL-STD-188-198A, :code:`C4` is found in MIL-STD-188-199,
and :code:`C5` and :code:`I1` are found in NGA N0106-97. (NOTE: :code:`C2` (ARIDPCM) is not
valid in NITF 2.1.) The definition of the compression scheme associated
with codes :code:`C8/M8` is found in ISO/IEC 15444-1:2000 (with amendments 1 and 2).
"""
return self._IC
@IC.setter
def IC(self, value):
value = _parse_str(value, 2, 'NC', 'IC', self)
if value not in {
'NC', 'NM', 'C1', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'I1',
'M1', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8'}:
raise ValueError('IC got invalid value {}'.format(value))
self._IC = value
if value in ('NC', 'NM'):
self._COMRAT = None
        elif self._COMRAT is None:
self._COMRAT = '\x20'*4
@property
def COMRAT(self):
"""
None|str: Compression Rate Code. If the IC field contains one of
:code:`C1, C3, C4, C5, C8, M1, M3, M4, M5, M8, I1`, this field shall contain
a code indicating the compression rate for the image.
If `IC` is :code:`NC` or :code:`NM`, then this will be set to :code:`None`.
"""
return self._COMRAT
@COMRAT.setter
def COMRAT(self, value):
value = _parse_str(value, 4, None, 'COMRAT', self)
if value is None and self.IC not in ('NC', 'NM'):
value = '\x20'*4
logger.error(
'COMRAT value is None, but IC is not in {"NC", "NM"}.\n\t'
'This must be resolved.')
if value is not None and self.IC in ('NC', 'NM'):
value = None
logger.error(
'COMRAT value is something other than None, but IC in {"NC", "NM"}.\n\t'
'This is invalid, and COMRAT is being set to None.')
self._COMRAT = value
@property
def IGEOLO(self):
"""
None|str: Image Geographic Location. This field, when present, shall contain
an approximate geographic location which is not intended for analytical purposes
(e.g., targeting, mensuration, distance calculation); it is intended to support
general user appreciation for the image location (e.g., cataloguing). The
representation of the image corner locations is specified in the `ICORDS` field.
The locations of the four corners of the (significant) image data shall be given
in image coordinate order: (0,0), (0, MaxCol), (MaxRow, MaxCol), (MaxRow, 0).
MaxCol and MaxRow shall be determined from the values contained, respectively,
in the `NCOLS` field and the `NROWS` field.
"""
return self._IGEOLO
@IGEOLO.setter
def IGEOLO(self, value):
value = _parse_str(value, 60, None, 'IGEOLO', self)
if value is None and self.ICORDS.strip() != '':
value = '\x20'*60
if value is not None and self.ICORDS.strip() == '':
value = None
self._IGEOLO = value
@property
def mask_subheader(self):
# type: () -> Union[None, MaskSubheader]
"""
None|MaskSubheader: The mask subheader, if it has been appended.
"""
return self._mask_subheader
@mask_subheader.setter
def mask_subheader(self, value):
if value is None:
self._mask_subheader = None
return
if not isinstance(value, MaskSubheader):
raise ValueError(
'mask_subheader is expected to be an instance of MaskSubheader. '
'Got type {}'.format(type(value)))
if self.IC not in ['NM', 'M1', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8']:
raise ValueError(
'IC={}, which does not indicate the presence of a mask '
'subheader'.format(self.IC))
self._mask_subheader = value
def _get_attribute_length(self, fld):
if fld in ['COMRAT', 'IGEOLO']:
if getattr(self, '_'+fld) is None:
return 0
else:
return self._lengths[fld]
else:
return super(ImageSegmentHeader0, self)._get_attribute_length(fld)
@classmethod
def minimum_length(cls):
# COMRAT and IGEOLO may not be there
return super(ImageSegmentHeader0, cls).minimum_length() - 64
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'IC':
val = value[start:start+2].decode('utf-8')
fields['IC'] = val
if val in ('NC', 'NM'):
fields['COMRAT'] = None
out = start+2
elif attribute == 'ICORDS':
fields['ICORDS'] = value[start:start+1]
if fields['ICORDS'] == b'N':
fields['IGEOLO'] = None
out = start+1
else:
out = super(ImageSegmentHeader0, cls)._parse_attribute(fields, attribute, value, start)
return out
def get_uncompressed_block_size(self) -> int:
"""
Gets the size of an uncompressed block.
Note that if `IMODE == 'S'`, then each block consists of a single band.
Otherwise, a block consists of all bands.
Returns
-------
int
"""
nppbv = self.NROWS if self.NPPBV == 0 else self.NPPBV
nppbh = self.NCOLS if self.NPPBH == 0 else self.NPPBH
if self.IMODE == 'S':
return int(nppbh*nppbv*self.NBPP/8)
else:
return int(nppbh*nppbv*len(self.Bands)*self.NBPP/8)
def get_full_uncompressed_image_size(self) -> int:
"""
Gets the full size in bytes of the uncompressed image including any padding
in the blocks.
Returns
-------
int
"""
total_blocks = self.NBPR*self.NBPC
if self.IMODE == 'S':
total_blocks *= len(self.Bands)
return total_blocks*self.get_uncompressed_block_size()
def get_clevel(self) -> int:
"""
Gets the CLEVEL value for this image segment.
Returns
-------
int
"""
dim_size = max(self.NROWS, self.NCOLS)
if dim_size <= 2048:
return 3
elif dim_size <= 8192:
return 5
elif dim_size <= 65536:
return 6
else:
return 7
| 57,993 | 44.237129 | 120 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/symbol.py | """
The symbol header element definition - only applies to NITF 2.0
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import struct
import numpy
from .base import NITFElement, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor
from .security import NITFSecurityTags0
class SymbolSegmentHeader(NITFElement):
"""
Symbol segment subheader for NITF version 2.0 - see standards document
MIL-STD-2500A for more information.
"""
_ordering = (
'SY', 'SID', 'SNAME', 'Security', 'ENCRYP', 'STYPE',
'NLIPS', 'NPIXPL', 'NWDTH', 'NBPP', 'SDLVL', 'SALVL', 'SLOC',
'SLOC2', 'SCOLOR', 'SNUM', 'SROT', 'DLUT', 'UserHeader')
_lengths = {
'SY': 2, 'SID': 10, 'SNAME': 20, 'ENCRYP': 1, 'STYPE': 1,
'NLIPS': 4, 'NPIXPL': 4, 'NWDTH': 4, 'NBPP': 1,
'SDLVL': 3, 'SALVL': 3, 'SLOC': 10, 'SLOC2': 10, 'SCOLOR': 1,
'SNUM': 6, 'SROT': 3}
SY = _StringEnumDescriptor('SY', True, 2, {'SY', }) # type: str
SID = _StringDescriptor('SID', True, 10) # type: str
SNAME = _StringDescriptor('SNAME', True, 20) # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags0, default_args={}) # type: NITFSecurityTags0
ENCRYP = _StringEnumDescriptor('ENCRYP', True, 1, {'0'}) # type: str
STYPE = _StringDescriptor('STYPE', True, 1) # type: str
NLIPS = _IntegerDescriptor('NLIPS', True, 4) # type: int
NPIXPL = _IntegerDescriptor('NPIXPL', True, 4) # type: int
NWDTH = _IntegerDescriptor('NWDTH', True, 4) # type: int
NBPP = _IntegerDescriptor('NBPP', True, 1) # type: int
SDLVL = _IntegerDescriptor('SDLVL', True, 3) # type: int
SALVL = _IntegerDescriptor('SALVL', True, 3) # type: int
SLOC = _StringDescriptor('SLOC', True, 10) # type: str
SLOC2 = _StringDescriptor('SLOC2', True, 10) # type: str
SCOLOR = _StringDescriptor('SCOLOR', True, 1) # type: str
    SNUM = _StringDescriptor('SNUM', True, 6)  # type: str
SROT = _IntegerDescriptor('SROT', True, 3) # type: int
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={}) # type: UserHeaderType
def __init__(self, **kwargs):
self._DLUT = None
super(SymbolSegmentHeader, self).__init__(**kwargs)
@classmethod
def minimum_length(cls):
return 13
@property
def DLUT(self):
"""
The Look-up Table (LUT) data.
Returns
-------
None|numpy.ndarray
"""
return self._DLUT
@DLUT.setter
def DLUT(self, value):
if value is None:
self._DLUT = None
return
if not isinstance(value, numpy.ndarray):
raise TypeError('DLUT must be a numpy array')
if value.dtype.name != 'uint8':
raise ValueError('DLUT must be a numpy array of dtype uint8, got {}'.format(value.dtype.name))
if value.ndim != 2 or value.shape[1] != 3:
raise ValueError('DLUT must be a two-dimensional array of shape (N, 3).')
        if value.shape[0] > 256:
            raise ValueError(
                'The number of DLUT entries must be 256 or fewer. '
                'Got DLUT shape {}'.format(value.shape))
self._DLUT = value
@property
def NELUT(self):
"""
Number of LUT Entries.
Returns
-------
int
"""
        return 0 if self._DLUT is None else self._DLUT.shape[0]
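    # Illustrative sketch (not part of the original module): a two entry
    # black/white look-up table on a hypothetical header instance.
    #
    #   import numpy
    #   header.DLUT = numpy.array(
    #       [[0, 0, 0], [255, 255, 255]], dtype=numpy.uint8)
    #   header.NELUT  # -> 2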
def _get_attribute_bytes(self, attribute):
if attribute == 'DLUT':
if self.NELUT == 0:
out = b'000'
else:
                out = '{0:03d}'.format(self.NELUT).encode() + \
struct.pack('{}B'.format(self.NELUT*3), *self.DLUT.flatten())
return out
else:
return super(SymbolSegmentHeader, self)._get_attribute_bytes(attribute)
def _get_attribute_length(self, attribute):
if attribute == 'DLUT':
return 3 + self.NELUT*3
else:
return super(SymbolSegmentHeader, self)._get_attribute_length(attribute)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'DLUT':
loc = start
nelut = int(value[loc:loc + 3])
loc += 3
if nelut == 0:
fields['DLUT'] = None
else:
fields['DLUT'] = numpy.array(
struct.unpack(
'{}B'.format(3*nelut),
value[loc:loc + 3*nelut]), dtype=numpy.uint8).reshape((nelut, 3))
loc += nelut*3
return loc
return super(SymbolSegmentHeader, cls)._parse_attribute(fields, attribute, value, start)
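    # Round-trip sketch (not part of the original module): DLUT serializes as a
    # three digit entry count followed by NELUT RGB byte triples, and
    # _parse_attribute inverts this into an (NELUT, 3) uint8 array, so the
    # look-up table survives a serialize/deserialize cycle unchanged.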
| 4,818 | 34.696296 | 106 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/nitf_head.py | """
The main NITF header definitions.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from .base import NITFElement, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor, _RawDescriptor, \
_ItemArrayHeaders
from .security import NITFSecurityTags, NITFSecurityTags0
logger = logging.getLogger(__name__)
#############
# NITF 2.1 version
class ImageSegmentsType(_ItemArrayHeaders):
"""
This holds the image subheader and item sizes.
"""
_subhead_len = 6
_item_len = 10
class GraphicsSegmentsType(_ItemArrayHeaders):
"""
This holds the graphics subheader and item sizes.
"""
_subhead_len = 4
_item_len = 6
class TextSegmentsType(_ItemArrayHeaders):
"""
This holds the text subheader size and item sizes.
"""
_subhead_len = 4
_item_len = 5
class DataExtensionsType(_ItemArrayHeaders):
"""
This holds the data extension subheader and item sizes.
"""
_subhead_len = 4
_item_len = 9
class ReservedExtensionsType(_ItemArrayHeaders):
"""
This holds the reserved extension subheader and item sizes.
"""
_subhead_len = 4
_item_len = 7
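# Illustrative note (not part of the original module): each segments type above
# is parsed from the main header as a three digit count followed by fixed width
# (subheader length, item length) pairs; e.g. for three image segments the raw
# field would resemble b'003' + 3 * (6 byte subheader length + 10 byte item
# length).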
class NITFHeader(NITFElement):
"""
The main NITF file header for NITF version 2.1 - see standards document
Joint BIIF Profile (JBP) for more information.
"""
_ordering = (
'FHDR', 'FVER', 'CLEVEL', 'STYPE',
'OSTAID', 'FDT', 'FTITLE', 'Security',
'FSCOP', 'FSCPYS', 'ENCRYP', 'FBKGC',
'ONAME', 'OPHONE', 'FL', 'HL',
'ImageSegments', 'GraphicsSegments', 'NUMX',
'TextSegments', 'DataExtensions', 'ReservedExtensions',
'UserHeader', 'ExtendedHeader')
_lengths = {
'FHDR': 4, 'FVER': 5, 'CLEVEL': 2, 'STYPE': 4,
'OSTAID': 10, 'FDT': 14, 'FTITLE': 80,
'FSCOP': 5, 'FSCPYS': 5, 'ENCRYP': 1, 'FBKGC': 3,
'ONAME': 24, 'OPHONE': 18, 'FL': 12, 'HL': 6,
'NUMX': 3}
CLEVEL = _IntegerDescriptor(
'CLEVEL', True, 2, default_value=0,
docstring='Complexity Level. This field shall contain the complexity level required to '
'interpret fully all components of the file. Valid entries are assigned in '
'accordance with complexity levels established in Table A-10.') # type: int
STYPE = _StringDescriptor(
'STYPE', True, 4, default_value='BF01',
docstring='Standard Type. Standard type or capability. A BCS-A character string `BF01` '
'which indicates that this file is formatted using ISO/IEC IS 12087-5. '
'NITF02.10 is intended to be registered as a profile of ISO/IEC IS 12087-5.') # type: str
OSTAID = _StringDescriptor(
'OSTAID', True, 10, default_value='',
docstring='Originating Station ID. This field shall contain the identification code or name of '
'the originating organization, system, station, or product. It shall not be '
'filled with BCS spaces') # type: str
FDT = _StringDescriptor(
'FDT', True, 14, default_value='',
docstring='File Date and Time. This field shall contain the time (UTC) of the files '
'origination in the format `YYYYMMDDhhmmss`.') # type: str
FTITLE = _StringDescriptor(
'FTITLE', True, 80, default_value='',
docstring='File Title. This field shall contain the title of the file.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The image security tags.') # type: NITFSecurityTags
FSCOP = _IntegerDescriptor(
'FSCOP', True, 5, default_value=0,
docstring='File Copy Number. This field shall contain the copy number of the file.') # type: int
FSCPYS = _IntegerDescriptor(
'FSCPYS', True, 5, default_value=0,
docstring='File Number of Copies. This field shall contain the total number of '
'copies of the file.') # type: int
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
FBKGC = _RawDescriptor(
'FBKGC', True, 3, default_value=b'\x00\x00\x00',
docstring='File Background Color. This field shall contain the three color components of '
'the file background in the order Red, Green, Blue.') # type: bytes
ONAME = _StringDescriptor(
'ONAME', True, 24, default_value='',
docstring='Originator Name. This field shall contain a valid name for the operator '
'who originated the file.') # type: str
OPHONE = _StringDescriptor(
'OPHONE', True, 18, default_value='',
docstring='Originator Phone Number. This field shall contain a valid phone number '
'for the operator who originated the file.') # type: str
FL = _IntegerDescriptor(
'FL', True, 12, docstring='The size in bytes of the entire file.') # type: int
ImageSegments = _NITFElementDescriptor(
'ImageSegments', True, ImageSegmentsType, default_args={},
docstring='The image segment basic information.') # type: ImageSegmentsType
GraphicsSegments = _NITFElementDescriptor(
'GraphicsSegments', True, GraphicsSegmentsType, default_args={},
docstring='The graphics segment basic information.') # type: GraphicsSegmentsType
TextSegments = _NITFElementDescriptor(
'TextSegments', True, TextSegmentsType, default_args={},
docstring='The text segment basic information.') # type: TextSegmentsType
DataExtensions = _NITFElementDescriptor(
'DataExtensions', True, DataExtensionsType, default_args={},
docstring='The data extension basic information.') # type: DataExtensionsType
ReservedExtensions = _NITFElementDescriptor(
'ReservedExtensions', True, ReservedExtensionsType, default_args={},
docstring='The reserved extension basic information.') # type: ReservedExtensionsType
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
ExtendedHeader = _NITFElementDescriptor(
'ExtendedHeader', True, UserHeaderType, default_args={},
docstring='Extended subheader - TRE list.') # type: UserHeaderType
def __init__(self, **kwargs):
self._FHDR = 'NITF'
self._FVER = '02.10'
self._NUMX = 0
super(NITFHeader, self).__init__(**kwargs)
@property
def FHDR(self):
"""
str: File Profile Name. This field shall contain the character string uniquely denoting
that the file is formatted using NITF. Always `NITF`.
"""
return self._FHDR
@FHDR.setter
def FHDR(self, value):
# static value "NITF", but the NITFElement extension usage requires phantom setting
pass
@property
def FVER(self):
"""
str: File Version. This field shall contain a BCS-A character string uniquely
denoting the version. Always `02.10`.
"""
return self._FVER
@FVER.setter
def FVER(self, value):
# static value "02.10", but the NITFElement extension usage requires phantom setting
pass
@property
def NUMX(self):
"""
int: Reserved for future use. Always :code:`0`.
"""
return self._NUMX
@NUMX.setter
def NUMX(self, value):
# static value 0, but the NITFElement extension usage requires phantom setting
pass
@property
def HL(self):
"""
int: The length of this header object in bytes.
"""
return self.get_bytes_length()
@HL.setter
def HL(self, value):
        # derived value, but the NITFElement extension usage requires phantom setting
pass
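# A minimal usage sketch (not part of the original module), assuming the
# descriptor defaults are acceptable:
#
#   header = NITFHeader(OSTAID='EXAMPLE', FTITLE='example file')
#   header.HL == header.get_bytes_length()  # HL is derived, not settable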
#############
# NITF 2.0 version
class SymbolSegmentsType(_ItemArrayHeaders):
"""
This holds the symbol subheader and item sizes.
"""
_subhead_len = 4
_item_len = 6
class LabelSegmentsType(_ItemArrayHeaders):
"""
This holds the label subheader and item sizes.
"""
_subhead_len = 4
_item_len = 3
class NITFHeader0(NITFElement):
"""
The main NITF file header for NITF version 2.0 - see standards document
MIL-STD-2500A for more information.
"""
_ordering = (
'FHDR', 'FVER', 'CLEVEL', 'STYPE', 'OSTAID', 'FDT', 'FTITLE', 'Security',
'FSCOP', 'FSCPYS', 'ENCRYP', 'ONAME', 'OPHONE', 'FL', 'HL',
'ImageSegments', 'SymbolSegments', 'LabelSegments', 'TextSegments',
'DataExtensions', 'ReservedExtensions', 'UserHeader', 'ExtendedHeader')
_lengths = {
'FHDR': 4, 'FVER': 5, 'CLEVEL': 2, 'STYPE': 4,
'OSTAID': 10, 'FDT': 14, 'FTITLE': 80,
'FSCOP': 5, 'FSCPYS': 5, 'ENCRYP': 1,
'ONAME': 27, 'OPHONE': 18, 'FL': 12, 'HL': 6}
CLEVEL = _IntegerDescriptor(
'CLEVEL', True, 2, default_value=0,
docstring='Complexity Level. This field shall contain the complexity level required to '
'interpret fully all components of the file. Valid entries are assigned in '
'accordance with complexity levels established in Table A-10.') # type: int
STYPE = _StringDescriptor(
'STYPE', True, 4, default_value='BF01',
docstring='Standard Type. Standard type or capability. A BCS-A character string `BF01` '
'which indicates that this file is formatted using ISO/IEC IS 12087-5. '
'NITF02.10 is intended to be registered as a profile of ISO/IEC IS 12087-5.') # type: str
OSTAID = _StringDescriptor(
'OSTAID', True, 10, default_value='',
docstring='Originating Station ID. This field shall contain the identification code or name of '
'the originating organization, system, station, or product. It shall not be '
'filled with BCS spaces') # type: str
FDT = _StringDescriptor(
'FDT', True, 14, default_value='',
docstring='File Date and Time. This field shall contain the time (UTC) of the files '
'origination in the format `YYYYMMDDhhmmss`.') # type: str
FTITLE = _StringDescriptor(
'FTITLE', True, 80, default_value='',
docstring='File Title. This field shall contain the title of the file.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags0, default_args={},
docstring='The image security tags.') # type: NITFSecurityTags0
FSCOP = _StringDescriptor(
'FSCOP', True, 5, default_value=' 0',
docstring='File Copy Number. This field shall contain the copy number of the file.') # type: str
FSCPYS = _StringDescriptor(
'FSCPYS', True, 5, default_value=' 0',
docstring='File Number of Copies. This field shall contain the total number of '
'copies of the file.') # type: str
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
ONAME = _StringDescriptor(
'ONAME', True, 27, default_value='',
docstring='Originator Name. This field shall contain a valid name for the operator '
'who originated the file.') # type: str
OPHONE = _StringDescriptor(
'OPHONE', True, 18, default_value='',
docstring='Originator Phone Number. This field shall contain a valid phone number '
'for the operator who originated the file.') # type: str
FL = _IntegerDescriptor(
'FL', True, 12, docstring='The size in bytes of the entire file.') # type: int
ImageSegments = _NITFElementDescriptor(
'ImageSegments', True, ImageSegmentsType, default_args={},
docstring='The image segment basic information.') # type: ImageSegmentsType
SymbolSegments = _NITFElementDescriptor(
'SymbolSegments', True, SymbolSegmentsType, default_args={},
docstring='The symbols segment basic information.') # type: SymbolSegmentsType
LabelSegments = _NITFElementDescriptor(
'LabelSegments', True, LabelSegmentsType, default_args={},
docstring='The labels segment basic information.') # type: LabelSegmentsType
TextSegments = _NITFElementDescriptor(
'TextSegments', True, TextSegmentsType, default_args={},
docstring='The text segment basic information.') # type: TextSegmentsType
DataExtensions = _NITFElementDescriptor(
'DataExtensions', True, DataExtensionsType, default_args={},
docstring='The data extension basic information.') # type: DataExtensionsType
ReservedExtensions = _NITFElementDescriptor(
'ReservedExtensions', True, ReservedExtensionsType, default_args={},
docstring='The reserved extension basic information.') # type: ReservedExtensionsType
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
ExtendedHeader = _NITFElementDescriptor(
'ExtendedHeader', True, UserHeaderType, default_args={},
docstring='Extended subheader - TRE list.') # type: UserHeaderType
def __init__(self, **kwargs):
self._FHDR = 'NITF'
self._FVER = '02.00'
super(NITFHeader0, self).__init__(**kwargs)
@property
def FHDR(self):
"""
str: File Profile Name. This field shall contain the character string uniquely denoting
that the file is formatted using NITF. Always `NITF`.
"""
return self._FHDR
@FHDR.setter
def FHDR(self, value):
# static value "NITF", but the NITFElement extension usage requires phantom setting
pass
@property
def FVER(self):
"""
str: File Version. This field shall contain a BCS-A character string uniquely
denoting the version, should generally be `02.00` or `01.10`.
"""
return self._FVER
@FVER.setter
def FVER(self, value):
        if isinstance(value, bytes):
value = value.decode('utf-8')
if not isinstance(value, str):
raise TypeError('FVER is required to be a string')
if len(value) != 5:
raise ValueError('FVER must have length 5')
if value not in ['02.00', '01.10']:
logger.warning('Got unexpected version {}, and NITF parsing is likely to fail.'.format(value))
self._FVER = value
@property
def HL(self):
"""
int: The length of this header object in bytes.
"""
return self.get_bytes_length()
@HL.setter
def HL(self, value):
# derived value, but the NITFElement extension usage requires phantom setting
pass
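# Illustrative behavior (not part of the original module) for the FVER setter
# on a hypothetical instance:
#
#   header0 = NITFHeader0()
#   header0.FVER = b'01.10'   # bytes are decoded and accepted
#   header0.FVER = '03.00'    # accepted, but a warning is logged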
| 14,973 | 38.718833 | 108 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/graphics.py | """
The graphics header element definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from .base import NITFElement, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor
from .security import NITFSecurityTags
class GraphicsSegmentHeader(NITFElement):
"""
Graphics segment subheader - see standards document Joint BIIF Profile (JBP) for more
information.
"""
_ordering = (
'SY', 'SID', 'SNAME', 'Security', 'ENCRYP', 'SFMT',
'SSTRUCT', 'SDLVL', 'SALVL', 'SLOC', 'SBND1',
'SCOLOR', 'SBND2', 'SRES2', 'UserHeader')
_lengths = {
'SY': 2, 'SID': 10, 'SNAME': 20, 'ENCRYP': 1,
'SFMT': 1, 'SSTRUCT': 13, 'SDLVL': 3, 'SALVL': 3,
'SLOC': 10, 'SBND1': 10, 'SCOLOR': 1, 'SBND2': 10,
'SRES2': 2}
SY = _StringEnumDescriptor(
'SY', True, 2, {'SY', }, default_value='SY',
docstring='File part type.') # type: str
SID = _StringDescriptor(
'SID', True, 10, default_value='',
docstring='Graphic Identifier. This field shall contain a valid alphanumeric identification code '
'associated with the graphic. The valid codes are determined by the application.') # type: str
SNAME = _StringDescriptor(
'SNAME', True, 20, default_value='',
docstring='Graphic name. This field shall contain an alphanumeric name for the graphic.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The security tags.') # type: NITFSecurityTags
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
SFMT = _StringDescriptor(
'SFMT', True, 1, default_value='C',
docstring='Graphic Type. This field shall contain a valid indicator of the '
'representation type of the graphic.') # type: str
SSTRUCT = _IntegerDescriptor(
'SSTRUCT', True, 13, default_value=0,
docstring='Reserved for Future Use.') # type: int
SDLVL = _IntegerDescriptor(
'SDLVL', True, 3, default_value=1,
docstring='Graphic Display Level. This field shall contain a valid value that indicates '
'the graphic display level of the graphic relative to other displayed file '
'components in a composite display. The valid values are :code:`1-999`. '
'The display level of each displayable file component (image or graphic) '
'within a file shall be unique.') # type: int
SALVL = _IntegerDescriptor(
'SALVL', True, 3, default_value=0,
docstring='Graphic Attachment Level. This field shall contain a valid value '
'that indicates the attachment level of the graphic. Valid values for '
'this field are 0 and the display level value of any other '
'image or graphic in the file.') # type: int
SLOC = _IntegerDescriptor(
'SLOC', True, 10, default_value=0,
docstring='Graphic Location. The graphics location is specified by providing the location '
'of the graphics origin point relative to the position (location of the CCS, image, '
'or graphic to which it is attached. This field shall contain the graphic location '
'offset from the `ILOC` or `SLOC` value of the CCS, image, or graphic to which the graphic '
'is attached or from the origin of the CCS when the graphic is unattached (`SALVL = 0`). '
'A row and column value of :code:`0` indicates no offset. Positive row and column values indicate '
'offsets down and to the right, while negative row and column values indicate '
'offsets up and to the left.') # type: int
SBND1 = _IntegerDescriptor(
'SBND1', True, 10, default_value=0,
docstring='First Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. It is '
'the upper left corner of the bounding box for the CGM graphic.') # type: int
SCOLOR = _StringEnumDescriptor(
'SCOLOR', True, 1, {'C', 'M'}, default_value='M',
docstring='Graphic Color. If `SFMT = C`, this field shall contain a :code:`C` if the CGM contains any '
'color pieces or an :code:`M` if it is monochrome (i.e., black, '
'white, or levels of grey).') # type: str
SBND2 = _IntegerDescriptor(
'SBND2', True, 10, default_value=0,
docstring='Second Graphic Bound Location. This field shall contain an ordered pair of '
'integers defining a location in Cartesian coordinates for use with CGM graphics. '
'It is the lower right corner of the bounding box for the CGM graphic.') # type: int
SRES2 = _IntegerDescriptor(
'SRES2', True, 2, default_value=0,
docstring='Reserved for Future Use.') # type: int
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
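# Illustrative sketch (editor's addition, not part of the library): building a
# graphics subheader and round-tripping it through its fixed-width rendering.
# Field values are hypothetical; the byte layout is handled by the NITFElement
# base class.
def _example_graphics_roundtrip():
    header = GraphicsSegmentHeader(SID='GRAPHIC001', SNAME='example graphic', SDLVL=2)
    raw = header.to_bytes()
    assert len(raw) == header.get_bytes_length()
    # parsing starts from a byte offset, mirroring how subheaders are read from a file
    return GraphicsSegmentHeader.from_bytes(raw, 0)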
| 5,334 | 55.755319 | 117 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/text.py | """
The text extension subheader definitions.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from .base import NITFElement, UserHeaderType, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor
from .security import NITFSecurityTags, NITFSecurityTags0
########
# NITF 2.1
class TextSegmentHeader(NITFElement):
"""
Text Segment Subheader for NITF version 2.1 - see standards document
Joint BIIF Profile (JBP) for more information.
"""
_ordering = (
'TE', 'TEXTID', 'TXTALVL', 'TXTDT', 'TXTITL', 'Security',
'ENCRYP', 'TXTFMT', 'UserHeader')
_lengths = {
'TE': 2, 'TEXTID': 7, 'TXTALVL': 3, 'TXTDT': 14, 'TXTITL': 80,
'ENCRYP': 1, 'TXTFMT': 3}
TE = _StringEnumDescriptor(
'TE', True, 2, {'TE', }, default_value='TE',
docstring='File part type.') # type: str
TEXTID = _StringDescriptor(
'TEXTID', True, 7, default_value='',
docstring='Text Identifier. This field shall contain a valid alphanumeric identification '
'code associated with the text item. The valid codes are determined '
'by the application.') # type: str
TXTALVL = _IntegerDescriptor(
'TXTALVL', True, 3, default_value=0,
docstring='Text Attachment Level. This field shall contain a valid value that '
'indicates the attachment level of the text.') # type: int
TXTDT = _StringDescriptor(
'TXTDT', True, 14, default_value='',
docstring='Text Date and Time. This field shall contain the time (UTC) of origination '
'of the text in the format :code:`YYYYMMDDhhmmss`') # type: str
TXTITL = _StringDescriptor(
'TXTITL', True, 80, default_value='',
docstring='Text Title. This field shall contain the title of the text item.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The security tags.') # type: NITFSecurityTags
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
TXTFMT = _StringEnumDescriptor(
'TXTFMT', True, 3, {'', 'MTF', 'STA', 'UT1', 'U8S'}, default_value='',
docstring='Text Format. This field shall contain a valid three character code '
'indicating the format or type of text data. Valid codes are :code:`MTF` to '
'indicate USMTF (Refer to MIL-STD-6040 for examples of the USMTF format), '
':code:`STA` to indicate BCS, :code:`UT1` to indicate ECS text formatting, and '
':code:`U8S` to indicate U8S text formatting.') # type: str
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
########
# NITF 2.0
class TextSegmentHeader0(NITFElement):
"""
Text Segment Subheader for NITF version 2.0 - see standards document
MIL-STD-2500A for more information.
"""
_ordering = (
'TE', 'TEXTID', 'TXTDT', 'TXTITL', 'Security',
'ENCRYP', 'TXTFMT', 'UserHeader')
_lengths = {
'TE': 2, 'TEXTID': 10, 'TXTDT': 14, 'TXTITL': 80,
'ENCRYP': 1, 'TXTFMT': 3}
TE = _StringEnumDescriptor(
'TE', True, 2, {'TE', }, default_value='TE',
docstring='File part type.') # type: str
TEXTID = _StringDescriptor(
'TEXTID', True, 10, default_value='',
docstring='Text Identifier. This field shall contain a valid alphanumeric identification '
'code associated with the text item. The valid codes are determined '
'by the application.') # type: str
TXTDT = _StringDescriptor(
'TXTDT', True, 14, default_value='',
docstring='Text Date and Time. This field shall contain the time (UTC) of origination '
'of the text in the format :code:`YYYYMMDDhhmmss`') # type: str
TXTITL = _StringDescriptor(
'TXTITL', True, 80, default_value='',
docstring='Text Title. This field shall contain the title of the text item.') # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags0, default_args={},
docstring='The security tags.') # type: NITFSecurityTags0
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
TXTFMT = _StringDescriptor(
'TXTFMT', True, 3, default_value='',
docstring='Text Format. This field shall contain a valid three character code '
'indicating the format or type of text data. Valid codes are :code:`MTF` to '
'indicate USMTF (Refer to MIL-STD-6040 for examples of the USMTF format), '
':code:`STA` to indicate BCS, :code:`UT1` to indicate ECS text formatting, and '
':code:`U8S` to indicate U8S text formatting.') # type: str
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
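# Illustrative sketch (editor's addition): the NITF 2.1 and 2.0 text subheaders
# differ in TEXTID width (7 versus 10 characters) and in the presence of TXTALVL,
# so the same logical content is built slightly differently per version. Field
# values are hypothetical.
def _example_text_headers():
    v21 = TextSegmentHeader(TEXTID='TXT0001', TXTALVL=0, TXTDT='20240101120000', TXTITL='example')
    v20 = TextSegmentHeader0(TEXTID='TXT0000001', TXTDT='20240101120000', TXTITL='example')
    return v21.to_bytes(), v20.to_bytes()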
| 5,243 | 45.821429 | 98 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/security.py | """
The security tags definitions, which are used in each NITF subheader element.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from .base import NITFElement, _StringDescriptor, _StringEnumDescriptor, _parse_str
#########
# NITF 2.1 version
class NITFSecurityTags(NITFElement):
"""
The NITF security tags object for NITF version 2.1 - see standards document
Joint BIIF Profile (JBP) for more information.
    In the NITF standard, this object is simply redefined (in an identical way)
for each of the main header and subheader objects. This object is intended to
be flexibly used for any and all of these.
"""
_ordering = (
'CLAS', 'CLSY', 'CODE', 'CTLH', 'REL', 'DCTP', 'DCDT', 'DCXM',
'DG', 'DGDT', 'CLTX', 'CAPT', 'CAUT', 'CRSN', 'SRDT', 'CTLN')
_lengths = {
'CLAS': 1, 'CLSY': 2, 'CODE': 11, 'CTLH': 2,
'REL': 20, 'DCTP': 2, 'DCDT': 8, 'DCXM': 4,
'DG': 1, 'DGDT': 8, 'CLTX': 43, 'CAPT': 1,
'CAUT': 40, 'CRSN': 1, 'SRDT': 8, 'CTLN': 15}
CLAS = _StringEnumDescriptor(
'CLAS', True, 1, {'U', 'R', 'C', 'S', 'T'}, default_value='U',
docstring='The classification level.') # type: str
CLSY = _StringDescriptor(
'CLSY', True, 2, default_value='',
docstring='Security Classification System. This field shall contain valid values '
'indicating the national or multinational security system used to classify this element. '
'Country Codes per FIPS PUB 10-4 shall be used to indicate national security systems. '
'The designator :code:`XN` is for classified data generated by a component using NATO security '
'system marking guidance. This code is outside the FIPS 10-4 document listing, and was '
'selected to not duplicate existing codes.') # type: str
CODE = _StringDescriptor(
'CODE', True, 11, default_value='',
docstring='Codewords. This field shall contain a valid indicator of the security '
'compartments associated. Valid values include one or more of the digraphs found '
'in table A-4. Multiple entries shall be separated by a single ECS spaces (0x20). '
'The selection of a relevant set of codewords is application '
'specific.') # type: str
CTLH = _StringDescriptor(
'CTLH', True, 2, default_value='',
docstring='Control and Handling. This field shall contain valid additional security control '
'and/or handling instructions (caveats) associated with this element.') # type: str
REL = _StringDescriptor(
'REL', True, 20, default_value='',
docstring='Releasing Instructions. This field shall contain a valid list of country and/or '
                  'multilateral entity codes indicating the countries and/or multilateral entities to which this element '
'is authorized for release. Valid items in the list are one or more country codes as '
'found in FIPS PUB 10-4 and/or codes identifying multilateral entities.') # type: str
DCTP = _StringEnumDescriptor(
'DCTP', True, 2, {'', 'DD', 'DE', 'GD', 'GE', 'O', 'X'}, default_value='',
docstring='Declassification Type. This field shall contain a valid indicator of the type of '
'security declassification or downgrading instructions which apply '
'to this element.') # type: str
DCDT = _StringDescriptor(
'DCDT', True, 8, default_value='',
docstring='Declassification Date. This field shall indicate the date on which this element '
'is to be declassified if the value in Declassification '
'Type is :code:`DD`.') # type: str
DCXM = _StringEnumDescriptor(
'DCXM', True, 4,
{'', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'X8',
'25X1', '25X2', '25X3', '25X4', '25X5', '25X6', '25X7', '25X8', '25X9',
'DN10', 'DNI'}, default_value='',
docstring='Declassification Exemption. This field shall indicate the reason this element is '
'exempt from automatic declassification if the value in Declassification '
'Type is :code:`X`.') # type: str
DG = _StringEnumDescriptor(
'DG', True, 1, {'', 'S', 'C', 'R'}, default_value='',
docstring='Downgrade. This field shall indicate the classification level to which this element is to '
'be downgraded if the values in Declassification Type are '
':code:`GD` or :code:`GE`.') # type: str
DGDT = _StringDescriptor(
'DGDT', True, 8, default_value='',
docstring='Downgrade Date. This field shall indicate the date on which this element is to be downgraded '
'if the value in Declassification Type is :code:`GD`.') # type: str
CLTX = _StringDescriptor(
'CLTX', True, 43, default_value='',
docstring='Classification Text. This field shall be used to provide additional information about '
'classification to include identification of a declassification or downgrading event if the '
'values in Declassification Type are DE or GE. It may also be used to identify multiple '
'classification sources and/or any other special handling rules. '
'Values are user defined free text.') # type: str
CAPT = _StringEnumDescriptor(
'CAPT', True, 1, {'', 'O', 'D', 'M'}, default_value='',
docstring='Classification Authority Type. This field shall indicate the type of authority '
'used to classify this element.') # type: str
CAUT = _StringDescriptor(
'CAUT', True, 40, default_value='',
docstring='Classification Authority. This field shall identify the classification authority '
'for this element dependent upon the value in Classification Authority Type. Values are user '
'defined free text which should contain the following information: original classification '
'authority name and position or personal identifier if the value in Classification Authority '
'Type is O; title of the document or security classification guide used to classify this element '
'if the value in Classification Authority Type is D; and Derive-Multiple if the classification '
'was derived from multiple sources. In the latter case, the originator will maintain a record '
'of the sources used in accordance with existing security directives. One of the multiple '
                  'sources may also be identified in Classification Text if desired.') # type: str
CRSN = _StringEnumDescriptor(
'CRSN', True, 1, {'', 'A', 'B', 'C', 'D', 'E', 'F', 'G'}, default_value='',
docstring='Classification Reason. This field shall contain values indicating the reason for '
'classifying the graphic. Valid values are A to G. These correspond to the reasons for '
'original classification per E.O. 12958, Section 1.5.(a) to (g).') # type: str
SRDT = _StringDescriptor(
'SRDT', True, 8, default_value='',
docstring='Security Source Date. This field shall indicate the date of the source used to derive '
'the classification of the graphic. In the case of multiple sources, the date of the '
'most recent source shall be used.') # type: str
CTLN = _StringDescriptor(
'CTLN', True, 15, default_value='',
docstring='Security Control Number. This field shall contain a valid security control number '
'associated with the graphic. The format of the security control number shall be in '
'accordance with the regulations governing the appropriate '
'security channel(s).') # type: str
#########
# NITF 2.0 version
class NITFSecurityTags0(NITFElement):
"""
The NITF security tags object for NITF version 2.0 - see standards document
MIL-STD-2500A for more information.
    In the NITF standard, this object is simply redefined (in an identical way)
for each of the main header and subheader objects. This object is intended to
be flexibly used for any and all of these.
"""
_ordering = (
'CLAS', 'CODE', 'CTLH', 'REL', 'CAUT', 'CTLN', 'DWNG', 'DEVT')
_lengths = {
'CLAS': 1, 'CODE': 40, 'CTLH': 40, 'REL': 40, 'CAUT': 20,
'CTLN': 20, 'DWNG': 6, 'DEVT': 40}
CLAS = _StringEnumDescriptor(
'CLAS', True, 1, {'U', 'R', 'C', 'S', 'T'}, default_value='U') # type: str
CODE = _StringDescriptor('CODE', True, 40) # type: str
CTLH = _StringDescriptor('CTLH', True, 40) # type: str
REL = _StringDescriptor('REL', True, 40) # type: str
CAUT = _StringDescriptor('CAUT', True, 20) # type: str
CTLN = _StringDescriptor('CTLN', True, 20) # type: str
DWNG = _StringDescriptor('DWNG', True, 6) # type: str
def __init__(self, **kwargs):
self._DEVT = None
super(NITFSecurityTags0, self).__init__(**kwargs)
@property
def DEVT(self):
return self._DEVT
@DEVT.setter
def DEVT(self, value):
self._DEVT = _parse_str(value, 40, None, 'DEVT', self)
def _get_attribute_length(self, fld):
if fld == 'DEVT':
return 0 if self.DWNG != '999998' else self._lengths[fld]
else:
return super(NITFSecurityTags0, self)._get_attribute_length(fld)
@classmethod
def minimum_length(cls):
# DEVT may not be there
return super(NITFSecurityTags0, cls).minimum_length() - 40
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'DEVT':
if fields['DWNG'] == b'999998':
fields['DEVT'] = value[start:start+40].decode('utf-8')
return start+40
else:
fields['DEVT'] = None
# nothing to be done
return start
else:
return super(NITFSecurityTags0, cls)._parse_attribute(fields, attribute, value, start)
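# Illustrative sketch (editor's addition): DEVT is a conditional field - it is
# only serialized (and only parsed) when DWNG carries the sentinel value
# '999998', so the rendered length of the tags varies by 40 bytes. Field values
# are hypothetical.
def _example_conditional_devt():
    plain = NITFSecurityTags0(CLAS='U')
    downgraded = NITFSecurityTags0(CLAS='U', DWNG='999998', DEVT='SOME EVENT')
    return downgraded.get_bytes_length() - plain.get_bytes_length()  # expected 40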
| 10,274 | 53.078947 | 116 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/label.py | """
The label extension subheader definitions - applies only to NITF version 2.0
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from .base import NITFElement, UserHeaderType, _IntegerDescriptor, _RawDescriptor, \
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor
from .security import NITFSecurityTags0
class LabelSegmentHeader(NITFElement):
"""
    Label segment subheader for NITF version 2.0 - see standards document
MIL-STD-2500A for more information.
"""
_ordering = (
'LA', 'LID', 'Security', 'ENCRYP', 'LFS', 'LCW', 'LCH',
'LDLVL', 'LALVL', 'LLOC', 'LTC', 'LBC', 'UserHeader')
_lengths = {
'LA': 2, 'LID': 7, 'ENCRYP': 1, 'LFS': 1, 'LCW': 2, 'LCH': 2,
'LDLVL': 3, 'LALVL': 3, 'LLOC': 10, 'LTC': 3, 'LBC': 3}
#######
LA = _StringEnumDescriptor(
'LA', True, 2, {'LA', }, default_value='LA') # type: str
LID = _StringDescriptor('LID', True, 10) # type: str
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags0, default_args={}) # type: NITFSecurityTags0
ENCRYP = _StringEnumDescriptor(
'ENCRYP', True, 1, {'0'}, default_value='0',
docstring='Encryption.') # type: str
LFS = _StringDescriptor('LFS', True, 1) # type: str
LCW = _StringDescriptor('LCW', True, 2, default_value='00') # type: str
LCH = _StringDescriptor('LCH', True, 2, default_value='00') # type: str
LDLVL = _IntegerDescriptor(
'LDLVL', True, 3, default_value=1) # type: int
LALVL = _IntegerDescriptor(
'LALVL', True, 3, default_value=1) # type: int
LLOC = _StringDescriptor('LLOC', True, 10) # type: str
LTC = _RawDescriptor(
'LTC', True, 3, default_value=b'\x00\x00\x00') # type: bytes
LBC = _RawDescriptor(
'LBC', True, 3, default_value=b'\xff\xff\xff') # type: bytes
UserHeader = _NITFElementDescriptor(
'UserHeader', True, UserHeaderType, default_args={},
docstring='User defined header.') # type: UserHeaderType
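# Illustrative sketch (editor's addition): LTC and LBC are raw 3-byte RGB values,
# so they are assigned as bytes rather than text. Field values are hypothetical.
def _example_label_colors():
    header = LabelSegmentHeader(LID='LBL0000001', LTC=b'\xff\x00\x00', LBC=b'\x00\x00\x00')
    return header.to_bytes()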
| 2,056 | 40.979592 | 88 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/des.py | """
The data extension header element definition.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from typing import Union
from .base import BaseNITFElement, NITFElement, Unstructured, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor, \
_parse_str, _parse_int, _parse_nitf_element
from .security import NITFSecurityTags, NITFSecurityTags0
logger = logging.getLogger(__name__)
class XMLDESSubheader(NITFElement):
"""
The standard XML Data Extension user header used in SICD and SIDD, described
in SICD standard 2014-09-30, Volume II, page 29
"""
_ordering = (
'DESSHL', 'DESCRC', 'DESSHFT', 'DESSHDT',
'DESSHRP', 'DESSHSI', 'DESSHSV', 'DESSHSD',
'DESSHTN', 'DESSHLPG', 'DESSHLPT', 'DESSHLI',
'DESSHLIN', 'DESSHABS')
_lengths = {
'DESSHL': 4, 'DESCRC': 5, 'DESSHFT': 8, 'DESSHDT': 20,
'DESSHRP': 40, 'DESSHSI': 60, 'DESSHSV': 10,
'DESSHSD': 20, 'DESSHTN': 120, 'DESSHLPG': 125,
'DESSHLPT': 25, 'DESSHLI': 20, 'DESSHLIN': 120,
'DESSHABS': 200}
DESSHFT = _StringDescriptor(
'DESSHFT', True, 8, default_value='XML',
docstring='XML File Type. Data in this field shall be representative of the XML File Type. '
'Examples :code:`XSD, XML, DTD, XSL, XSLT`.') # type: str
DESSHDT = _StringDescriptor(
'DESSHDT', True, 20, default_value='',
docstring='Date and Time. This field shall contain the time (UTC) of the XML files '
'origination in the format :code:`YYYY-MM-DDThh:mm:ssZ`.') # type: str
DESSHRP = _StringDescriptor(
'DESSHRP', True, 40, default_value='',
docstring='Responsible Party - Organization Identifier. Identification of the '
'organization responsible for the content of the DES.') # type: str
DESSHSI = _StringDescriptor(
'DESSHSI', True, 60, default_value='',
docstring='Specification Identifier. Name of the specification used for the '
'XML data content.') # type: str
DESSHSV = _StringDescriptor(
'DESSHSV', True, 10, default_value='',
docstring='Specification Version. Version or edition of the specification.') # type: str
DESSHSD = _StringDescriptor(
'DESSHSD', True, 20, default_value='',
docstring='Specification Date. Version or edition date for the specification '
'in the format :code:`YYYY-MM-DDThh:mm:ssZ`.') # type: str
DESSHTN = _StringDescriptor(
'DESSHTN', True, 120, default_value='',
docstring='Target Namespace. Identification of the target namespace, if any, '
'designated within the XML data content.') # type: str
DESSHLPG = _StringDescriptor(
'DESSHLPG', True, 125, default_value='',
docstring='Location - Polygon. Five-point boundary enclosing the area applicable to the '
'DES, expressed as the closed set of coordinates of the polygon (last point '
'replicates first point). **NOTE** This is only an approximate reference so '
'specifying the coordinate reference system is unnecessary.\n'
'Recorded as paired latitude and longitude values in decimal degrees with '
'no separator. Each latitude and longitude value includes an explicit :code:`+` '
'or :code:`-`.\n'
'The precision for recording the values in the subheader is dictated by the field '
'size constraint.') # type: str
DESSHLPT = _StringDescriptor(
'DESSHLPT', True, 25, default_value='',
docstring='Location - Point. Single geographic point applicable to the DES.') # type: str
DESSHLI = _StringDescriptor(
'DESSHLI', True, 20, default_value='',
docstring='Location - Identifier. Identifier used to represent a geographic area. An '
'alphanumeric value identifying an instance in the designated namespace. When '
'this field is recorded with other than the default value, the Location Identifier '
'Namespace URI shall also be recorded.') # type: str
DESSHLIN = _StringDescriptor(
'DESSHLIN', True, 120, default_value='',
docstring='Location Identifier Namespace URI. URI for the Namespace where the Location '
'Identifier is described.') # type: str
DESSHABS = _StringDescriptor(
'DESSHABS', True, 200, default_value='',
docstring='Abstract. Brief narrative summary of the content of the DES.') # type: str
def __init__(self, **kwargs):
self._DESSHL = 773
self._DESCRC = 99999
super(XMLDESSubheader, self).__init__(**kwargs)
@property
def DESSHL(self):
"""
int: User defined subheader length
"""
return self._DESSHL
@DESSHL.setter
def DESSHL(self, value):
self._DESSHL = 773
@property
def DESCRC(self):
"""
int: Cyclic redundancy check code, or 99999 when CRC not calculated/used.
"""
return self._DESCRC
@DESCRC.setter
def DESCRC(self, value):
self._DESCRC = 99999
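# Illustrative sketch (editor's addition): the XML DES user header renders to a
# fixed 777 bytes, with DESSHL recording 773 (everything after the 4-byte DESSHL
# field itself). That fixed length is what _load_header_data below keys on when
# promoting a raw DESUserHeader to this structured form. Field values are
# hypothetical.
def _example_xml_des_subheader():
    hdr = XMLDESSubheader(DESSHSI='SICD Volume 1 Design & Implementation Description Document',
                          DESSHSV='1.2.1')
    assert hdr.get_bytes_length() == 777 and hdr.DESSHL == 773
    return hdr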
##########
# DES - NITF 2.1 version
class DESUserHeader(Unstructured):
_size_len = 4
class DataExtensionHeader(NITFElement):
"""
The data extension subheader - see standards document Joint BIIF Profile (JBP) for more
information.
"""
_ordering = ('DE', 'DESID', 'DESVER', 'Security', 'DESOFLW', 'DESITEM', 'UserHeader')
_lengths = {'DE': 2, 'DESID': 25, 'DESVER': 2, 'DESOFLW': 6, 'DESITEM': 3}
DE = _StringEnumDescriptor(
'DE', True, 2, {'DE', }, default_value='DE',
docstring='File part type.') # type: str
DESVER = _IntegerDescriptor(
'DESVER', True, 2, default_value=1,
docstring='Version of the Data Definition. This field shall contain the alphanumeric '
'version number of the use of the tag. The version number is assigned as '
'part of the registration process.') # type: int
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The security tags.') # type: NITFSecurityTags
def __init__(self, **kwargs):
self._DESID = None
self._DESOFLW = None
self._DESITEM = None
self._UserHeader = None
super(DataExtensionHeader, self).__init__(**kwargs)
@property
def DESID(self):
"""
str: Unique DES Type Identifier. This field shall contain a valid alphanumeric
identifier properly registered with the ISMC.
"""
return self._DESID
@DESID.setter
def DESID(self, value):
value = _parse_str(value, 25, 'XML_DATA_CONTENT', 'DESID', self)
self._DESID = value
if value == 'TRE_OVERFLOW':
if self.DESOFLW is None:
self._DESOFLW = ''
if self.DESITEM is None:
self._DESITEM = 0
else:
self._DESOFLW = None
self._DESITEM = None
@property
def DESOFLW(self):
"""
None|str: DES Overflowed Header Type. This field shall be populated if
`DESID = "TRE_OVERFLOW"`.
Its presence indicates that the DES contains a TRE that would not fit in the file
header or segment subheader where it would ordinarily be located. Its value indicates
the segment type to which the enclosed TRE is relevant. If populated, must be one of
:code:`{"XHD", "IXSHD", "SXSHD", "TXSHD", "UDHD", "UDID"}`.
"""
return self._DESOFLW
@DESOFLW.setter
def DESOFLW(self, value):
value = _parse_str(value, 6, None, 'DESOFLW', self)
if self._DESID == 'TRE_OVERFLOW':
if value is None:
logger.error(
'DESOFLW value is None, but DESID == "TRE_OVERFLOW".\n\t'
'This must be resolved.')
self._DESOFLW = ''
elif value not in {'XHD', 'IXSHD', 'SXSHD', 'TXSHD', 'UDHD', 'UDID'}:
                logger.error(
                    "DESOFLW got value {},\n\t"
                    "but must be one of ('XHD', 'IXSHD', 'SXSHD', 'TXSHD', 'UDHD', 'UDID').\n\t"
                    "This must be resolved.".format(value))
self._DESOFLW = ''
else:
self._DESOFLW = value
else:
if value is not None:
logger.error(
'DESID != "TRE_OVERFLOW",\n\t'
'but DESOFLW value is not None.\n\t'
'This is invalid, so setting DESOFLW to None')
self._DESOFLW = None
@property
def DESITEM(self):
"""
None|int: DES Data Item Overflowed. This field shall be present if `DESOFLW` is present.
It shall contain the number of the data item in the file, of the type indicated in
`DESOFLW` to which the TRE in the segment apply.
"""
return self._DESITEM
@DESITEM.setter
def DESITEM(self, value):
value = _parse_int(value, 3, None, 'DESITEM', self)
if self._DESID == 'TRE_OVERFLOW':
if value is None:
logger.error(
'DESITEM value is None, but DESID == "TRE_OVERFLOW".\n\t'
'This must be resolved.')
self._DESITEM = 0
else:
self._DESITEM = value
else:
if value is not None:
logger.error(
'DESID != "TRE_OVERFLOW", but DESITEM value is not None.\n\t'
'This is invalid, so setting DESITEM to None')
self._DESITEM = None
@property
def UserHeader(self): # type: () -> Union[DESUserHeader, XMLDESSubheader]
"""
DESUserHeader: The DES user header.
"""
return self._UserHeader
@UserHeader.setter
def UserHeader(self, value):
if not isinstance(value, BaseNITFElement):
value = _parse_nitf_element(value, DESUserHeader, {}, 'UserHeader', self)
self._UserHeader = value
self._load_header_data()
def _load_header_data(self):
"""
Load any user defined header specifics.
Returns
-------
None
"""
if not isinstance(self._UserHeader, DESUserHeader):
return
if self.DESID.strip() == 'XML_DATA_CONTENT':
# try loading sicd
if self._UserHeader.get_bytes_length() == 777:
# It could be a version 1.0 or greater SICD
data = self._UserHeader.to_bytes()
try:
data = XMLDESSubheader.from_bytes(data, 0)
self._UserHeader = data
except Exception as e:
logger.error(
'DESID is "XML_DATA_CONTENT" and data is the right length for SICD,\n\t'
'but parsing failed with error {}'.format(e))
elif self.DESID.strip() == 'STREAMING_FILE_HEADER':
# LOW Priority - I think that this is deprecated?
pass
def _get_attribute_length(self, fld):
if fld == 'DESOFLW':
return 0 if self._DESOFLW is None else self._lengths['DESOFLW']
elif fld == 'DESITEM':
return 0 if self._DESITEM is None else self._lengths['DESITEM']
else:
return super(DataExtensionHeader, self)._get_attribute_length(fld)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'UserHeader':
val = DESUserHeader.from_bytes(value, start)
fields['UserHeader'] = val
return start + val.get_bytes_length()
elif attribute == 'DESID':
val = value[start:start+cls._lengths['DESID']].decode('utf-8')
fields['DESID'] = val
if val.strip() != 'TRE_OVERFLOW':
fields['DESOFLW'] = None
fields['DESITEM'] = None
return start+cls._lengths['DESID']
else:
return super(DataExtensionHeader, cls)._parse_attribute(fields, attribute, value, start)
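# Illustrative sketch (editor's addition): DESOFLW and DESITEM exist on the wire
# only for TRE_OVERFLOW extensions, and the DESID setter above keeps them
# consistent. Field values are hypothetical.
def _example_overflow_fields():
    des = DataExtensionHeader(DESID='TRE_OVERFLOW')
    des.DESOFLW = 'UDHD'   # the overflowed TRE originated in the user-defined file header
    des.DESITEM = 1        # ...and applies to the first such item in the file
    # re-assigning a non-overflow DESID clears both conditional fields
    des.DESID = 'XML_DATA_CONTENT'
    assert des.DESOFLW is None and des.DESITEM is None
    return des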
##########
# DES - NITF 2.0 version
class DataExtensionHeader0(NITFElement):
"""
    The data extension subheader for NITF version 2.0 - see standards document
    MIL-STD-2500A for more information.
"""
_ordering = ('DE', 'DESTAG', 'DESVER', 'Security', 'DESOFLW', 'DESITEM', 'UserHeader')
_lengths = {'DE': 2, 'DESTAG': 25, 'DESVER': 2, 'DESOFLW': 6, 'DESITEM': 3}
DE = _StringEnumDescriptor(
'DE', True, 2, {'DE', }, default_value='DE',
docstring='File part type.') # type: str
DESVER = _IntegerDescriptor(
'DESVER', True, 2, default_value=1,
docstring='Version of the Data Definition. This field shall contain the alphanumeric '
'version number of the use of the tag. The version number is assigned as '
'part of the registration process.') # type: int
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags0, default_args={},
docstring='The security tags.') # type: NITFSecurityTags0
def __init__(self, **kwargs):
self._DESTAG = None
self._DESOFLW = None
self._DESITEM = None
self._UserHeader = None
super(DataExtensionHeader0, self).__init__(**kwargs)
@property
def DESTAG(self):
"""
str: Unique DES Type Identifier. This field shall contain a valid alphanumeric
identifier properly registered with the ISMC.
"""
return self._DESTAG
@DESTAG.setter
def DESTAG(self, value):
value = _parse_str(value, 25, 'XML_DATA_CONTENT', 'DESTAG', self)
self._DESTAG = value
if value.strip() in ['TRE_OVERFLOW', 'Registered Extensions', 'Controlled Extensions']:
if self.DESOFLW is None:
self._DESOFLW = ''
if self.DESITEM is None:
self._DESITEM = 0
else:
self._DESOFLW = None
self._DESITEM = None
@property
def DESOFLW(self):
"""
None|str: DES Overflowed Header Type. This field shall be populated if
`DESTAG in ['TRE_OVERFLOW', 'Registered Extensions', 'Controlled Extensions']`.
Its presence indicates that the DES contains a TRE that would not fit in the file
header or segment subheader where it would ordinarily be located. Its value indicates
the segment type to which the enclosed TRE is relevant. If populated, must be one of
:code:`{"XHD", "IXSHD", "SXSHD", "TXSHD", "UDHD", "UDID"}`.
"""
return self._DESOFLW
@DESOFLW.setter
def DESOFLW(self, value):
value = _parse_str(value, 6, None, 'DESOFLW', self)
if self._DESTAG.strip() in ['TRE_OVERFLOW', 'Registered Extensions', 'Controlled Extensions']:
if value is None:
logger.error(
'DESOFLW value is None,\n\t'
'but DESTAG in [TRE_OVERFLOW, Registered Extensions, Controlled Extensions].\n\t'
'This must be resolved.')
self._DESOFLW = ''
elif value not in {'XHD', 'IXSHD', 'SXSHD', 'TXSHD', 'UDHD', 'UDID'}:
                logger.error(
                    "DESOFLW got value {},\n\t"
                    "but must be one of ('XHD', 'IXSHD', 'SXSHD', 'TXSHD', 'UDHD', 'UDID').\n\t"
                    "This must be resolved.".format(value))
self._DESOFLW = ''
else:
self._DESOFLW = value
else:
if value is not None:
logger.error(
'DESTAG not in [TRE_OVERFLOW, Registered Extensions, Controlled Extensions],\n\t'
'but DESOFLW value is not None.\n\tThis is invalid, so setting DESOFLW to None')
self._DESOFLW = None
@property
def DESITEM(self):
"""
None|int: DES Data Item Overflowed. This field shall be present if `DESOFLW` is present.
It shall contain the number of the data item in the file, of the type indicated in
`DESOFLW` to which the TRE in the segment apply.
"""
return self._DESITEM
@DESITEM.setter
def DESITEM(self, value):
value = _parse_int(value, 3, None, 'DESITEM', self)
if self._DESTAG.strip() in ['TRE_OVERFLOW', 'Registered Extensions', 'Controlled Extensions']:
if value is None:
logger.error(
'DESITEM value is None,\n\t'
'but DESTAG in [TRE_OVERFLOW, Registered Extensions, Controlled Extensions].\n\t'
'This must be resolved.')
self._DESITEM = 0
else:
self._DESITEM = value
else:
if value is not None:
logger.error(
'DESTAG not in [TRE_OVERFLOW, Registered Extensions, Controlled Extensions],\n\t'
'but DESITEM value is not None.\n\tThis is invalid, so setting DESITEM to None')
self._DESITEM = None
@property
def UserHeader(self): # type: () -> Union[DESUserHeader, XMLDESSubheader]
"""
DESUserHeader: The DES user header.
"""
return self._UserHeader
@UserHeader.setter
def UserHeader(self, value):
if not isinstance(value, BaseNITFElement):
value = _parse_nitf_element(value, DESUserHeader, {}, 'UserHeader', self)
self._UserHeader = value
self._load_header_data()
def _load_header_data(self):
"""
Load any user defined header specifics.
Returns
-------
None
"""
if not isinstance(self._UserHeader, DESUserHeader):
return
if self.DESTAG.strip() == 'XML_DATA_CONTENT':
# try loading sicd
if self._UserHeader.get_bytes_length() == 777:
# It could be a version 1.0 or greater SICD
data = self._UserHeader.to_bytes()
try:
data = XMLDESSubheader.from_bytes(data, 0)
self._UserHeader = data
except Exception as e:
logger.error(
'DESTAG is "XML_DATA_CONTENT" and data is the right length for SICD,\n\t'
'but parsing failed with error {}'.format(e))
elif self.DESTAG.strip() == 'STREAMING_FILE_HEADER':
# LOW Priority - I think that this is deprecated?
pass
def _get_attribute_length(self, fld):
if fld == 'DESOFLW':
return 0 if self._DESOFLW is None else self._lengths['DESOFLW']
elif fld == 'DESITEM':
return 0 if self._DESITEM is None else self._lengths['DESITEM']
else:
return super(DataExtensionHeader0, self)._get_attribute_length(fld)
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'UserHeader':
val = DESUserHeader.from_bytes(value, start)
fields['UserHeader'] = val
return start + val.get_bytes_length()
elif attribute == 'DESTAG':
val = value[start:start+cls._lengths['DESTAG']].decode('utf-8')
fields['DESTAG'] = val
if val.strip() not in ['TRE_OVERFLOW', 'Registered Extensions', 'Controlled Extensions']:
fields['DESOFLW'] = None
fields['DESITEM'] = None
return start+cls._lengths['DESTAG']
else:
return super(DataExtensionHeader0, cls)._parse_attribute(fields, attribute, value, start)
| 19,830 | 38.741483 | 102 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/__init__.py |
__classification__ = "UNCLASSIFIED"
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/res.py | """
The reserved extension subheader definitions.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
from .base import BaseNITFElement, NITFElement, Unstructured, _IntegerDescriptor,\
_StringDescriptor, _StringEnumDescriptor, _NITFElementDescriptor, \
_parse_nitf_element
from .security import NITFSecurityTags, NITFSecurityTags0
class RESUserHeader(Unstructured):
_size_len = 4
class ReservedExtensionHeader(NITFElement):
"""
The reserved extension subheader - see standards document Joint BIIF Profile (JBP) for more
information.
"""
_ordering = ('RE', 'RESID', 'RESVER', 'Security', 'UserHeader')
_lengths = {'RE': 2, 'RESID': 25, 'RESVER': 2}
RE = _StringEnumDescriptor(
'RE', True, 2, {'RE', }, default_value='RE',
docstring='File part type.') # type: str
RESID = _StringDescriptor(
'RESID', True, 25, default_value='',
docstring='Unique RES Type Identifier. This field shall contain a valid alphanumeric '
'identifier properly registered with the ISMC.') # type: str
RESVER = _IntegerDescriptor(
'RESVER', True, 2, default_value=1,
docstring='Version of the Data Definition. This field shall contain the alphanumeric version '
'number of the use of the tag. The version number is assigned as part of the '
'registration process.') # type: int
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags, default_args={},
docstring='The security tags.') # type: NITFSecurityTags
def __init__(self, **kwargs):
self._RESID = None
self._UserHeader = None
super(ReservedExtensionHeader, self).__init__(**kwargs)
@property
def UserHeader(self): # type: () -> RESUserHeader
"""
RESUserHeader: The RES user header.
"""
return self._UserHeader
@UserHeader.setter
def UserHeader(self, value):
if not isinstance(value, BaseNITFElement):
value = _parse_nitf_element(value, RESUserHeader, {}, 'UserHeader', self)
self._UserHeader = value
self._load_header_data()
def _load_header_data(self):
"""
Load any user defined header specifics.
Returns
-------
None
"""
pass
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'UserHeader':
val = RESUserHeader.from_bytes(value, start)
fields['UserHeader'] = val
return start + val.get_bytes_length()
else:
return super(ReservedExtensionHeader, cls)._parse_attribute(fields, attribute, value, start)
class ReservedExtensionHeader0(NITFElement):
"""
The reserved extension subheader for NITF version 2.0 - see standards
document MIL-STD-2500A for more information.
"""
_ordering = ('RE', 'RESID', 'RESVER', 'Security', 'UserHeader')
_lengths = {'RE': 2, 'RESID': 25, 'RESVER': 2}
RE = _StringEnumDescriptor(
'RE', True, 2, {'RE', }, default_value='RE',
docstring='File part type.') # type: str
RESID = _StringDescriptor(
'RESID', True, 25, default_value='',
docstring='Unique RES Type Identifier. This field shall contain a valid alphanumeric '
'identifier properly registered with the ISMC.') # type: str
RESVER = _IntegerDescriptor(
'RESVER', True, 2, default_value=1,
docstring='Version of the Data Definition. This field shall contain the alphanumeric version '
'number of the use of the tag. The version number is assigned as part of the '
'registration process.') # type: int
Security = _NITFElementDescriptor(
'Security', True, NITFSecurityTags0, default_args={},
docstring='The security tags.') # type: NITFSecurityTags0
def __init__(self, **kwargs):
self._RESID = None
self._UserHeader = None
super(ReservedExtensionHeader0, self).__init__(**kwargs)
@property
def UserHeader(self): # type: () -> RESUserHeader
"""
RESUserHeader: The RES user header.
"""
return self._UserHeader
@UserHeader.setter
def UserHeader(self, value):
if not isinstance(value, BaseNITFElement):
value = _parse_nitf_element(value, RESUserHeader, {}, 'UserHeader', self)
self._UserHeader = value
self._load_header_data()
def _load_header_data(self):
"""
Load any user defined header specifics.
Returns
-------
None
"""
pass
@classmethod
def _parse_attribute(cls, fields, attribute, value, start):
if attribute == 'UserHeader':
val = RESUserHeader.from_bytes(value, start)
fields['UserHeader'] = val
return start + val.get_bytes_length()
else:
return super(ReservedExtensionHeader0, cls)._parse_attribute(fields, attribute, value, start)
| 5,073 | 33.517007 | 105 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/tre_elements.py | """
Module contained elements for defining TREs - really intended as read only objects.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
from collections import OrderedDict
from typing import Union, List
from ..base import TRE
logger = logging.getLogger(__name__)
def _parse_type(typ_string, leng, value, start):
"""
Parameters
----------
typ_string : str
leng : int
value : bytes
start : int
Returns
-------
str|int|bytes
"""
byt = value[start:start + leng]
if typ_string == 's':
return byt.decode('utf-8').strip()
elif typ_string == 'd':
return int(byt)
elif typ_string == 'b':
return byt
else:
raise ValueError('Got unrecognized type string {}'.format(typ_string))
def _create_format(typ_string, leng):
if typ_string == 's':
return '{0:' + '{0:d}'.format(leng) + 's}'
elif typ_string == 'd':
return '{0:0' + '{0:d}'.format(leng) + 'd}'
else:
        raise ValueError('Unknown typ_string {}'.format(typ_string))
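# Illustrative sketch (editor's addition): _parse_type and _create_format are
# inverses over the fixed-width encodings used by TREs, e.g. a 3-byte integer
# field renders with '{0:03d}' and parses back with int().
def _example_fixed_width_roundtrip():
    raw = b'007ABC'
    num = _parse_type('d', 3, raw, 0)   # -> 7
    txt = _parse_type('s', 3, raw, 3)   # -> 'ABC'
    assert _create_format('d', 3).format(num).encode('utf-8') == b'007'
    assert _create_format('s', 3).format(txt).encode('utf-8') == b'ABC'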
class TREElement(object):
"""
Basic TRE element class
"""
def __init__(self):
self._field_ordering = []
self._field_format = {}
self._bytes_length = 0
def __str__(self):
        return '{}({})'.format(self.__class__.__name__, self.to_dict())
def __repr__(self):
return '{0:s}(b"'.format(self.__class__.__name__) + self.to_bytes().decode() + '")'
def add_field(self, attribute, typ_string, leng, value):
"""
Add a field/attribute to the object - as we deserialize.
Parameters
----------
attribute : str
The new field/attribute name for out object instance.
typ_string : str
One of 's' (string attribute), 'd' (integer attribute), or 'b' raw/bytes attribute
leng : int
The length in bytes of the representation of this attribute
value : bytes
The bytes array of the object we are deserializing
Returns
-------
None
"""
if hasattr(self, attribute):
logger.error(
'This instance of TRE element {} already has an attribute {},\n\t'
'but the `add_field()` method is being called for this attribute name again.\n\t'
'This is almost certainly an error.'.format(self.__class__, attribute))
try:
val = _parse_type(typ_string, leng, value, self._bytes_length)
setattr(self, attribute, val)
except Exception as e:
raise ValueError(
'Failed creating field {} with exception \n\t{}'.format(attribute, e))
self._bytes_length += leng
self._field_ordering.append(attribute)
self._field_format[attribute] = _create_format(typ_string, leng)
def add_loop(self, attribute, length, child_type, value, *args):
"""
Add an attribute from a loop construct of a given type to the object - as we deserialize.
Parameters
----------
attribute : str
The new field/attribute name for out object instance.
length : int
The number of loop iterations present.
child_type : type
The type of the child - must extend TREElement
value : bytes
The bytes array of the object we are deserializing
args
Any optional positional arguments that the child_type constructor should have.
Returns
-------
None
"""
try:
obj = TRELoop(length, child_type, value, self._bytes_length, *args)
setattr(self, attribute, obj)
except Exception as e:
raise ValueError(
'Failed creating loop {} of type {} with exception\n\t{}'.format(attribute, child_type, e))
self._bytes_length += obj.get_bytes_length()
self._field_ordering.append(attribute)
def _attribute_to_bytes(self, attribute):
"""
Get byte representation for the given attribute.
Parameters
----------
attribute : str
Returns
-------
bytes
"""
val = getattr(self, attribute, None)
if val is None:
return b''
elif isinstance(val, TREElement):
return val.to_bytes()
elif isinstance(val, bytes):
return val
elif isinstance(val, (int, str)):
return self._field_format[attribute].format(val).encode('utf-8')
else:
raise TypeError('Got unhandled type {}'.format(type(val)))
def to_dict(self):
"""
Create a dictionary representation of the object.
Returns
-------
dict
"""
out = OrderedDict()
for fld in self._field_ordering:
val = getattr(self, fld)
if val is None or isinstance(val, (bytes, str, int)):
out[fld] = val
elif isinstance(val, TREElement):
out[fld] = val.to_dict()
else:
raise TypeError('Unhandled type {}'.format(type(val)))
return out
def get_bytes_length(self):
"""
The length in bytes of the serialized representation.
Returns
-------
int
"""
return self._bytes_length
def to_bytes(self):
"""
Serialize to bytes.
Returns
-------
bytes
"""
items = [self._attribute_to_bytes(fld) for fld in self._field_ordering]
return b''.join(items)
def to_json(self):
"""
Gets a json representation of this element.
Returns
-------
dict|list
"""
out = OrderedDict()
for fld in self._field_ordering:
value = getattr(self, fld)
if isinstance(value, TREElement):
out[fld] = value.to_json()
else:
                out[fld] = value
        return out
class TRELoop(TREElement):
"""
Provides the TRE loop construct
"""
def __init__(self, length, child_type, value, start, *args, **kwargs):
"""
Parameters
----------
length : int
child_type : type
value : bytes
start : int
args
optional positional args for child class construction
kwargs
optional keyword arguments for child class construction
"""
if not issubclass(child_type, TREElement):
raise TypeError('child_class must be a subclass of TREElement.')
super(TRELoop, self).__init__()
self._data = []
loc = start
for i in range(length):
entry = child_type(value[loc:], *args, **kwargs)
leng = entry.get_bytes_length()
self._bytes_length += leng
loc += leng
self._data.append(entry)
def to_dict(self):
return [entry.to_dict() for entry in self._data]
def to_bytes(self):
return b''.join(entry.to_bytes() for entry in self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, item): # type: (Union[int, slice]) -> Union[TREElement, List[TREElement]]
return self._data[item]
def to_json(self):
"""
Gets a json representation of this element.
Returns
-------
dict|list
"""
return [entry.to_json() for entry in self._data]
class TREExtension(TRE):
"""
Extend this object to provide concrete TRE implementations.
"""
__slots__ = ('_data', )
_tag_value = None
_data_type = None
def __init__(self, value):
if not issubclass(self._data_type, TREElement):
raise TypeError('_data_type must be a subclass of TREElement. Got type {}'.format(self._data_type))
if not isinstance(self._tag_value, str):
raise TypeError('_tag_value must be a string')
if len(self._tag_value) > 6:
raise ValueError('Tag value must have 6 or fewer characters.')
self._data = None
self.DATA = value
@property
def TAG(self):
return self._tag_value
@property
def DATA(self): # type: () -> _data_type
return self._data
@DATA.setter
def DATA(self, value):
# type: (Union[bytes, _data_type]) -> None
if isinstance(value, self._data_type):
self._data = value
elif isinstance(value, bytes):
self._data = self._data_type(value)
else:
raise TypeError(
'data must be of {} type or a bytes array. '
'Got {}'.format(self._data_type, type(value)))
@property
def EL(self):
if self._data is None:
return 0
return self._data.get_bytes_length()
@classmethod
def minimum_length(cls):
return 11
def get_bytes_length(self):
return 11 + self.EL
def to_bytes(self):
return ('{0:6s}{1:05d}'.format(self.TAG, self.EL)).encode('utf-8') + self._data.to_bytes()
@classmethod
def from_bytes(cls, value, start):
tag_value = value[start:start+6].decode('utf-8').strip()
lng = int(value[start+6:start+11])
if tag_value != cls._tag_value:
raise ValueError('tag value must be {}. Got {}'.format(cls._tag_value, tag_value))
return cls(value[start+11:start+11+lng])
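# Illustrative sketch (editor's addition): a minimal concrete TRE following the
# pattern used throughout sarpy.io.general.nitf_elements.tres.unclass. The tag
# and fields here are hypothetical, not a registered NITF TRE. Note that because
# the registration machinery walks this package, a module-level TREExtension
# subclass like this one would be auto-registered under its class name.
class _ExampleType(TREElement):
    def __init__(self, value):
        super(_ExampleType, self).__init__()
        self.add_field('NAME', 's', 10, value)   # 10-character string field
        self.add_field('COUNT', 'd', 2, value)   # 2-digit integer field
class _EXAMPL(TREExtension):
    _tag_value = 'EXAMPL'
    _data_type = _ExampleType
# _EXAMPL(b'widget    07') then exposes DATA.NAME == 'widget' and DATA.COUNT == 7.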
| 9,519 | 27 | 111 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/registration.py | """
Module for maintaining the TRE registry
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas Mccullough"
import logging
import pkgutil
from importlib import import_module
import inspect
from sarpy.compliance import bytes_to_string
logger = logging.getLogger(__name__)
###############
# module variables
_TRE_Registry = {}
_parsed_package = False
_default_tre_packages = 'sarpy.io.general.nitf_elements.tres'
def register_tre(tre_type, tre_id=None, replace=False):
"""
Register a type in the TRE registry.
Parameters
----------
tre_type : type
A subclass of TRE
tre_id : None|str
The id for the type. The class name will be used if not supplied.
replace : bool
Should we replace if a TRE with given id if already registered?
Returns
-------
None
"""
from sarpy.io.general.nitf_elements.tres.tre_elements import TREExtension
if not issubclass(tre_type, TREExtension):
        raise TypeError('tre_type must be a subclass of sarpy.io.general.nitf_elements.tres.tre_elements.TREExtension')
if tre_type in [TREExtension, ]:
return
if tre_id is None:
tre_id = tre_type.__name__
if not isinstance(tre_id, str):
raise TypeError('tre_id must be a string, got type {}'.format(type(tre_id)))
if tre_id in _TRE_Registry:
if replace:
logger.warning(
'TRE with id {} is already registered.\n\t'
                'We are replacing the definition.'.format(tre_id))
else:
logger.warning(
'TRE with id {} is already registered.\n\t'
                'We are NOT replacing the definition.'.format(tre_id))
return
_TRE_Registry[tre_id] = tre_type
def find_tre(tre_id):
"""
Try to find a TRE with given id in our registry. Return `None` if not found.
Parameters
----------
tre_id : str|bytes
Returns
-------
sarpy.io.general.nitf_elements.base.TRE|None
"""
if not _parsed_package:
parse_package()
if isinstance(tre_id, bytes):
tre_id = bytes_to_string(tre_id)
if not isinstance(tre_id, str):
        raise TypeError('tre_id must be of type string. Got type {}'.format(type(tre_id)))
return _TRE_Registry.get(tre_id.strip(), None)
def parse_package(packages=None):
"""
Walk the packages contained in `packages`, find all subclasses of TRE, and register them.
Returns
-------
None
"""
def evaluate(the_module):
for element_name, element_type in inspect.getmembers(the_module, inspect.isclass):
if issubclass(element_type, TREExtension) and element_type != TREExtension:
register_tre(element_type, tre_id=element_name, replace=False)
from sarpy.io.general.nitf_elements.tres.tre_elements import TREExtension
if packages is None:
global _parsed_package
if _parsed_package:
return # already parsed the default packages
else:
_parsed_package = True
packages = _default_tre_packages
if isinstance(packages, str):
packages = [packages, ]
logger.info('Finding and registering TREs contained in packages {}'.format(packages))
# walk the packages, find all subclasses of TRE, dump them into our dictionary
for start_package in packages:
module = import_module(start_package)
evaluate(module)
for details in pkgutil.walk_packages(module.__path__, start_package + '.'):
_, module_name, is_pkg = details
sub_module = import_module(module_name)
evaluate(sub_module)
logger.info('We now have {} registered TREs'.format(len(_TRE_Registry)))
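# Illustrative sketch (editor's addition): a typical lookup flow. find_tre
# triggers the one-time package walk itself, so no explicit parse_package()
# call is needed first. Arguments here are hypothetical.
def _example_lookup(tre_id, raw_bytes):
    tre_class = find_tre(tre_id)    # e.g. find_tre('GEOLOB') or find_tre(b'GEOLOB')
    if tre_class is None:
        return None                 # unknown tag - caller falls back to an unparsed TRE
    return tre_class.from_bytes(raw_bytes, 0)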
| 3,710 | 27.328244 | 99 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/__init__.py |
__classification__ = "UNCLASSIFIED"
| 37 | 11.666667 | 35 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/SNSRA.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class SENBAND1(TREElement):
def __init__(self, value):
super(SENBAND1, self).__init__()
self.add_field('SENBAND', 's', 10, value)
self.add_field('SEN_BANDWL', 's', 3, value)
self.add_field('SEN_CEN_F', 's', 3, value)
self.add_field('POLARIZATION', 's', 2, value)
self.add_field('AZ_BWIDTH', 's', 6, value)
self.add_field('EL_BWIDTH', 's', 6, value)
self.add_field('DYN_RNGE', 's', 4, value)
self.add_field('SENCALFAC', 's', 15, value)
class SENBAND2(TREElement):
def __init__(self, value):
super(SENBAND2, self).__init__()
self.add_field('SENBAND', 's', 10, value)
self.add_field('SEN_FOV_T', 's', 3, value)
self.add_field('SEN_FOV_T_U', 's', 1, value)
self.add_field('SEN_IFOV_T', 's', 3, value)
self.add_field('SEN_IFOV_T_U', 's', 1, value)
self.add_field('SEN_FOV_CT', 's', 5, value)
self.add_field('SEN_IFOV_CT', 's', 3, value)
self.add_field('SEN_IFOV_CT_U', 's', 1, value)
self.add_field('SEN_FOR_T', 's', 3, value)
self.add_field('SEN_FOR_CT', 's', 3, value)
self.add_field('SEN_L_WAVE', 's', 4, value)
self.add_field('SEN_U_WAVE', 's', 4, value)
self.add_field('SUBBANDS', 's', 3, value)
self.add_field('SENFLENGTH', 's', 4, value)
self.add_field('SENFNUM', 's', 4, value)
self.add_field('LINESAMPLES', 's', 4, value)
self.add_field('DETECTTYPE', 's', 12, value)
self.add_field('POLARIZATION', 's', 2, value)
self.add_field('DYN_RNGE', 's', 4, value)
self.add_field('SENCALFAC', 's', 15, value)
class SNSRAType(TREElement):
def __init__(self, value):
super(SNSRAType, self).__init__()
self.add_field('VERNUM', 's', 4, value)
self.add_field('SENNAME', 's', 20, value)
self.add_field('SENTYPE', 's', 1, value)
self.add_field('SENMODE', 's', 10, value)
self.add_field('SENSCAN', 's', 12, value)
self.add_field('SENSOR_ID', 's', 10, value)
self.add_field('MPLAN', 's', 3, value)
self.add_field('SENSERIAL', 's', 4, value)
self.add_field('SENOPORG', 's', 10, value)
self.add_field('SENMFG', 's', 12, value)
self.add_field('ABSWVER', 's', 7, value)
self.add_field('AVG_ALT', 's', 5, value)
if self.SENTYPE == 'R':
self.add_field('FOC_X', 's', 7, value)
self.add_field('FOC_Y', 's', 7, value)
self.add_field('FOC_Z', 's', 7, value)
self.add_field('NUM_SENBAND', 'd', 1, value)
self.add_loop('SENBANDs', self.NUM_SENBAND, SENBAND1, value)
if self.SENTYPE in ['I', 'E']:
self.add_field('NUM_SENBAND', 'd', 1, value)
self.add_loop('SENBANDs', self.NUM_SENBAND, SENBAND2, value)
class SNSRA(TREExtension):
_tag_value = 'SNSRA'
_data_type = SNSRAType
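# Illustrative sketch (editor's addition): the band fields above are conditional
# on SENTYPE, so consumers should test for attribute presence rather than assume
# the loop exists.
def _example_band_count(snsra_tre):
    bands = getattr(snsra_tre.DATA, 'SENBANDs', None)   # absent unless SENTYPE in ('R', 'I', 'E')
    return 0 if bands is None else len(bands)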
| 3,042 | 39.573333 | 72 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/ACFTA.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class ACFTA_132Type(TREElement):
def __init__(self, value):
super(ACFTA_132Type, self).__init__()
self.add_field('AC_MSN_ID', 's', 10, value)
self.add_field('SCTYPE', 's', 1, value)
self.add_field('SCNUM', 's', 4, value)
self.add_field('SENSOR_ID', 's', 3, value)
self.add_field('PATCH_TOT', 's', 4, value)
self.add_field('MTI_TOT', 's', 3, value)
self.add_field('PDATE', 's', 7, value)
self.add_field('IMHOSTNO', 's', 3, value)
self.add_field('IMREQID', 's', 5, value)
self.add_field('SCENE_SOURCE', 's', 1, value)
self.add_field('MPLAN', 's', 2, value)
self.add_field('ENTLOC', 's', 21, value)
self.add_field('ENTALT', 's', 6, value)
self.add_field('EXITLOC', 's', 21, value)
self.add_field('EXITALT', 's', 6, value)
self.add_field('TMAP', 's', 7, value)
self.add_field('RCS', 's', 3, value)
self.add_field('ROW_SPACING', 's', 7, value)
self.add_field('COL_SPACING', 's', 7, value)
self.add_field('SENSERIAL', 's', 4, value)
self.add_field('ABSWVER', 's', 7, value)
class ACFTA_132(TREExtension):
_tag_value = 'ACFTA'
_data_type = ACFTA_132Type
class ACFTA_154Type(TREElement):
def __init__(self, value):
super(ACFTA_154Type, self).__init__()
self.add_field('AC_MSN_ID', 's', 10, value)
self.add_field('AC_TAIL_NO', 's', 10, value)
self.add_field('SENSOR_ID', 's', 10, value)
self.add_field('SCENE_SOURCE', 's', 1, value)
self.add_field('SCNUM', 's', 6, value)
self.add_field('PDATE', 's', 8, value)
self.add_field('IMHOSTNO', 's', 6, value)
self.add_field('IMREQID', 's', 5, value)
self.add_field('MPLAN', 's', 3, value)
self.add_field('ENTLOC', 's', 21, value)
self.add_field('ENTALT', 's', 6, value)
self.add_field('EXITLOC', 's', 21, value)
self.add_field('EXITALT', 's', 6, value)
self.add_field('TMAP', 's', 7, value)
self.add_field('ROW_SPACING', 's', 7, value)
self.add_field('COL_SPACING', 's', 7, value)
self.add_field('SENSERIAL', 's', 6, value)
self.add_field('ABSWVER', 's', 7, value)
self.add_field('PATCH_TOT', 's', 4, value)
self.add_field('MTI_TOT', 's', 3, value)
class ACFTA_154(TREExtension):
_tag_value = 'ACFTA'
_data_type = ACFTA_154Type
class ACFTA_199Type(TREElement):
def __init__(self, value):
super(ACFTA_199Type, self).__init__()
self.add_field('AC_MSN_ID', 's', 20, value)
self.add_field('AC_TAIL_NO', 's', 10, value)
self.add_field('AC_TO', 's', 12, value)
self.add_field('SENSOR_ID_TYPE', 's', 4, value)
self.add_field('SENSOR_ID', 's', 6, value)
self.add_field('SCENE_SOURCE', 's', 1, value)
self.add_field('SCNUM', 's', 6, value)
self.add_field('PDATE', 's', 8, value)
self.add_field('IMHOSTNO', 's', 6, value)
self.add_field('IMREQID', 's', 5, value)
self.add_field('MPLAN', 's', 3, value)
self.add_field('ENTLOC', 's', 25, value)
self.add_field('ENTELV', 's', 6, value)
self.add_field('ELVUNIT', 's', 1, value)
self.add_field('EXITLOC', 's', 25, value)
self.add_field('EXITELV', 's', 6, value)
self.add_field('TMAP', 's', 7, value)
self.add_field('RESERVD1', 's', 1, value)
self.add_field('ROW_SPACING', 's', 7, value)
self.add_field('COL_SPACING', 's', 7, value)
self.add_field('FOCAL_LENGTH', 's', 6, value)
self.add_field('SENSERIAL', 's', 6, value)
self.add_field('ABSWVER', 's', 7, value)
self.add_field('CAL_DATE', 's', 8, value)
self.add_field('PATCH_TOT', 's', 4, value)
self.add_field('MTI_TOT', 's', 3, value)
class ACFTA_199(TREExtension):
_tag_value = 'ACFTA'
_data_type = ACFTA_199Type
class ACFTA(TREExtension):
_tag_value = 'ACFTA'
def __init__(self):
raise ValueError(
            'Not to be instantiated directly. '
            'Use one of ACFTA_132, ACFTA_154, or ACFTA_199')
@classmethod
def from_bytes(cls, value, start):
"""
Parameters
----------
value : bytes
start : int
Returns
-------
ACFTA_132|ACFTA_154|ACFTA_199
"""
tag_value = value[start:start+6].decode('utf-8').strip()
if tag_value != cls._tag_value:
raise ValueError('tag value must be {}. Got {}'.format(cls._tag_value, tag_value))
lng = int(value[start+6:start+11])
if lng == 132:
return ACFTA_132.from_bytes(value, start)
elif lng == 154:
return ACFTA_154.from_bytes(value, start)
elif lng == 199:
return ACFTA_199.from_bytes(value, start)
else:
raise ValueError('the data must be length 132, 154, or 199. Got {}'.format(lng))
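# Illustrative sketch (editor's addition): the from_bytes dispatch above keys on
# the declared data length, so any ACFTA revision can be handed to the same
# entry point. The payload here is hypothetical (all-blank string fields).
def _example_acfta_dispatch():
    raw = '{0:6s}{1:05d}'.format('ACFTA', 132).encode('utf-8') + b' ' * 132
    tre = ACFTA.from_bytes(raw, 0)
    return type(tre).__name__   # -> 'ACFTA_132'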
| 5,101 | 35.442857 | 94 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/GEOLOB.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class GEOLOBType(TREElement):
def __init__(self, value):
super(GEOLOBType, self).__init__()
self.add_field('ARV', 's', 9, value)
self.add_field('BRV', 's', 9, value)
self.add_field('LSO', 's', 15, value)
self.add_field('PSO', 's', 15, value)
class GEOLOB(TREExtension):
_tag_value = 'GEOLOB'
_data_type = GEOLOBType
| 495 | 23.8 | 51 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/HISTOA.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class IPCOM(TREElement):
def __init__(self, value):
super(IPCOM, self).__init__()
self.add_field('IPCOM', 's', 80, value)
class EVENT(TREElement):
def __init__(self, value):
super(EVENT, self).__init__()
self.add_field('PDATE', 's', 14, value)
self.add_field('PSITE', 's', 10, value)
self.add_field('PAS', 's', 10, value)
self.add_field('NIPCOM', 'd', 1, value)
self.add_loop('IPCOMs', self.NIPCOM, IPCOM, value)
self.add_field('IBPP', 's', 2, value)
self.add_field('IPVTYPE', 's', 3, value)
self.add_field('INBWC', 's', 10, value)
self.add_field('DISP_FLAG', 's', 1, value)
self.add_field('ROT_FLAG', 's', 1, value)
if self.ROT_FLAG == '1':
self.add_field('ROT_ANGLE', 's', 8, value)
self.add_field('ASYM_FLAG', 's', 1, value)
if self.ASYM_FLAG == '1':
self.add_field('ZOOMROW', 's', 7, value)
self.add_field('ZOOMCOL', 's', 7, value)
self.add_field('PROJ_FLAG', 's', 1, value)
self.add_field('SHARP_FLAG', 's', 1, value)
if self.SHARP_FLAG == '1':
self.add_field('SHARPFAM', 's', 2, value)
self.add_field('SHARPMEM', 's', 2, value)
self.add_field('MAG_FLAG', 's', 1, value)
if self.MAG_FLAG == '1':
self.add_field('MAG_LEVEL', 's', 7, value)
self.add_field('DRA_FLAG', 's', 1, value)
if self.DRA_FLAG == '1':
self.add_field('DRA_MULT', 's', 7, value)
self.add_field('DRA_SUB', 's', 5, value)
self.add_field('TTC_FLAG', 's', 1, value)
if self.TTC_FLAG == '1':
self.add_field('TTCFAM', 's', 2, value)
self.add_field('TTCMEM', 's', 2, value)
self.add_field('DEVLUT_FLAG', 's', 1, value)
self.add_field('OBPP', 's', 2, value)
self.add_field('OPVTYPE', 's', 3, value)
self.add_field('OUTBWC', 's', 10, value)
class HISTOAType(TREElement):
def __init__(self, value):
super(HISTOAType, self).__init__()
self.add_field('SYSTYPE', 's', 20, value)
self.add_field('PC', 's', 12, value)
self.add_field('PE', 's', 4, value)
self.add_field('REMAP_FLAG', 's', 1, value)
self.add_field('LUTID', 's', 2, value)
self.add_field('NEVENTS', 'd', 2, value)
self.add_loop('EVENTs', self.NEVENTS, EVENT, value)
class HISTOA(TREExtension):
_tag_value = 'HISTOA'
_data_type = HISTOAType
| 2,630 | 36.585714 | 59 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/RSMPIA.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
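# RSMPIA (RSM Polynomial Identification): low-order row (R*) and column (C*)
# polynomial coefficients, together with the number and size of the image
# sections (RNIS/CNIS/TNIS, RSSIZ/CSSIZ) for the Replacement Sensor Model.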
class RSMPIAType(TREElement):
def __init__(self, value):
super(RSMPIAType, self).__init__()
self.add_field('IID', 's', 80, value)
self.add_field('EDITION', 's', 40, value)
self.add_field('R0', 's', 21, value)
self.add_field('RX', 's', 21, value)
self.add_field('RY', 's', 21, value)
self.add_field('RZ', 's', 21, value)
self.add_field('RXX', 's', 21, value)
self.add_field('RXY', 's', 21, value)
self.add_field('RXZ', 's', 21, value)
self.add_field('RYY', 's', 21, value)
self.add_field('RYZ', 's', 21, value)
self.add_field('RZZ', 's', 21, value)
self.add_field('C0', 's', 21, value)
self.add_field('CX', 's', 21, value)
self.add_field('CY', 's', 21, value)
self.add_field('CZ', 's', 21, value)
self.add_field('CXX', 's', 21, value)
self.add_field('CXY', 's', 21, value)
self.add_field('CXZ', 's', 21, value)
self.add_field('CYY', 's', 21, value)
self.add_field('CYZ', 's', 21, value)
self.add_field('CZZ', 's', 21, value)
self.add_field('RNIS', 's', 3, value)
self.add_field('CNIS', 's', 3, value)
self.add_field('TNIS', 's', 3, value)
self.add_field('RSSIZ', 's', 21, value)
self.add_field('CSSIZ', 's', 21, value)
class RSMPIA(TREExtension):
_tag_value = 'RSMPIA'
_data_type = RSMPIAType
| 1,555 | 35.186047 | 51 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/PIAPRC.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
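# PIAPRC (Profile for Imagery Access - Product): fixed product bookkeeping
# fields followed by counted lists of section titles (ST), requesting
# organizations (RO), keywords (KW), associated reports (AR), and text (AT).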
class ST(TREElement):
def __init__(self, value):
super(ST, self).__init__()
self.add_field('SECTITLE', 's', 40, value)
self.add_field('PPNUM', 's', 5, value)
self.add_field('TPP', 's', 3, value)
class RO(TREElement):
def __init__(self, value):
super(RO, self).__init__()
self.add_field('REQORG', 's', 64, value)
class KW(TREElement):
def __init__(self, value):
super(KW, self).__init__()
self.add_field('KEYWORD', 's', 255, value)
class AR(TREElement):
def __init__(self, value):
super(AR, self).__init__()
self.add_field('ASSRPT', 's', 20, value)
class AT(TREElement):
def __init__(self, value):
super(AT, self).__init__()
self.add_field('ATEXT', 's', 255, value)
class PIAPRCType(TREElement):
def __init__(self, value):
super(PIAPRCType, self).__init__()
self.add_field('ACCID', 's', 64, value)
self.add_field('FMCTL', 's', 32, value)
self.add_field('SDET', 's', 1, value)
self.add_field('PCODE', 's', 2, value)
self.add_field('PSUBE', 's', 6, value)
self.add_field('PIDNM', 's', 20, value)
self.add_field('PNAME', 's', 10, value)
self.add_field('MAKER', 's', 2, value)
self.add_field('CTIME', 's', 14, value)
self.add_field('MAPID', 's', 40, value)
self.add_field('STREP', 'd', 2, value)
self.add_loop('STs', self.STREP, ST, value)
self.add_field('ROREP', 'd', 2, value)
self.add_loop('ROs', self.ROREP, RO, value)
self.add_field('KWREP', 'd', 2, value)
self.add_loop('KWs', self.KWREP, KW, value)
self.add_field('ARREP', 'd', 2, value)
self.add_loop('ARs', self.ARREP, AR, value)
self.add_field('ATREP', 'd', 2, value)
self.add_loop('ATs', self.ATREP, AT, value)
class PIAPRC(TREExtension):
_tag_value = 'PIAPRC'
_data_type = PIAPRCType
| 2,074 | 29.514706 | 51 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/BNDPLB.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
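# BNDPLB (Bounding Polygon): a polygon boundary expressed as NUMPTS
# longitude/latitude point pairs.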
class PT(TREElement):
def __init__(self, value):
super(PT, self).__init__()
self.add_field('LON', 's', 15, value)
self.add_field('LAT', 's', 15, value)
class BNDPLBType(TREElement):
def __init__(self, value):
super(BNDPLBType, self).__init__()
self.add_field('NUMPTS', 'd', 4, value)
self.add_loop('PTs', self.NUMPTS, PT, value)
class BNDPLB(TREExtension):
_tag_value = 'BNDPLB'
_data_type = BNDPLBType
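# Usage sketch (accessor names assumed from the add_field/add_loop conventions
# above, not verified against the TREExtension base class): after parsing, the
# points should be reachable through the loop attribute, e.g.
#     tre = BNDPLB.from_bytes(raw_bytes, 0)
#     first_lon = tre.DATA.PTs[0].LON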
| 596 | 22.88 | 52 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/SNSPSB.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
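# SNSPSB (Sensor Parameters): one or more SNS records, each containing band
# polygons (BP), band definitions (BND), collection geometry/attitude fields,
# and a counted list of auxiliary parameters (AUX).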
class PT(TREElement):
def __init__(self, value):
super(PT, self).__init__()
self.add_field('LON', 's', 15, value)
self.add_field('LAT', 's', 15, value)
class BP(TREElement):
def __init__(self, value):
super(BP, self).__init__()
self.add_field('NUM_PTS', 'd', 2, value)
self.add_loop('PTs', self.NUM_PTS, PT, value)
class BND(TREElement):
def __init__(self, value):
super(BND, self).__init__()
self.add_field('BID', 's', 5, value)
self.add_field('WS1', 's', 5, value)
self.add_field('WS2', 's', 5, value)
class AUX(TREElement):
def __init__(self, value):
super(AUX, self).__init__()
self.add_field('API', 's', 20, value)
self.add_field('APF', 's', 1, value)
self.add_field('UNIAPX', 's', 7, value)
self.add_field('APN', 's', 10, value)
self.add_field('APR', 's', 20, value)
self.add_field('APA', 's', 20, value)
class SNS(TREElement):
def __init__(self, value):
super(SNS, self).__init__()
self.add_field('NUM_BP', 'd', 2, value)
self.add_loop('BPs', self.NUM_BP, BP, value)
self.add_field('NUM_BND', 'd', 2, value)
self.add_loop('BNDs', self.NUM_BND, BND, value)
self.add_field('UNIRES', 's', 3, value)
self.add_field('REX', 's', 6, value)
self.add_field('REY', 's', 6, value)
self.add_field('GSX', 's', 6, value)
self.add_field('GSY', 's', 6, value)
self.add_field('GSL', 's', 12, value)
self.add_field('PLTFM', 's', 8, value)
self.add_field('INS', 's', 8, value)
self.add_field('MOD', 's', 4, value)
self.add_field('PRL', 's', 5, value)
self.add_field('ACT', 's', 18, value)
self.add_field('UNINOA', 's', 3, value)
self.add_field('NOA', 's', 7, value)
self.add_field('UNIANG', 's', 3, value)
self.add_field('ANG', 's', 7, value)
self.add_field('UNIALT', 's', 3, value)
self.add_field('ALT', 's', 9, value)
self.add_field('LONSCC', 's', 10, value)
self.add_field('LATSCC', 's', 10, value)
self.add_field('UNISAE', 's', 3, value)
self.add_field('SAZ', 's', 7, value)
self.add_field('SEL', 's', 7, value)
self.add_field('UNIRPY', 's', 3, value)
self.add_field('ROL', 's', 7, value)
self.add_field('PIT', 's', 7, value)
self.add_field('YAW', 's', 7, value)
self.add_field('UNIPXT', 's', 3, value)
self.add_field('PIXT', 's', 14, value)
self.add_field('UNISPE', 's', 7, value)
self.add_field('ROS', 's', 22, value)
self.add_field('PIS', 's', 22, value)
self.add_field('YAS', 's', 22, value)
self.add_field('NUM_AUX', 'd', 3, value)
self.add_loop('AUXs', self.NUM_AUX, AUX, value)
class SNSPSBType(TREElement):
def __init__(self, value):
super(SNSPSBType, self).__init__()
self.add_field('NUMSNS', 'd', 2, value)
self.add_loop('SNSs', self.NUMSNS, SNS, value)
class SNSPSB(TREExtension):
_tag_value = 'SNSPSB'
_data_type = SNSPSBType
| 3,271 | 33.808511 | 55 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/SENSRA.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
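# SENSRA: sensor location, attitude, and platform motion parameters for an
# airborne collection.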
class SENSRAType(TREElement):
def __init__(self, value):
super(SENSRAType, self).__init__()
self.add_field('REFROW', 's', 8, value)
self.add_field('REFCOL', 's', 8, value)
self.add_field('SNSMODEL', 's', 6, value)
self.add_field('SNSMOUNT', 's', 3, value)
self.add_field('SENSLOC', 's', 21, value)
self.add_field('SNALTSRC', 's', 1, value)
self.add_field('SENSALT', 's', 6, value)
self.add_field('SNALUNIT', 's', 1, value)
self.add_field('SENSAGL', 's', 5, value)
self.add_field('SNSPITCH', 's', 7, value)
self.add_field('SENSROLL', 's', 8, value)
self.add_field('SENSYAW', 's', 8, value)
self.add_field('PLTPITCH', 's', 7, value)
self.add_field('PLATROLL', 's', 8, value)
self.add_field('PLATHDG', 's', 5, value)
self.add_field('GRSPDSRC', 's', 1, value)
self.add_field('GRDSPEED', 's', 6, value)
self.add_field('GRSPUNIT', 's', 1, value)
self.add_field('GRDTRACK', 's', 5, value)
self.add_field('VERTVEL', 's', 5, value)
self.add_field('VERTVELU', 's', 1, value)
self.add_field('SWATHFRM', 's', 4, value)
self.add_field('NSWATHS', 's', 4, value)
self.add_field('SPOTNUM', 's', 3, value)
class SENSRA(TREExtension):
_tag_value = 'SENSRA'
_data_type = SENSRAType
| 1,502 | 36.575 | 51 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/PIAEVA.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
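# PIAEVA (Profile for Imagery Access - Event): the name and type of the event
# associated with the imagery.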
class PIAEVAType(TREElement):
def __init__(self, value):
super(PIAEVAType, self).__init__()
self.add_field('EVENTNAME', 's', 38, value)
self.add_field('EVENTTYPE', 's', 8, value)
class PIAEVA(TREExtension):
_tag_value = 'PIAEVA'
_data_type = PIAEVAType
| 416 | 22.166667 | 51 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/ACFTB.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
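# ACFTB (Aircraft Information): the successor to ACFTA; unlike ACFTA it
# carries explicit unit fields (ELV_UNIT, ROW/COL_SPACING_UNITS) and a
# location accuracy field (LOC_ACCY).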
class ACFTBType(TREElement):
def __init__(self, value):
super(ACFTBType, self).__init__()
self.add_field('AC_MSN_ID', 's', 20, value)
self.add_field('AC_TAIL_NO', 's', 10, value)
self.add_field('AC_TO', 's', 12, value)
self.add_field('SENSOR_ID_TYPE', 's', 4, value)
self.add_field('SENSOR_ID', 's', 6, value)
self.add_field('SCENE_SOURCE', 's', 1, value)
self.add_field('SCNUM', 's', 6, value)
self.add_field('PDATE', 's', 8, value)
self.add_field('IMHOSTNO', 's', 6, value)
self.add_field('IMREQID', 's', 5, value)
self.add_field('MPLAN', 's', 3, value)
self.add_field('ENTLOC', 's', 25, value)
self.add_field('LOC_ACCY', 's', 6, value)
self.add_field('ENTELV', 's', 6, value)
self.add_field('ELV_UNIT', 's', 1, value)
self.add_field('EXITLOC', 's', 25, value)
self.add_field('EXITELV', 's', 6, value)
self.add_field('TMAP', 's', 7, value)
self.add_field('ROW_SPACING', 's', 7, value)
self.add_field('ROW_SPACING_UNITS', 's', 1, value)
self.add_field('COL_SPACING', 's', 7, value)
self.add_field('COL_SPACING_UNITS', 's', 1, value)
self.add_field('FOCAL_LENGTH', 's', 6, value)
self.add_field('SENSERIAL', 's', 6, value)
self.add_field('ABSWVER', 's', 7, value)
self.add_field('CAL_DATE', 's', 8, value)
self.add_field('PATCH_TOT', 's', 4, value)
self.add_field('MTI_TOT', 's', 3, value)
class ACFTB(TREExtension):
_tag_value = 'ACFTB'
_data_type = ACFTBType
| 1,732 | 38.386364 | 58 | py |
sarpy | sarpy-master/sarpy/io/general/nitf_elements/tres/unclass/SECTGA.py |
from ..tre_elements import TREExtension, TREElement
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
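# SECTGA (Secondary Targeting Information): a section identifier (SEC_ID) and
# SEC_BE (presumably the associated BE number), plus a one-byte reserved field.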
class SECTGAType(TREElement):
def __init__(self, value):
super(SECTGAType, self).__init__()
self.add_field('SEC_ID', 's', 12, value)
self.add_field('SEC_BE', 's', 15, value)
self.add_field('RESVD001', 's', 1, value)
class SECTGA(TREExtension):
_tag_value = 'SECTGA'
_data_type = SECTGAType
| 461 | 23.315789 | 51 | py |